// GPTcord — src/configDefault.ts
// Default bot configuration with environment-variable overrides.
// (Snapshot metadata: 2023-09-27 17:14:17 +02:00, 92 lines, 3 KiB, TypeScript.)
import { Message } from "discord.js";
import {
ChatCompletionMessageParam as OpenAIMessage,
ChatCompletionCreateParamsNonStreaming as ChatCompletionRequestData,
} from "openai/resources/chat";
import IQuota from "./IQuota";
import MessageCount from "./quota/messageCount";
/**
 * The complete, fully-resolved bot configuration.
 * Defaults come from `defaultConfig` below; user overrides are merged in
 * by `newConfig`.
 */
export interface IConfigRequired {
  /** Date/time formatting options used when rendering timestamps for the model. */
  readonly calendarParams: Intl.DateTimeFormatOptions;
  /** Tokens to authenticate with */
  readonly tokens: {
    readonly Discord: string;
    readonly OpenAI: string;
  };
  /** Messages to append at the start of every chat history when sending to API */
  systemPrompt(context: Message): OpenAIMessage[];
  /** OpenAI model config (everything except fields the bot itself controls) */
  readonly chatCompletionParams: Omit<ChatCompletionRequestData, "messages" | "function_call" | "functions" | "n">;
  /** Limits for message selection */
  readonly readLimits: {
    /** Maximum message age to include (in milliseconds) */
    readonly time: number;
    /** Maximum number of messages to select (maximum 100) */
    readonly messages: number;
    /** Maximum total token usage for messages (counted locally) */
    readonly tokens: number;
  };
  /**
   * Quota parameters to use when checking limits;
   * ready-made implementations can be found in `./quota`.
   */
  readonly quota: IQuota;
}
/** A user-supplied configuration: any subset of {@link IConfigRequired}. */
export type IConfig = Partial<IConfigRequired>;

/**
 * Builds a complete configuration by overlaying user-supplied values on
 * top of the built-in defaults.
 *
 * Note the merge is shallow: a provided nested object (e.g. `tokens`)
 * replaces the corresponding default object entirely.
 *
 * @param config - Optional partial overrides.
 * @returns A fully-populated configuration object.
 */
export default function newConfig(config?: IConfig): IConfigRequired {
  const merged: IConfigRequired = { ...defaultConfig };
  return Object.assign(merged, config);
}
/**
 * Reads an environment variable, upper-casing the key first.
 *
 * @param key - Variable name; upper-cased before lookup
 *              (e.g. "tokens__discord" -> "TOKENS__DISCORD").
 * @returns The raw string value, or `undefined` when the variable is unset.
 */
function envAsString(key: string): string | undefined {
  // toUpperCase() instead of toLocaleUpperCase(): env keys are ASCII and the
  // mapping must not depend on the host locale (e.g. Turkish dotless-i,
  // where "i".toLocaleUpperCase() is "İ", breaking the lookup).
  return process.env[key.toUpperCase()];
}
/**
 * Reads an environment variable and parses it as a number.
 *
 * @param key - Variable name; upper-cased before lookup.
 * @returns The parsed number, or `undefined` when the variable is unset,
 *          empty/whitespace-only, or not a valid number.
 */
function envAsNumber(key: string): number | undefined {
  // toUpperCase() (not toLocaleUpperCase()) — env keys are ASCII and must
  // not be subject to locale-dependent case mapping.
  const raw = process.env[key.toUpperCase()];
  // Guard against unset AND empty values before parsing: Number("") === 0,
  // which would silently turn an empty variable into a configured value of 0
  // instead of falling through to the caller's `??` default.
  if (raw === undefined || raw.trim() === "") return undefined;
  const value = Number(raw);
  return !Number.isNaN(value) ? value : undefined;
}
/**
 * Built-in defaults. Leaves can be overridden through environment variables
 * (double underscore separates nesting levels, e.g. TOKENS__DISCORD) or
 * through the partial object passed to `newConfig`.
 */
const defaultConfig: IConfigRequired = {
  // Timestamp rendering: short weekday/month, 2-digit time, 24-hour clock.
  calendarParams: {
    weekday: "short",
    year: "numeric",
    month: "short",
    day: "numeric",
    hour: "2-digit",
    minute: "2-digit",
    hour12: false,
  },
  // Empty-string fallbacks keep the object well-typed; actual values are
  // expected to come from the environment.
  tokens: {
    Discord: envAsString("TOKENS__DISCORD") ?? "",
    OpenAI: envAsString("TOKENS__OPENAI") ?? "",
  },
  // Single system message prepended to every chat history; the `context`
  // parameter of the interface signature is unused here.
  systemPrompt(): OpenAIMessage[] {
    return [
      {
        role: "system",
        content:
          envAsString("SYSTEM_PROMPT") ??
          `You are GPTcord, an AI built on top of ChatGPT (a large language model trained by OpenAI) for Discord. Answer as concisely as possible.`,
      },
    ];
  },
  // Optional sampling parameters stay `undefined` when the corresponding
  // env vars are unset, so the API uses its own defaults.
  chatCompletionParams: {
    model: envAsString("CHAT_COMPLETION_PARAMS__MODEL") ?? "gpt-3.5-turbo",
    max_tokens: envAsNumber("CHAT_COMPLETION_PARAMS__MAX_TOKENS") ?? 384,
    frequency_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__FREQUENCY_PENALTY"),
    presence_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__PRESENCE_PENALTY"),
    temperature: envAsNumber("CHAT_COMPLETION_PARAMS__TEMPERATURE"),
    top_p: envAsNumber("CHAT_COMPLETION_PARAMS__TOP_P"),
  },
  readLimits: {
    time: envAsNumber("READ_LIMITS__TIME") ?? 60 * 60 * 1000, // 1 hour in ms
    messages: envAsNumber("READ_LIMITS__MESSAGES") ?? 50,
    tokens: envAsNumber("READ_LIMITS__TOKENS") ?? 2048,
  },
  // Message-count quota; both MessageCount arguments accept undefined and
  // presumably fall back to that class's own defaults — verify in ./quota.
  quota: new MessageCount(envAsNumber("QUOTA__DEFAULT_QUOTA"), envAsNumber("QUOTA__LOOKBACK"))
};