Configuration: refactor how it is written and handled
This commit is contained in:
parent
13e993b964
commit
7ff4abc3c0
10 changed files with 130 additions and 61 deletions
91
src/configDefault.ts
Normal file
91
src/configDefault.ts
Normal file
|
@ -0,0 +1,91 @@
|
|||
import { Message } from "discord.js";
|
||||
import {
|
||||
ChatCompletionRequestMessage as OpenAIMessage,
|
||||
CreateChatCompletionRequest as ChatCompletionRequestData,
|
||||
} from "openai";
|
||||
|
||||
/** Complete bot configuration; every field must be present at runtime. */
export interface IConfigRequired {
  /** Options passed to Intl.DateTimeFormat when formatting dates for chat context */
  readonly calendarParams: Intl.DateTimeFormatOptions;
  /** Tokens to authenticate with */
  readonly tokens: {
    readonly Discord: string;
    readonly OpenAI: string;
  };
  /** Messages to append at the start of every chat history when sending to API */
  systemPrompt(context: Message): OpenAIMessage[];
  /** OpenAI model config */
  readonly chatCompletionParams: Omit<ChatCompletionRequestData, "messages" | "function_call" | "functions" | "n">;
  /** Limits for message selection */
  readonly readLimits: {
    /** Maximum message age to include (in milliseconds) */
    readonly time: number;
    /** Maximum number of messages to select (maximum 100) */
    readonly messages: number;
    /** Maximum total token usage for messages (counted locally) */
    readonly tokens: number;
  };
  /** Default limits for a user interacting with the bot */
  readonly userLimits: {
    /** How many requests a user can make if not overridden in a database entry */
    readonly requests: number;
  };
}
|
||||
|
||||
/** User-supplied configuration: any subset of IConfigRequired; missing fields fall back to defaults. */
export type IConfig = Partial<IConfigRequired>;
|
||||
|
||||
export default function newConfig(config?: IConfig): IConfigRequired {
|
||||
return { ...defaultConfig, ...config };
|
||||
}
|
||||
|
||||
function envAsString(key: string): string | undefined {
|
||||
key = key.toLocaleUpperCase();
|
||||
return process.env[key];
|
||||
}
|
||||
|
||||
function envAsNumber(key: string): number | undefined {
|
||||
key = key.toLocaleUpperCase();
|
||||
const value = Number(process.env[key]);
|
||||
return !Number.isNaN(value) ? value : undefined;
|
||||
}
|
||||
|
||||
/**
 * Built-in default configuration. Most scalar values can be seeded from
 * environment variables; the key naming uses double underscores to separate
 * nesting levels (e.g. READ_LIMITS__TIME).
 */
const defaultConfig: IConfigRequired = {
  calendarParams: {
    weekday: "short",
    year: "numeric",
    month: "short",
    day: "numeric",
    hour: "2-digit",
    minute: "2-digit",
    hour12: false,
  },
  tokens: {
    // Default to "" so the object stays well-formed; real tokens must come
    // from the environment or a user-supplied config override.
    Discord: envAsString("TOKENS__DISCORD") ?? "",
    OpenAI: envAsString("TOKENS__OPENAI") ?? "",
  },
  // Context parameter is unused by the default prompt, hence omitted here.
  systemPrompt(): OpenAIMessage[] {
    return [
      {
        role: "system",
        content:
          envAsString("SYSTEM_PROMPT") ??
          `You are GPTcord, an AI built on top of ChatGPT (a large language model trained by OpenAI) for Discord. Answer as concisely as possible.`,
      },
    ];
  },
  chatCompletionParams: {
    model: envAsString("CHAT_COMPLETION_PARAMS__MODEL") ?? "gpt-3.5-turbo",
    max_tokens: envAsNumber("CHAT_COMPLETION_PARAMS__MAX_TOKENS") ?? 384,
    // The following tuning knobs are undefined unless set in the environment,
    // leaving the API's own defaults in effect.
    frequency_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__FREQUENCY_PENALTY"),
    presence_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__PRESENCE_PENALTY"),
    temperature: envAsNumber("CHAT_COMPLETION_PARAMS__TEMPERATURE"),
    top_p: envAsNumber("CHAT_COMPLETION_PARAMS__TOP_P"),
  },
  readLimits: {
    time: envAsNumber("READ_LIMITS__TIME") ?? 60 * 60 * 1000, // 1 hour in ms
    messages: envAsNumber("READ_LIMITS__MESSAGES") ?? 50,
    tokens: envAsNumber("READ_LIMITS__TOKENS") ?? 2048,
  },
  userLimits: {
    requests: envAsNumber("USER_LIMITS__REQUESTS") ?? 25,
  },
};
|
Loading…
Add table
Add a link
Reference in a new issue