// GPTcord/src/configDefault.ts
import {
ActivityType
, type PresenceStatusData
, type PresenceData
} from "discord.js";
import {
ChatCompletionMessageParam as OpenAIMessage,
ChatCompletionCreateParamsNonStreaming as ChatCompletionRequestData,
} from "openai/resources/chat";
import IQuota from "./IQuota";
import MessageCount from "./quota/messageCount";
import { apiRequest } from "./execution";
/** Complete configuration shape; every field is required (see IConfig for the partial form). */
export interface IConfigRequired {
  /** Tokens to authenticate with */
  readonly tokens: {
    readonly Discord: string;
    readonly OpenAI: string;
  };
  /** Discord bot status */
  readonly status: PresenceData
  /** Messages to append at the start of every chat history when sending to API */
  systemPrompt(context: apiRequest): OpenAIMessage[];
  /**
   * OpenAI model config.
   * NOTE(review): "tool_call" is not a key of ChatCompletionCreateParams —
   * "tool_choice"/"tools" was probably intended. Omitting a non-existent key is
   * a no-op, so callers can still pass tool_choice/tools; verify against the
   * openai package's type before tightening the Omit list.
   */
  readonly chatCompletionParams: Omit<ChatCompletionRequestData, "messages" | "function_call" | "tool_call" | "functions" | "n">;
  /** Limits for message selection */
  readonly readLimits: {
    /** Maximum message age to include (in milliseconds) */
    readonly time: number;
    /** Maximum number of messages to select (maximum 100) */
    readonly messages: number;
    /** Maximum total token usage for messages (counted locally) */
    readonly tokens: number;
  };
  /**
   * Quota parameters to use when checking limits;
   * implementations can be found in `./quota`
   */
  readonly quota: IQuota;
}
/** User-facing configuration: any subset of IConfigRequired. */
export type IConfig = Partial<IConfigRequired>;

/**
 * Build a complete configuration by overlaying user-supplied values on the
 * built-in defaults. The merge is shallow: a field supplied in `config`
 * replaces the corresponding default field wholesale.
 *
 * @param config optional overrides; omitted fields fall back to defaultConfig
 * @returns a fully-populated configuration object
 */
export default function newConfig(config?: IConfig): IConfigRequired {
  const merged: IConfigRequired = Object.assign({}, defaultConfig, config);
  return merged;
}
/**
 * Check whether an environment variable is set (even if empty).
 *
 * The key is upper-cased first for consistency with the other env helpers in
 * this file (envAsString, envAsNumber, ...), which all normalize their key.
 *
 * @param key environment variable name (case-insensitive)
 * @returns true when the variable exists in process.env
 */
function isEnvDefined(key: string): boolean {
  return process.env[key.toUpperCase()] !== undefined;
}
/**
 * Read an environment variable as a raw string.
 *
 * @param key environment variable name (case-insensitive)
 * @returns the value, or undefined when the variable is unset
 */
function envAsString(key: string): string | undefined {
  // toUpperCase, not toLocaleUpperCase: env keys are ASCII identifiers and
  // must not be transformed locale-dependently (e.g. Turkish "i" -> "İ").
  return process.env[key.toUpperCase()];
}
/**
 * Read an environment variable as a number.
 *
 * @param key environment variable name (case-insensitive)
 * @returns the parsed number, or undefined when the variable is unset,
 *          empty/whitespace, or not numeric
 */
function envAsNumber(key: string): number | undefined {
  const raw = process.env[key.toUpperCase()];
  // Guard unset/empty explicitly: Number("") coerces to 0, so without this
  // check an env var set to the empty string would silently become 0.
  if (raw === undefined || raw.trim() === "") return undefined;
  const value = Number(raw);
  return Number.isNaN(value) ? undefined : value;
}
/**
 * Read an environment variable as a boolean.
 *
 * "false" and "0" (exact, lower-case) map to false; any other set value maps
 * to true.
 *
 * @param key environment variable name (case-insensitive)
 * @returns undefined when the variable is unset, so callers (e.g. the `afk`
 *          presence field) fall back to their own default instead of `true`
 */
function envAsBoolean(key: string): boolean | undefined {
  const value = process.env[key.toUpperCase()];
  // Bug fix: the previous implementation returned `true` for an UNSET
  // variable, despite declaring a `boolean | undefined` return type.
  if (value === undefined) return undefined;
  return value !== "false" && value !== "0";
}
/**
 * Read an environment variable as a discord.js ActivityType.
 * Accepts either the numeric enum value ("0".."5") or the type name
 * (case-insensitive, e.g. "playing", "WATCHING").
 *
 * @param key environment variable name (case-insensitive)
 * @returns the matching ActivityType, or undefined when unset or unrecognized
 */
function envAsActivityType(key: string): ActivityType | undefined {
  const value = process.env[key.toUpperCase()]?.toUpperCase();
  if (value === undefined) return undefined;
  // Both spellings of each activity map to the same enum member.
  const lookup: Record<string, ActivityType> = {
    "0": ActivityType.Playing,
    PLAYING: ActivityType.Playing,
    "1": ActivityType.Streaming,
    STREAMING: ActivityType.Streaming,
    "2": ActivityType.Listening,
    LISTENING: ActivityType.Listening,
    "3": ActivityType.Watching,
    WATCHING: ActivityType.Watching,
    "4": ActivityType.Custom,
    CUSTOM: ActivityType.Custom,
    "5": ActivityType.Competing,
    COMPETING: ActivityType.Competing,
  };
  return lookup[value];
}
/**
 * Read an environment variable as a discord.js presence status.
 *
 * @param key environment variable name (case-insensitive)
 * @returns "online" | "idle" | "dnd" | "invisible" (lower-cased match),
 *          or undefined when unset or unrecognized
 */
function envAsPresenceStatusData(key: string): PresenceStatusData | undefined {
  const value = process.env[key.toUpperCase()]?.toLowerCase();
  const valid: readonly PresenceStatusData[] = ["online", "idle", "dnd", "invisible"];
  // find() yields the matching literal (already typed) or undefined.
  return valid.find((status) => status === value);
}
/**
 * Built-in defaults, sourced from environment variables where present.
 * Env keys mirror the config path with "__" as the separator
 * (e.g. TOKENS__DISCORD -> tokens.Discord).
 */
const defaultConfig: IConfigRequired = {
  tokens: {
    // Empty string when unset; authentication will then fail at startup
    // rather than here.
    Discord: envAsString("TOKENS__DISCORD") ?? "",
    OpenAI: envAsString("TOKENS__OPENAI") ?? "",
  },
  // NOTE(review): the `context` parameter declared by
  // IConfigRequired.systemPrompt is ignored here — the default prompt is
  // context-independent.
  systemPrompt(): OpenAIMessage[] {
    return [
      {
        role: "system",
        content:
          envAsString("SYSTEM_PROMPT") ??
          `You are GPTcord, an AI built on top of ChatGPT (a large language model trained by OpenAI) for Discord. Answer as concisely as possible.`,
      },
    ];
  },
  status: {
    // Only build an activity entry when STATUS__NAME is set; the `as string`
    // assertion is justified by the isEnvDefined guard on the same key.
    activities: isEnvDefined("STATUS__NAME") ? [{
      name: envAsString("STATUS__NAME") as string,
      type: envAsActivityType("STATUS__TYPE") ?? ActivityType.Custom,
      state: envAsString("STATUS__STATE"),
      url: envAsString("STATUS__URL"),
    }] : undefined,
    status: envAsPresenceStatusData("STATUS__STATUS"),
    afk: envAsBoolean("STATUS__AFK"),
  },
  chatCompletionParams: {
    model: envAsString("CHAT_COMPLETION_PARAMS__MODEL") ?? "gpt-3.5-turbo",
    max_tokens: envAsNumber("CHAT_COMPLETION_PARAMS__MAX_TOKENS") ?? 384,
    // The sampling parameters below stay undefined when their env vars are
    // unset, deferring to the API's own defaults.
    frequency_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__FREQUENCY_PENALTY"),
    presence_penalty: envAsNumber("CHAT_COMPLETION_PARAMS__PRESENCE_PENALTY"),
    temperature: envAsNumber("CHAT_COMPLETION_PARAMS__TEMPERATURE"),
    top_p: envAsNumber("CHAT_COMPLETION_PARAMS__TOP_P"),
  },
  readLimits: {
    // Defaults: 1 hour lookback, 50 messages, 2048 locally-counted tokens.
    time: envAsNumber("READ_LIMITS__TIME") ?? 60 * 60 * 1000,
    messages: envAsNumber("READ_LIMITS__MESSAGES") ?? 50,
    tokens: envAsNumber("READ_LIMITS__TOKENS") ?? 2048,
  },
  // NOTE(review): MessageCount(defaultQuota?, lookback?) — argument meanings
  // assumed from the env key names; confirm against ./quota/messageCount.
  quota: new MessageCount(envAsNumber("QUOTA__DEFAULT_QUOTA"), envAsNumber("QUOTA__LOOKBACK"))
};