Wroclaw
2a38ae4a95
This also solves the issue where we would request more tokens than the model is capable of handling (over 4096)
5 lines
135 B
TypeScript
5 lines
135 B
TypeScript
import { encode } from "gpt-3-encoder";
|
|
|
|
export default function countTokens(text: string): number {
|
|
return encode(text).length;
|
|
}
|