
Commit 6d8b36d

Prettier

1 parent 84cf23a

5 files changed: +194 −158 lines

package-lock.json

Lines changed: 16 additions & 0 deletions
Some generated files are not rendered by default.

package.json

Lines changed: 7 additions & 2 deletions
@@ -10,7 +10,8 @@
   "scripts": {
     "test": "jest",
     "build": "tsc",
-    "prepublishOnly": "npm run build"
+    "prepublishOnly": "npm run build",
+    "lint": "prettier --check src tests"
   },
   "author": {
     "email": "[email protected]",
@@ -26,10 +27,14 @@
     "@types/jest": "^29.5.3",
     "jest": "^29.6.1",
     "openai": "^4.2.0",
+    "prettier": "^3.0.2",
     "ts-jest": "^29.1.1",
     "typescript": "^5.1.6"
   },
   "dependencies": {
     "js-tiktoken": "^1.0.7"
+  },
+  "prettier": {
+    "trailingComma": "all"
   }
-}
+}
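
For context on the new configuration: a "prettier" key in package.json is one of the configuration sources Prettier reads, and "trailingComma": "all" makes it emit trailing commas wherever the syntax allows, including function parameters and call arguments. That option accounts for several of the one-character changes in the diffs below. A hypothetical before/after sketch, not taken from this repository:

    // before
    promptTokensEstimate({
      messages,
      functions
    });

    // after, with trailingComma: "all"
    promptTokensEstimate({
      messages,
      functions,
    });

The new lint script runs prettier --check over src and tests, which exits non-zero if any file differs from Prettier's output, so formatting drift is easy to catch.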

src/functions.ts

Lines changed: 9 additions & 9 deletions
@@ -22,24 +22,24 @@ interface ObjectProp {
 type Prop = {
   description?: string;
 } & (
-    | ObjectProp
-    | {
+  | ObjectProp
+  | {
       type: "string";
       enum?: string[];
     }
-    | {
+  | {
       type: "number" | "integer";
       minimum?: number;
       maximum?: number;
       enum?: number[];
     }
-    | { type: "boolean" }
-    | { type: "null" }
-    | {
+  | { type: "boolean" }
+  | { type: "null" }
+  | {
       type: "array";
       items?: Prop;
     }
-  );
+);

 // When OpenAI use functions in the prompt, they format them as TypeScript definitions rather than OpenAPI JSON schemas.
 // This function converts the JSON schemas into TypeScript definitions.
@@ -75,7 +75,7 @@ function formatObjectProperties(obj: ObjectProp, indent: number): string {
       lines.push(`${name}?: ${formatType(param, indent)},`);
     }
   }
-  return lines.map(line => ' '.repeat(indent) + line).join("\n");
+  return lines.map((line) => " ".repeat(indent) + line).join("\n");
 }

 // Format a single property type
@@ -108,4 +108,4 @@ function formatType(param: Prop, indent: number): string {
       }
       return "any[]";
   }
-}
+}
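
As the comment in this file notes, OpenAI formats function definitions in the prompt as TypeScript-style declarations rather than OpenAPI JSON schemas, and this file performs that conversion. As a rough illustration of the idea (the exact rendering was reverse-engineered rather than officially documented, and this example is hypothetical), a definition like:

    {
      "name": "get_weather",
      "description": "Get the current weather",
      "parameters": {
        "type": "object",
        "properties": {
          "location": { "type": "string" },
          "unit": { "type": "string", "enum": ["celsius", "fahrenheit"] }
        }
      }
    }

would come out as something along the lines of:

    // Get the current weather
    type get_weather = (_: {
      location?: string,
      unit?: "celsius" | "fahrenheit",
    }) => any;

The optional marker on each property matches the `${name}?: ...` formatting visible in formatObjectProperties above.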

src/index.ts

Lines changed: 19 additions & 11 deletions
@@ -14,17 +14,25 @@ let encoder: Tiktoken | undefined;
  * @param {Function[]} prompt.functions OpenAI function definitions
  * @returns An estimate for the number of tokens the prompt will use
  */
-export function promptTokensEstimate({ messages, functions }: { messages: Message[], functions?: Function[] }): number {
+export function promptTokensEstimate({
+  messages,
+  functions,
+}: {
+  messages: Message[];
+  functions?: Function[];
+}): number {
   // It appears that if functions are present, the first system message is padded with a trailing newline. This
   // was inferred by trying lots of combinations of messages and functions and seeing what the token counts were.
   let paddedSystem = false;
-  let tokens = messages.map(m => {
-    if (m.role === "system" && functions && !paddedSystem) {
-      m = { ...m, content: m.content + "\n" }
-      paddedSystem = true;
-    }
-    return messageTokensEstimate(m);
-  }).reduce((a, b) => a + b, 0);
+  let tokens = messages
+    .map((m) => {
+      if (m.role === "system" && functions && !paddedSystem) {
+        m = { ...m, content: m.content + "\n" };
+        paddedSystem = true;
+      }
+      return messageTokensEstimate(m);
+    })
+    .reduce((a, b) => a + b, 0);

   // Each completion (vs message) seems to carry a 3-token overhead
   tokens += 3;
@@ -37,7 +45,7 @@ export function promptTokensEstimate({ messages, functions }: { messages: Messag
   // If there's a system message _and_ functions are present, subtract four tokens. I assume this is because
   // functions typically add a system message, but reuse the first one if it's already there. This offsets
   // the extra 9 tokens added by the function definitions.
-  if (functions && messages.find(m => m.role === "system")) {
+  if (functions && messages.find((m) => m.role === "system")) {
     tokens -= 4;
   }

@@ -68,7 +76,7 @@ export function messageTokensEstimate(message: Message): number {
     message.content,
     message.name,
     message.function_call?.name,
-    message.function_call?.arguments
+    message.function_call?.arguments,
   ].filter((v): v is string => !!v);
   let tokens = components.map(stringTokens).reduce((a, b) => a + b, 0);
   tokens += 3; // Add three per message
@@ -85,7 +93,7 @@ export function messageTokensEstimate(message: Message): number {
 }

 /**
- * Estimate the number of tokens a function definition will use. Note that using the function definition within
+ * Estimate the number of tokens a function definition will use. Note that using the function definition within
  * a prompt will add extra tokens, so you might want to use `promptTokensEstimate` instead.
  * @param funcs An array of OpenAI function definitions
  * @returns An estimate for the number of tokens the function definitions will use
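
Putting the pieces together: promptTokensEstimate sums messageTokensEstimate over all messages (padding the first system message with a trailing newline when functions are present), adds the 3-token completion overhead, and subtracts 4 tokens when both a system message and functions appear. A minimal usage sketch (assuming the package name openai-chat-tokens; the function definition is hypothetical):

    import { promptTokensEstimate } from "openai-chat-tokens";

    const estimate = promptTokensEstimate({
      messages: [
        { role: "system", content: "You are a helpful assistant" },
        { role: "user", content: "What's the weather in Oslo?" },
      ],
      // Hypothetical function definition, for illustration only
      functions: [
        {
          name: "get_weather",
          parameters: {
            type: "object",
            properties: {
              location: { type: "string" },
            },
          },
        },
      ],
    });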
