Commit a50123a

🤖 feat: `o3-mini` (danny-avila#5581)

* 🤖 feat: `o3-mini`
* chore: re-order vision models list to prioritize gpt-4o as a vision model over o1

1 parent 3e9d27c · commit a50123a

File tree: 6 files changed (+17 −13 lines)

api/app/clients/OpenAIClient.js

Lines changed: 10 additions & 10 deletions
@@ -65,7 +65,7 @@ class OpenAIClient extends BaseClient {
     /** @type {OpenAIUsageMetadata | undefined} */
     this.usage;
     /** @type {boolean|undefined} */
-    this.isO1Model;
+    this.isOmni;
     /** @type {SplitStreamHandler | undefined} */
     this.streamHandler;
   }
@@ -105,8 +105,8 @@ class OpenAIClient extends BaseClient {
       this.checkVisionRequest(this.options.attachments);
     }

-    const o1Pattern = /\bo1\b/i;
-    this.isO1Model = o1Pattern.test(this.modelOptions.model);
+    const omniPattern = /\b(o1|o3)\b/i;
+    this.isOmni = omniPattern.test(this.modelOptions.model);

     const { OPENROUTER_API_KEY, OPENAI_FORCE_PROMPT } = process.env ?? {};
     if (OPENROUTER_API_KEY && !this.azure) {
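
For reference, the word-boundary regex also catches hyphenated variants, since `\b` fires at the hyphen. A standalone check (model names here are illustrative):

```ts
// Standalone sketch of the detection above; model names are examples only.
const omniPattern = /\b(o1|o3)\b/i;

for (const model of ['o3-mini', 'o1-preview', 'o1', 'gpt-4o', 'gpt-3.5-turbo']) {
  // \b matches at the hyphen, so 'o3-mini' and 'o1-preview' test true,
  // while 'gpt-4o' contains no standalone 'o1'/'o3' token and tests false.
  console.log(`${model}: ${omniPattern.test(model)}`);
}
// o3-mini: true, o1-preview: true, o1: true, gpt-4o: false, gpt-3.5-turbo: false
```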
@@ -146,7 +146,7 @@ class OpenAIClient extends BaseClient {
     const { model } = this.modelOptions;

     this.isChatCompletion =
-      o1Pattern.test(model) || model.includes('gpt') || this.useOpenRouter || !!reverseProxy;
+      omniPattern.test(model) || model.includes('gpt') || this.useOpenRouter || !!reverseProxy;
     this.isChatGptModel = this.isChatCompletion;
     if (
       model.includes('text-davinci') ||
@@ -475,7 +475,7 @@ class OpenAIClient extends BaseClient {
       promptPrefix = this.augmentedPrompt + promptPrefix;
     }

-    if (promptPrefix && this.isO1Model !== true) {
+    if (promptPrefix && this.isOmni !== true) {
       promptPrefix = `Instructions:\n${promptPrefix.trim()}`;
       instructions = {
         role: 'system',
@@ -503,7 +503,7 @@ class OpenAIClient extends BaseClient {
     };

     /** EXPERIMENTAL */
-    if (promptPrefix && this.isO1Model === true) {
+    if (promptPrefix && this.isOmni === true) {
       const lastUserMessageIndex = payload.findLastIndex((message) => message.role === 'user');
       if (lastUserMessageIndex !== -1) {
         payload[
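
The EXPERIMENTAL branch exists because o1-class models historically rejected `system` messages, so the prefix is folded into the most recent user turn instead of being sent as separate instructions. A minimal sketch of that fold, with a simplified `payload` shape:

```ts
// Simplified sketch: fold instructions into the last user message
// rather than emitting a separate system message.
type ChatMessage = { role: 'system' | 'user' | 'assistant'; content: string };

function foldInstructions(payload: ChatMessage[], promptPrefix: string): void {
  const lastUserIndex = payload.findLastIndex((m) => m.role === 'user');
  if (lastUserIndex !== -1) {
    payload[lastUserIndex].content = `${promptPrefix}\n${payload[lastUserIndex].content}`;
  }
}
```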
@@ -1200,7 +1200,7 @@ ${convo}
       opts.defaultHeaders = { ...opts.defaultHeaders, 'api-key': this.apiKey };
     }

-    if (this.isO1Model === true && modelOptions.max_tokens != null) {
+    if (this.isOmni === true && modelOptions.max_tokens != null) {
       modelOptions.max_completion_tokens = modelOptions.max_tokens;
       delete modelOptions.max_tokens;
     }
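
This mirrors the upstream OpenAI API, where reasoning models reject the legacy `max_tokens` parameter in favor of `max_completion_tokens`. The remap in isolation:

```ts
// Isolated sketch of the parameter remap applied for omni models.
interface ModelOptions {
  model: string;
  max_tokens?: number;
  max_completion_tokens?: number;
}

function remapMaxTokens(isOmni: boolean, opts: ModelOptions): void {
  if (isOmni && opts.max_tokens != null) {
    opts.max_completion_tokens = opts.max_tokens; // reasoning models only accept this name
    delete opts.max_tokens;
  }
}
```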
@@ -1280,13 +1280,13 @@ ${convo}
     let streamResolve;

     if (
-      this.isO1Model === true &&
+      this.isOmni === true &&
       (this.azure || /o1(?!-(?:mini|preview)).*$/.test(modelOptions.model)) &&
       modelOptions.stream
     ) {
       delete modelOptions.stream;
       delete modelOptions.stop;
-    } else if (!this.isO1Model && modelOptions.reasoning_effort != null) {
+    } else if (!this.isOmni && modelOptions.reasoning_effort != null) {
       delete modelOptions.reasoning_effort;
     }
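
Two opposite guards here: `stream` and `stop` are dropped for models that cannot stream (Azure deployments and base/dated `o1`), while `reasoning_effort` is stripped for every non-omni model. The negative lookahead exempts `o1-mini` and `o1-preview`, and `o3-mini` never matches it, so it keeps streaming:

```ts
// The lookahead matches bare 'o1' and dated snapshots, but not the
// -mini/-preview variants, and never an o3 model.
const noStream = /o1(?!-(?:mini|preview)).*$/;
console.log(noStream.test('o1')); // true  (streaming disabled)
console.log(noStream.test('o1-2024-12-17')); // true
console.log(noStream.test('o1-mini')); // false (keeps streaming)
console.log(noStream.test('o3-mini')); // false
```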

@@ -1366,7 +1366,7 @@ ${convo}
       for await (const chunk of stream) {
         // Add finish_reason: null if missing in any choice
         if (chunk.choices) {
-          chunk.choices.forEach(choice => {
+          chunk.choices.forEach((choice) => {
             if (!('finish_reason' in choice)) {
               choice.finish_reason = null;
             }

api/models/tx.js

Lines changed: 2 additions & 1 deletion
@@ -75,8 +75,9 @@ const tokenValues = Object.assign(
     '4k': { prompt: 1.5, completion: 2 },
     '16k': { prompt: 3, completion: 4 },
     'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
+    'o3-mini': { prompt: 1.1, completion: 4.4 },
+    'o1-mini': { prompt: 1.1, completion: 4.4 },
     'o1-preview': { prompt: 15, completion: 60 },
-    'o1-mini': { prompt: 3, completion: 12 },
     o1: { prompt: 15, completion: 60 },
     'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
     'gpt-4o': { prompt: 2.5, completion: 10 },
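
These rates appear to be USD per 1M tokens, which lines up with OpenAI's published `o3-mini` pricing ($1.10 prompt / $4.40 completion); the same hunk also corrects the stale `o1-mini` entry. A rough cost estimate under that per-1M assumption (`estimateCostUSD` is a hypothetical helper, not part of the codebase):

```ts
// Hypothetical helper assuming rates are USD per 1M tokens.
const tokenValues: Record<string, { prompt: number; completion: number }> = {
  'o3-mini': { prompt: 1.1, completion: 4.4 },
  'o1-mini': { prompt: 1.1, completion: 4.4 },
  o1: { prompt: 15, completion: 60 },
};

function estimateCostUSD(model: string, promptTokens: number, completionTokens: number): number {
  const rate = tokenValues[model];
  return (promptTokens * rate.prompt + completionTokens * rate.completion) / 1_000_000;
}

console.log(estimateCostUSD('o3-mini', 10_000, 2_000)); // 0.0198
```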

api/utils/tokens.js

Lines changed: 1 addition & 0 deletions
@@ -2,6 +2,7 @@ const z = require('zod');
 const { EModelEndpoint } = require('librechat-data-provider');

 const openAIModels = {
+  'o3-mini': 195000, // -5000 from max
   o1: 195000, // -5000 from max
   'o1-mini': 127500, // -500 from max
   'o1-preview': 127500, // -500 from max
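
The inline comments explain the values: each entry is the advertised context window minus some headroom (o3-mini advertises 200k, hence 195000). A plain-lookup sketch; the real resolver in `api/utils/tokens.js` also does pattern matching, elided here:

```ts
// Plain-lookup sketch; the fallback value is an assumption for illustration.
const openAIModels: Record<string, number> = {
  'o3-mini': 195000, // 200k advertised, -5000 headroom
  o1: 195000,
  'o1-mini': 127500,
  'o1-preview': 127500,
};

function getTokenLimit(model: string, fallback = 4096): number {
  return openAIModels[model] ?? fallback;
}

console.log(getTokenLimit('o3-mini')); // 195000
```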

client/src/components/Endpoints/MessageEndpointIcon.tsx

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@ type EndpointIcon = {

 function getOpenAIColor(_model: string | null | undefined) {
   const model = _model?.toLowerCase() ?? '';
-  if (model && /\bo1\b/i.test(model)) {
+  if (model && /\b(o1|o3)\b/i.test(model)) {
     return '#000000';
   }
   return model.includes('gpt-4') ? '#AB68FF' : '#19C37D';
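
With the widened pattern, o3 models get the same black icon as o1, while GPT-4 variants stay purple and everything else stays green:

```ts
getOpenAIColor('o3-mini'); // '#000000' (matched by /\b(o1|o3)\b/i)
getOpenAIColor('gpt-4o'); // '#AB68FF'
getOpenAIColor('gpt-3.5-turbo'); // '#19C37D'
```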

packages/data-provider/src/config.ts

Lines changed: 1 addition & 1 deletion
@@ -778,9 +778,9 @@ export const supportsBalanceCheck = {
 };

 export const visionModels = [
-  'o1',
   'gpt-4o',
   'gpt-4o-mini',
+  'o1',
   'gpt-4-turbo',
   'gpt-4-vision',
   'llava',
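
Per the commit message, this reorder makes `gpt-4o` take priority over `o1` as a vision model, which implies earlier entries in `visionModels` win. A sketch of first-match selection under that assumption (the actual selection logic lives elsewhere in the codebase; `pickVisionModel` is hypothetical):

```ts
const visionModels = ['gpt-4o', 'gpt-4o-mini', 'o1', 'gpt-4-turbo', 'gpt-4-vision', 'llava'];

// Hypothetical selector: earlier entries take priority.
function pickVisionModel(available: string[]): string | undefined {
  return visionModels.find((vision) => available.some((m) => m.includes(vision)));
}

console.log(pickVisionModel(['o1', 'gpt-4o'])); // 'gpt-4o', even though 'o1' is also available
```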

packages/data-provider/src/parsers.ts

Lines changed: 2 additions & 0 deletions
@@ -240,6 +240,8 @@ export const getResponseSender = (endpointOption: t.TEndpointOption): string =>
     return modelLabel;
   } else if (model && /\bo1\b/i.test(model)) {
     return 'o1';
+  } else if (model && /\bo3\b/i.test(model)) {
+    return 'o3';
   } else if (model && model.includes('gpt-3')) {
     return 'GPT-3.5';
   } else if (model && model.includes('gpt-4o')) {
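
Because the `o1` branch comes first and both checks use word boundaries, `o1-mini` labels as `o1` and `o3-mini` falls through to the new `o3` branch. A condensed sketch of the cascade (the trailing default is an assumption for illustration):

```ts
// Condensed sketch of the sender-label cascade.
function senderLabel(model: string): string {
  if (/\bo1\b/i.test(model)) return 'o1';
  if (/\bo3\b/i.test(model)) return 'o3';
  if (model.includes('gpt-3')) return 'GPT-3.5';
  return 'ChatGPT'; // assumed default, not from the diff
}

console.log(senderLabel('o1-mini')); // 'o1'
console.log(senderLabel('o3-mini')); // 'o3'
```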
