Skip to content

Commit 8e440ee

Browse files
authored
feat: gpt-3.5-turbo-instruct support, refactor: try fetching models if OpenRouter is set (danny-avila#981)
* refactor: try fetching if OpenRouter api key is set
* feat: gpt-3.5-turbo-instruct support
* fix: use new assignment in getTokenizer
1 parent 90ab516 commit 8e440ee

File tree

3 files changed

+18
-10
lines changed

3 files changed

+18
-10
lines changed

api/app/clients/ChatGPTClient.js

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -153,6 +153,11 @@ class ChatGPTClient extends BaseClient {
153153
} else {
154154
modelOptions.prompt = input;
155155
}
156+
157+
if (this.useOpenRouter && modelOptions.prompt) {
158+
delete modelOptions.stop;
159+
}
160+
156161
const { debug } = this.options;
157162
const url = this.completionsUrl;
158163
if (debug) {

api/app/clients/OpenAIClient.js

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -73,21 +73,22 @@ class OpenAIClient extends BaseClient {
7373
this.useOpenRouter = true;
7474
}
7575

76+
const { model } = this.modelOptions;
77+
7678
this.isChatCompletion =
7779
this.useOpenRouter ||
7880
this.options.reverseProxyUrl ||
7981
this.options.localAI ||
80-
this.modelOptions.model.startsWith('gpt-');
82+
model.includes('gpt-');
8183
this.isChatGptModel = this.isChatCompletion;
82-
if (this.modelOptions.model === 'text-davinci-003') {
84+
if (model.includes('text-davinci-003') || model.includes('instruct')) {
8385
this.isChatCompletion = false;
8486
this.isChatGptModel = false;
8587
}
8688
const { isChatGptModel } = this;
8789
this.isUnofficialChatGptModel =
88-
this.modelOptions.model.startsWith('text-chat') ||
89-
this.modelOptions.model.startsWith('text-davinci-002-render');
90-
this.maxContextTokens = maxTokensMap[this.modelOptions.model] ?? 4095; // 1 less than maximum
90+
model.startsWith('text-chat') || model.startsWith('text-davinci-002-render');
91+
this.maxContextTokens = maxTokensMap[model] ?? 4095; // 1 less than maximum
9192
this.maxResponseTokens = this.modelOptions.max_tokens || 1024;
9293
this.maxPromptTokens =
9394
this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
@@ -168,8 +169,9 @@ class OpenAIClient extends BaseClient {
168169
tokenizer = this.constructor.getTokenizer(this.encoding, true, extendSpecialTokens);
169170
} else {
170171
try {
171-
this.encoding = this.modelOptions.model;
172-
tokenizer = this.constructor.getTokenizer(this.modelOptions.model, true);
172+
const { model } = this.modelOptions;
173+
this.encoding = model.includes('instruct') ? 'text-davinci-003' : model;
174+
tokenizer = this.constructor.getTokenizer(this.encoding, true);
173175
} catch {
174176
tokenizer = this.constructor.getTokenizer(this.encoding, true);
175177
}
@@ -354,6 +356,8 @@ class OpenAIClient extends BaseClient {
354356
if (this.isChatCompletion) {
355357
token =
356358
progressMessage.choices?.[0]?.delta?.content ?? progressMessage.choices?.[0]?.text;
359+
} else {
360+
token = progressMessage.choices?.[0]?.text;
357361
}
358362

359363
if (!token && this.useOpenRouter) {

api/server/services/ModelService.js

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -88,12 +88,11 @@ const getOpenAIModels = async (opts = { azure: false, plugins: false }) => {
8888
return models;
8989
}
9090

91-
if (userProvidedOpenAI) {
91+
if (userProvidedOpenAI && !OPENROUTER_API_KEY) {
9292
return models;
9393
}
9494

95-
models = await fetchOpenAIModels(opts, models);
96-
return models;
95+
return await fetchOpenAIModels(opts, models);
9796
};
9897

9998
const getChatGPTBrowserModels = () => {

0 commit comments

Comments (0)