
Commit 9ef295d

Add LM Studio
1 parent 9b303e0 commit 9ef295d

10 files changed: +335 additions, 0 deletions

src/config/aiModels/index.ts

Lines changed: 3 additions & 0 deletions
@@ -17,6 +17,7 @@ import { default as higress } from './higress';
 import { default as huggingface } from './huggingface';
 import { default as hunyuan } from './hunyuan';
 import { default as internlm } from './internlm';
+import { default as lmstudio } from './lmstudio';
 import { default as minimax } from './minimax';
 import { default as mistral } from './mistral';
 import { default as moonshot } from './moonshot';
@@ -75,6 +76,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
   huggingface,
   hunyuan,
   internlm,
+  lmstudio,
   minimax,
   mistral,
   moonshot,
@@ -114,6 +116,7 @@ export { default as higress } from './higress';
 export { default as huggingface } from './huggingface';
 export { default as hunyuan } from './hunyuan';
 export { default as internlm } from './internlm';
+export { default as lmstudio } from './lmstudio';
 export { default as minimax } from './minimax';
 export { default as mistral } from './mistral';
 export { default as moonshot } from './moonshot';

src/config/aiModels/lmstudio.ts

Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
+import { AIChatModelCard } from '@/types/aiModel';
+
+const lmStudioChatModels: AIChatModelCard[] = [
+  {
+    abilities: {},
+    contextWindowTokens: 128_000,
+    description:
+      'Llama 3.1 是 Meta 推出的领先模型,支持高达 405B 参数,可应用于复杂对话、多语言翻译和数据分析领域。',
+    displayName: 'Llama 3.1 8B',
+    enabled: true,
+    id: 'llama3.1',
+    type: 'chat',
+  },
+  {
+    abilities: {},
+    contextWindowTokens: 128_000,
+    description: 'Qwen2.5 是阿里巴巴的新一代大规模语言模型,以优异的性能支持多元化的应用需求。',
+    displayName: 'Qwen2.5 14B',
+    enabled: true,
+    id: 'qwen2.5-14b-instruct',
+    type: 'chat',
+  },
+];
+
+export const allModels = [...lmStudioChatModels];
+
+export default allModels;
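
The two cards above follow the same AIChatModelCard shape used by the other files under src/config/aiModels. Purely as an illustration (not part of this commit), a card for another model loaded in LM Studio could be declared the same way; the id, display name, and context window below are hypothetical and would have to match whatever the local server actually serves.

import { AIChatModelCard } from '@/types/aiModel';

// Illustrative only: the id must equal the model identifier the local
// LM Studio server exposes, and contextWindowTokens should match the
// context length the model was loaded with.
export const hypotheticalLocalModel: AIChatModelCard = {
  abilities: {},
  contextWindowTokens: 32_768,
  description: 'Example card for another locally served chat model.',
  displayName: 'Mistral 7B Instruct',
  enabled: false,
  id: 'mistral-7b-instruct',
  type: 'chat',
};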

src/config/modelProviders/index.ts

Lines changed: 3 additions & 0 deletions
@@ -17,6 +17,7 @@ import HigressProvider from './higress';
 import HuggingFaceProvider from './huggingface';
 import HunyuanProvider from './hunyuan';
 import InternLMProvider from './internlm';
+import LMStudioProvider from './lmstudio';
 import MinimaxProvider from './minimax';
 import MistralProvider from './mistral';
 import MoonshotProvider from './moonshot';
@@ -118,6 +119,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   SiliconCloudProvider,
   HigressProvider,
   GiteeAIProvider,
+  LMStudioProvider,
 ];
 
 export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -146,6 +148,7 @@ export { default as HigressProviderCard } from './higress';
 export { default as HuggingFaceProviderCard } from './huggingface';
 export { default as HunyuanProviderCard } from './hunyuan';
 export { default as InternLMProviderCard } from './internlm';
+export { default as LMStudioProviderCard } from './lmstudio';
 export { default as MinimaxProviderCard } from './minimax';
 export { default as MistralProviderCard } from './mistral';
 export { default as MoonshotProviderCard } from './moonshot';

src/config/modelProviders/lmstudio.ts

Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
+import { ModelProviderCard } from '@/types/llm';
+
+// ref: https://ollama.com/library
+const LMStudio: ModelProviderCard = {
+  chatModels: [],
+  id: 'lmstudio',
+  modelsUrl: 'https://lmstudio.ai/models',
+  name: 'LM Studio',
+  settings: {
+    defaultShowBrowserRequest: true,
+    proxyUrl: {
+      placeholder: 'http://127.0.0.1:1234/v1',
+    },
+    showApiKey: false,
+    showModelFetcher: true,
+    smoothing: {
+      speed: 2,
+      text: true,
+    },
+  },
+  url: 'https://lmstudio.ai',
+};
+
+export default LMStudio;
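
The proxyUrl placeholder points at LM Studio's local server, which exposes an OpenAI-compatible API on port 1234 by default; that is also why showApiKey is false. As a rough sketch outside this commit, any OpenAI-style client pointed at that base URL should be able to talk to it; the model id below is an assumption and must match a model actually loaded in LM Studio.

import OpenAI from 'openai';

// Minimal sketch: talk to the local LM Studio server directly. The key is a
// throwaway value because the server does not validate credentials.
const client = new OpenAI({
  apiKey: 'lm-studio',
  baseURL: 'http://127.0.0.1:1234/v1',
});

async function main() {
  const completion = await client.chat.completions.create({
    messages: [{ content: 'Hello', role: 'user' }],
    model: 'llama3.1', // assumed id; must match a model loaded in LM Studio
  });
  console.log(completion.choices[0]?.message.content);
}

main();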

src/const/settings/llm.ts

Lines changed: 3 additions & 0 deletions
@@ -2,6 +2,9 @@ import { ModelProvider } from '@/libs/agent-runtime';
 import { genUserLLMConfig } from '@/utils/genUserLLMConfig';
 
 export const DEFAULT_LLM_CONFIG = genUserLLMConfig({
+  lmstudio: {
+    fetchOnClient: true,
+  },
   ollama: {
     enabled: true,
     fetchOnClient: true,

src/libs/agent-runtime/AgentRuntime.ts

Lines changed: 7 additions & 0 deletions
@@ -20,6 +20,7 @@ import { LobeHigressAI } from './higress';
 import { LobeHuggingFaceAI } from './huggingface';
 import { LobeHunyuanAI } from './hunyuan';
 import { LobeInternLMAI } from './internlm';
+import { LobeLMStudioAI } from './lmstudio';
 import { LobeMinimaxAI } from './minimax';
 import { LobeMistralAI } from './mistral';
 import { LobeMoonshotAI } from './moonshot';
@@ -147,6 +148,7 @@ class AgentRuntime {
       huggingface: { apiKey?: string; baseURL?: string };
       hunyuan: Partial<ClientOptions>;
       internlm: Partial<ClientOptions>;
+      lmstudio: Partial<ClientOptions>;
       minimax: Partial<ClientOptions>;
       mistral: Partial<ClientOptions>;
       moonshot: Partial<ClientOptions>;
@@ -207,6 +209,11 @@
         break;
       }
 
+      case ModelProvider.LMStudio: {
+        runtimeModel = new LobeLMStudioAI(params.lmstudio);
+        break;
+      }
+
       case ModelProvider.Ollama: {
         runtimeModel = new LobeOllamaAI(params.ollama);
         break;
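
For context, the test file added later in this commit constructs the runtime directly; a minimal usage sketch along those lines might look as follows. The import path, baseURL override, and model id are assumptions made for illustration, and the shape of the returned response is not shown in this diff.

import { LobeLMStudioAI } from '@/libs/agent-runtime/lmstudio';

// Sketch only: mirrors how the test file constructs and calls the runtime.
// The options are Partial<ClientOptions>, so baseURL can point at a
// non-default LM Studio address; the apiKey is a placeholder value.
const runtime = new LobeLMStudioAI({
  apiKey: 'placeholder',
  baseURL: 'http://127.0.0.1:1234/v1',
});

async function demo() {
  const response = await runtime.chat({
    messages: [{ content: 'Hello', role: 'user' }],
    model: 'llama3.1', // assumed; must match a model loaded in LM Studio
    temperature: 0,
  });
  return response;
}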

src/libs/agent-runtime/lmstudio/index.test.ts

Lines changed: 255 additions & 0 deletions
@@ -0,0 +1,255 @@
+// @vitest-environment node
+import OpenAI from 'openai';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import {
+  ChatStreamCallbacks,
+  LobeOpenAICompatibleRuntime,
+  ModelProvider,
+} from '@/libs/agent-runtime';
+
+import * as debugStreamModule from '../utils/debugStream';
+import { LobeLMStudioAI } from './index';
+
+const provider = ModelProvider.LMStudio;
+const defaultBaseURL = 'http://localhost:1234/v1';
+
+const bizErrorType = 'ProviderBizError';
+const invalidErrorType = 'InvalidProviderAPIKey';
+
+// Mock the console.error to avoid polluting test output
+vi.spyOn(console, 'error').mockImplementation(() => {});
+
+let instance: LobeOpenAICompatibleRuntime;
+
+beforeEach(() => {
+  instance = new LobeLMStudioAI({ apiKey: 'test' });
+
+  // Use vi.spyOn to mock the chat.completions.create method
+  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+    new ReadableStream() as any,
+  );
+});
+
+afterEach(() => {
+  vi.clearAllMocks();
+});
+
+describe('LobeLMStudioAI', () => {
+  describe('init', () => {
+    it('should correctly initialize with an API key', async () => {
+      const instance = new LobeLMStudioAI({ apiKey: 'test_api_key' });
+      expect(instance).toBeInstanceOf(LobeLMStudioAI);
+      expect(instance.baseURL).toEqual(defaultBaseURL);
+    });
+  });
+
+  describe('chat', () => {
+    describe('Error', () => {
+      it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+        // Arrange
+        const apiError = new OpenAI.APIError(
+          400,
+          {
+            status: 400,
+            error: {
+              message: 'Bad Request',
+            },
+          },
+          'Error message',
+          {},
+        );
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'deepseek-chat',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              error: { message: 'Bad Request' },
+              status: 400,
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
+        try {
+          new LobeLMStudioAI({});
+        } catch (e) {
+          expect(e).toEqual({ errorType: invalidErrorType });
+        }
+      });
+
+      it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: {
+            message: 'api is undefined',
+          },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'deepseek-chat',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: { message: 'api is undefined' },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        instance = new LobeLMStudioAI({
+          apiKey: 'test',
+
+          baseURL: 'https://api.abc.com/v1',
+        });
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'deepseek-chat',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: 'https://api.***.com/v1',
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw an InvalidDeepSeekAPIKey error type on 401 status code', async () => {
+        // Mock the API call to simulate a 401 error
+        const error = new Error('Unauthorized') as any;
+        error.status = 401;
+        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'deepseek-chat',
+            temperature: 0,
+          });
+        } catch (e) {
+          // Expect the chat method to throw an error with InvalidDeepSeekAPIKey
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: new Error('Unauthorized'),
+            errorType: invalidErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+        // Arrange
+        const genericError = new Error('Generic Error');
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'deepseek-chat',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            errorType: 'AgentRuntimeError',
+            provider,
+            error: {
+              name: genericError.name,
+              cause: genericError.cause,
+              message: genericError.message,
+              stack: genericError.stack,
+            },
+          });
+        }
+      });
+    });
+
+    describe('DEBUG', () => {
+      it('should call debugStream and return StreamingTextResponse when DEBUG_LMSTUDIO_CHAT_COMPLETION is 1', async () => {
+        // Arrange
+        const mockProdStream = new ReadableStream() as any; // mocked production stream
+        const mockDebugStream = new ReadableStream({
+          start(controller) {
+            controller.enqueue('Debug stream content');
+            controller.close();
+          },
+        }) as any;
+        mockDebugStream.toReadableStream = () => mockDebugStream; // attach a toReadableStream method
+
+        // Mock the return value of chat.completions.create, including a mocked tee method
+        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+        });
+
+        // Save the original environment variable value
+        const originalDebugValue = process.env.DEBUG_LMSTUDIO_CHAT_COMPLETION;
+
+        // Mock the environment variable
+        process.env.DEBUG_LMSTUDIO_CHAT_COMPLETION = '1';
+        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+        // Run the code under test
+        // and make sure it calls debugStream when the condition is met
+        // (example invocation; adjust as needed for the actual setup)
+        await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'deepseek-chat',
+          stream: true,
+          temperature: 0,
+        });
+
+        // Verify that debugStream was called
+        expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+        // Restore the original environment variable value
+        process.env.DEBUG_LMSTUDIO_CHAT_COMPLETION = originalDebugValue;
+      });
+    });
+  });
+});
