Skip to content

✨ feat: add support for InternLM (书生浦语) provider #4711

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Nov 19, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -168,6 +168,8 @@ ENV \
HUGGINGFACE_API_KEY="" HUGGINGFACE_MODEL_LIST="" HUGGINGFACE_PROXY_URL="" \
# Hunyuan
HUNYUAN_API_KEY="" HUNYUAN_MODEL_LIST="" \
# InternLM
INTERNLM_API_KEY="" INTERNLM_MODEL_LIST="" \
# Minimax
MINIMAX_API_KEY="" MINIMAX_MODEL_LIST="" \
# Mistral
Expand Down
2 changes: 2 additions & 0 deletions Dockerfile.database
Original file line number Diff line number Diff line change
Expand Up @@ -203,6 +203,8 @@ ENV \
HUGGINGFACE_API_KEY="" HUGGINGFACE_MODEL_LIST="" HUGGINGFACE_PROXY_URL="" \
# Hunyuan
HUNYUAN_API_KEY="" HUNYUAN_MODEL_LIST="" \
# InternLM
INTERNLM_API_KEY="" INTERNLM_MODEL_LIST="" \
# Minimax
MINIMAX_API_KEY="" MINIMAX_MODEL_LIST="" \
# Mistral
Expand Down
2 changes: 2 additions & 0 deletions src/app/(main)/settings/llm/ProviderList/providers.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import {
GoogleProviderCard,
GroqProviderCard,
HunyuanProviderCard,
InternLMProviderCard,
MinimaxProviderCard,
MistralProviderCard,
MoonshotProviderCard,
Expand Down Expand Up @@ -85,6 +86,7 @@ export const useProviderList = (): ProviderItem[] => {
MinimaxProviderCard,
Ai360ProviderCard,
TaichuProviderCard,
InternLMProviderCard,
SiliconCloudProviderCard,
],
[
Expand Down
6 changes: 6 additions & 0 deletions src/config/llm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,9 @@ export const getLLMConfig = () => {

ENABLED_XAI: z.boolean(),
XAI_API_KEY: z.string().optional(),

ENABLED_INTERNLM: z.boolean(),
INTERNLM_API_KEY: z.string().optional(),
},
runtimeEnv: {
API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
Expand Down Expand Up @@ -246,6 +249,9 @@ export const getLLMConfig = () => {

ENABLED_XAI: !!process.env.XAI_API_KEY,
XAI_API_KEY: process.env.XAI_API_KEY,

ENABLED_INTERNLM: !!process.env.INTERNLM_API_KEY,
INTERNLM_API_KEY: process.env.INTERNLM_API_KEY,
},
});
};
Expand Down
4 changes: 4 additions & 0 deletions src/config/modelProviders/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ import GoogleProvider from './google';
import GroqProvider from './groq';
import HuggingFaceProvider from './huggingface';
import HunyuanProvider from './hunyuan';
import InternLMProvider from './internlm';
import MinimaxProvider from './minimax';
import MistralProvider from './mistral';
import MoonshotProvider from './moonshot';
Expand Down Expand Up @@ -69,6 +70,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
HunyuanProvider.chatModels,
WenxinProvider.chatModels,
SenseNovaProvider.chatModels,
InternLMProvider.chatModels,
].flat();

export const DEFAULT_MODEL_PROVIDER_LIST = [
Expand Down Expand Up @@ -105,6 +107,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
MinimaxProvider,
Ai360Provider,
TaichuProvider,
InternLMProvider,
SiliconCloudProvider,
];

Expand All @@ -131,6 +134,7 @@ export { default as GoogleProviderCard } from './google';
export { default as GroqProviderCard } from './groq';
export { default as HuggingFaceProviderCard } from './huggingface';
export { default as HunyuanProviderCard } from './hunyuan';
export { default as InternLMProviderCard } from './internlm';
export { default as MinimaxProviderCard } from './minimax';
export { default as MistralProviderCard } from './mistral';
export { default as MoonshotProviderCard } from './moonshot';
Expand Down
42 changes: 42 additions & 0 deletions src/config/modelProviders/internlm.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
import { ModelProviderCard } from '@/types/llm';

// Chat models served by the InternLM OpenAI-compatible API.
// Pricing is currently zero for both models; token limits and maxOutput
// come from the provider's published model docs (see modelsUrl below).
const internlmChatModels: ModelProviderCard['chatModels'] = [
  {
    description:
      '我们最新的模型系列,有着卓越的推理性能,支持 1M 的上下文长度以及更强的指令跟随和工具调用能力。',
    displayName: 'InternLM2.5',
    enabled: true,
    functionCall: true,
    id: 'internlm2.5-latest',
    maxOutput: 4096,
    pricing: { input: 0, output: 0 },
    tokens: 32_768,
  },
  {
    description: '我们仍在维护的老版本模型,有 7B、20B 多种模型参数量可选。',
    displayName: 'InternLM2 Pro Chat',
    functionCall: true,
    id: 'internlm2-pro-chat',
    maxOutput: 4096,
    pricing: { input: 0, output: 0 },
    tokens: 32_768,
  },
];

// Provider card for InternLM (书生浦语).
// `checkModel` is the model used for connectivity checks and must be one of
// the ids in `internlmChatModels`; browser-side requests are disabled, so all
// traffic is proxied through the server.
const InternLM: ModelProviderCard = {
  chatModels: internlmChatModels,
  checkModel: 'internlm2.5-latest',
  description:
    '致力于大模型研究与开发工具链的开源组织。为所有 AI 开发者提供高效、易用的开源平台,让最前沿的大模型与算法技术触手可及',
  disableBrowserRequest: true,
  id: 'internlm',
  modelList: { showModelFetcher: true },
  modelsUrl:
    'https://internlm.intern-ai.org.cn/doc/docs/Models#%E8%8E%B7%E5%8F%96%E6%A8%A1%E5%9E%8B%E5%88%97%E8%A1%A8',
  name: 'InternLM',
  url: 'https://internlm.intern-ai.org.cn',
};

export default InternLM;
7 changes: 7 additions & 0 deletions src/libs/agent-runtime/AgentRuntime.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import { LobeGoogleAI } from './google';
import { LobeGroq } from './groq';
import { LobeHuggingFaceAI } from './huggingface';
import { LobeHunyuanAI } from './hunyuan';
import { LobeInternLMAI } from './internlm';
import { LobeMinimaxAI } from './minimax';
import { LobeMistralAI } from './mistral';
import { LobeMoonshotAI } from './moonshot';
Expand Down Expand Up @@ -141,6 +142,7 @@ class AgentRuntime {
groq: Partial<ClientOptions>;
huggingface: { apiKey?: string; baseURL?: string };
hunyuan: Partial<ClientOptions>;
internlm: Partial<ClientOptions>;
minimax: Partial<ClientOptions>;
mistral: Partial<ClientOptions>;
moonshot: Partial<ClientOptions>;
Expand Down Expand Up @@ -335,6 +337,11 @@ class AgentRuntime {
runtimeModel = new LobeCloudflareAI(params.cloudflare ?? {});
break;
}

case ModelProvider.InternLM: {
runtimeModel = new LobeInternLMAI(params.internlm);
break;
}
}
return new AgentRuntime(runtimeModel);
}
Expand Down
Loading
Loading