Skip to content
4 changes: 4 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -112,6 +112,10 @@ OPENAI_API_KEY=sk-xxxxxxxxx

# QWEN_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

### SiliconCloud AI ####

# SILICONCLOUD_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

########################################
############ Market Service ############
########################################
Expand Down
7 changes: 7 additions & 0 deletions src/app/(main)/settings/llm/ProviderList/providers.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ import {
Novita,
OpenRouter,
Perplexity,
SiliconCloud,
Stepfun,
Together,
Tongyi,
Expand All @@ -40,6 +41,7 @@ import {
OpenRouterProviderCard,
PerplexityProviderCard,
QwenProviderCard,
SiliconCloudProviderCard,
StepfunProviderCard,
TaichuProviderCard,
TogetherAIProviderCard,
Expand Down Expand Up @@ -198,6 +200,11 @@ export const useProviderList = (): ProviderItem[] => {
docUrl: urlJoin(BASE_DOC_URL, 'ai360'),
title: <Ai360.Combine size={ 20 } type={ 'color' } />,
},
{
...SiliconCloudProviderCard,
docUrl: urlJoin(BASE_DOC_URL, 'siliconcloud'),
title: <SiliconCloud.Combine size={20} type={'color'} />,
},
],
[azureProvider, ollamaProvider, ollamaProvider, bedrockProvider],
);
Expand Down
7 changes: 7 additions & 0 deletions src/app/api/chat/agentRuntime.ts
Original file line number Diff line number Diff line change
Expand Up @@ -198,6 +198,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {

const apiKey = apiKeyManager.pick(payload?.apiKey || AI360_API_KEY);

return { apiKey };
}
case ModelProvider.SiliconCloud: {
const { SILICONCLOUD_API_KEY } = getLLMConfig();

const apiKey = apiKeyManager.pick(payload?.apiKey || SILICONCLOUD_API_KEY);

return { apiKey };
}
}
Expand Down
2 changes: 1 addition & 1 deletion src/components/ModelIcon/index.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ const ModelIcon = memo<ModelProviderIconProps>(({ model: originModel, size = 12
// currently supported models, maybe not in its own provider
if (model.includes('gpt-3')) return <OpenAI.Avatar size={size} type={'gpt3'} />;
if (model.includes('gpt-4')) return <OpenAI.Avatar size={size} type={'gpt4'} />;
if (model.startsWith('glm') || model.includes('chatglm')) return <ChatGLM.Avatar size={size} />;
if (model.includes('glm-') || model.includes('chatglm')) return <ChatGLM.Avatar size={size} />;
if (model.startsWith('codegeex')) return <CodeGeeX.Avatar size={size} />;
if (model.includes('deepseek')) return <DeepSeek.Avatar size={size} />;
if (model.includes('claude')) return <Claude.Avatar size={size} />;
Expand Down
5 changes: 5 additions & 0 deletions src/components/ModelProviderIcon/index.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import {
OpenAI,
OpenRouter,
Perplexity,
SiliconCloud,
Stepfun,
Together,
Tongyi,
Expand Down Expand Up @@ -134,6 +135,10 @@ const ModelProviderIcon = memo<ModelProviderIconProps>(({ provider }) => {
return <Ai360 size={20} />;
}

case ModelProvider.SiliconCloud: {
return <SiliconCloud size={20} />;
}

default: {
return null;
}
Expand Down
6 changes: 6 additions & 0 deletions src/config/llm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,9 @@ export const getLLMConfig = () => {

ENABLED_AI360: z.boolean(),
AI360_API_KEY: z.string().optional(),

ENABLED_SILICONCLOUD: z.boolean(),
SILICONCLOUD_API_KEY: z.string().optional(),
},
runtimeEnv: {
API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
Expand Down Expand Up @@ -171,6 +174,9 @@ export const getLLMConfig = () => {

ENABLED_AI360: !!process.env.AI360_API_KEY,
AI360_API_KEY: process.env.AI360_API_KEY,

ENABLED_SILICONCLOUD: !!process.env.SILICONCLOUD_API_KEY,
SILICONCLOUD_API_KEY: process.env.SILICONCLOUD_API_KEY,
},
});
};
Expand Down
4 changes: 4 additions & 0 deletions src/config/modelProviders/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import OpenAIProvider from './openai';
import OpenRouterProvider from './openrouter';
import PerplexityProvider from './perplexity';
import QwenProvider from './qwen';
import SiliconCloudProvider from './siliconcloud';
import StepfunProvider from './stepfun';
import TaichuProvider from './taichu';
import TogetherAIProvider from './togetherai';
Expand Down Expand Up @@ -45,6 +46,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
BaichuanProvider.chatModels,
TaichuProvider.chatModels,
Ai360Provider.chatModels,
SiliconCloudProvider.chatModels,
].flat();

export const DEFAULT_MODEL_PROVIDER_LIST = [
Expand All @@ -70,6 +72,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
BaichuanProvider,
TaichuProvider,
Ai360Provider,
SiliconCloudProvider,
];

export const filterEnabledModels = (provider: ModelProviderCard) => {
Expand Down Expand Up @@ -98,6 +101,7 @@ export { default as OpenAIProviderCard } from './openai';
export { default as OpenRouterProviderCard } from './openrouter';
export { default as PerplexityProviderCard } from './perplexity';
export { default as QwenProviderCard } from './qwen';
export { default as SiliconCloudProviderCard } from './siliconcloud';
export { default as StepfunProviderCard } from './stepfun';
export { default as TaichuProviderCard } from './taichu';
export { default as TogetherAIProviderCard } from './togetherai';
Expand Down
109 changes: 109 additions & 0 deletions src/config/modelProviders/siliconcloud.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
import { ModelProviderCard } from '@/types/llm';

// ref https://siliconflow.cn/zh-cn/models
// One row per hosted chat model: [model id, context window in tokens, enabled by default?]
type ModelRow = [id: string, tokens: number, enabled?: boolean];

const MODEL_ROWS: ModelRow[] = [
  ['Qwen/Qwen2-72B-Instruct', 32_768, true],
  ['Qwen/Qwen2-57B-A14B-Instruct', 32_768, true],
  ['Qwen/Qwen2-7B-Instruct', 32_768],
  ['Qwen/Qwen2-1.5B-Instruct', 32_768],
  ['Qwen/Qwen1.5-110B-Chat', 32_768],
  ['Qwen/Qwen1.5-32B-Chat', 32_768],
  ['Qwen/Qwen1.5-14B-Chat', 32_768],
  ['Qwen/Qwen1.5-7B-Chat', 32_768],
  ['deepseek-ai/DeepSeek-Coder-V2-Instruct', 32_768],
  ['deepseek-ai/DeepSeek-V2-Chat', 32_768, true],
  ['deepseek-ai/deepseek-llm-67b-chat', 4096],
  ['THUDM/glm-4-9b-chat', 32_768],
  ['THUDM/chatglm3-6b', 32_768],
  ['01-ai/Yi-1.5-34B-Chat-16K', 16_384, true],
  ['01-ai/Yi-1.5-9B-Chat-16K', 16_384],
  ['01-ai/Yi-1.5-6B-Chat', 4096],
  ['internlm/internlm2_5-7b-chat', 32_768],
  ['google/gemma-2-9b-it', 8192],
  ['google/gemma-2-27b-it', 8192],
  ['meta-llama/Meta-Llama-3.1-8B-Instruct', 32_768],
  ['meta-llama/Meta-Llama-3-70B-Instruct', 8192],
  ['mistralai/Mistral-7B-Instruct-v0.2', 32_768],
  ['mistralai/Mixtral-8x7B-Instruct-v0.1', 32_768],
];

/**
 * SiliconCloud (SiliconFlow) provider card, built from the model table above.
 * Rows without the enabled flag omit the `enabled` key entirely, matching the
 * shape of the other hand-written provider cards in this directory.
 */
const SiliconCloud: ModelProviderCard = {
  chatModels: MODEL_ROWS.map(([id, tokens, enabled]) =>
    enabled ? { enabled, id, tokens } : { id, tokens },
  ),
  // inexpensive model used when validating a user-supplied API key
  checkModel: 'Qwen/Qwen2-1.5B-Instruct',
  id: 'siliconcloud',
  // allow users to fetch the live model list from the provider
  modelList: { showModelFetcher: true },
  name: 'SiliconCloud',
};

export default SiliconCloud;
5 changes: 5 additions & 0 deletions src/const/settings/llm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ import {
OpenRouterProviderCard,
PerplexityProviderCard,
QwenProviderCard,
SiliconCloudProviderCard,
StepfunProviderCard,
TaichuProviderCard,
TogetherAIProviderCard,
Expand Down Expand Up @@ -94,6 +95,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
enabled: false,
enabledModels: filterEnabledModels(QwenProviderCard),
},
siliconcloud: {
enabled: false,
enabledModels: filterEnabledModels(SiliconCloudProviderCard),
},
stepfun: {
enabled: false,
enabledModels: filterEnabledModels(StepfunProviderCard),
Expand Down
7 changes: 7 additions & 0 deletions src/libs/agent-runtime/AgentRuntime.ts
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import { LobeOpenAI } from './openai';
import { LobeOpenRouterAI } from './openrouter';
import { LobePerplexityAI } from './perplexity';
import { LobeQwenAI } from './qwen';
import { LobeSiliconCloudAI } from './siliconcloud';
import { LobeStepfunAI } from './stepfun';
import { LobeTaichuAI } from './taichu';
import { LobeTogetherAI } from './togetherai';
Expand Down Expand Up @@ -122,6 +123,7 @@ class AgentRuntime {
openrouter: Partial<ClientOptions>;
perplexity: Partial<ClientOptions>;
qwen: Partial<ClientOptions>;
siliconcloud: Partial<ClientOptions>;
stepfun: Partial<ClientOptions>;
taichu: Partial<ClientOptions>;
togetherai: Partial<ClientOptions>;
Expand Down Expand Up @@ -247,6 +249,11 @@ class AgentRuntime {
runtimeModel = new LobeAi360AI(params.ai360 ?? {});
break
}

case ModelProvider.SiliconCloud: {
runtimeModel = new LobeSiliconCloudAI(params.siliconcloud ?? {});
break
}
}

return new AgentRuntime(runtimeModel);
Expand Down
10 changes: 10 additions & 0 deletions src/libs/agent-runtime/siliconcloud/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
import { ModelProvider } from '../types';
import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';

// SiliconCloud serves an OpenAI-compatible API, so the shared factory supplies
// the whole runtime; only provider-specific constants are configured here.
const SILICONCLOUD_BASE_URL = 'https://api.siliconflow.cn/v1';

export const LobeSiliconCloudAI = LobeOpenAICompatibleFactory({
  baseURL: SILICONCLOUD_BASE_URL,
  debug: {
    // opt-in request logging, toggled via environment flag
    chatCompletion: () => process.env.DEBUG_SILICONCLOUD_CHAT_COMPLETION === '1',
  },
  provider: ModelProvider.SiliconCloud,
});
1 change: 1 addition & 0 deletions src/libs/agent-runtime/types/type.ts
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ export enum ModelProvider {
OpenRouter = 'openrouter',
Perplexity = 'perplexity',
Qwen = 'qwen',
SiliconCloud = 'siliconcloud',
Stepfun = 'stepfun',
Taichu = 'taichu',
TogetherAI = 'togetherai',
Expand Down
3 changes: 2 additions & 1 deletion src/server/globalConfig/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ export const getServerGlobalConfig = () => {
ENABLED_BAICHUAN,
ENABLED_TAICHU,
ENABLED_AI360,
ENABLED_SILICONCLOUD,

ENABLED_AZURE_OPENAI,
AZURE_MODEL_LIST,
Expand Down Expand Up @@ -111,7 +112,7 @@ export const getServerGlobalConfig = () => {
},
perplexity: { enabled: ENABLED_PERPLEXITY },
qwen: { enabled: ENABLED_QWEN },

siliconcloud: { enabled: ENABLED_SILICONCLOUD },
stepfun: { enabled: ENABLED_STEPFUN },

taichu: { enabled: ENABLED_TAICHU },
Expand Down
1 change: 1 addition & 0 deletions src/types/user/settings/keyVaults.ts
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ export interface UserKeyVaults {
password?: string;
perplexity?: OpenAICompatibleKeyVault;
qwen?: OpenAICompatibleKeyVault;
siliconcloud?: OpenAICompatibleKeyVault;
stepfun?: OpenAICompatibleKeyVault;
taichu?: OpenAICompatibleKeyVault;
togetherai?: OpenAICompatibleKeyVault;
Expand Down