
Commit 5842a18

✨ feat: add v0 (Vercel) provider support (#8235)
* ✨ feat: add v0 (Vercel) provider support
* ♻️ refactor: rebrand to `v0`
* 🔨 chore: update Dockerfile ENV
* 🔨 chore: fix desc
* 💄 style: support model fetch
* 💄 style: fix reasoning tag
* 🐛 fix: fix build error
* 💄 style: better branding
1 parent 61c2c3c commit 5842a18

14 files changed: +132 additions, -0 deletions


Dockerfile

Lines changed: 2 additions & 0 deletions
@@ -226,6 +226,8 @@ ENV \
     TOGETHERAI_API_KEY="" TOGETHERAI_MODEL_LIST="" \
     # Upstage
     UPSTAGE_API_KEY="" UPSTAGE_MODEL_LIST="" \
+    # v0 (Vercel)
+    V0_API_KEY="" V0_MODEL_LIST="" \
     # vLLM
     VLLM_API_KEY="" VLLM_MODEL_LIST="" VLLM_PROXY_URL="" \
     # Wenxin

Dockerfile.database

Lines changed: 2 additions & 0 deletions
@@ -270,6 +270,8 @@ ENV \
     TOGETHERAI_API_KEY="" TOGETHERAI_MODEL_LIST="" \
     # Upstage
     UPSTAGE_API_KEY="" UPSTAGE_MODEL_LIST="" \
+    # v0 (Vercel)
+    V0_API_KEY="" V0_MODEL_LIST="" \
     # vLLM
     VLLM_API_KEY="" VLLM_MODEL_LIST="" VLLM_PROXY_URL="" \
     # Wenxin

Dockerfile.pglite

Lines changed: 2 additions & 0 deletions
@@ -224,6 +224,8 @@ ENV \
     TOGETHERAI_API_KEY="" TOGETHERAI_MODEL_LIST="" \
     # Upstage
     UPSTAGE_API_KEY="" UPSTAGE_MODEL_LIST="" \
+    # v0 (Vercel)
+    V0_API_KEY="" V0_MODEL_LIST="" \
     # vLLM
     VLLM_API_KEY="" VLLM_MODEL_LIST="" VLLM_PROXY_URL="" \
     # Wenxin
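
All three images declare the new variables with empty-string defaults, matching the surrounding providers, so the v0 provider stays inert in a stock container and only activates when a key is injected at deploy time. V0_MODEL_LIST presumably follows the same comma-separated override convention as the other *_MODEL_LIST variables; the sketch below is a simplified, hypothetical parser meant only to make that convention concrete, not the project's actual utility.

// Hypothetical, simplified parser for a comma-separated V0_MODEL_LIST value.
// The real project has its own model-list parsing; this only illustrates why
// an empty-string default means "no override".
const parseModelList = (raw: string | undefined): string[] =>
  (raw ?? '')
    .split(',')
    .map((id) => id.trim())
    .filter(Boolean);

parseModelList(process.env.V0_MODEL_LIST); // [] with the Docker default of ""
parseModelList('v0-1.5-lg,v0-1.5-md');     // ['v0-1.5-lg', 'v0-1.5-md']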

src/app/[variants]/(main)/settings/llm/ProviderList/providers.tsx

Lines changed: 2 additions & 0 deletions
@@ -35,6 +35,7 @@ import {
   TaichuProviderCard,
   TogetherAIProviderCard,
   UpstageProviderCard,
+  V0ProviderCard,
   VLLMProviderCard,
   WenxinProviderCard,
   XAIProviderCard,
@@ -90,6 +91,7 @@ export const useProviderList = (): ProviderItem[] => {
     SambaNovaProviderCard,
     Search1APIProviderCard,
     CohereProviderCard,
+    V0ProviderCard,
     QiniuProviderCard,
     QwenProviderCard,
     WenxinProviderCard,

src/config/aiModels/index.ts

Lines changed: 3 additions & 0 deletions
@@ -45,6 +45,7 @@ import { default as taichu } from './taichu';
 import { default as tencentcloud } from './tencentcloud';
 import { default as togetherai } from './togetherai';
 import { default as upstage } from './upstage';
+import { default as v0 } from './v0';
 import { default as vertexai } from './vertexai';
 import { default as vllm } from './vllm';
 import { default as volcengine } from './volcengine';
@@ -119,6 +120,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
   tencentcloud,
   togetherai,
   upstage,
+  v0,
   vertexai,
   vllm,
   volcengine,
@@ -174,6 +176,7 @@ export { default as taichu } from './taichu';
 export { default as tencentcloud } from './tencentcloud';
 export { default as togetherai } from './togetherai';
 export { default as upstage } from './upstage';
+export { default as v0 } from './v0';
 export { default as vertexai } from './vertexai';
 export { default as vllm } from './vllm';
 export { default as volcengine } from './volcengine';
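
The new './v0' module is registered in the same three places as every other provider: the import block, the buildDefaultModelList aggregation, and the re-export list. As a rough mental model (a hypothetical sketch, not the project's actual buildDefaultModelList), the aggregation flattens each provider's model array and stamps every entry with its provider id:

// Illustrative only: an aggregation in the spirit of buildDefaultModelList.
type ModelCard = { id: string };

const buildList = (providers: Record<string, ModelCard[]>) =>
  Object.entries(providers).flatMap(([providerId, models]) =>
    models.map((model) => ({ ...model, providerId })),
  );

// After this commit the v0 cards join the flattened default list.
buildList({ upstage: [], v0: [{ id: 'v0-1.5-md' }] });
// => [{ id: 'v0-1.5-md', providerId: 'v0' }]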

src/config/aiModels/v0.ts

Lines changed: 63 additions & 0 deletions
@@ -0,0 +1,63 @@
+import { AIChatModelCard } from '@/types/aiModel';
+
+const v0ChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    contextWindowTokens: 512_000,
+    description:
+      'v0-1.5-lg 模型适用于高级思考或推理任务',
+    displayName: 'v0-1.5-lg',
+    enabled: true,
+    id: 'v0-1.5-lg',
+    maxOutput: 32_000,
+    pricing: {
+      input: 15,
+      output: 75,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    contextWindowTokens: 128_000,
+    description:
+      'v0-1.5-md 模型适用于日常任务和用户界面(UI)生成',
+    displayName: 'v0-1.5-md',
+    enabled: true,
+    id: 'v0-1.5-md',
+    maxOutput: 32_000,
+    pricing: {
+      input: 3,
+      output: 15,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 128_000,
+    description:
+      'v0-1.0-md 模型是通过 v0 API 提供服务的旧版模型',
+    displayName: 'v0-1.0-md',
+    id: 'v0-1.0-md',
+    maxOutput: 32_000,
+    pricing: {
+      input: 3,
+      output: 15,
+    },
+    type: 'chat',
+  },
+];
+
+export const allModels = [...v0ChatModels];
+
+export default allModels;
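
The three cards cover the models served through the v0 API: v0-1.5-lg and v0-1.5-md are enabled by default, while the legacy v0-1.0-md is registered but left disabled. Assuming the pricing fields are USD per million tokens, as the other cards in this directory appear to use, a back-of-the-envelope cost check looks like this (hypothetical helper, shown only to make the fields concrete):

// Rough cost estimate, assuming pricing values are USD per 1M tokens.
interface Pricing { input: number; output: number }

const estimateCostUSD = (p: Pricing, inputTokens: number, outputTokens: number) =>
  (inputTokens / 1_000_000) * p.input + (outputTokens / 1_000_000) * p.output;

estimateCostUSD({ input: 15, output: 75 }, 10_000, 2_000); // 0.30 for v0-1.5-lg
estimateCostUSD({ input: 3, output: 15 }, 10_000, 2_000);  // 0.06 for v0-1.5-md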

src/config/llm.ts

Lines changed: 6 additions & 0 deletions
@@ -165,6 +165,9 @@ export const getLLMConfig = () => {
 
       ENABLED_MODELSCOPE: z.boolean(),
       MODELSCOPE_API_KEY: z.string().optional(),
+
+      ENABLED_V0: z.boolean(),
+      V0_API_KEY: z.string().optional(),
     },
     runtimeEnv: {
       API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -328,6 +331,9 @@ export const getLLMConfig = () => {
 
       ENABLED_MODELSCOPE: !!process.env.MODELSCOPE_API_KEY,
       MODELSCOPE_API_KEY: process.env.MODELSCOPE_API_KEY,
+
+      ENABLED_V0: !!process.env.V0_API_KEY,
+      V0_API_KEY: process.env.V0_API_KEY,
     },
   });
 };
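
ENABLED_V0 is derived from whether V0_API_KEY is present, which is why the empty-string defaults baked into the Docker images are enough to keep the provider off. A minimal sketch of the coercion (the key value below is a made-up placeholder):

// ENABLED_V0 is a truthiness check on the key: '' and undefined disable the
// provider, any non-empty string enables it.
const enabledV0 = (key: string | undefined) => !!key;

enabledV0('');              // false: the Docker default
enabledV0(undefined);       // false: variable not set at all
enabledV0('my-v0-api-key'); // true: placeholder value, not a real key format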

src/config/modelProviders/index.ts

Lines changed: 4 additions & 0 deletions
@@ -45,6 +45,7 @@ import TaichuProvider from './taichu';
 import TencentcloudProvider from './tencentcloud';
 import TogetherAIProvider from './togetherai';
 import UpstageProvider from './upstage';
+import V0Provider from './v0';
 import VertexAIProvider from './vertexai';
 import VLLMProvider from './vllm';
 import VolcengineProvider from './volcengine';
@@ -83,6 +84,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   JinaProvider.chatModels,
   SambaNovaProvider.chatModels,
   CohereProvider.chatModels,
+  V0Provider.chatModels,
   ZeroOneProvider.chatModels,
   StepfunProvider.chatModels,
   NovitaProvider.chatModels,
@@ -139,6 +141,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   JinaProvider,
   SambaNovaProvider,
   CohereProvider,
+  V0Provider,
   QwenProvider,
   WenxinProvider,
   TencentcloudProvider,
@@ -218,6 +221,7 @@ export { default as TaichuProviderCard } from './taichu';
 export { default as TencentCloudProviderCard } from './tencentcloud';
 export { default as TogetherAIProviderCard } from './togetherai';
 export { default as UpstageProviderCard } from './upstage';
+export { default as V0ProviderCard } from './v0';
 export { default as VertexAIProviderCard } from './vertexai';
 export { default as VLLMProviderCard } from './vllm';
 export { default as VolcengineProviderCard } from './volcengine';

src/config/modelProviders/v0.ts

Lines changed: 17 additions & 0 deletions
@@ -0,0 +1,17 @@
+import { ModelProviderCard } from '@/types/llm';
+
+const V0: ModelProviderCard = {
+  chatModels: [],
+  checkModel: 'v0-1.5-md',
+  description:
+    'v0 是一个配对编程助手,你只需用自然语言描述想法,它就能为你的项目生成代码和用户界面(UI)',
+  id: 'v0',
+  modelsUrl: 'https://vercel.com/docs/v0/api#models',
+  name: 'Vercel (v0)',
+  settings: {
+    sdkType: 'openai',
+  },
+  url: 'https://v0.dev',
+};
+
+export default V0;
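
The provider card sets sdkType: 'openai', meaning v0's chat endpoint is treated as OpenAI-compatible, with v0-1.5-md serving as the connectivity-check model. A minimal sketch of what that compatibility implies, using the stock openai client; the base URL is the one described in Vercel's v0 docs and is an assumption here, not something this commit defines:

// Minimal sketch: an OpenAI-compatible client pointed at v0.
// Assumptions: the https://api.v0.dev/v1 base URL (from Vercel's docs) and a
// V0_API_KEY present in the environment.
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: process.env.V0_API_KEY,
  baseURL: 'https://api.v0.dev/v1',
});

const main = async () => {
  const completion = await client.chat.completions.create({
    messages: [{ content: 'Scaffold a pricing page in React', role: 'user' }],
    model: 'v0-1.5-md', // the same model the card uses as checkModel
  });
  console.log(completion.choices[0]?.message.content);
};

main();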

src/libs/model-runtime/runtimeMap.ts

Lines changed: 2 additions & 0 deletions
@@ -43,6 +43,7 @@ import { LobeTaichuAI } from './taichu';
 import { LobeTencentCloudAI } from './tencentcloud';
 import { LobeTogetherAI } from './togetherai';
 import { LobeUpstageAI } from './upstage';
+import { LobeV0AI } from './v0';
 import { LobeVLLMAI } from './vllm';
 import { LobeVolcengineAI } from './volcengine';
 import { LobeWenxinAI } from './wenxin';
@@ -97,6 +98,7 @@ export const providerRuntimeMap = {
   tencentcloud: LobeTencentCloudAI,
   togetherai: LobeTogetherAI,
   upstage: LobeUpstageAI,
+  v0: LobeV0AI,
   vllm: LobeVLLMAI,
   volcengine: LobeVolcengineAI,
   wenxin: LobeWenxinAI,
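
With v0: LobeV0AI registered in providerRuntimeMap, requests tagged with the 'v0' provider id resolve to the new runtime just like any other provider. The lookup itself is a plain id-to-class map; the sketch below is a generic illustration of that pattern, not the project's actual routing code:

// Generic illustration of a provider-id -> runtime lookup. Hypothetical types;
// the real map uses the project's own runtime classes and options.
type RuntimeCtor = new (options: { apiKey?: string }) => unknown;

const resolveRuntime = (map: Record<string, RuntimeCtor>, providerId: string) => {
  const Runtime = map[providerId];
  if (!Runtime) throw new Error(`Unknown provider: ${providerId}`);
  return new Runtime({ apiKey: process.env.V0_API_KEY });
};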
