Skip to content

Commit f5a1cd1

Browse files
committed
🐛 fix: implement the chat
1 parent a6f4791 commit f5a1cd1

File tree

2 files changed

+10
-49
lines changed

src/libs/agent-runtime/huggingface/index.ts

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -7,10 +7,10 @@ import {
77
ChatStreamPayload,
88
LobeRuntimeAI,
99
} from '@/libs/agent-runtime';
10-
import { OpenAIStream } from '@/libs/agent-runtime/utils/streams';
1110

1211
import { debugStream } from '../utils/debugStream';
1312
import { StreamingResponse } from '../utils/response';
13+
import { OpenAIStream } from '../utils/streams';
1414
import { HuggingfaceResultToStream } from '../utils/streams/huggingface';
1515

1616
export class LobeHuggingFaceAI implements LobeRuntimeAI {
@@ -20,22 +20,22 @@ export class LobeHuggingFaceAI implements LobeRuntimeAI {
2020
constructor({ apiKey, baseURL }: { apiKey?: string; baseURL?: string } = {}) {
2121
if (!apiKey) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);
2222

23-
this.client = new HfInference(apiKey);
24-
25-
if (baseURL) {
26-
this.client.endpoint(baseURL);
27-
}
23+
this.client =
24+
// baseURL
25+
// ? (new HfInference(apiKey).endpoint(baseURL) as HfInference)
26+
new HfInference(apiKey);
27+
this.baseURL = baseURL;
2828
}
2929

3030
async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
3131
try {
3232
const hfRes = this.client.chatCompletionStream({
33+
endpointUrl: this.baseURL,
3334
messages: payload.messages,
3435
model: payload.model,
35-
3636
stream: true,
37-
temperature: payload.temperature,
38-
top_p: payload.top_p,
37+
// temperature: payload.temperature,
38+
// top_p: payload.top_p,
3939
});
4040

4141
const rawStream = HuggingfaceResultToStream(hfRes);
Lines changed: 1 addition & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -1,48 +1,9 @@
11
import { ChatCompletionStreamOutput } from '@huggingface/tasks';
22
import { readableFromAsyncIterable } from 'ai';
33

4-
import { ChatStreamCallbacks } from '@/libs/agent-runtime';
5-
import { nanoid } from '@/utils/uuid';
6-
7-
import { ChatResp } from '../../wenxin/type';
8-
import {
9-
StreamProtocolChunk,
10-
StreamStack,
11-
chatStreamable,
12-
createCallbacksTransformer,
13-
createSSEProtocolTransformer,
14-
} from './protocol';
15-
16-
const transformHuggingfaceStream = (chunk: ChatResp): StreamProtocolChunk => {
17-
console.log(chunk);
18-
const finished = chunk.is_end;
19-
if (finished) {
20-
return { data: chunk.finish_reason || 'stop', id: chunk.id, type: 'stop' };
21-
}
22-
23-
if (chunk.result) {
24-
return { data: chunk.result, id: chunk.id, type: 'text' };
25-
}
26-
27-
return {
28-
data: chunk,
29-
id: chunk.id,
30-
type: 'data',
31-
};
32-
};
4+
import { chatStreamable } from './protocol';
335

346
export const HuggingfaceResultToStream = (stream: AsyncIterable<ChatCompletionStreamOutput>) => {
357
// make the response to the streamable format
368
return readableFromAsyncIterable(chatStreamable(stream));
379
};
38-
39-
export const HuggingFaceStream = (
40-
rawStream: ReadableStream<ChatResp>,
41-
callbacks?: ChatStreamCallbacks,
42-
) => {
43-
const streamStack: StreamStack = { id: 'chat_' + nanoid() };
44-
45-
return rawStream
46-
.pipeThrough(createSSEProtocolTransformer(transformHuggingfaceStream, streamStack))
47-
.pipeThrough(createCallbacksTransformer(callbacks));
48-
};

0 commit comments

Comments (0)