
Commit 4d436ad

feat: add Ollama and Claude adapters to LLM preset adapters
1 parent 644e4e2 commit 4d436ad

4 files changed (+180, −1 lines)

plugins/llm_preset_adapters/__init__.py

Lines changed: 5 additions & 0 deletions
@@ -4,16 +4,21 @@
 from llm_preset_adapters.gemini_adapter import GeminiAdapter, GeminiConfig
 from llm_preset_adapters.deepseek_adapter import DeepSeekAdapter, DeepSeekConfig
 from llm_preset_adapters.openai_adapter import OpenAIAdapter, OpenAIConfig
+from llm_preset_adapters.ollama_adapter import OllamaAdapter, OllamaConfig
+from llm_preset_adapters.claude_adapter import ClaudeAdapter, ClaudeConfig
 
 logger = get_logger("LLMPresetAdapters")
 class LLMPresetAdaptersPlugin(Plugin):
     def __init__(self):
         pass
 
+
     def on_load(self):
         self.llm_registry.register("openai", OpenAIAdapter, OpenAIConfig, LLMAbility.TextChat)
         self.llm_registry.register("deepseek", DeepSeekAdapter, DeepSeekConfig, LLMAbility.TextChat)
         self.llm_registry.register("gemini", GeminiAdapter, GeminiConfig, LLMAbility.TextChat)
+        self.llm_registry.register("ollama", OllamaAdapter, OllamaConfig, LLMAbility.TextChat)
+        self.llm_registry.register("claude", ClaudeAdapter, ClaudeConfig, LLMAbility.TextChat)
         logger.info("LLMPresetAdaptersPlugin loaded")
 
     def on_start(self):
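
Note: a quick way to smoke-test one of the newly registered adapters is to construct it directly, bypassing the registry (whose lookup API is not part of this diff). A minimal sketch, assuming a local Ollama server and faking the request object with a SimpleNamespace — the adapters only read the attributes set below, and the model name and sampling values are illustrative:

    from types import SimpleNamespace
    from llm_preset_adapters.ollama_adapter import OllamaAdapter, OllamaConfig

    adapter = OllamaAdapter(OllamaConfig(api_base="http://localhost:11434"))

    # Stand-in for LLMChatRequest: chat() only reads these attributes.
    req = SimpleNamespace(
        model="llama3",  # assumed: any locally pulled model
        messages=[SimpleNamespace(role="user", content="Hello!")],
        max_tokens=256,
        temperature=0.7,
        top_p=1.0,
        stream=False,
        stop=None,
    )

    resp = adapter.chat(req)
    # Assumes LLMChatResponse mirrors the OpenAI-style shape built by the adapter.
    print(resp.choices[0].message.content)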
plugins/llm_preset_adapters/claude_adapter.py

Lines changed: 96 additions & 0 deletions
@@ -0,0 +1,96 @@
+from pydantic import ConfigDict, BaseModel
+import requests
+from framework.llm.adapter import LLMBackendAdapter
+from framework.llm.format.request import LLMChatRequest
+from framework.llm.format.response import LLMChatResponse
+from framework.logger import get_logger
+
+class ClaudeConfig(BaseModel):
+    api_key: str
+    api_base: str = "https://api.anthropic.com/v1"
+    model_config = ConfigDict(frozen=True)
+
+def convert_messages_to_claude_prompt(messages) -> str:
+    """Convert a message list into Claude's legacy Human/Assistant prompt format."""
+    prompt = ""
+    for msg in messages:
+        if msg.role == "system":
+            # The prompt format has no dedicated system role, so emit it as the first Human turn
+            prompt += f"Human: {msg.content}\n\nAssistant: I understand. I'll follow these instructions.\n\n"
+        elif msg.role == "user":
+            prompt += f"Human: {msg.content}\n\n"
+        elif msg.role == "assistant":
+            prompt += f"Assistant: {msg.content}\n\n"
+    # Append a trailing "Assistant: " prefix to elicit the reply
+    prompt += "Assistant: "
+    return prompt
+
+class ClaudeAdapter(LLMBackendAdapter):
+    def __init__(self, config: ClaudeConfig):
+        self.config = config
+        self.logger = get_logger("ClaudeAdapter")
+
+    def chat(self, req: LLMChatRequest) -> LLMChatResponse:
+        api_url = f"{self.config.api_base}/messages"
+        headers = {
+            "x-api-key": self.config.api_key,
+            "anthropic-version": "2023-06-01",
+            "content-type": "application/json"
+        }
+
+        # Build the request payload
+        data = {
+            "model": req.model,
+            "messages": [
+                {
+                    "role": "user" if msg.role == "user" else "assistant",
+                    "content": msg.content
+                }
+                for msg in req.messages
+                if msg.role in ["user", "assistant"]  # skip system messages; the messages array only accepts user/assistant roles
+            ],
+            "max_tokens": req.max_tokens,
+            "temperature": req.temperature,
+            "top_p": req.top_p,
+            "stream": req.stream
+        }
+
+        # If there is a system message, prepend it to the first user message
+        system_messages = [msg for msg in req.messages if msg.role == "system"]
+        if system_messages:
+            if len(data["messages"]) > 0 and data["messages"][0]["role"] == "user":
+                data["messages"][0]["content"] = f"{system_messages[0].content}\n\n{data['messages'][0]['content']}"
+
+        # Remove None fields
+        data = {k: v for k, v in data.items() if v is not None}
+
+        response = requests.post(api_url, json=data, headers=headers)
+        try:
+            response.raise_for_status()
+            response_data = response.json()
+        except Exception as e:
+            self.logger.error(f"API Response: {response.text}")
+            raise e
+
+        # Convert the Claude response into the standard LLMChatResponse format
+        transformed_response = {
+            "id": response_data.get("id", ""),
+            "object": "chat.completion",
+            "created": response_data.get("created_at", 0),
+            "model": req.model,
+            "choices": [{
+                "index": 0,
+                "message": {
+                    "role": "assistant",
+                    "content": response_data["content"][0]["text"]
+                },
+                "finish_reason": response_data.get("stop_reason", "stop")
+            }],
+            "usage": {
+                "prompt_tokens": 0,  # usage from the Claude response is not mapped here
+                "completion_tokens": 0,
+                "total_tokens": 0
+            }
+        }
+
+        return LLMChatResponse(**transformed_response)
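
To make the system-message handling above concrete: for the conversation [system: "Be terse.", user: "Hi"], chat() folds the system text into the first user turn and would POST roughly this body to {api_base}/messages (model name and sampling values are illustrative):

    # Illustrative payload built by ClaudeAdapter.chat; the system message is
    # merged into the first user turn since the messages array only accepts
    # user/assistant roles.
    payload = {
        "model": "claude-3-haiku-20240307",  # assumed model name
        "messages": [
            {"role": "user", "content": "Be terse.\n\nHi"}
        ],
        "max_tokens": 1024,
        "temperature": 0.7,
        "top_p": 1.0,
        "stream": False,
    }

The Messages API also accepts a top-level "system" field, which would avoid the merge; this adapter keeps everything inside "messages" instead.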
plugins/llm_preset_adapters/ollama_adapter.py

Lines changed: 77 additions & 0 deletions
@@ -0,0 +1,77 @@
+from pydantic import ConfigDict, BaseModel
+import requests
+from framework.llm.adapter import LLMBackendAdapter
+from framework.llm.format.request import LLMChatRequest
+from framework.llm.format.response import LLMChatResponse
+from framework.logger import get_logger
+
+class OllamaConfig(BaseModel):
+    api_base: str = "http://localhost:11434"
+    model_config = ConfigDict(frozen=True)
+
+class OllamaAdapter(LLMBackendAdapter):
+    def __init__(self, config: OllamaConfig):
+        self.config = config
+        self.logger = get_logger("OllamaAdapter")
+
+    def chat(self, req: LLMChatRequest) -> LLMChatResponse:
+        api_url = f"{self.config.api_base}/api/chat"
+        headers = {
+            "Content-Type": "application/json"
+        }
+
+        # Convert messages to Ollama's format
+        messages = []
+        for msg in req.messages:
+            messages.append({
+                "role": msg.role,
+                "content": msg.content
+            })
+
+        data = {
+            "model": req.model,
+            "messages": messages,
+            "stream": req.stream,
+            "options": {
+                "temperature": req.temperature,
+                "top_p": req.top_p,
+                "num_predict": req.max_tokens,
+                "stop": req.stop
+            }
+        }
+
+        # Remove None fields
+        data = {k: v for k, v in data.items() if v is not None}
+        if "options" in data:
+            data["options"] = {k: v for k, v in data["options"].items() if v is not None}
+
+        response = requests.post(api_url, json=data, headers=headers)
+        try:
+            response.raise_for_status()
+            response_data = response.json()
+        except Exception as e:
+            self.logger.error(f"API Response: {response.text}")
+            raise e
+
+        # Convert the Ollama response into the standard LLMChatResponse format
+        transformed_response = {
+            "id": "ollama-" + req.model,
+            "object": "chat.completion",
+            "created": 0,
+            "model": req.model,
+            "choices": [{
+                "index": 0,
+                "message": {
+                    "role": "assistant",
+                    "content": response_data["message"]["content"]
+                },
+                "finish_reason": "stop"
+            }],
+            "usage": {
+                "prompt_tokens": 0,
+                "completion_tokens": 0,
+                "total_tokens": 0
+            }
+        }
+
+        return LLMChatResponse(**transformed_response)
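
The adapter is a thin wrapper over Ollama's /api/chat endpoint. The same call made by hand looks roughly like this (assumes a local Ollama server with a pulled model; note that max_tokens maps to Ollama's num_predict, and the sampling knobs live under "options"):

    import requests

    # Hand-rolled equivalent of OllamaAdapter.chat (non-streaming).
    resp = requests.post(
        "http://localhost:11434/api/chat",
        json={
            "model": "llama3",  # assumed: any locally pulled model works
            "messages": [{"role": "user", "content": "Hello!"}],
            "stream": False,
            "options": {"temperature": 0.7, "num_predict": 128},
        },
    )
    resp.raise_for_status()
    print(resp.json()["message"]["content"])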

plugins/llm_preset_adapters/setup.py

Lines changed: 2 additions & 1 deletion
@@ -9,7 +9,8 @@
     install_requires=[
         "openai",
         "google-generativeai",
-        "anthropic"
+        "anthropic",
+        "requests"
     ],
     entry_points={
        "chatgpt_mirai.plugins": [
