
Commit 84694a5

Merge pull request #362 from edenai/provider/iointelligence

New provider iointelligence

2 parents 8d260bb + 649a682

File tree: 8 files changed, +7388 −0 lines changed
Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
{
    "api_key": ""
}
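
This first file is the provider's API-key settings template: a single empty "api_key" field that `load_provider` reads at runtime (see the `__init__` of `IointelligenceApi` below). As a minimal sketch, a caller could also pass the key directly instead of relying on the settings file, assuming the constructor contract shown in this commit; the key string here is a placeholder, not a real credential:

from edenai_apis.apis.iointelligence import IointelligenceApi

# Override the on-disk settings by passing the key explicitly;
# "YOUR_IO_INTELLIGENCE_KEY" is a placeholder value.
api = IointelligenceApi(api_keys={"api_key": "YOUR_IO_INTELLIGENCE_KEY"})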

edenai_apis/apis/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -69,5 +69,6 @@
 from .groq import GroqApi
 from .picsart import PicsartApi
 from .minimax import MinimaxApi
+from .iointelligence import IointelligenceApi

 # THIS NEEDS TO BE DONE AUTOMATICALLY
edenai_apis/apis/iointelligence/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
from .iointelligence_api import IointelligenceApi
Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
from edenai_apis.utils.exception import (
    ProviderErrorLists,
)

# NOTE: error messages should be regex patterns
ERRORS: ProviderErrorLists = {}
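
The error map ships empty. Once real provider failures are catalogued, each entry would map an Eden AI exception class to a list of regex patterns matched against raw provider error messages, per the NOTE above. A hypothetical populated entry might look like the following (the class name `ProviderInvalidInputError` and the pattern are illustrative assumptions, not part of this commit):

from edenai_apis.utils.exception import (
    ProviderErrorLists,
    ProviderInvalidInputError,  # assumed exception class; illustrative only
)

# NOTE: error messages should be regex patterns
ERRORS: ProviderErrorLists = {
    ProviderInvalidInputError: [
        r"model .* does not exist",  # hypothetical pattern for an unknown-model error
    ],
}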
Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
{
    "llm": {
        "chat": {
            "version": "v1"
        }
    },
    "text": {
        "embeddings": {
            "version": "v1"
        }
    }
}
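
This manifest declares the two subfeatures the provider implements, mirroring the `llm__chat` and `text__embeddings` methods below, both pinned at version v1. A minimal stdlib sketch of enumerating the declared feature/subfeature pairs (the file path assumes the conventional per-provider layout and may differ in the repo):

import json

# Path is an assumption based on the usual per-provider layout.
with open("edenai_apis/apis/iointelligence/info.json") as f:
    info = json.load(f)

for feature, subfeatures in info.items():
    for subfeature, meta in subfeatures.items():
        print(f"{feature}.{subfeature} -> {meta['version']}")
# Expected output:
# llm.chat -> v1
# text.embeddings -> v1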
edenai_apis/apis/iointelligence/iointelligence_api.py

Lines changed: 159 additions & 0 deletions
@@ -0,0 +1,159 @@
import json
from typing import Any, Dict, List, Literal, Optional, Type, Union

import requests
import httpx
from openai import BaseModel, OpenAI

from edenai_apis.features import ProviderInterface, LlmInterface, TextInterface
from edenai_apis.features.text.embeddings.embeddings_dataclass import (
    EmbeddingsDataClass,
    EmbeddingDataClass,
)
from edenai_apis.loaders.data_loader import ProviderDataEnum
from edenai_apis.loaders.loaders import load_provider
from edenai_apis.features.llm.chat.chat_dataclass import (
    ChatDataClass,
    StreamChat as StreamChatCompletion,
)
from edenai_apis.utils.exception import ProviderException
from edenai_apis.llmengine.types.response_types import ResponseModel
from edenai_apis.utils.types import ResponseType


class IointelligenceApi(ProviderInterface, LlmInterface, TextInterface):
    provider_name = "iointelligence"

    def __init__(self, api_keys: Dict = {}) -> None:
        self.api_settings = load_provider(
            ProviderDataEnum.KEY, self.provider_name, api_keys=api_keys
        )
        self.api_key = self.api_settings["api_key"]
        self.base_url = "https://api.intelligence.io.solutions/api/v1/"
        # io.net exposes an OpenAI-compatible endpoint, so the OpenAI SDK
        # client is reused with a custom base_url.
        self.client = OpenAI(
            api_key=self.api_key,
            base_url=self.base_url,
        )

    def llm__chat(
        self,
        messages: List = [],
        model: Optional[str] = None,
        # Optional OpenAI params: see https://platform.openai.com/docs/api-reference/chat/create
        timeout: Optional[Union[float, str, httpx.Timeout]] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        n: Optional[int] = None,
        stream: Optional[bool] = None,
        stream_options: Optional[dict] = None,
        stop: Optional[str] = None,
        stop_sequences: Optional[Any] = None,
        max_tokens: Optional[int] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        logit_bias: Optional[dict] = None,
        modalities: Optional[List[Literal["text", "audio", "image"]]] = None,
        audio: Optional[Dict] = None,
        # openai v1.0+ new params
        response_format: Optional[
            Union[dict, Type[BaseModel]]
        ] = None,  # Structured outputs
        seed: Optional[int] = None,
        tools: Optional[List] = None,
        tool_choice: Optional[Union[str, dict]] = None,
        logprobs: Optional[bool] = None,
        top_logprobs: Optional[int] = None,
        parallel_tool_calls: Optional[bool] = None,
        deployment_id=None,
        extra_headers: Optional[dict] = None,
        # soon-to-be-deprecated params by OpenAI -> these should be replaced by tools
        functions: Optional[List] = None,
        function_call: Optional[str] = None,
        base_url: Optional[str] = None,
        api_version: Optional[str] = None,
        api_key: Optional[str] = None,
        model_list: Optional[list] = None,  # pass in a list of api_base, keys, etc.
        drop_invalid_params: bool = True,  # if True, invalid parameters are dropped before sending to the model
        user: str | None = None,
        # Optional parameters
        **kwargs,
    ) -> ChatDataClass:
        # Forward only the parameters that were explicitly set.
        completion_params = {"messages": messages, "model": model}
        if response_format is not None:
            completion_params["response_format"] = response_format
        if max_tokens is not None:
            completion_params["max_tokens"] = max_tokens
        if temperature is not None:
            completion_params["temperature"] = temperature
        if tools is not None:
            completion_params["tools"] = tools
        if top_p is not None:
            completion_params["top_p"] = top_p
        if stream is not None:
            completion_params["stream"] = stream
        if frequency_penalty is not None:
            completion_params["frequency_penalty"] = frequency_penalty
        if logprobs is not None:
            completion_params["logprobs"] = logprobs
        if top_logprobs is not None:
            completion_params["top_logprobs"] = top_logprobs
        if n is not None:
            completion_params["n"] = n
        if presence_penalty is not None:
            completion_params["presence_penalty"] = presence_penalty
        if seed is not None:
            completion_params["seed"] = seed
        if stop is not None:
            completion_params["stop"] = stop
        if tool_choice is not None:
            completion_params["tool_choice"] = tool_choice
        if parallel_tool_calls is not None:
            completion_params["parallel_tool_calls"] = parallel_tool_calls
        if user is not None:
            completion_params["user"] = user
        try:
            response = self.client.chat.completions.create(**completion_params)
            if stream:

                def generate_chunks():
                    for chunk in response:
                        if chunk is not None:
                            yield chunk.to_dict()
                            # yield ModelResponseStream.model(data)

                return StreamChatCompletion(stream=generate_chunks())
            else:
                response = response.to_dict()
                response_model = ResponseModel.model_validate(response)
        except Exception as exc:
            raise ProviderException(str(exc)) from exc

        return response_model

    def text__embeddings(
        self, texts: List[str], model: Optional[str] = None, **kwargs
    ) -> ResponseType[EmbeddingsDataClass]:
        url = f"{self.base_url}embeddings"
        payload = {"model": model, "input": texts}
        response = requests.post(
            url, json=payload, headers={"Authorization": f"Bearer {self.api_key}"}
        )
        try:
            original_response = response.json()
        except json.JSONDecodeError as exc:
            raise ProviderException(message="Internal Server Error", code=500) from exc
        if response.status_code != 200:
            raise ProviderException(
                message=original_response["detail"], code=response.status_code
            )
        items = []
        embeddings = original_response.get("data", [{}])
        for embedding in embeddings:
            items.append(EmbeddingDataClass(embedding=embedding["embedding"]))
        standardized_response = EmbeddingsDataClass(items=items)
        return ResponseType[EmbeddingsDataClass](
            original_response=original_response,
            standardized_response=standardized_response,
            usage=original_response.get("usage"),
            # cost=response.cost,
        )
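
For reference, a quick usage sketch of both new endpoints, assuming a valid key is configured and using placeholder model ids (io.net's actual model catalog is not part of this commit):

from edenai_apis.apis.iointelligence import IointelligenceApi

api = IointelligenceApi()

# Chat completion through the OpenAI-compatible client;
# the model id below is a placeholder, not a confirmed catalog entry.
chat_response = api.llm__chat(
    messages=[{"role": "user", "content": "Say hello"}],
    model="meta-llama/Llama-3.3-70B-Instruct",
    max_tokens=64,
)
print(chat_response)

# Embeddings via a raw POST to {base_url}embeddings;
# again, the model id is a placeholder.
embeddings_response = api.text__embeddings(
    texts=["hello world"],
    model="BAAI/bge-multilingual-gemma2",
)
print(len(embeddings_response.standardized_response.items))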
