Skip to content

Commit a27ba0a

Browse files
Merge pull request #369 from edenai/add-remaining-provider-to-async-llm-chat
add llm__achat to all (but one) supported providers in llm__chat
2 parents b5e7706 + f2cd100 commit a27ba0a

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

41 files changed

+3978
-2524
lines changed

edenai_apis/apis/amazon/amazon_llm_api.py

Lines changed: 81 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -88,3 +88,84 @@ def llm__chat(
8888
**kwargs,
8989
)
9090
return response
91+
92+
async def llm__achat(
    self,
    messages: Optional[List] = None,
    model: Optional[str] = None,
    # Optional OpenAI params: see https://platform.openai.com/docs/api-reference/chat/create
    timeout: Optional[Union[float, str, httpx.Timeout]] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    n: Optional[int] = None,
    stream: Optional[bool] = None,
    stream_options: Optional[dict] = None,
    stop: Optional[str] = None,
    # NOTE(review): `any` here is the *builtin function*, not `typing.Any`.
    # It happens to be accepted by `Optional[...]` at runtime, but it is
    # wrong for type checkers — switch to `typing.Any` once `Any` is
    # confirmed to be imported at the top of this file.
    stop_sequences: Optional[any] = None,
    max_tokens: Optional[int] = None,
    presence_penalty: Optional[float] = None,
    frequency_penalty: Optional[float] = None,
    logit_bias: Optional[dict] = None,
    modalities: Optional[List[Literal["text", "audio", "image"]]] = None,
    audio: Optional[Dict] = None,
    # openai v1.0+ new params
    response_format: Optional[
        Union[dict, Type[BaseModel]]
    ] = None,  # Structured outputs
    seed: Optional[int] = None,
    tools: Optional[List] = None,
    tool_choice: Optional[Union[str, dict]] = None,
    logprobs: Optional[bool] = None,
    top_logprobs: Optional[int] = None,
    parallel_tool_calls: Optional[bool] = None,
    deployment_id=None,
    extra_headers: Optional[dict] = None,
    # soon to be deprecated params by OpenAI -> This should be replaced by tools
    functions: Optional[List] = None,
    function_call: Optional[str] = None,
    base_url: Optional[str] = None,
    api_version: Optional[str] = None,
    api_key: Optional[str] = None,
    model_list: Optional[list] = None,  # pass in a list of api_base,keys, etc.
    drop_invalid_params: bool = True,  # If true, all the invalid parameters will be ignored (dropped) before sending to the model
    user: str | None = None,
    # Optional parameters
    **kwargs,
) -> ChatDataClass:
    """Async chat completion.

    Forwards every parameter unchanged to ``self.llm_client.acompletion``
    (the async counterpart of the ``llm__chat`` method above) and returns
    the client's response as-is.

    Args:
        messages: Conversation messages. Defaults to a fresh empty list per
            call — the original signature used a mutable default (``[]``),
            which is shared across calls and is a well-known Python pitfall;
            ``None`` + normalization preserves the observable behavior
            (an empty list is still forwarded when the caller omits it).
        model: Model identifier understood by the underlying client.
        All remaining parameters mirror the OpenAI chat-completion API and
        are passed through verbatim; see the URL in the signature comment.

    Returns:
        ChatDataClass: whatever ``acompletion`` resolves to (returned
        unmodified).
    """
    # Normalize the sentinel so each call gets its own list, never a
    # module-lifetime shared one.
    if messages is None:
        messages = []
    response = await self.llm_client.acompletion(
        messages=messages,
        model=model,
        timeout=timeout,
        temperature=temperature,
        top_p=top_p,
        n=n,
        stream=stream,
        stream_options=stream_options,
        stop=stop,
        stop_sequences=stop_sequences,
        max_tokens=max_tokens,
        presence_penalty=presence_penalty,
        frequency_penalty=frequency_penalty,
        logit_bias=logit_bias,
        response_format=response_format,
        seed=seed,
        tools=tools,
        tool_choice=tool_choice,
        logprobs=logprobs,
        top_logprobs=top_logprobs,
        parallel_tool_calls=parallel_tool_calls,
        deployment_id=deployment_id,
        extra_headers=extra_headers,
        functions=functions,
        function_call=function_call,
        base_url=base_url,
        api_version=api_version,
        api_key=api_key,
        model_list=model_list,
        drop_invalid_params=drop_invalid_params,
        user=user,
        modalities=modalities,
        audio=audio,
        **kwargs,
    )
    return response

0 commit comments

Comments
 (0)