Skip to content

Commit 64823c1

Browse files
Update the response class
1 parent 8fb9db6 commit 64823c1

File tree

2 files changed

+147
-47
lines changed

2 files changed

+147
-47
lines changed

edenai_apis/features/llm/chat/chat_dataclass.py

Lines changed: 96 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -2,23 +2,32 @@
22
from enum import Enum
33
from pydantic import BaseModel, Field, model_validator
44

5+
56
class ChatRole(str, Enum):
    """Author roles recognised in a chat conversation."""

    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"
    FUNCTION = "function"
    TOOL = "tool"

13+
1214
class ChatMessage(BaseModel):
    """A single message exchanged during a chat completion."""

    # Who authored the message (system / user / assistant / function / tool).
    role: ChatRole = Field(..., description="The role of the message author")
    content: Optional[str] = Field(None, description="The content of the message")
    name: Optional[str] = Field(
        None, description="The name of the author of this message"
    )

    # Set when the assistant responds with a legacy function call.
    function_call: Optional[Dict[str, Any]] = Field(
        None, description="The function call information"
    )

    # Set when the assistant responds with one or more tool invocations.
    tool_calls: Optional[List[Dict[str, Any]]] = Field(
        None, description="The tool call information"
    )
2231

2332
class ChatCompletionModel(str, Enum):
2433
GPT_4 = "gpt-4"
@@ -28,17 +37,27 @@ class ChatCompletionModel(str, Enum):
2837
GPT_3_5_TURBO = "gpt-3.5-turbo"
2938
GPT_3_5_TURBO_16K = "gpt-3.5-turbo-16k"
3039

40+
3141
class FunctionDefinition(BaseModel):
    """Schema describing one callable function exposed to the model."""

    name: str = Field(..., description="The name of the function to be called")
    description: Optional[str] = Field(
        None, description="A description of what the function does"
    )
    # The argument schema is a raw JSON Schema object, kept as a plain dict.
    parameters: Dict[str, Any] = Field(
        ..., description="The parameters the function accepts, in JSON Schema format"
    )
49+
3550

3651
class ToolDefinition(BaseModel):
    """A tool offered to the model; only function tools exist today."""

    type: Literal["function"] = Field("function", description="The type of tool")
    function: FunctionDefinition = Field(..., description="The function definition")
3954

55+
4056
class ResponseFormat(BaseModel):
    """Requested output format: plain text or a JSON object."""

    type: Literal["text", "json_object"] = Field(
        "text", description="The format of the response"
    )
60+
4261

4362
class ChatCompletionRequest(BaseModel):
4463
model: Union[ChatCompletionModel, str] = Field(
@@ -57,95 +76,138 @@ class ChatCompletionRequest(BaseModel):
5776
None, description="Controls how the model responds to function calls"
5877
)
5978
temperature: Optional[float] = Field(
60-
1.0,
61-
description="What sampling temperature to use, between 0 and 2",
62-
ge=0,
63-
le=2
79+
1.0, description="What sampling temperature to use, between 0 and 2", ge=0, le=2
6480
)
6581
top_p: Optional[float] = Field(
6682
1.0,
6783
description="An alternative to sampling with temperature, called nucleus sampling",
6884
ge=0,
69-
le=1
85+
le=1,
7086
)
7187
n: Optional[int] = Field(
72-
1, description="How many chat completion choices to generate for each input message"
88+
1,
89+
description="How many chat completion choices to generate for each input message",
7390
)
7491
stream: Optional[bool] = Field(
7592
False, description="If set, partial message deltas will be sent"
7693
)
7794
stop: Optional[Union[str, List[str]]] = Field(
78-
None, description="Up to 4 sequences where the API will stop generating further tokens"
95+
None,
96+
description="Up to 4 sequences where the API will stop generating further tokens",
7997
)
8098
max_tokens: Optional[int] = Field(
81-
None, description="The maximum number of tokens to generate in the chat completion"
99+
None,
100+
description="The maximum number of tokens to generate in the chat completion",
82101
)
83102
presence_penalty: Optional[float] = Field(
84103
0,
85104
description="Number between -2.0 and 2.0 to penalize tokens based on their presence so far",
86105
ge=-2.0,
87-
le=2.0
106+
le=2.0,
88107
)
89108
frequency_penalty: Optional[float] = Field(
90109
0,
91110
description="Number between -2.0 and 2.0 to penalize tokens based on their frequency so far",
92111
ge=-2.0,
93-
le=2.0
112+
le=2.0,
94113
)
95114
logit_bias: Optional[Dict[str, float]] = Field(
96-
None, description="Modify the likelihood of specified tokens appearing in the completion"
115+
None,
116+
description="Modify the likelihood of specified tokens appearing in the completion",
97117
)
98118
user: Optional[str] = Field(
99119
None, description="A unique identifier representing your end-user"
100120
)
101121
response_format: Optional[ResponseFormat] = Field(
102122
None, description="An object specifying the format that the model must output"
103123
)
104-
seed: Optional[int] = Field(
105-
None, description="A seed for deterministic sampling"
106-
)
107-
124+
seed: Optional[int] = Field(None, description="A seed for deterministic sampling")
125+
108126
@model_validator(mode="after")
109127
def check_functions_and_tools(cls, values):
110128
functions = values.get("functions")
111129
tools = values.get("tools")
112-
130+
113131
if functions is not None and tools is not None:
114132
raise ValueError("You cannot provide both 'functions' and 'tools'")
115-
133+
116134
return values
117135

136+
118137
class ToolCallFunction(BaseModel):
    """The concrete function invocation carried by a tool call."""

    name: str = Field(..., description="The name of the function to call")
    # Providers deliver the arguments pre-serialized as JSON text; they are
    # stored verbatim rather than parsed here.
    arguments: str = Field(
        ..., description="The arguments to call the function with, as a JSON string"
    )
142+
121143

122144
class ToolCall(BaseModel):
    """One tool invocation emitted by the assistant."""

    id: str = Field(..., description="The ID of the tool call")
    type: Literal["function"] = Field(..., description="The type of tool call")
    function: ToolCallFunction = Field(..., description="The function to call")
126148

149+
127150
class ChatCompletionChoice(BaseModel):
    """A single candidate completion within a response."""

    index: int = Field(..., description="The index of this completion choice")
    message: ChatMessage = Field(..., description="The chat completion message")
    finish_reason: str = Field(
        ...,
        description="The reason the completion finished: 'stop', 'length', 'tool_calls', 'content_filter', or 'function_call'",
    )
134157

158+
159+
class UsageTokensDetails(BaseModel):
    """Fine-grained token counters attached to a usage breakdown.

    Fixes: every field was typed ``Optional[...]`` yet declared required
    (``...``), so any provider omitting a counter failed validation — all
    counters now default to ``None``. Several descriptions were
    copy-paste-wrong ("in the prompt" for completion-side counters) and are
    corrected. ``text_tokens``/``image_tokens`` are added because the
    provider payloads carry those keys (possibly null).
    """

    audio_tokens: Optional[int] = Field(
        None, description="Number of audio tokens in the prompt"
    )
    cached_tokens: Optional[int] = Field(
        None, description="Number of cached tokens in the prompt"
    )
    prompt_tokens: Optional[int] = Field(
        None, description="Number of tokens in the prompt"
    )
    completion_tokens: Optional[int] = Field(
        None, description="Number of tokens in the generated completion"
    )
    total_tokens: Optional[int] = Field(
        None, description="Total number of tokens used (prompt + completion)"
    )
    accepted_prediction_tokens: Optional[int] = Field(
        None, description="Number of accepted prediction tokens in the completion"
    )
    reasoning_tokens: Optional[int] = Field(
        None, description="Number of reasoning tokens in the completion"
    )
    rejected_prediction_tokens: Optional[int] = Field(
        None, description="Number of rejected prediction tokens in the completion"
    )
    text_tokens: Optional[int] = Field(
        None, description="Number of text tokens"
    )
    image_tokens: Optional[int] = Field(
        None, description="Number of image tokens"
    )
184+
185+
135186
class ChatCompletionUsage(BaseModel):
    """Aggregate token accounting for a completion request.

    Fixes: the rewrite dropped the flat ``prompt_tokens`` and
    ``completion_tokens`` integers that provider payloads still emit (the
    sample response in this repo includes both) — they are restored as
    optional fields. The ``*_details`` fields were ``Optional`` yet
    required (``...``), rejecting the common null case — they now default
    to ``None`` — and their descriptions wrongly described plain token
    counts rather than breakdown objects.
    """

    prompt_tokens: Optional[int] = Field(
        None, description="Number of tokens in the prompt"
    )
    completion_tokens: Optional[int] = Field(
        None, description="Number of tokens in the generated completion"
    )
    completion_tokens_details: Optional[UsageTokensDetails] = Field(
        None, description="Breakdown of the tokens in the generated completion"
    )
    prompt_tokens_details: Optional[UsageTokensDetails] = Field(
        None, description="Breakdown of the tokens in the prompt"
    )
    total_tokens: int = Field(
        ..., description="Total number of tokens used (prompt + completion)"
    )
196+
139197

140198
class ChatCompletionResponse(BaseModel):
    """Top-level payload returned by a chat completion call."""

    id: str = Field(..., description="Unique identifier for this completion")
    object: str = Field(..., description="Object type, always 'chat.completion'")
    created: int = Field(
        ..., description="Unix timestamp for when the completion was created"
    )
    model: str = Field(..., description="The model used for completion")
    choices: List[ChatCompletionChoice] = Field(
        ..., description="List of chat completion choices generated by the model"
    )
    usage: ChatCompletionUsage = Field(
        ..., description="Usage statistics for the completion request"
    )
    system_fingerprint: Optional[str] = Field(
        None, description="Identifier for the system version that processed the request"
    )
Lines changed: 51 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1,30 +1,68 @@
11
{
2-
"id": "19e72f45c5724a6a9eb708dd717d1ac5",
3-
"created": 1741009631,
4-
"model": "mistral/mistral-saba-latest",
2+
"id": "chatcmpl-B71RkHwax2QEWCrZXYl3mZh20fC5y",
3+
"created": 1741013556,
4+
"model": "gpt-4o-mini-2024-07-18",
55
"object": "chat.completion",
6-
"system_fingerprint": null,
6+
"system_fingerprint": "fp_7fcd609668",
77
"choices": [
88
{
99
"finish_reason": "stop",
1010
"index": 0,
1111
"message": {
12-
"content": "Hello! How can I assist you today? If you're up for it, let's share a fun fact or a light joke to start. Here's one from me:\n\nDid you know that a day on Venus is longer than a year on Venus? It takes Venus about 243 Earth days to rotate once on its axis, but it only takes around 225 Earth days for Venus to orbit the Sun. Isn't that amazing?\n\nNow it's your turn if you'd like!",
12+
"content": "Arrr matey! In this fine image, ye be seein' a wooden pathway leadin' through a grand expanse of green grass and wild foliage. The sky above be painted in shades of blue, with fluffy clouds driftin' lazily. The boardwalk be settin' sail through this scenic wonder, invitin' ye to wander and explore the treasures of nature that lie ahead. Aye, 'tis a sight to behold for any landlubber or sea dog!",
1313
"role": "assistant",
1414
"tool_calls": null,
1515
"function_call": null
1616
}
1717
}
1818
],
19-
"provider_time": 1536966881,
19+
"provider_time": 3751955066,
2020
"edenai_time": null,
2121
"usage": {
22-
"completion_tokens": 101,
23-
"prompt_tokens": 4,
24-
"total_tokens": 105,
25-
"completion_tokens_details": null,
26-
"prompt_tokens_details": null
22+
"completion_tokens": 100,
23+
"prompt_tokens": 1170,
24+
"total_tokens": 1270,
25+
"completion_tokens_details": [
26+
[
27+
"accepted_prediction_tokens",
28+
0
29+
],
30+
[
31+
"audio_tokens",
32+
0
33+
],
34+
[
35+
"reasoning_tokens",
36+
0
37+
],
38+
[
39+
"rejected_prediction_tokens",
40+
0
41+
],
42+
[
43+
"text_tokens",
44+
null
45+
]
46+
],
47+
"prompt_tokens_details": [
48+
[
49+
"audio_tokens",
50+
0
51+
],
52+
[
53+
"cached_tokens",
54+
0
55+
],
56+
[
57+
"text_tokens",
58+
null
59+
],
60+
[
61+
"image_tokens",
62+
null
63+
]
64+
]
2765
},
28-
"service_tier": null,
29-
"cost": 6.14e-05
66+
"service_tier": "default",
67+
"cost": 0.00023549999999999998
3068
}

0 commit comments

Comments
 (0)