from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Union

from pydantic import BaseModel, Field, model_validator
class ChatRole(str, Enum):
    """Author roles recognized in a chat conversation."""

    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"
    FUNCTION = "function"
    TOOL = "tool"
12
14
class ChatMessage(BaseModel):
    """A single message in a chat conversation."""

    role: ChatRole = Field(..., description="The role of the message author")
    content: Optional[str] = Field(None, description="The content of the message")
    name: Optional[str] = Field(
        None, description="The name of the author of this message"
    )
    # For function calls
    function_call: Optional[Dict[str, Any]] = Field(
        None, description="The function call information"
    )
    # For tool calls
    tool_calls: Optional[List[Dict[str, Any]]] = Field(
        None, description="The tool call information"
    )
23
32
class ChatCompletionModel (str , Enum ):
24
33
GPT_4 = "gpt-4"
@@ -28,17 +37,27 @@ class ChatCompletionModel(str, Enum):
28
37
GPT_3_5_TURBO = "gpt-3.5-turbo"
29
38
GPT_3_5_TURBO_16K = "gpt-3.5-turbo-16k"
30
39
40
+
31
41
class FunctionDefinition(BaseModel):
    """Schema describing a callable function exposed to the model."""

    name: str = Field(..., description="The name of the function to be called")
    description: Optional[str] = Field(
        None, description="A description of what the function does"
    )
    parameters: Dict[str, Any] = Field(
        ..., description="The parameters the function accepts, in JSON Schema format"
    )
36
51
class ToolDefinition(BaseModel):
    """Wrapper pairing a tool type tag with its function definition."""

    type: Literal["function"] = Field("function", description="The type of tool")
    function: FunctionDefinition = Field(..., description="The function definition")
40
56
class ResponseFormat(BaseModel):
    """Requested output format for the model's response."""

    type: Literal["text", "json_object"] = Field(
        "text", description="The format of the response"
    )
43
62
class ChatCompletionRequest (BaseModel ):
44
63
model : Union [ChatCompletionModel , str ] = Field (
@@ -57,95 +76,138 @@ class ChatCompletionRequest(BaseModel):
57
76
None , description = "Controls how the model responds to function calls"
58
77
)
59
78
temperature : Optional [float ] = Field (
60
- 1.0 ,
61
- description = "What sampling temperature to use, between 0 and 2" ,
62
- ge = 0 ,
63
- le = 2
79
+ 1.0 , description = "What sampling temperature to use, between 0 and 2" , ge = 0 , le = 2
64
80
)
65
81
top_p : Optional [float ] = Field (
66
82
1.0 ,
67
83
description = "An alternative to sampling with temperature, called nucleus sampling" ,
68
84
ge = 0 ,
69
- le = 1
85
+ le = 1 ,
70
86
)
71
87
n : Optional [int ] = Field (
72
- 1 , description = "How many chat completion choices to generate for each input message"
88
+ 1 ,
89
+ description = "How many chat completion choices to generate for each input message" ,
73
90
)
74
91
stream : Optional [bool ] = Field (
75
92
False , description = "If set, partial message deltas will be sent"
76
93
)
77
94
stop : Optional [Union [str , List [str ]]] = Field (
78
- None , description = "Up to 4 sequences where the API will stop generating further tokens"
95
+ None ,
96
+ description = "Up to 4 sequences where the API will stop generating further tokens" ,
79
97
)
80
98
max_tokens : Optional [int ] = Field (
81
- None , description = "The maximum number of tokens to generate in the chat completion"
99
+ None ,
100
+ description = "The maximum number of tokens to generate in the chat completion" ,
82
101
)
83
102
presence_penalty : Optional [float ] = Field (
84
103
0 ,
85
104
description = "Number between -2.0 and 2.0 to penalize tokens based on their presence so far" ,
86
105
ge = - 2.0 ,
87
- le = 2.0
106
+ le = 2.0 ,
88
107
)
89
108
frequency_penalty : Optional [float ] = Field (
90
109
0 ,
91
110
description = "Number between -2.0 and 2.0 to penalize tokens based on their frequency so far" ,
92
111
ge = - 2.0 ,
93
- le = 2.0
112
+ le = 2.0 ,
94
113
)
95
114
logit_bias : Optional [Dict [str , float ]] = Field (
96
- None , description = "Modify the likelihood of specified tokens appearing in the completion"
115
+ None ,
116
+ description = "Modify the likelihood of specified tokens appearing in the completion" ,
97
117
)
98
118
user : Optional [str ] = Field (
99
119
None , description = "A unique identifier representing your end-user"
100
120
)
101
121
response_format : Optional [ResponseFormat ] = Field (
102
122
None , description = "An object specifying the format that the model must output"
103
123
)
104
- seed : Optional [int ] = Field (
105
- None , description = "A seed for deterministic sampling"
106
- )
107
-
124
+ seed : Optional [int ] = Field (None , description = "A seed for deterministic sampling" )
125
+
108
126
@model_validator (mode = "after" )
109
127
def check_functions_and_tools (cls , values ):
110
128
functions = values .get ("functions" )
111
129
tools = values .get ("tools" )
112
-
130
+
113
131
if functions is not None and tools is not None :
114
132
raise ValueError ("You cannot provide both 'functions' and 'tools'" )
115
-
133
+
116
134
return values
117
135
136
+
118
137
class ToolCallFunction(BaseModel):
    """The function invocation requested by a tool call."""

    name: str = Field(..., description="The name of the function to call")
    arguments: str = Field(
        ..., description="The arguments to call the function with, as a JSON string"
    )
122
144
class ToolCall(BaseModel):
    """A single tool invocation emitted by the assistant."""

    id: str = Field(..., description="The ID of the tool call")
    type: Literal["function"] = Field(..., description="The type of tool call")
    function: ToolCallFunction = Field(..., description="The function to call")
127
150
class ChatCompletionChoice(BaseModel):
    """One generated completion alternative within a response."""

    index: int = Field(..., description="The index of this completion choice")
    message: ChatMessage = Field(..., description="The chat completion message")
    finish_reason: str = Field(
        ...,
        description="The reason the completion finished: 'stop', 'length', 'tool_calls', 'content_filter', or 'function_call'",
    )
159
class UsageTokensDetails(BaseModel):
    """Per-category token breakdown attached to usage statistics.

    Bug fix: every field was typed Optional[int] yet declared required via
    ``Field(...)``; the API reports only the categories relevant to a given
    request, so required fields would fail validation on real responses.
    All fields now default to None.
    """

    audio_tokens: Optional[int] = Field(
        None, description="Number of audio tokens"
    )
    cached_tokens: Optional[int] = Field(
        None, description="Number of cached tokens in the prompt"
    )
    prompt_tokens: Optional[int] = Field(
        None, description="Number of tokens in the prompt"
    )
    completion_tokens: Optional[int] = Field(
        None, description="Number of tokens in the generated completion"
    )
    total_tokens: Optional[int] = Field(
        None, description="Total number of tokens used (prompt + completion)"
    )
    # NOTE(review): the original descriptions for the three fields below said
    # "in the prompt"; these are completion-side prediction/reasoning counts.
    accepted_prediction_tokens: Optional[int] = Field(
        None, description="Number of predicted tokens accepted in the completion"
    )
    reasoning_tokens: Optional[int] = Field(
        None, description="Number of reasoning tokens in the completion"
    )
    rejected_prediction_tokens: Optional[int] = Field(
        None, description="Number of predicted tokens rejected from the completion"
    )
135
186
class ChatCompletionUsage(BaseModel):
    """Token usage statistics for a completion request."""

    # Restored (backward-compatibly, with None defaults): the API reports
    # these flat counts alongside the detail breakdowns.
    prompt_tokens: Optional[int] = Field(
        None, description="Number of tokens in the prompt"
    )
    completion_tokens: Optional[int] = Field(
        None, description="Number of tokens in the generated completion"
    )
    # Bug fix: these were Optional yet required (`...`) and their descriptions
    # described plain token counts; they hold breakdown objects that the API
    # may omit, so they default to None.
    completion_tokens_details: Optional[UsageTokensDetails] = Field(
        None, description="Breakdown of tokens in the generated completion"
    )
    prompt_tokens_details: Optional[UsageTokensDetails] = Field(
        None, description="Breakdown of tokens in the prompt"
    )
    total_tokens: int = Field(
        ..., description="Total number of tokens used (prompt + completion)"
    )
140
198
class ChatCompletionResponse(BaseModel):
    """Top-level response object for a chat completion request."""

    id: str = Field(..., description="Unique identifier for this completion")
    object: str = Field(..., description="Object type, always 'chat.completion'")
    created: int = Field(
        ..., description="Unix timestamp for when the completion was created"
    )
    model: str = Field(..., description="The model used for completion")
    choices: List[ChatCompletionChoice] = Field(
        ..., description="List of chat completion choices generated by the model"
    )
    usage: ChatCompletionUsage = Field(
        ..., description="Usage statistics for the completion request"
    )
    system_fingerprint: Optional[str] = Field(
        None, description="Identifier for the system version that processed the request"
    )