
Commit 574174b

Committed by github-actions[bot]
feat: Updated OpenAPI spec
1 parent b176606 commit 574174b

43 files changed: 2,244 additions, 133 deletions


src/libs/Ollama/Generated/Ollama..JsonSerializerContext.g.cs

Lines changed: 12 additions & 0 deletions
@@ -13,12 +13,20 @@ namespace Ollama
     DefaultIgnoreCondition = global::System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull,
     Converters = new global::System.Type[]
     {
+        typeof(global::Ollama.JsonConverters.GenerateCompletionRequestFormatEnumJsonConverter),
+        typeof(global::Ollama.JsonConverters.GenerateCompletionRequestFormatEnumNullableJsonConverter),
+        typeof(global::Ollama.JsonConverters.GenerateCompletionRequestThinkJsonConverter),
+        typeof(global::Ollama.JsonConverters.GenerateCompletionRequestThinkNullableJsonConverter),
         typeof(global::Ollama.JsonConverters.ResponseFormatEnumJsonConverter),
         typeof(global::Ollama.JsonConverters.ResponseFormatEnumNullableJsonConverter),
         typeof(global::Ollama.JsonConverters.MessageRoleJsonConverter),
         typeof(global::Ollama.JsonConverters.MessageRoleNullableJsonConverter),
+        typeof(global::Ollama.JsonConverters.GenerateChatCompletionRequestFormatEnumJsonConverter),
+        typeof(global::Ollama.JsonConverters.GenerateChatCompletionRequestFormatEnumNullableJsonConverter),
         typeof(global::Ollama.JsonConverters.ToolTypeJsonConverter),
         typeof(global::Ollama.JsonConverters.ToolTypeNullableJsonConverter),
+        typeof(global::Ollama.JsonConverters.GenerateChatCompletionRequestThinkJsonConverter),
+        typeof(global::Ollama.JsonConverters.GenerateChatCompletionRequestThinkNullableJsonConverter),
         typeof(global::Ollama.JsonConverters.DoneReasonEnumJsonConverter),
         typeof(global::Ollama.JsonConverters.DoneReasonEnumNullableJsonConverter),
         typeof(global::Ollama.JsonConverters.CreateModelStatusEnumJsonConverter),
@@ -33,6 +41,10 @@ namespace Ollama
         typeof(global::Ollama.JsonConverters.DoneReasonJsonConverter),
         typeof(global::Ollama.JsonConverters.CreateModelStatusJsonConverter),
         typeof(global::Ollama.JsonConverters.PullModelStatusJsonConverter),
+        typeof(global::Ollama.JsonConverters.OneOfJsonConverter<global::Ollama.GenerateCompletionRequestFormatEnum?, object>),
+        typeof(global::Ollama.JsonConverters.OneOfJsonConverter<bool?, global::Ollama.GenerateCompletionRequestThink?>),
+        typeof(global::Ollama.JsonConverters.OneOfJsonConverter<global::Ollama.GenerateChatCompletionRequestFormatEnum?, object>),
+        typeof(global::Ollama.JsonConverters.OneOfJsonConverter<bool?, global::Ollama.GenerateChatCompletionRequestThink?>),
         typeof(global::Ollama.JsonConverters.AnyOfJsonConverter<string, global::Ollama.PushModelResponseStatus?>),
         typeof(global::Ollama.JsonConverters.UnixTimestampJsonConverter),
     })]
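
These registrations are load-bearing: the context is source-generated, so under trimming or Native AOT there is no reflection fallback, and every new union shape needs both its case converters and its `OneOfJsonConverter` listed here. A minimal round-trip sketch of the new `think` union — the implicit `bool?` conversion on `OneOf` is an assumption (consistent with tryAGI-generated unions), and direct instantiation below relies on the fact that converters named in a `JsonSourceGenerationOptions` attribute must expose parameterless constructors:

using System.Text.Json;

// Register just the union converter this sketch needs.
var options = new JsonSerializerOptions
{
    Converters =
    {
        new Ollama.JsonConverters.OneOfJsonConverter<bool?, Ollama.GenerateChatCompletionRequestThink?>(),
    },
};

// A boolean `think` should serialize to the JSON literal `true`; an intensity
// value such as "high" would come out as a JSON string instead.
Ollama.OneOf<bool?, Ollama.GenerateChatCompletionRequestThink?> think = true; // assumed implicit conversion
Console.WriteLine(JsonSerializer.Serialize(think, options));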

src/libs/Ollama/Generated/Ollama.ChatClient.GenerateChatCompletion.g.cs

Lines changed: 21 additions & 7 deletions
@@ -123,7 +123,11 @@ partial void ProcessGenerateChatCompletionResponse(
     /// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects.<br/>
     /// Default Value: true
     /// </param>
-    /// <param name="format"></param>
+    /// <param name="format">
+    /// The format to return a response in. Can be:<br/>
+    /// - "json" string to enable JSON mode<br/>
+    /// - JSON schema object for structured output validation
+    /// </param>
     /// <param name="keepAlive">
     /// How long (in minutes) to keep the model loaded in memory.<br/>
     /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration.<br/>
@@ -138,22 +142,30 @@ partial void ProcessGenerateChatCompletionResponse(
     /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`.
     /// </param>
     /// <param name="think">
-    /// Think controls whether thinking/reasoning models will think before<br/>
-    /// responding. Needs to be a pointer so we can distinguish between false<br/>
-    /// (request that thinking _not_ be used) and unset (use the old behavior<br/>
-    /// before this option was introduced).
+    /// Controls whether thinking/reasoning models will think before responding.<br/>
+    /// Can be:<br/>
+    /// - boolean: true/false to enable/disable thinking<br/>
+    /// - string: "high", "medium", "low" to set thinking intensity level
+    /// </param>
+    /// <param name="truncate">
+    /// Truncates the end of the chat history if it exceeds the context length
+    /// </param>
+    /// <param name="shift">
+    /// Shifts the oldest messages out of the context window when the context limit is reached
     /// </param>
     /// <param name="cancellationToken">The token to cancel the operation with</param>
     /// <exception cref="global::System.InvalidOperationException"></exception>
     public async global::System.Collections.Generic.IAsyncEnumerable<global::Ollama.GenerateChatCompletionResponse> GenerateChatCompletionAsync(
         string model,
         global::System.Collections.Generic.IList<global::Ollama.Message> messages,
         bool? stream = default,
-        global::Ollama.ResponseFormat? format = default,
+        global::Ollama.OneOf<global::Ollama.GenerateChatCompletionRequestFormatEnum?, object>? format = default,
         int? keepAlive = default,
         global::System.Collections.Generic.IList<global::Ollama.Tool>? tools = default,
         global::Ollama.RequestOptions? options = default,
-        bool? think = default,
+        global::Ollama.OneOf<bool?, global::Ollama.GenerateChatCompletionRequestThink?>? think = default,
+        bool? truncate = default,
+        bool? shift = default,
         [global::System.Runtime.CompilerServices.EnumeratorCancellation] global::System.Threading.CancellationToken cancellationToken = default)
     {
         var __request = new global::Ollama.GenerateChatCompletionRequest
@@ -166,6 +178,8 @@ partial void ProcessGenerateChatCompletionResponse(
             Tools = tools,
             Options = options,
             Think = think,
+            Truncate = truncate,
+            Shift = shift,
         };

         var __enumerable = GenerateChatCompletionAsync(
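
For callers of `ChatClient`, the visible change is that `format` and `think` are now union-typed and two context-window knobs appear. A minimal usage sketch, assuming the generated `OneOf` structs expose implicit conversions from their case types and the usual `OllamaApiClient` entry point (both are conventions of tryAGI-generated clients, not shown in this diff):

using Ollama;

using var client = new OllamaApiClient();

// `think: true` relies on the assumed implicit bool -> OneOf conversion;
// `truncate` and `shift` decide what happens when the history outgrows the context window.
await foreach (var response in client.Chat.GenerateChatCompletionAsync(
    model: "llama3.2",
    messages: new[] { new Message { Role = MessageRole.User, Content = "Hello!" } },
    think: true,
    truncate: true,
    shift: true))
{
    Console.Write(response.Message?.Content);
}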

src/libs/Ollama/Generated/Ollama.CompletionsClient.GenerateCompletion.g.cs

Lines changed: 22 additions & 8 deletions
@@ -137,10 +137,14 @@ partial void ProcessGenerateCompletionResponse(
     /// Default Value: true
     /// </param>
     /// <param name="raw">
-    /// If `true` no formatting will be applied to the prompt and no context will be returned. <br/>
+    /// If `true` no formatting will be applied to the prompt and no context will be returned.<br/>
     /// You may choose to use the `raw` parameter if you are specifying a full templated prompt in your request to the API, and are managing history yourself.
     /// </param>
-    /// <param name="format"></param>
+    /// <param name="format">
+    /// The format to return a response in. Can be:<br/>
+    /// - "json" string to enable JSON mode<br/>
+    /// - JSON schema object for structured output validation
+    /// </param>
     /// <param name="keepAlive">
     /// How long (in minutes) to keep the model loaded in memory.<br/>
     /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration.<br/>
@@ -155,10 +159,16 @@ partial void ProcessGenerateCompletionResponse(
     /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`.
     /// </param>
     /// <param name="think">
-    /// Think controls whether thinking/reasoning models will think before<br/>
-    /// responding. Needs to be a pointer so we can distinguish between false<br/>
-    /// (request that thinking _not_ be used) and unset (use the old behavior<br/>
-    /// before this option was introduced).
+    /// Controls whether thinking/reasoning models will think before responding.<br/>
+    /// Can be:<br/>
+    /// - boolean: true/false to enable/disable thinking<br/>
+    /// - string: "high", "medium", "low" to set thinking intensity level
+    /// </param>
+    /// <param name="truncate">
+    /// Truncates the end of the prompt if it exceeds the context length
+    /// </param>
+    /// <param name="shift">
+    /// Shifts the oldest parts out of the context window when the context limit is reached
     /// </param>
     /// <param name="cancellationToken">The token to cancel the operation with</param>
     /// <exception cref="global::System.InvalidOperationException"></exception>
@@ -171,11 +181,13 @@ partial void ProcessGenerateCompletionResponse(
         global::System.Collections.Generic.IList<long>? context = default,
         bool? stream = default,
         bool? raw = default,
-        global::Ollama.ResponseFormat? format = default,
+        global::Ollama.OneOf<global::Ollama.GenerateCompletionRequestFormatEnum?, object>? format = default,
         int? keepAlive = default,
         global::System.Collections.Generic.IList<string>? images = default,
         global::Ollama.RequestOptions? options = default,
-        bool? think = default,
+        global::Ollama.OneOf<bool?, global::Ollama.GenerateCompletionRequestThink?>? think = default,
+        bool? truncate = default,
+        bool? shift = default,
         [global::System.Runtime.CompilerServices.EnumeratorCancellation] global::System.Threading.CancellationToken cancellationToken = default)
     {
         var __request = new global::Ollama.GenerateCompletionRequest
@@ -193,6 +205,8 @@ partial void ProcessGenerateCompletionResponse(
             Images = images,
             Options = options,
             Think = think,
+            Truncate = truncate,
+            Shift = shift,
         };

         var __enumerable = GenerateCompletionAsync(
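
The completions client mirrors this shape, and `format` is the more interesting union there: either the enum (JSON mode) or an arbitrary schema object. A hedged sketch reusing `client` from the previous example; the `Json` member on `GenerateCompletionRequestFormatEnum` is an assumption inferred from the converter names above:

// Ask for plain-JSON output and disable thinking for a straight completion.
await foreach (var chunk in client.Completions.GenerateCompletionAsync(
    model: "llama3.2",
    prompt: "List three colors as a JSON array.",
    format: GenerateCompletionRequestFormatEnum.Json, // assumed enum member; a schema object is the other case
    think: false))
{
    Console.Write(chunk.Response);
}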

src/libs/Ollama/Generated/Ollama.IChatClient.GenerateChatCompletion.g.cs

Lines changed: 19 additions & 7 deletions
@@ -31,7 +31,11 @@ public partial interface IChatClient
     /// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects.<br/>
     /// Default Value: true
     /// </param>
-    /// <param name="format"></param>
+    /// <param name="format">
+    /// The format to return a response in. Can be:<br/>
+    /// - "json" string to enable JSON mode<br/>
+    /// - JSON schema object for structured output validation
+    /// </param>
     /// <param name="keepAlive">
     /// How long (in minutes) to keep the model loaded in memory.<br/>
     /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration.<br/>
@@ -46,22 +50,30 @@ public partial interface IChatClient
     /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`.
     /// </param>
     /// <param name="think">
-    /// Think controls whether thinking/reasoning models will think before<br/>
-    /// responding. Needs to be a pointer so we can distinguish between false<br/>
-    /// (request that thinking _not_ be used) and unset (use the old behavior<br/>
-    /// before this option was introduced).
+    /// Controls whether thinking/reasoning models will think before responding.<br/>
+    /// Can be:<br/>
+    /// - boolean: true/false to enable/disable thinking<br/>
+    /// - string: "high", "medium", "low" to set thinking intensity level
+    /// </param>
+    /// <param name="truncate">
+    /// Truncates the end of the chat history if it exceeds the context length
+    /// </param>
+    /// <param name="shift">
+    /// Shifts the oldest messages out of the context window when the context limit is reached
     /// </param>
     /// <param name="cancellationToken">The token to cancel the operation with</param>
     /// <exception cref="global::System.InvalidOperationException"></exception>
     global::System.Collections.Generic.IAsyncEnumerable<global::Ollama.GenerateChatCompletionResponse> GenerateChatCompletionAsync(
         string model,
         global::System.Collections.Generic.IList<global::Ollama.Message> messages,
         bool? stream = default,
-        global::Ollama.ResponseFormat? format = default,
+        global::Ollama.OneOf<global::Ollama.GenerateChatCompletionRequestFormatEnum?, object>? format = default,
         int? keepAlive = default,
         global::System.Collections.Generic.IList<global::Ollama.Tool>? tools = default,
         global::Ollama.RequestOptions? options = default,
-        bool? think = default,
+        global::Ollama.OneOf<bool?, global::Ollama.GenerateChatCompletionRequestThink?>? think = default,
+        bool? truncate = default,
+        bool? shift = default,
         global::System.Threading.CancellationToken cancellationToken = default);
 }
 }

src/libs/Ollama/Generated/Ollama.ICompletionsClient.GenerateCompletion.g.cs

Lines changed: 20 additions & 8 deletions
@@ -45,10 +45,14 @@ public partial interface ICompletionsClient
     /// Default Value: true
     /// </param>
     /// <param name="raw">
-    /// If `true` no formatting will be applied to the prompt and no context will be returned. <br/>
+    /// If `true` no formatting will be applied to the prompt and no context will be returned.<br/>
     /// You may choose to use the `raw` parameter if you are specifying a full templated prompt in your request to the API, and are managing history yourself.
     /// </param>
-    /// <param name="format"></param>
+    /// <param name="format">
+    /// The format to return a response in. Can be:<br/>
+    /// - "json" string to enable JSON mode<br/>
+    /// - JSON schema object for structured output validation
+    /// </param>
     /// <param name="keepAlive">
     /// How long (in minutes) to keep the model loaded in memory.<br/>
     /// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration.<br/>
@@ -63,10 +67,16 @@ public partial interface ICompletionsClient
     /// Additional model parameters listed in the documentation for the Modelfile such as `temperature`.
     /// </param>
     /// <param name="think">
-    /// Think controls whether thinking/reasoning models will think before<br/>
-    /// responding. Needs to be a pointer so we can distinguish between false<br/>
-    /// (request that thinking _not_ be used) and unset (use the old behavior<br/>
-    /// before this option was introduced).
+    /// Controls whether thinking/reasoning models will think before responding.<br/>
+    /// Can be:<br/>
+    /// - boolean: true/false to enable/disable thinking<br/>
+    /// - string: "high", "medium", "low" to set thinking intensity level
+    /// </param>
+    /// <param name="truncate">
+    /// Truncates the end of the prompt if it exceeds the context length
+    /// </param>
+    /// <param name="shift">
+    /// Shifts the oldest parts out of the context window when the context limit is reached
     /// </param>
     /// <param name="cancellationToken">The token to cancel the operation with</param>
     /// <exception cref="global::System.InvalidOperationException"></exception>
@@ -79,11 +89,13 @@ public partial interface ICompletionsClient
         global::System.Collections.Generic.IList<long>? context = default,
         bool? stream = default,
         bool? raw = default,
-        global::Ollama.ResponseFormat? format = default,
+        global::Ollama.OneOf<global::Ollama.GenerateCompletionRequestFormatEnum?, object>? format = default,
         int? keepAlive = default,
         global::System.Collections.Generic.IList<string>? images = default,
         global::Ollama.RequestOptions? options = default,
-        bool? think = default,
+        global::Ollama.OneOf<bool?, global::Ollama.GenerateCompletionRequestThink?>? think = default,
+        bool? truncate = default,
+        bool? shift = default,
         global::System.Threading.CancellationToken cancellationToken = default);
 }
 }

src/libs/Ollama/Generated/Ollama.IModelsClient.CreateModel.g.cs

Lines changed: 25 additions & 1 deletion
@@ -38,14 +38,38 @@ public partial interface IModelsClient
     /// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects.<br/>
     /// Default Value: true
     /// </param>
+    /// <param name="from">
+    /// Name of the model or file to use as the source
+    /// </param>
+    /// <param name="files">
+    /// Map of files to include when creating the model
+    /// </param>
+    /// <param name="adapters">
+    /// Map of LoRA adapters to include when creating the model
+    /// </param>
+    /// <param name="template">
+    /// Template used when constructing a request to the model
+    /// </param>
+    /// <param name="system">
+    /// System prompt for the model
+    /// </param>
+    /// <param name="parameters">
+    /// Map of hyper-parameters which are applied to the model
+    /// </param>
     /// <param name="cancellationToken">The token to cancel the operation with</param>
     /// <exception cref="global::System.InvalidOperationException"></exception>
     global::System.Collections.Generic.IAsyncEnumerable<global::Ollama.CreateModelResponse> CreateModelAsync(
         string model,
-        string modelfile,
+        string? modelfile = default,
         string? path = default,
         string? quantize = default,
         bool? stream = default,
+        string? from = default,
+        global::System.Collections.Generic.Dictionary<string, string>? files = default,
+        global::System.Collections.Generic.Dictionary<string, string>? adapters = default,
+        string? template = default,
+        string? system = default,
+        object? parameters = default,
         global::System.Threading.CancellationToken cancellationToken = default);
 }
 }
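
Here `modelfile` becomes optional because the Modelfile's contents are promoted to first-class parameters, matching Ollama's newer `/api/create` request shape. A sketch of the new call, assuming the `client.Models` accessor from the generated client surface:

// Derive a new model from an existing one instead of sending a raw Modelfile.
await foreach (var status in client.Models.CreateModelAsync(
    model: "mario",
    from: "llama3.2",
    system: "You are Mario from Super Mario Bros.",
    parameters: new { temperature = 0.8 })) // typed as object; serialized as a JSON map
{
    Console.WriteLine(status.Status);
}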
