12 changes: 12 additions & 0 deletions src/libs/Ollama/Generated/Ollama..JsonSerializerContext.g.cs
@@ -13,12 +13,20 @@ namespace Ollama
DefaultIgnoreCondition = global::System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull,
Converters = new global::System.Type[]
{
+ typeof(global::Ollama.JsonConverters.GenerateCompletionRequestFormatEnumJsonConverter),
+ typeof(global::Ollama.JsonConverters.GenerateCompletionRequestFormatEnumNullableJsonConverter),
+ typeof(global::Ollama.JsonConverters.GenerateCompletionRequestThinkJsonConverter),
+ typeof(global::Ollama.JsonConverters.GenerateCompletionRequestThinkNullableJsonConverter),
typeof(global::Ollama.JsonConverters.ResponseFormatEnumJsonConverter),
typeof(global::Ollama.JsonConverters.ResponseFormatEnumNullableJsonConverter),
typeof(global::Ollama.JsonConverters.MessageRoleJsonConverter),
typeof(global::Ollama.JsonConverters.MessageRoleNullableJsonConverter),
+ typeof(global::Ollama.JsonConverters.GenerateChatCompletionRequestFormatEnumJsonConverter),
+ typeof(global::Ollama.JsonConverters.GenerateChatCompletionRequestFormatEnumNullableJsonConverter),
typeof(global::Ollama.JsonConverters.ToolTypeJsonConverter),
typeof(global::Ollama.JsonConverters.ToolTypeNullableJsonConverter),
+ typeof(global::Ollama.JsonConverters.GenerateChatCompletionRequestThinkJsonConverter),
+ typeof(global::Ollama.JsonConverters.GenerateChatCompletionRequestThinkNullableJsonConverter),
typeof(global::Ollama.JsonConverters.DoneReasonEnumJsonConverter),
typeof(global::Ollama.JsonConverters.DoneReasonEnumNullableJsonConverter),
typeof(global::Ollama.JsonConverters.CreateModelStatusEnumJsonConverter),
@@ -33,6 +41,10 @@ namespace Ollama
typeof(global::Ollama.JsonConverters.DoneReasonJsonConverter),
typeof(global::Ollama.JsonConverters.CreateModelStatusJsonConverter),
typeof(global::Ollama.JsonConverters.PullModelStatusJsonConverter),
+ typeof(global::Ollama.JsonConverters.OneOfJsonConverter<global::Ollama.GenerateCompletionRequestFormatEnum?, object>),
+ typeof(global::Ollama.JsonConverters.OneOfJsonConverter<bool?, global::Ollama.GenerateCompletionRequestThink?>),
+ typeof(global::Ollama.JsonConverters.OneOfJsonConverter<global::Ollama.GenerateChatCompletionRequestFormatEnum?, object>),
+ typeof(global::Ollama.JsonConverters.OneOfJsonConverter<bool?, global::Ollama.GenerateChatCompletionRequestThink?>),
typeof(global::Ollama.JsonConverters.AnyOfJsonConverter<string, global::Ollama.PushModelResponseStatus?>),
typeof(global::Ollama.JsonConverters.UnixTimestampJsonConverter),
})]
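Note on the registrations above: System.Text.Json source generation only picks up converters listed on the context, so each closed generic `OneOfJsonConverter<...>` used by the new `format`/`think` unions must be registered explicitly. As a rough, self-contained sketch of the pattern such a converter follows (the `OneOf` shape and member names here are illustrative, not the SDK's generated code):

```csharp
using System;
using System.Text.Json;
using System.Text.Json.Serialization;

// Illustrative two-case union; the real SDK generates its own OneOf struct.
public readonly record struct OneOf<T1, T2>(T1? First, T2? Second, bool IsFirst);

public sealed class OneOfJsonConverter<T1, T2> : JsonConverter<OneOf<T1, T2>>
{
    public override OneOf<T1, T2> Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options)
    {
        // Probe with a copy of the (struct) reader so a failed first attempt
        // does not consume any input.
        var probe = reader;
        try
        {
            var first = JsonSerializer.Deserialize<T1>(ref probe, options);
            reader = probe;
            return new OneOf<T1, T2>(first, default, IsFirst: true);
        }
        catch (JsonException)
        {
            var second = JsonSerializer.Deserialize<T2>(ref reader, options);
            return new OneOf<T1, T2>(default, second, IsFirst: false);
        }
    }

    public override void Write(Utf8JsonWriter writer, OneOf<T1, T2> value, JsonSerializerOptions options)
    {
        // Whichever case is populated is emitted bare, e.g. "json" or a schema object.
        if (value.IsFirst) JsonSerializer.Serialize(writer, value.First, options);
        else JsonSerializer.Serialize(writer, value.Second, options);
    }
}
```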
src/libs/Ollama/Generated/Ollama.ChatClient.GenerateChatCompletion.g.cs
@@ -123,7 +123,11 @@ partial void ProcessGenerateChatCompletionResponse(
/// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects.<br/>
/// Default Value: true
/// </param>
- /// <param name="format"></param>
+ /// <param name="format">
+ /// The format to return a response in. Can be:<br/>
+ /// - "json" string to enable JSON mode<br/>
+ /// - JSON schema object for structured output validation
+ /// </param>
/// <param name="keepAlive">
/// How long (in minutes) to keep the model loaded in memory.<br/>
/// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration.<br/>
@@ -138,22 +142,30 @@ partial void ProcessGenerateChatCompletionResponse(
/// Additional model parameters listed in the documentation for the Modelfile such as `temperature`.
/// </param>
/// <param name="think">
- /// Think controls whether thinking/reasoning models will think before<br/>
- /// responding. Needs to be a pointer so we can distinguish between false<br/>
- /// (request that thinking _not_ be used) and unset (use the old behavior<br/>
- /// before this option was introduced).
+ /// Controls whether thinking/reasoning models will think before responding.<br/>
+ /// Can be:<br/>
+ /// - boolean: true/false to enable/disable thinking<br/>
+ /// - string: "high", "medium", "low" to set thinking intensity level
/// </param>
+ /// <param name="truncate">
+ /// Truncates the end of the chat history if it exceeds the context length
+ /// </param>
+ /// <param name="shift">
+ /// Shifts the oldest messages out of the context window when the context limit is reached
+ /// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
public async global::System.Collections.Generic.IAsyncEnumerable<global::Ollama.GenerateChatCompletionResponse> GenerateChatCompletionAsync(
string model,
global::System.Collections.Generic.IList<global::Ollama.Message> messages,
bool? stream = default,
- global::Ollama.ResponseFormat? format = default,
+ global::Ollama.OneOf<global::Ollama.GenerateChatCompletionRequestFormatEnum?, object>? format = default,
int? keepAlive = default,
global::System.Collections.Generic.IList<global::Ollama.Tool>? tools = default,
global::Ollama.RequestOptions? options = default,
- bool? think = default,
+ global::Ollama.OneOf<bool?, global::Ollama.GenerateChatCompletionRequestThink?>? think = default,
+ bool? truncate = default,
+ bool? shift = default,
[global::System.Runtime.CompilerServices.EnumeratorCancellation] global::System.Threading.CancellationToken cancellationToken = default)
{
var __request = new global::Ollama.GenerateChatCompletionRequest
@@ -166,6 +178,8 @@ partial void ProcessGenerateChatCompletionResponse(
Tools = tools,
Options = options,
Think = think,
+ Truncate = truncate,
+ Shift = shift,
};

var __enumerable = GenerateChatCompletionAsync(
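For SDK consumers, this is the user-facing shape of the change: `format` and `think` move from `ResponseFormat?`/`bool?` to two-case unions, which is source-breaking only for callers that passed those arguments with the old types explicitly. A usage sketch — assuming, as elsewhere in tryAGI-generated SDKs, that `OneOf` has implicit conversions from each case, that the client exposes a `Chat` sub-client, and that the enum case is named `Json` (none of which is shown in this diff):

```csharp
using Ollama;

using var client = new OllamaApiClient();

var messages = new List<Message>
{
    new() { Role = MessageRole.User, Content = "List three RGB colors as JSON." },
};

// `format` and `think` are now unions: enum-or-schema and bool-or-level.
await foreach (var chunk in client.Chat.GenerateChatCompletionAsync(
    model: "llama3.2",
    messages: messages,
    format: GenerateChatCompletionRequestFormatEnum.Json, // implicitly wrapped into the OneOf
    think: true,                                          // bool case of the think union
    truncate: true,
    shift: true))
{
    Console.Write(chunk.Message.Content);
}
```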
src/libs/Ollama/Generated/Ollama.CompletionsClient.GenerateCompletion.g.cs
@@ -137,10 +137,14 @@ partial void ProcessGenerateCompletionResponse(
/// Default Value: true
/// </param>
/// <param name="raw">
- /// If `true` no formatting will be applied to the prompt and no context will be returned. <br/>
+ /// If `true` no formatting will be applied to the prompt and no context will be returned.<br/>
/// You may choose to use the `raw` parameter if you are specifying a full templated prompt in your request to the API, and are managing history yourself.
/// </param>
- /// <param name="format"></param>
+ /// <param name="format">
+ /// The format to return a response in. Can be:<br/>
+ /// - "json" string to enable JSON mode<br/>
+ /// - JSON schema object for structured output validation
+ /// </param>
/// <param name="keepAlive">
/// How long (in minutes) to keep the model loaded in memory.<br/>
/// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration.<br/>
@@ -155,10 +159,16 @@ partial void ProcessGenerateCompletionResponse(
/// Additional model parameters listed in the documentation for the Modelfile such as `temperature`.
/// </param>
/// <param name="think">
- /// Think controls whether thinking/reasoning models will think before<br/>
- /// responding. Needs to be a pointer so we can distinguish between false<br/>
- /// (request that thinking _not_ be used) and unset (use the old behavior<br/>
- /// before this option was introduced).
+ /// Controls whether thinking/reasoning models will think before responding.<br/>
+ /// Can be:<br/>
+ /// - boolean: true/false to enable/disable thinking<br/>
+ /// - string: "high", "medium", "low" to set thinking intensity level
/// </param>
+ /// <param name="truncate">
+ /// Truncates the end of the prompt if it exceeds the context length
+ /// </param>
+ /// <param name="shift">
+ /// Shifts the oldest parts out of the context window when the context limit is reached
+ /// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
@@ -171,11 +181,13 @@ partial void ProcessGenerateCompletionResponse(
global::System.Collections.Generic.IList<long>? context = default,
bool? stream = default,
bool? raw = default,
- global::Ollama.ResponseFormat? format = default,
+ global::Ollama.OneOf<global::Ollama.GenerateCompletionRequestFormatEnum?, object>? format = default,
int? keepAlive = default,
global::System.Collections.Generic.IList<string>? images = default,
global::Ollama.RequestOptions? options = default,
- bool? think = default,
+ global::Ollama.OneOf<bool?, global::Ollama.GenerateCompletionRequestThink?>? think = default,
+ bool? truncate = default,
+ bool? shift = default,
[global::System.Runtime.CompilerServices.EnumeratorCancellation] global::System.Threading.CancellationToken cancellationToken = default)
{
var __request = new global::Ollama.GenerateCompletionRequest
@@ -193,6 +205,8 @@ partial void ProcessGenerateCompletionResponse(
Images = images,
Options = options,
Think = think,
+ Truncate = truncate,
+ Shift = shift,
};

var __enumerable = GenerateCompletionAsync(
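Same change on the completions path. The interesting new capability is the `object` case of `format`: anything JSON-serializable can be sent as a schema for structured outputs, rather than only the literal `"json"` mode. A sketch under the same assumptions as above, plus an assumed `Completions` sub-client (the anonymous-object schema and the implicit conversion into the union's `object` case are illustrative):

```csharp
using Ollama;

using var client = new OllamaApiClient();

// Any JSON-serializable value can ride the `object` branch of `format`;
// validating the schema is the server's job, not the SDK's.
object schema = new
{
    type = "object",
    properties = new { answer = new { type = "string" } },
    required = new[] { "answer" },
};

await foreach (var chunk in client.Completions.GenerateCompletionAsync(
    model: "llama3.2",
    prompt: "Answer as JSON: what color is the sky?",
    format: schema,  // object case -> structured output
    think: false,    // bool case: explicitly disable thinking
    truncate: true))
{
    Console.Write(chunk.Response);
}
```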
src/libs/Ollama/Generated/Ollama.IChatClient.GenerateChatCompletion.g.cs
@@ -31,7 +31,11 @@ public partial interface IChatClient
/// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects.<br/>
/// Default Value: true
/// </param>
- /// <param name="format"></param>
+ /// <param name="format">
+ /// The format to return a response in. Can be:<br/>
+ /// - "json" string to enable JSON mode<br/>
+ /// - JSON schema object for structured output validation
+ /// </param>
/// <param name="keepAlive">
/// How long (in minutes) to keep the model loaded in memory.<br/>
/// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration.<br/>
@@ -46,22 +50,30 @@ public partial interface IChatClient
/// Additional model parameters listed in the documentation for the Modelfile such as `temperature`.
/// </param>
/// <param name="think">
- /// Think controls whether thinking/reasoning models will think before<br/>
- /// responding. Needs to be a pointer so we can distinguish between false<br/>
- /// (request that thinking _not_ be used) and unset (use the old behavior<br/>
- /// before this option was introduced).
+ /// Controls whether thinking/reasoning models will think before responding.<br/>
+ /// Can be:<br/>
+ /// - boolean: true/false to enable/disable thinking<br/>
+ /// - string: "high", "medium", "low" to set thinking intensity level
/// </param>
+ /// <param name="truncate">
+ /// Truncates the end of the chat history if it exceeds the context length
+ /// </param>
+ /// <param name="shift">
+ /// Shifts the oldest messages out of the context window when the context limit is reached
+ /// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
global::System.Collections.Generic.IAsyncEnumerable<global::Ollama.GenerateChatCompletionResponse> GenerateChatCompletionAsync(
string model,
global::System.Collections.Generic.IList<global::Ollama.Message> messages,
bool? stream = default,
- global::Ollama.ResponseFormat? format = default,
+ global::Ollama.OneOf<global::Ollama.GenerateChatCompletionRequestFormatEnum?, object>? format = default,
int? keepAlive = default,
global::System.Collections.Generic.IList<global::Ollama.Tool>? tools = default,
global::Ollama.RequestOptions? options = default,
- bool? think = default,
+ global::Ollama.OneOf<bool?, global::Ollama.GenerateChatCompletionRequestThink?>? think = default,
+ bool? truncate = default,
+ bool? shift = default,
global::System.Threading.CancellationToken cancellationToken = default);
}
}
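One point worth flagging for implementers of `IChatClient`: the nullable union gives `think` three meaningful states — unset (`null`, keep the server default), the boolean case (on/off), and the intensity-level case. The doc comment maps the levels to `"high"`/`"medium"`/`"low"` on the wire; a sketch of selecting the level case (the `High` member name and the `Thinking` response field are guesses from the doc comment and the Ollama API, not from this diff):

```csharp
using Ollama;

using var client = new OllamaApiClient();

// Ask a reasoning model to think hard before answering.
await foreach (var chunk in client.Chat.GenerateChatCompletionAsync(
    model: "gpt-oss:20b",
    messages: new List<Message>
    {
        new() { Role = MessageRole.User, Content = "Why is the sky blue?" },
    },
    think: GenerateChatCompletionRequestThink.High)) // level case of the union
{
    // Reasoning tokens and answer tokens arrive on separate fields.
    Console.Write(chunk.Message.Thinking ?? chunk.Message.Content);
}
```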
src/libs/Ollama/Generated/Ollama.ICompletionsClient.GenerateCompletion.g.cs
@@ -45,10 +45,14 @@ public partial interface ICompletionsClient
/// Default Value: true
/// </param>
/// <param name="raw">
- /// If `true` no formatting will be applied to the prompt and no context will be returned. <br/>
+ /// If `true` no formatting will be applied to the prompt and no context will be returned.<br/>
/// You may choose to use the `raw` parameter if you are specifying a full templated prompt in your request to the API, and are managing history yourself.
/// </param>
- /// <param name="format"></param>
+ /// <param name="format">
+ /// The format to return a response in. Can be:<br/>
+ /// - "json" string to enable JSON mode<br/>
+ /// - JSON schema object for structured output validation
+ /// </param>
/// <param name="keepAlive">
/// How long (in minutes) to keep the model loaded in memory.<br/>
/// - If set to a positive duration (e.g. 20), the model will stay loaded for the provided duration.<br/>
@@ -63,10 +67,16 @@ public partial interface ICompletionsClient
/// Additional model parameters listed in the documentation for the Modelfile such as `temperature`.
/// </param>
/// <param name="think">
- /// Think controls whether thinking/reasoning models will think before<br/>
- /// responding. Needs to be a pointer so we can distinguish between false<br/>
- /// (request that thinking _not_ be used) and unset (use the old behavior<br/>
- /// before this option was introduced).
+ /// Controls whether thinking/reasoning models will think before responding.<br/>
+ /// Can be:<br/>
+ /// - boolean: true/false to enable/disable thinking<br/>
+ /// - string: "high", "medium", "low" to set thinking intensity level
/// </param>
+ /// <param name="truncate">
+ /// Truncates the end of the prompt if it exceeds the context length
+ /// </param>
+ /// <param name="shift">
+ /// Shifts the oldest parts out of the context window when the context limit is reached
+ /// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
@@ -79,11 +89,13 @@ public partial interface ICompletionsClient
global::System.Collections.Generic.IList<long>? context = default,
bool? stream = default,
bool? raw = default,
- global::Ollama.ResponseFormat? format = default,
+ global::Ollama.OneOf<global::Ollama.GenerateCompletionRequestFormatEnum?, object>? format = default,
int? keepAlive = default,
global::System.Collections.Generic.IList<string>? images = default,
global::Ollama.RequestOptions? options = default,
- bool? think = default,
+ global::Ollama.OneOf<bool?, global::Ollama.GenerateCompletionRequestThink?>? think = default,
+ bool? truncate = default,
+ bool? shift = default,
global::System.Threading.CancellationToken cancellationToken = default);
}
}
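`truncate` and `shift` are complementary overflow strategies: `truncate` clips the end of an over-long input up front, while `shift` evicts the oldest context as the window fills during generation. With both disabled, overflow becomes the caller's problem; a sketch of surfacing it as an error (the diff documents only `InvalidOperationException`, so the exact failure surface here is an assumption):

```csharp
using Ollama;

using var client = new OllamaApiClient();

var veryLongPrompt = string.Concat(Enumerable.Repeat("lorem ipsum ", 100_000));

try
{
    // Opt out of both strategies: no silent clipping, no silent eviction.
    await foreach (var chunk in client.Completions.GenerateCompletionAsync(
        model: "llama3.2",
        prompt: veryLongPrompt,
        truncate: false,
        shift: false))
    {
        Console.Write(chunk.Response);
    }
}
catch (InvalidOperationException ex)
{
    Console.Error.WriteLine($"Request failed (likely context overflow): {ex.Message}");
}
```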
26 changes: 25 additions & 1 deletion src/libs/Ollama/Generated/Ollama.IModelsClient.CreateModel.g.cs
@@ -38,14 +38,38 @@ public partial interface IModelsClient
/// If `false` the response will be returned as a single response object, otherwise the response will be streamed as a series of objects.<br/>
/// Default Value: true
/// </param>
+ /// <param name="from">
+ /// Name of the model or file to use as the source
+ /// </param>
+ /// <param name="files">
+ /// Map of files to include when creating the model
+ /// </param>
+ /// <param name="adapters">
+ /// Map of LoRA adapters to include when creating the model
+ /// </param>
+ /// <param name="template">
+ /// Template used when constructing a request to the model
+ /// </param>
+ /// <param name="system">
+ /// System prompt for the model
+ /// </param>
+ /// <param name="parameters">
+ /// Map of hyper-parameters which are applied to the model
+ /// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
global::System.Collections.Generic.IAsyncEnumerable<global::Ollama.CreateModelResponse> CreateModelAsync(
string model,
- string modelfile,
+ string? modelfile = default,
string? path = default,
string? quantize = default,
bool? stream = default,
+ string? from = default,
+ global::System.Collections.Generic.Dictionary<string, string>? files = default,
+ global::System.Collections.Generic.Dictionary<string, string>? adapters = default,
+ string? template = default,
+ string? system = default,
+ object? parameters = default,
global::System.Threading.CancellationToken cancellationToken = default);
}
}
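This last file carries the real breaking change: `modelfile` drops from required to optional, displaced by the structured fields of Ollama's newer `/api/create` (`from`, `files`, `adapters`, `template`, `system`, `parameters`). A sketch of deriving a persona model from an existing base (the `Models` sub-client name and the `status.Status` property are assumptions; the argument names match the signature above, and the `parameters` payload is illustrative):

```csharp
using Ollama;

using var client = new OllamaApiClient();

// Create a derived model declaratively instead of shipping a Modelfile string.
await foreach (var status in client.Models.CreateModelAsync(
    model: "mario",
    from: "llama3.2",
    system: "You are Mario from Super Mario Bros.",
    parameters: new { temperature = 0.8 }))
{
    Console.WriteLine(status.Status);
}
```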