@@ -852,7 +852,7 @@ This method does not support tables containing geometries.
852
852
##### Signature
853
853
854
854
``` typescript
855
- async aiRowByRow (column : string , newColumn : string , prompt : string , options ?: { batchSize?: number ; concurrent ?: number ; cache ?: boolean ; test ?: (dataPoint : unknown ) => any ; retry ?: number ; model ?: string ; apiKey ?: string ; vertex ?: boolean ; project ?: string ; location ?: string ; ollama ?: boolean | Ollama ; verbose ?: boolean ; rateLimitPerMinute ?: number ; clean ?: (response : unknown ) => any }): Promise < void > ;
855
+ async aiRowByRow (column : string , newColumn : string , prompt : string , options ?: { batchSize?: number ; concurrent ?: number ; cache ?: boolean ; test ?: (dataPoint : unknown ) => any ; retry ?: number ; model ?: string ; apiKey ?: string ; vertex ?: boolean ; project ?: string ; location ?: string ; ollama ?: boolean | Ollama ; verbose ?: boolean ; rateLimitPerMinute ?: number ; clean ?: (response : unknown ) => any ; contextWindow ?: number }): Promise < void > ;
856
856
```
857
857
858
858
##### Parameters
@@ -894,6 +894,9 @@ async aiRowByRow(column: string, newColumn: string, prompt: string, options?: {
894
894
including the full prompt sent to the AI. Defaults to ` false ` .
895
895
- ** ` options.clean ` ** : - A function to clean the AI's response before testing,
896
896
caching, and storing. Defaults to ` undefined ` .
897
+ - ** ` options.contextWindow ` ** : - An option to specify the context window size
898
+ for Ollama models. By default, Ollama sets this based on the model, and that
899
+ default can be lower than the model's actual maximum context window size.
897
900
898
901
##### Returns
899
902
@@ -971,7 +974,7 @@ This method does not support tables containing geometries.
971
974
##### Signature
972
975
973
976
``` typescript
974
- async aiEmbeddings (column : string , newColumn : string , options ?: { createIndex?: boolean ; concurrent ?: number ; cache ?: boolean ; model ?: string ; apiKey ?: string ; vertex ?: boolean ; project ?: string ; location ?: string ; ollama ?: boolean | Ollama ; verbose ?: boolean ; rateLimitPerMinute ?: number }): Promise < void > ;
977
+ async aiEmbeddings (column : string , newColumn : string , options ?: { createIndex?: boolean ; concurrent ?: number ; cache ?: boolean ; model ?: string ; apiKey ?: string ; vertex ?: boolean ; project ?: string ; location ?: string ; ollama ?: boolean | Ollama ; verbose ?: boolean ; rateLimitPerMinute ?: number ; contextWindow ?: number }): Promise < void > ;
975
978
```
976
979
977
980
##### Parameters
@@ -1005,6 +1008,9 @@ async aiEmbeddings(column: string, newColumn: string, options?: { createIndex?:
1005
1008
- ** ` options.ollama ` ** : - If ` true ` , uses Ollama. Defaults to the ` OLLAMA `
1006
1009
environment variable. If you want your Ollama instance to be used, you can
1007
1010
pass it here too.
1011
+ - ** ` options.contextWindow ` ** : - An option to specify the context window size
1012
+ for Ollama models. By default, Ollama sets this based on the model, and that
1013
+ default can be lower than the model's actual maximum context window size.
1008
1014
- ** ` options.verbose ` ** : - If ` true ` , logs additional debugging information.
1009
1015
Defaults to ` false ` .
1010
1016
@@ -1063,7 +1069,7 @@ up processing. If the index already exists, it will not be recreated.
1063
1069
##### Signature
1064
1070
1065
1071
``` typescript
1066
- async aiVectorSimilarity (text : string , column : string , nbResults : number , options ?: { createIndex?: boolean ; outputTable ?: string ; cache ?: boolean ; model ?: string ; apiKey ?: string ; vertex ?: boolean ; project ?: string ; location ?: string ; ollama ?: boolean | Ollama ; verbose ?: boolean }): Promise < SimpleTable > ;
1072
+ async aiVectorSimilarity (text : string , column : string , nbResults : number , options ?: { createIndex?: boolean ; outputTable ?: string ; cache ?: boolean ; model ?: string ; apiKey ?: string ; vertex ?: boolean ; project ?: string ; location ?: string ; ollama ?: boolean | Ollama ; contextWindow ?: number ; verbose ?: boolean }): Promise < SimpleTable > ;
1067
1073
```
1068
1074
1069
1075
##### Parameters
@@ -1097,6 +1103,9 @@ async aiVectorSimilarity(text: string, column: string, nbResults: number, option
1097
1103
pass it here too.
1098
1104
- ** ` options.verbose ` ** : - If ` true ` , logs additional debugging information.
1099
1105
Defaults to ` false ` .
1106
+ - ** ` options.contextWindow ` ** : - An option to specify the context window size
1107
+ for Ollama models. By default, Ollama sets this based on the model, and that
1108
+ default can be lower than the model's actual maximum context window size.
1100
1109
1101
1110
##### Returns
1102
1111
@@ -1161,7 +1170,7 @@ and time. Remember to add `.journalism-cache` to your `.gitignore`.
1161
1170
##### Signature
1162
1171
1163
1172
``` typescript
1164
- async aiQuery (prompt : string , options ?: { cache?: boolean ; model ?: string ; apiKey ?: string ; vertex ?: boolean ; project ?: string ; location ?: string ; ollama ?: boolean | Ollama ; verbose ?: boolean }): Promise < void > ;
1173
+ async aiQuery (prompt : string , options ?: { cache?: boolean ; model ?: string ; apiKey ?: string ; vertex ?: boolean ; project ?: string ; location ?: string ; ollama ?: boolean | Ollama ; contextWindow ?: number ; verbose ?: boolean }): Promise < void > ;
1165
1174
```
1166
1175
1167
1176
##### Parameters
@@ -1184,6 +1193,9 @@ async aiQuery(prompt: string, options?: { cache?: boolean; model?: string; apiKe
1184
1193
- ** ` options.ollama ` ** : - If ` true ` , uses Ollama. Defaults to the ` OLLAMA `
1185
1194
environment variable. If you want your Ollama instance to be used, you can
1186
1195
pass it here too.
1196
+ - ** ` options.contextWindow ` ** : - An option to specify the context window size
1197
+ for Ollama models. By default, Ollama sets this based on the model, and that
1198
+ default can be lower than the model's actual maximum context window size.
1187
1199
- ** ` options.verbose ` ** : - If ` true ` , logs additional debugging information,
1188
1200
including the full prompt sent to the AI. Defaults to ` false ` .
1189
1201
0 commit comments