Skip to content

Commit b9e4ca1

Browse files
Release 0.14.7 (#20187)
Prepare release. Co-authored-by: adrianlyjak <[email protected]>
1 parent 9654190 commit b9e4ca1

File tree

7 files changed

+4448
-3
lines changed

7 files changed

+4448
-3
lines changed

CHANGELOG.md

Lines changed: 48 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -2,6 +2,54 @@
22

33
<!--- generated changelog --->
44

5+
## [2025-10-30]
6+
7+
### llama-index-core [0.14.7]
8+
- Feat/serpex tool integration ([#20141](https://github.com/run-llama/llama_index/pull/20141))
9+
- Fix outdated error message about setting LLM ([#20157](https://github.com/run-llama/llama_index/pull/20157))
10+
- Fixing some recently failing tests ([#20165](https://github.com/run-llama/llama_index/pull/20165))
11+
- Fix: update lock to latest workflow and fix issues ([#20173](https://github.com/run-llama/llama_index/pull/20173))
12+
- fix: ensure full docstring is used in FunctionTool ([#20175](https://github.com/run-llama/llama_index/pull/20175))
13+
- fix api docs build ([#20180](https://github.com/run-llama/llama_index/pull/20180))
14+
15+
### llama-index-embeddings-voyageai [0.5.0]
16+
- Updating the VoyageAI integration ([#20073](https://github.com/run-llama/llama_index/pull/20073))
17+
18+
### llama-index-llms-anthropic [0.10.0]
19+
- feat: integrate anthropic with tool call block ([#20100](https://github.com/run-llama/llama_index/pull/20100))
20+
21+
### llama-index-llms-bedrock-converse [0.10.7]
22+
- feat: Add support for Bedrock Guardrails streamProcessingMode ([#20150](https://github.com/run-llama/llama_index/pull/20150))
23+
- bedrock structured output optional force ([#20158](https://github.com/run-llama/llama_index/pull/20158))
24+
25+
### llama-index-llms-fireworks [0.4.5]
26+
- Update FireworksAI models ([#20169](https://github.com/run-llama/llama_index/pull/20169))
27+
28+
### llama-index-llms-mistralai [0.9.0]
29+
- feat: mistralai integration with tool call block ([#20103](https://github.com/run-llama/llama_index/pull/20103))
30+
31+
### llama-index-llms-ollama [0.9.0]
32+
- feat: integrate ollama with tool call block ([#20097](https://github.com/run-llama/llama_index/pull/20097))
33+
34+
### llama-index-llms-openai [0.6.6]
35+
- Allow setting temp of gpt-5-chat ([#20156](https://github.com/run-llama/llama_index/pull/20156))
36+
37+
### llama-index-readers-confluence [0.5.0]
38+
- feat(confluence): make SVG processing optional to fix pycairo install… ([#20115](https://github.com/run-llama/llama_index/pull/20115))
39+
40+
### llama-index-readers-github [0.9.0]
41+
- Add GitHub App authentication support ([#20106](https://github.com/run-llama/llama_index/pull/20106))
42+
43+
### llama-index-retrievers-bedrock [0.5.1]
44+
- Fixing some recently failing tests ([#20165](https://github.com/run-llama/llama_index/pull/20165))
45+
46+
### llama-index-tools-serpex [0.1.0]
47+
- Feat/serpex tool integration ([#20141](https://github.com/run-llama/llama_index/pull/20141))
48+
- add missing toml info ([#20186](https://github.com/run-llama/llama_index/pull/20186))
49+
50+
### llama-index-vector-stores-couchbase [0.6.0]
51+
- Add Hyperscale and Composite Vector Indexes support for Couchbase vector-store ([#20170](https://github.com/run-llama/llama_index/pull/20170))
52+
553
## [2025-10-26]
654

755
### llama-index-core [0.14.6]
Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,4 @@
1+
::: llama_index.tools.serpex
2+
options:
3+
members:
4+
- SerpexToolSpec

docs/api_reference/mkdocs.yml

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -716,6 +716,7 @@ plugins:
716716
- ../../llama-index-integrations/llms/llama-index-llms-sglang
717717
- ../../llama-index-integrations/embeddings/llama-index-embeddings-isaacus
718718
- ../../llama-index-integrations/llms/llama-index-llms-helicone
719+
- ../../llama-index-integrations/tools/llama-index-tools-serpex
719720
site_name: LlamaIndex
720721
site_url: https://developers.llamaindex.ai/python/framework-api-reference/
721722
theme:

docs/src/content/docs/framework/CHANGELOG.md

Lines changed: 66 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -4,6 +4,72 @@ title: ChangeLog
44

55
<!--- generated changelog --->
66

7+
## [2025-10-26]
8+
9+
### llama-index-core [0.14.6]
10+
11+
- Add allow_parallel_tool_calls for non-streaming ([#20117](https://github.com/run-llama/llama_index/pull/20117))
12+
- Fix invalid use of field-specific metadata ([#20122](https://github.com/run-llama/llama_index/pull/20122))
13+
- update doc for SemanticSplitterNodeParser ([#20125](https://github.com/run-llama/llama_index/pull/20125))
14+
- fix rare cases when sentence splits are larger than chunk size ([#20147](https://github.com/run-llama/llama_index/pull/20147))
15+
16+
### llama-index-embeddings-bedrock [0.7.0]
17+
18+
- Fix BedrockEmbedding to support Cohere v4 response format ([#20094](https://github.com/run-llama/llama_index/pull/20094))
19+
20+
### llama-index-embeddings-isaacus [0.1.0]
21+
22+
- feat: Isaacus embeddings integration ([#20124](https://github.com/run-llama/llama_index/pull/20124))
23+
24+
### llama-index-embeddings-oci-genai [0.4.2]
25+
26+
- Update OCI GenAI cohere models ([#20146](https://github.com/run-llama/llama_index/pull/20146))
27+
28+
### llama-index-llms-anthropic [0.9.7]
29+
30+
- Fix double token stream in anthropic llm ([#20108](https://github.com/run-llama/llama_index/pull/20108))
31+
- Ensure anthropic content delta only has user facing response ([#20113](https://github.com/run-llama/llama_index/pull/20113))
32+
33+
### llama-index-llms-baseten [0.1.7]
34+
35+
- add GLM ([#20121](https://github.com/run-llama/llama_index/pull/20121))
36+
37+
### llama-index-llms-helicone [0.1.0]
38+
39+
- integrate helicone to llama-index ([#20131](https://github.com/run-llama/llama_index/pull/20131))
40+
41+
### llama-index-llms-oci-genai [0.6.4]
42+
43+
- Update OCI GenAI cohere models ([#20146](https://github.com/run-llama/llama_index/pull/20146))
44+
45+
### llama-index-llms-openai [0.6.5]
46+
47+
- chore: openai vbump ([#20095](https://github.com/run-llama/llama_index/pull/20095))
48+
49+
### llama-index-readers-imdb-review [0.4.2]
50+
51+
- chore: Update selenium dependency in imdb-review reader ([#20105](https://github.com/run-llama/llama_index/pull/20105))
52+
53+
### llama-index-retrievers-bedrock [0.5.0]
54+
55+
- feat(bedrock): add async support for AmazonKnowledgeBasesRetriever ([#20114](https://github.com/run-llama/llama_index/pull/20114))
56+
57+
### llama-index-retrievers-superlinked [0.1.3]
58+
59+
- Update README.md ([#19829](https://github.com/run-llama/llama_index/pull/19829))
60+
61+
### llama-index-storage-kvstore-postgres [0.4.2]
62+
63+
- fix: Replace raw SQL string interpolation with proper SQLAlchemy parameterized APIs in PostgresKVStore ([#20104](https://github.com/run-llama/llama_index/pull/20104))
64+
65+
### llama-index-tools-mcp [0.4.3]
66+
67+
- Fix BasicMCPClient resource signatures ([#20118](https://github.com/run-llama/llama_index/pull/20118))
68+
69+
### llama-index-vector-stores-postgres [0.7.1]
70+
71+
- Add GIN index support for text array metadata in PostgreSQL vector store ([#20130](https://github.com/run-llama/llama_index/pull/20130))
72+
773
## [2025-10-15]
874

975
### llama-index-core [0.14.5]

llama-index-core/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -32,7 +32,7 @@ dev = [
3232

3333
[project]
3434
name = "llama-index-core"
35-
version = "0.14.6"
35+
version = "0.14.7"
3636
description = "Interface between LLMs and your data"
3737
authors = [{name = "Jerry Liu", email = "[email protected]"}]
3838
requires-python = ">=3.9,<4.0"

pyproject.toml

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -40,7 +40,7 @@ classifiers = [
4040
]
4141
dependencies = [
4242
"llama-index-cli>=0.5.0,<0.6 ; python_version > '3.9'",
43-
"llama-index-core>=0.14.6,<0.15.0",
43+
"llama-index-core>=0.14.7,<0.15.0",
4444
"llama-index-embeddings-openai>=0.5.0,<0.6",
4545
"llama-index-indices-managed-llama-cloud>=0.4.0",
4646
"llama-index-llms-openai>=0.6.0,<0.7",
@@ -70,7 +70,7 @@ maintainers = [
7070
name = "llama-index"
7171
readme = "README.md"
7272
requires-python = ">=3.9,<4.0"
73-
version = "0.14.6"
73+
version = "0.14.7"
7474

7575
[project.scripts]
7676
llamaindex-cli = "llama_index.cli.command_line:main"

0 commit comments

Comments (0)