
Commit 60115f3

fix: non-streaming gemini tool calling (#20207)
1 parent: 82f32ca

3 files changed: +18 -12 lines

llama-index-integrations/llms/llama-index-llms-google-genai/llama_index/llms/google_genai/base.py

Lines changed: 16 additions & 9 deletions
@@ -392,9 +392,14 @@ def gen() -> ChatResponseGen:
                     content += content_delta
                 llama_resp = chat_from_gemini_response(r)
                 if content:
-                    llama_resp.message.blocks.append(TextBlock(text=content))
+                    llama_resp.message.blocks = [TextBlock(text=content)]
                 if thoughts:
-                    llama_resp.message.blocks.append(ThinkingBlock(content=thoughts))
+                    if llama_resp.message.blocks:
+                        llama_resp.message.blocks.append(
+                            ThinkingBlock(content=thoughts)
+                        )
+                    else:
+                        llama_resp.message.blocks = [ThinkingBlock(content=thoughts)]
                 yield llama_resp

             if self.use_file_api:

@@ -445,15 +450,17 @@ async def gen() -> ChatResponseAsyncGen:
                 else:
                     content += content_delta
                 llama_resp = chat_from_gemini_response(r)
-                llama_resp.delta = content_delta
                 if content:
-                    llama_resp.message.blocks.append(
-                        TextBlock(text=content)
-                    )
+                    llama_resp.message.blocks = [TextBlock(text=content)]
                 if thoughts:
-                    llama_resp.message.blocks.append(
-                        ThinkingBlock(content=thoughts)
-                    )
+                    if llama_resp.message.blocks:
+                        llama_resp.message.blocks.append(
+                            ThinkingBlock(content=thoughts)
+                        )
+                    else:
+                        llama_resp.message.blocks = [
+                            ThinkingBlock(content=thoughts)
+                        ]
                 yield llama_resp

             if self.use_file_api:
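
As the diff reads, both streaming generators accumulate content and thoughts across chunks, but chat_from_gemini_response(r) already returns a message whose blocks were built from the current chunk, so appending the accumulated text added a duplicate block next to the chunk's own text. The rewrite replaces the text outright and only appends the ThinkingBlock when a block list already exists. A standalone sketch of that merge rule; the dataclasses and merge_blocks below are illustrative stand-ins for the llama_index block types, not the library's API:

# Standalone sketch of the block-merging rule the diff implements.
# TextBlock/ThinkingBlock are stand-ins for the llama_index block types;
# merge_blocks is a hypothetical helper for illustration only.
from dataclasses import dataclass, field
from typing import List, Union


@dataclass
class TextBlock:
    text: str


@dataclass
class ThinkingBlock:
    content: str


@dataclass
class Message:
    blocks: List[Union[TextBlock, ThinkingBlock]] = field(default_factory=list)


def merge_blocks(message: Message, content: str, thoughts: str) -> None:
    # Replace any chunk-level text with the fully accumulated text,
    # instead of appending a second copy next to it.
    if content:
        message.blocks = [TextBlock(text=content)]
    # Append thoughts after the text when blocks exist; otherwise a
    # thoughts-only response still gets a well-formed block list.
    if thoughts:
        if message.blocks:
            message.blocks.append(ThinkingBlock(content=thoughts))
        else:
            message.blocks = [ThinkingBlock(content=thoughts)]


msg = Message(blocks=[TextBlock(text="partial")])
merge_blocks(msg, content="full answer", thoughts="chain of thought")
assert msg.blocks == [
    TextBlock(text="full answer"),
    ThinkingBlock(content="chain of thought"),
]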

llama-index-integrations/llms/llama-index-llms-google-genai/llama_index/llms/google_genai/utils.py

Lines changed: 1 addition & 2 deletions
@@ -191,7 +191,7 @@ def chat_from_gemini_response(
             )
             content_blocks.append(
                 ToolCallBlock(
-                    tool_call_id=part.function_call.id or "",
+                    tool_call_id=part.function_call.name or "",
                     tool_name=part.function_call.name or "",
                     tool_kwargs=part.function_call.args or {},
                 )

@@ -200,7 +200,6 @@ def chat_from_gemini_response(
             # follow the same pattern as for transforming a chatmessage into a gemini message: if it's a function response, package it alone and return it
             additional_kwargs["tool_call_id"] = part.function_response.id
             role = ROLES_FROM_GEMINI[top_candidate.content.role]
-            print("RESPONSE", json.dumps(part.function_response.response))
             return ChatResponse(
                 message=ChatMessage(
                     role=role, content=json.dumps(part.function_response.response)
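
The one-line change above is the heart of the tool-calling fix: Gemini function-call parts can come back with id unset, and as the fix suggests, keying the ToolCallBlock on part.function_call.id then yields empty tool_call_ids that cannot be matched to later tool outputs, so the function name is used as the fallback id. A minimal sketch of the fixed mapping; FunctionCall here is a stand-in for google.genai.types.FunctionCall and tool_call_fields is a hypothetical helper:

# Sketch of the fixed ToolCallBlock field mapping. FunctionCall is a
# stand-in for google.genai.types.FunctionCall; tool_call_fields is a
# hypothetical helper mirroring the changed line above.
from dataclasses import dataclass
from typing import Any, Dict, Optional


@dataclass
class FunctionCall:
    name: Optional[str] = None
    id: Optional[str] = None  # can be unset in Gemini responses
    args: Optional[Dict[str, Any]] = None


def tool_call_fields(fc: FunctionCall) -> Dict[str, Any]:
    return {
        "tool_call_id": fc.name or "",  # was fc.id or "", which could be empty
        "tool_name": fc.name or "",
        "tool_kwargs": fc.args or {},
    }


fields = tool_call_fields(FunctionCall(name="get_weather", args={"city": "Paris"}))
assert fields["tool_call_id"] == "get_weather"
assert fields["tool_kwargs"] == {"city": "Paris"}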

llama-index-integrations/llms/llama-index-llms-google-genai/pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ dev = [

 [project]
 name = "llama-index-llms-google-genai"
-version = "0.7.0"
+version = "0.7.1"
 description = "llama-index llms google genai integration"
 authors = [{name = "Your Name", email = "[email protected]"}]
 requires-python = ">=3.9,<4.0"
