38 changes: 20 additions & 18 deletions libs/langchain_v1/langchain/agents/middleware/human_in_the_loop.py
@@ -112,14 +112,14 @@ class HumanInTheLoopMiddleware(AgentMiddleware):

def __init__(
self,
tool_configs: dict[str, bool | ToolConfig],
interrupt_on: dict[str, bool | ToolConfig],
*,
description_prefix: str = "Tool execution requires approval",
) -> None:
"""Initialize the human in the loop middleware.

Args:
tool_configs: Mapping of tool name to allowed actions.
interrupt_on: Mapping of tool name to allowed actions.
If a tool doesn't have an entry, it's auto-approved by default.
* `True` indicates all actions are allowed: accept, edit, and respond.
* `False` indicates that the tool is auto-approved.
@@ -130,7 +130,7 @@ def __init__(
"""
super().__init__()
resolved_tool_configs: dict[str, ToolConfig] = {}
for tool_name, tool_config in tool_configs.items():
for tool_name, tool_config in interrupt_on.items():
if isinstance(tool_config, bool):
if tool_config is True:
resolved_tool_configs[tool_name] = ToolConfig(
@@ -142,11 +142,11 @@ def __init__(
tool_config.get(x, False) for x in ["allow_accept", "allow_edit", "allow_respond"]
):
resolved_tool_configs[tool_name] = tool_config
self.tool_configs = resolved_tool_configs
self.interrupt_on = resolved_tool_configs
self.description_prefix = description_prefix

def after_model(self, state: AgentState) -> dict[str, Any] | None: # type: ignore[override]
"""Trigger HITL flows for relevant tool calls after an AIMessage."""
"""Trigger interrupt flows for relevant tool calls after an AIMessage."""
messages = state["messages"]
if not messages:
return None
@@ -156,28 +156,28 @@ def after_model(self, state: AgentState) -> dict[str, Any] | None: # type: igno
return None

# Separate tool calls that need interrupts from those that don't
hitl_tool_calls: list[ToolCall] = []
interrupt_tool_calls: list[ToolCall] = []
auto_approved_tool_calls = []

for tool_call in last_ai_msg.tool_calls:
hitl_tool_calls.append(tool_call) if tool_call[
interrupt_tool_calls.append(tool_call) if tool_call[
"name"
] in self.tool_configs else auto_approved_tool_calls.append(tool_call)
] in self.interrupt_on else auto_approved_tool_calls.append(tool_call)

# If no interrupts needed, return early
if not hitl_tool_calls:
if not interrupt_tool_calls:
return None

# Process all tool calls that require interrupts
approved_tool_calls: list[ToolCall] = auto_approved_tool_calls.copy()
artificial_tool_messages: list[ToolMessage] = []

# Create interrupt requests for all tools that need approval
hitl_requests: list[HumanInTheLoopRequest] = []
for tool_call in hitl_tool_calls:
interrupt_requests: list[HumanInTheLoopRequest] = []
for tool_call in interrupt_tool_calls:
tool_name = tool_call["name"]
tool_args = tool_call["args"]
config = self.tool_configs[tool_name]
config = self.interrupt_on[tool_name]
description = (
config.get("description")
or f"{self.description_prefix}\n\nTool: {tool_name}\nArgs: {tool_args}"
@@ -191,21 +191,23 @@ def after_model(self, state: AgentState) -> dict[str, Any] | None: # type: igno
"config": config,
"description": description,
}
hitl_requests.append(request)
interrupt_requests.append(request)

responses: list[HumanInTheLoopResponse] = interrupt(hitl_requests)
responses: list[HumanInTheLoopResponse] = interrupt(interrupt_requests)

# Validate that the number of responses matches the number of interrupt tool calls
if (responses_len := len(responses)) != (hitl_tool_calls_len := len(hitl_tool_calls)):
if (responses_len := len(responses)) != (
interrupt_tool_calls_len := len(interrupt_tool_calls)
):
msg = (
f"Number of human responses ({responses_len}) does not match "
f"number of hanging tool calls ({hitl_tool_calls_len})."
f"number of hanging tool calls ({interrupt_tool_calls_len})."
)
raise ValueError(msg)

for i, response in enumerate(responses):
tool_call = hitl_tool_calls[i]
config = self.tool_configs[tool_call["name"]]
tool_call = interrupt_tool_calls[i]
config = self.interrupt_on[tool_call["name"]]

if response["type"] == "accept" and config.get("allow_accept"):
approved_tool_calls.append(tool_call)
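The hunks above amount to a public rename: the constructor keyword and the instance attribute `tool_configs` become `interrupt_on`, while the accepted values (a bool or a per-tool config dict) are unchanged. A minimal usage sketch under the new name follows; the import path is inferred from the file's location in the repo, and the tool names are illustrative assumptions, not part of this PR.

```python
# Usage sketch only: the import path is inferred from the file location
# (libs/langchain_v1/langchain/agents/middleware/human_in_the_loop.py),
# and the tool names below are illustrative assumptions.
from langchain.agents.middleware.human_in_the_loop import HumanInTheLoopMiddleware

middleware = HumanInTheLoopMiddleware(
    interrupt_on={  # renamed from `tool_configs` in this PR
        "send_email": True,    # interrupt; all actions allowed (accept/edit/respond)
        "get_weather": False,  # never interrupt; the tool call is auto-approved
        "delete_record": {     # interrupt; only the listed actions are allowed
            "allow_accept": True,
            "allow_edit": False,
            "allow_respond": True,
            "description": "Deleting data requires sign-off",
        },
    },
    description_prefix="Tool execution requires approval",
)

# The resolved per-tool configs are now exposed as `interrupt_on`
# (previously `tool_configs`).
assert "delete_record" in middleware.interrupt_on
```

Note that the diff shows no backwards-compatibility alias, so callers passing `tool_configs=` by keyword would need to switch to `interrupt_on=`.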
32 changes: 16 additions & 16 deletions libs/langchain_v1/tests/unit_tests/agents/test_middleware_agent.py
@@ -372,13 +372,13 @@ def test_human_in_the_loop_middleware_initialization() -> None:
"""Test HumanInTheLoopMiddleware initialization."""

middleware = HumanInTheLoopMiddleware(
tool_configs={
interrupt_on={
"test_tool": {"allow_accept": True, "allow_edit": True, "allow_respond": True}
},
description_prefix="Custom prefix",
)

assert middleware.tool_configs == {
assert middleware.interrupt_on == {
"test_tool": {"allow_accept": True, "allow_edit": True, "allow_respond": True}
}
assert middleware.description_prefix == "Custom prefix"
@@ -388,7 +388,7 @@ def test_human_in_the_loop_middleware_no_interrupts_needed() -> None:
"""Test HumanInTheLoopMiddleware when no interrupts are needed."""

middleware = HumanInTheLoopMiddleware(
tool_configs={
interrupt_on={
"test_tool": {"allow_respond": True, "allow_edit": True, "allow_accept": True}
}
)
@@ -417,7 +417,7 @@ def test_human_in_the_loop_middleware_single_tool_accept() -> None:
"""Test HumanInTheLoopMiddleware with single tool accept response."""

middleware = HumanInTheLoopMiddleware(
tool_configs={
interrupt_on={
"test_tool": {"allow_respond": True, "allow_edit": True, "allow_accept": True}
}
)
@@ -452,7 +452,7 @@ def mock_accept(requests):
def test_human_in_the_loop_middleware_single_tool_edit() -> None:
"""Test HumanInTheLoopMiddleware with single tool edit response."""
middleware = HumanInTheLoopMiddleware(
tool_configs={
interrupt_on={
"test_tool": {"allow_respond": True, "allow_edit": True, "allow_accept": True}
}
)
@@ -487,7 +487,7 @@ def test_human_in_the_loop_middleware_single_tool_response() -> None:
"""Test HumanInTheLoopMiddleware with single tool response with custom message."""

middleware = HumanInTheLoopMiddleware(
tool_configs={
interrupt_on={
"test_tool": {"allow_respond": True, "allow_edit": True, "allow_accept": True}
}
)
@@ -518,7 +518,7 @@ def test_human_in_the_loop_middleware_multiple_tools_mixed_responses() -> None:
"""Test HumanInTheLoopMiddleware with multiple tools and mixed response types."""

middleware = HumanInTheLoopMiddleware(
tool_configs={
interrupt_on={
"get_forecast": {"allow_accept": True, "allow_edit": True, "allow_respond": True},
"get_temperature": {"allow_accept": True, "allow_edit": True, "allow_respond": True},
}
@@ -565,7 +565,7 @@ def test_human_in_the_loop_middleware_multiple_tools_edit_responses() -> None:
"""Test HumanInTheLoopMiddleware with multiple tools and edit responses."""

middleware = HumanInTheLoopMiddleware(
tool_configs={
interrupt_on={
"get_forecast": {"allow_accept": True, "allow_edit": True, "allow_respond": True},
"get_temperature": {"allow_accept": True, "allow_edit": True, "allow_respond": True},
}
@@ -617,7 +617,7 @@ def test_human_in_the_loop_middleware_edit_with_modified_args() -> None:
"""Test HumanInTheLoopMiddleware with edit action that includes modified args."""

middleware = HumanInTheLoopMiddleware(
tool_configs={
interrupt_on={
"test_tool": {"allow_accept": True, "allow_edit": True, "allow_respond": True}
}
)
@@ -657,7 +657,7 @@ def mock_edit_with_args(requests):
def test_human_in_the_loop_middleware_unknown_response_type() -> None:
"""Test HumanInTheLoopMiddleware with unknown response type."""
middleware = HumanInTheLoopMiddleware(
tool_configs={
interrupt_on={
"test_tool": {"allow_accept": True, "allow_edit": True, "allow_respond": True}
}
)
@@ -684,7 +684,7 @@ def test_human_in_the_loop_middleware_disallowed_action() -> None:

# edit is not allowed by tool config
middleware = HumanInTheLoopMiddleware(
tool_configs={
interrupt_on={
"test_tool": {"allow_respond": True, "allow_edit": False, "allow_accept": True}
}
)
@@ -721,7 +721,7 @@ def test_human_in_the_loop_middleware_mixed_auto_approved_and_interrupt() -> Non
"""Test HumanInTheLoopMiddleware with mix of auto-approved and interrupt tools."""

middleware = HumanInTheLoopMiddleware(
tool_configs={
interrupt_on={
"interrupt_tool": {"allow_respond": True, "allow_edit": True, "allow_accept": True}
}
)
@@ -755,7 +755,7 @@ def test_human_in_the_loop_middleware_interrupt_request_structure() -> None:
"""Test that interrupt requests are structured correctly."""

middleware = HumanInTheLoopMiddleware(
tool_configs={
interrupt_on={
"test_tool": {"allow_accept": True, "allow_edit": True, "allow_respond": True}
},
description_prefix="Custom prefix",
@@ -796,7 +796,7 @@ def mock_capture_requests(requests):

def test_human_in_the_loop_middleware_boolean_configs() -> None:
"""Test HITL middleware with boolean tool configs."""
middleware = HumanInTheLoopMiddleware(tool_configs={"test_tool": True})
middleware = HumanInTheLoopMiddleware(interrupt_on={"test_tool": True})

ai_message = AIMessage(
content="I'll help you",
@@ -834,7 +834,7 @@ def test_human_in_the_loop_middleware_boolean_configs() -> None:
assert len(result["messages"]) == 1
assert result["messages"][0].tool_calls[0]["args"] == {"input": "edited"}

middleware = HumanInTheLoopMiddleware(tool_configs={"test_tool": False})
middleware = HumanInTheLoopMiddleware(interrupt_on={"test_tool": False})

result = middleware.after_model(state)
# No interruption should occur
@@ -843,7 +843,7 @@ def test_human_in_the_loop_middleware_sequence_mismatch() -> None:

def test_human_in_the_loop_middleware_sequence_mismatch() -> None:
"""Test that sequence mismatch in resume raises an error."""
middleware = HumanInTheLoopMiddleware(tool_configs={"test_tool": True})
middleware = HumanInTheLoopMiddleware(interrupt_on={"test_tool": True})

ai_message = AIMessage(
content="I'll help you",
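The updated tests all follow the same shape: build an `AIMessage` with tool calls, patch the `interrupt` call to return canned human responses, and run `after_model`. A condensed sketch of that pattern is below; the patch target and the `state` construction are assumptions based on the snippets visible in this diff, and only the `accept` response type is shown.

```python
# Sketch of the test pattern, assuming `interrupt` is imported into the
# middleware module (patch target inferred, not shown in this diff).
from unittest.mock import patch

from langchain_core.messages import AIMessage
from langchain.agents.middleware.human_in_the_loop import HumanInTheLoopMiddleware

middleware = HumanInTheLoopMiddleware(interrupt_on={"test_tool": True})

ai_message = AIMessage(
    content="I'll help you",
    tool_calls=[{"name": "test_tool", "args": {"input": "original"}, "id": "1"}],
)
state = {"messages": [ai_message]}


def mock_accept(requests):
    # One response per interrupted tool call, in the same order.
    return [{"type": "accept"} for _ in requests]


with patch("langchain.agents.middleware.human_in_the_loop.interrupt", mock_accept):
    result = middleware.after_model(state)
    # The tests then assert that accepted calls pass through unchanged and that
    # a response-count mismatch raises ValueError (see the hunks above).
```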