unable to use ChatPromptTemplate with AgentExecutor #32324
Unanswered
ranjith-3330
asked this question in
Q&A
Replies: 0 comments
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Uh oh!
There was an error while loading. Please reload this page.
-
hi team
i tyring to create create_react_agent to ans questions related to csv file using python tool
below is the code
libraries
from langchain.chat_models import init_chat_model
from langgraph.checkpoint.memory import MemorySaver
from langchain_experimental.utilities import PythonREPL
from langchain_core.tools import Tool
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langgraph.prebuilt import create_react_agent
from langchain.agents import AgentExecutor
tool
python_repl = PythonREPL()
You can create the tool to pass to an agent
repl_tool = Tool(
name="python_repl",
description="A Python shell. Use this to execute python commands. Input should be a valid python command. If you want to see the output of a value, you should print it out with
print(...)
.",func=python_repl.run,
)
tools = [repl_tool]
prompt
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
prompt_template = ChatPromptTemplate.from_messages([
("system",
"""You are an AI CSV chatbot. Your role is to answer questions related to the content in the CSV file.
You have access to tools and should use them when needed.
csv_file_path: export.csv
Use this format when reasoning:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of tools
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin!
"""),
("human", "{questions}"),
])
#cross check of model working
model = init_chat_model("google_genai:gemini-2.5-pro", temperature=0)
model.invoke("what is your name ")
agent = create_react_agent(model, tools=tools, prompt=prompt_template,debug=True)
react_agent_executor = AgentExecutor(
agent=agent, tools=tools, verbose=True, handle_parsing_errors=True
)
react_agent_executor.invoke({"questions": "Fetch the Material Description name with the highest Balance"})
�[1m> Entering new AgentExecutor chain...�[0m
**---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
Cell In[34], line 1
----> 1 react_agent_executor.invoke({"questions": "Fetch the Material Description name with the highest Balance"})
File d:\chat_bot_csv.chat\Lib\site-packages\langchain\chains\base.py:165, in Chain.invoke(self, input, config, **kwargs)
162 try:
163 self._validate_inputs(inputs)
164 outputs = (
--> 165 self._call(inputs, run_manager=run_manager)
166 if new_arg_supported
167 else self._call(inputs)
168 )
170 final_outputs: dict[str, Any] = self.prep_outputs(
171 inputs,
172 outputs,
173 return_only_outputs,
174 )
175 except BaseException as e:
File d:\chat_bot_csv.chat\Lib\site-packages\langchain\agents\agent.py:1625, in AgentExecutor._call(self, inputs, run_manager)
1623 # We now enter the agent loop (until it returns something).
1624 while self._should_continue(iterations, time_elapsed):
-> 1625 next_step_output = self._take_next_step(
1626 name_to_tool_map,
1627 color_mapping,
1628 inputs,
1629 intermediate_steps,
1630 run_manager=run_manager,
1631 )
1632 if isinstance(next_step_output, AgentFinish):
1633 return self._return(
1634 next_step_output,
1635 intermediate_steps,
1636 run_manager=run_manager,
1637 )
File d:\chat_bot_csv.chat\Lib\site-packages\langchain\agents\agent.py:1325, in AgentExecutor._take_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
1316 def _take_next_step(
1317 self,
1318 name_to_tool_map: dict[str, BaseTool],
(...) 1322 run_manager: Optional[CallbackManagerForChainRun] = None,
1323 ) -> Union[AgentFinish, list[tuple[AgentAction, str]]]:
1324 return self._consume_next_step(
-> 1325 list(
1326 self._iter_next_step(
1327 name_to_tool_map,
1328 color_mapping,
1329 inputs,
1330 intermediate_steps,
1331 run_manager,
1332 ),
1333 ),
1334 )
File d:\chat_bot_csv.chat\Lib\site-packages\langchain\agents\agent.py:1352, in AgentExecutor._iter_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
1349 intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
1351 # Call the LLM to see what to do.
-> 1352 output = self._action_agent.plan(
1353 intermediate_steps,
1354 callbacks=run_manager.get_child() if run_manager else None,
1355 **inputs,
1356 )
1357 except OutputParserException as e:
1358 if isinstance(self.handle_parsing_errors, bool):
File d:\chat_bot_csv.chat\Lib\site-packages\langchain\agents\agent.py:455, in RunnableAgent.plan(self, intermediate_steps, callbacks, **kwargs)
447 final_output: Any = None
448 if self.stream_runnable:
449 # Use streaming to make sure that the underlying LLM is invoked in a
450 # streaming
(...) 453 # Because the response from the plan is not a generator, we need to
454 # accumulate the output into final output and return that.
--> 455 for chunk in self.runnable.stream(inputs, config={"callbacks": callbacks}):
456 if final_output is None:
457 final_output = chunk
File d:\chat_bot_csv.chat\Lib\site-packages\langgraph\pregel\main.py:2644, in Pregel.stream(self, input, config, context, stream_mode, print_mode, output_keys, interrupt_before, interrupt_after, durability, subgraphs, debug, **kwargs)
2642 for task in loop.match_cached_writes():
2643 loop.output_writes(task.id, task.writes, cached=True)
-> 2644 for _ in runner.tick(
2645 [t for t in loop.tasks.values() if not t.writes],
2646 timeout=self.step_timeout,
2647 get_waiter=get_waiter,
2648 schedule_task=loop.accept_push,
2649 ):
2650 # emit output
2651 yield from _output(
2652 stream_mode, print_mode, subgraphs, stream.get, queue.Empty
2653 )
2654 loop.after_tick()
File d:\chat_bot_csv.chat\Lib\site-packages\langgraph\pregel_runner.py:162, in PregelRunner.tick(self, tasks, reraise, timeout, retry_policy, get_waiter, schedule_task)
160 t = tasks[0]
161 try:
--> 162 run_with_retry(
163 t,
164 retry_policy,
165 configurable={
166 CONFIG_KEY_CALL: partial(
167 _call,
168 weakref.ref(t),
169 retry_policy=retry_policy,
170 futures=weakref.ref(futures),
171 schedule_task=schedule_task,
172 submit=self.submit,
173 ),
174 },
175 )
176 self.commit(t, None)
177 except Exception as exc:
File d:\chat_bot_csv.chat\Lib\site-packages\langgraph\pregel_retry.py:42, in run_with_retry(task, retry_policy, configurable)
40 task.writes.clear()
41 # run the task
---> 42 return task.proc.invoke(task.input, config)
43 except ParentCommand as exc:
44 ns: str = config[CONF][CONFIG_KEY_CHECKPOINT_NS]
File d:\chat_bot_csv.chat\Lib\site-packages\langgraph_internal_runnable.py:640, in RunnableSeq.invoke(self, input, config, **kwargs)
638 # run in context
639 with set_config_context(config, run) as context:
--> 640 input = context.run(step.invoke, input, config, **kwargs)
641 else:
642 input = step.invoke(input, config)
File d:\chat_bot_csv.chat\Lib\site-packages\langgraph_internal_runnable.py:377, in RunnableCallable.invoke(self, input, config, **kwargs)
375 # run in context
376 with set_config_context(child_config, run) as context:
--> 377 ret = context.run(self.func, *args, **kwargs)
378 except BaseException as e:
379 run_manager.on_chain_error(e)
File d:\chat_bot_csv.chat\Lib\site-packages\langgraph\prebuilt\chat_agent_executor.py:618, in create_react_agent..call_model(state, runtime, config)
616 response = cast(AIMessage, dynamic_model.invoke(model_input, config)) # type: ignore[arg-type]
617 else:
--> 618 response = cast(AIMessage, static_model.invoke(model_input, config)) # type: ignore[union-attr]
620 # add agent name to the AIMessage
621 response.name = name
File d:\chat_bot_csv.chat\Lib\site-packages\langchain_core\runnables\base.py:3044, in RunnableSequence.invoke(self, input, config, **kwargs)
3042 with set_config_context(config) as context:
3043 if i == 0:
-> 3044 input_ = context.run(step.invoke, input_, config, **kwargs)
3045 else:
3046 input_ = context.run(step.invoke, input_, config)
File d:\chat_bot_csv.chat\Lib\site-packages\langchain_core\prompts\base.py:216, in BasePromptTemplate.invoke(self, input, config, **kwargs)
214 if self.tags:
215 config["tags"] = config["tags"] + self.tags
--> 216 return self._call_with_config(
217 self._format_prompt_with_error_handling,
218 input,
219 config,
220 run_type="prompt",
221 serialized=self._serialized,
222 )
File d:\chat_bot_csv.chat\Lib\site-packages\langchain_core\runnables\base.py:1939, in Runnable.call_with_config(self, func, input, config, run_type, serialized, **kwargs)
1935 child_config = patch_config(config, callbacks=run_manager.get_child())
1936 with set_config_context(child_config) as context:
1937 output = cast(
1938 "Output",
-> 1939 context.run(
1940 call_func_with_variable_args, # type: ignore[arg-type]
1941 func,
1942 input_,
1943 config,
1944 run_manager,
1945 **kwargs,
1946 ),
1947 )
1948 except BaseException as e:
1949 run_manager.on_chain_error(e)
File d:\chat_bot_csv.chat\Lib\site-packages\langchain_core\runnables\config.py:429, in call_func_with_variable_args(func, input, config, run_manager, **kwargs)
427 if run_manager is not None and accepts_run_manager(func):
428 kwargs["run_manager"] = run_manager
--> 429 return func(input, **kwargs)
File d:\chat_bot_csv.chat\Lib\site-packages\langchain_core\prompts\base.py:189, in BasePromptTemplate._format_prompt_with_error_handling(self, inner_input)
188 def format_prompt_with_error_handling(self, inner_input: dict) -> PromptValue:
--> 189 inner_input = self.validate_input(inner_input)
190 return self.format_prompt(**inner_input)
File d:\chat_bot_csv.chat\Lib\site-packages\langchain_core\prompts\base.py:183, in BasePromptTemplate._validate_input(self, inner_input)
177 example_key = missing.pop()
178 msg += (
179 f"\nNote: if you intended {{{example_key}}} to be part of the string"
180 " and not a variable, please escape it with double curly braces like: "
181 f"'{{{{{example_key}}}}}'."
182 )
--> 183 raise KeyError(
184 create_message(message=msg, error_code=ErrorCode.INVALID_PROMPT_INPUT)
185 )
186 return inner_input
KeyError: "Input to ChatPromptTemplate is missing variables {'questions'}. Expected: ['questions'] Received: ['messages', 'is_last_step', 'remaining_steps']\nNote: if you intended {questions} to be part of the string and not a variable, please escape it with double curly braces like: '{{questions}}'.\nFor troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/INVALID_PROMPT_INPUT "
During task with name 'agent' and id '72c01700-b4ec-1703-d9ba-cb0abf59f0fc' **
Beta Was this translation helpful? Give feedback.
All reactions