- 
                Notifications
    You must be signed in to change notification settings 
- Fork 3.6k
Description
Checked other resources
- This is a bug, not a usage question. For questions, please use GitHub Discussions.
- I added a clear and detailed title that summarizes the issue.
- I read what a minimal reproducible example is (https://stackoverflow.com/help/minimal-reproducible-example).
- I included a self-contained, minimal example that demonstrates the issue INCLUDING all the relevant imports. The code runs AS IS to reproduce the issue.
Example Code
from langgraph.errors import GraphRecursionError
from langgraph.prebuilt import create_react_agent
from langchain_tavily import TavilySearch
tool = TavilySearch(max_results=2)
agent = create_react_agent(
    model="anthropic:claude-3-5-haiku-latest",
    tools=[tool],
)
for chunk in agent.stream(
    {"messages": [{"role": "user", "content": "what is the latest news in India Pakistan War"}]},
    stream_mode="messages"  # Changed from "updates" to "messages" to stream token by token
):
    print(chunk)
    print("\n")
Error Message and Stack Trace (if applicable)
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
Cell In[3], line 5
      1 agent = create_react_agent(
      2     model="anthropic:claude-3-5-haiku-latest",
      3     tools=[tool],
      4 )
----> 5 for chunk in agent.stream(
      6     {"messages": [{"role": "user", "content": "what is the latest news in India Pakistan War"}]},
      7     stream_mode="messages"  # Changed from "updates" to "messages"
      8 ):
      9     print(chunk)
     10     print("\n")
File ~/anaconda3/envs/plpkb/lib/python3.12/site-packages/langgraph/pregel/__init__.py:2461, in Pregel.stream(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, checkpoint_during, debug, subgraphs)
   2455     # Similarly to Bulk Synchronous Parallel / Pregel model
   2456     # computation proceeds in steps, while there are channel updates.
   2457     # Channel updates from step N are only visible in step N+1
   2458     # channels are guaranteed to be immutable for the duration of the step,
   2459     # with channel updates applied only at the transition between steps.
   2460     while loop.tick(input_keys=self.input_channels):
-> 2461         for _ in runner.tick(
   2462             loop.tasks.values(),
   2463             timeout=self.step_timeout,
   2464             retry_policy=self.retry_policy,
   2465             get_waiter=get_waiter,
   2466         ):
   2467             # emit output
   2468             yield from output()
   2469 # emit output
File ~/anaconda3/envs/plpkb/lib/python3.12/site-packages/langgraph/prebuilt/chat_agent_executor.py:745, in create_react_agent.<locals>.call_model(state, config)
    743 def call_model(state: StateSchema, config: RunnableConfig) -> StateSchema:
    744     state = _get_model_input_state(state)
--> 745     response = cast(AIMessage, model_runnable.invoke(state, config))
    746     # add agent name to the AIMessage
    747     response.name = name
File ~/anaconda3/envs/plpkb/lib/python3.12/site-packages/langchain_core/runnables/base.py:3034, in RunnableSequence.invoke(self, input, config, **kwargs)
   3032                 input = context.run(step.invoke, input, config, **kwargs)
   3033             else:
-> 3034                 input = context.run(step.invoke, input, config)
   3035 # finish the root run
   3036 except BaseException as e:
File ~/anaconda3/envs/plpkb/lib/python3.12/site-packages/langchain_core/runnables/base.py:5416, in RunnableBindingBase.invoke(self, input, config, **kwargs)
   5409 @override
   5410 def invoke(
   5411     self,
   (...)   5414     **kwargs: Optional[Any],
   5415 ) -> Output:
-> 5416     return self.bound.invoke(
   5417         input,
   5418         self._merge_configs(config),
   5419         **{**self.kwargs, **kwargs},
   5420     )
File ~/anaconda3/envs/plpkb/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:370, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
    358 @override
    359 def invoke(
    360     self,
   (...)    365     **kwargs: Any,
    366 ) -> BaseMessage:
    367     config = ensure_config(config)
    368     return cast(
    369         "ChatGeneration",
--> 370         self.generate_prompt(
    371             [self._convert_input(input)],
    372             stop=stop,
    373             callbacks=config.get("callbacks"),
    374             tags=config.get("tags"),
    375             metadata=config.get("metadata"),
    376             run_name=config.get("run_name"),
    377             run_id=config.pop("run_id", None),
    378             **kwargs,
    379         ).generations[0][0],
    380     ).message
File ~/anaconda3/envs/plpkb/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:947, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
    938 @override
    939 def generate_prompt(
    940     self,
   (...)    944     **kwargs: Any,
    945 ) -> LLMResult:
    946     prompt_messages = [p.to_messages() for p in prompts]
--> 947     return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
File ~/anaconda3/envs/plpkb/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:766, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
    763 for i, m in enumerate(input_messages):
    764     try:
    765         results.append(
--> 766             self._generate_with_cache(
    767                 m,
    768                 stop=stop,
    769                 run_manager=run_managers[i] if run_managers else None,
    770                 **kwargs,
    771             )
    772         )
    773     except BaseException as e:
    774         if run_managers:
File ~/anaconda3/envs/plpkb/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:1001, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
    995 if self._should_stream(
    996     async_api=False,
    997     run_manager=run_manager,
    998     **kwargs,
    999 ):
   1000     chunks: list[ChatGenerationChunk] = []
-> 1001     for chunk in self._stream(messages, stop=stop, **kwargs):
   1002         chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk)
   1003         if run_manager:
File ~/anaconda3/envs/plpkb/lib/python3.12/site-packages/langchain_anthropic/chat_models.py:1069, in ChatAnthropic._stream(self, messages, stop, run_manager, stream_usage, **kwargs)
   1063 coerce_content_to_string = (
   1064     not _tools_in_params(payload)
   1065     and not _documents_in_params(payload)
   1066     and not _thinking_in_params(payload)
   1067 )
   1068 for event in stream:
-> 1069     msg = _make_message_chunk_from_anthropic_event(
   1070         event,
   1071         stream_usage=stream_usage,
   1072         coerce_content_to_string=coerce_content_to_string,
   1073     )
   1074     if msg is not None:
   1075         chunk = ChatGenerationChunk(message=msg)
File ~/anaconda3/envs/plpkb/lib/python3.12/site-packages/langchain_anthropic/chat_models.py:1784, in _make_message_chunk_from_anthropic_event(event, stream_usage, coerce_content_to_string)
   1779         message_chunk = AIMessageChunk(
   1780             content=[content_block],
   1781             tool_call_chunks=[tool_call_chunk],  # type: ignore
   1782         )
   1783 elif event.type == "message_delta" and stream_usage:
-> 1784     usage_metadata = _create_usage_metadata(event.usage)
   1785     message_chunk = AIMessageChunk(
   1786         content="",
   1787         usage_metadata=usage_metadata,
   (...)   1791         },
   1792     )
   1793 else:
File ~/anaconda3/envs/plpkb/lib/python3.12/site-packages/langchain_anthropic/chat_models.py:1812, in _create_usage_metadata(anthropic_usage)
   1805 input_token_details: dict = {
   1806     "cache_read": getattr(anthropic_usage, "cache_read_input_tokens", None),
   1807     "cache_creation": getattr(anthropic_usage, "cache_creation_input_tokens", None),
   1808 }
   1810 # Anthropic input_tokens exclude cached token counts.
   1811 input_tokens = (
-> 1812     getattr(anthropic_usage, "input_tokens", 0)
   1813     + (input_token_details["cache_read"] or 0)
   1814     + (input_token_details["cache_creation"] or 0)
   1815 )
   1816 output_tokens = getattr(anthropic_usage, "output_tokens", 0)
   1817 return UsageMetadata(
   1818     input_tokens=input_tokens,
   1819     output_tokens=output_tokens,
   (...)   1823     ),
   1824 )
TypeError: unsupported operand type(s) for +: 'NoneType' and 'int'
During task with name 'agent' and id '258a1322-7888-609a-2771-e4df466cf748'
Description
I am trying to use create_react_agent with stream_mode="messages" to stream the output token by token. I expected the agent to yield AIMessageChunk objects continuously until completion.
However, after successfully producing several AIMessageChunks, the process fails with the following error:
TypeError: unsupported operand type(s) for +: 'NoneType' and 'int'
This seems to happen when the code tries to compute usage metadata from a None value in the Anthropic response.
System Info
System Information
OS: Linux
OS Version: #61~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Tue Apr 15 17:03:15 UTC 2
Python Version: 3.12.0 | packaged by Anaconda, Inc. | (main, Oct 2 2023, 17:29:18) [GCC 11.2.0]
Package Information
langchain_core: 0.3.58
langchain: 0.3.23
langchain_community: 0.3.21
langsmith: 0.3.24
langchain_anthropic: 0.3.12
langchain_chroma: 0.2.3
langchain_experimental: 0.3.4
langchain_huggingface: 0.1.2
langchain_neo4j: 0.4.0
langchain_openai: 0.3.12
langchain_tavily: 0.1.6
langchain_text_splitters: 0.3.8
langgraph_sdk: 0.1.66
Optional packages not installed
langserve
Other Dependencies
aiohttp: 3.11.16
aiohttp<4.0.0,>=3.8.3: Installed. No version info available.
anthropic<1,>=0.49.0: Installed. No version info available.
async-timeout<5.0.0,>=4.0.0;: Installed. No version info available.
chromadb!=0.5.10,!=0.5.11,!=0.5.12,!=0.5.4,!=0.5.5,!=0.5.7,!=0.5.9,<0.7.0,>=0.4.0: Installed. No version info available.
dataclasses-json<0.7,>=0.5.7: Installed. No version info available.
httpx: 0.28.1
httpx-sse<1.0.0,>=0.4.0: Installed. No version info available.
huggingface-hub: 0.30.2
jsonpatch<2.0,>=1.33: Installed. No version info available.
langchain-anthropic;: Installed. No version info available.
langchain-aws;: Installed. No version info available.
langchain-azure-ai;: Installed. No version info available.
langchain-cohere;: Installed. No version info available.
langchain-community;: Installed. No version info available.
langchain-core<1.0.0,>=0.3.49: Installed. No version info available.
langchain-core<1.0.0,>=0.3.51: Installed. No version info available.
langchain-core<1.0.0,>=0.3.53: Installed. No version info available.
langchain-core>=0.3.52: Installed. No version info available.
langchain-deepseek;: Installed. No version info available.
langchain-fireworks;: Installed. No version info available.
langchain-google-genai;: Installed. No version info available.
langchain-google-vertexai;: Installed. No version info available.
langchain-groq;: Installed. No version info available.
langchain-huggingface;: Installed. No version info available.
langchain-mistralai;: Installed. No version info available.
langchain-ollama;: Installed. No version info available.
langchain-openai;: Installed. No version info available.
langchain-perplexity;: Installed. No version info available.
langchain-text-splitters<1.0.0,>=0.3.8: Installed. No version info available.
langchain-together;: Installed. No version info available.
langchain-xai;: Installed. No version info available.
langchain<1.0.0,>=0.3.23: Installed. No version info available.
langsmith-pyo3: Installed. No version info available.
langsmith<0.4,>=0.1.125: Installed. No version info available.
langsmith<0.4,>=0.1.17: Installed. No version info available.
mypy: 1.15.0
neo4j: 5.28.1
neo4j-graphrag: 1.6.1
numpy<3,>=1.26.2: Installed. No version info available.
numpy>=1.26.0;: Installed. No version info available.
numpy>=2.1.0;: Installed. No version info available.
openai-agents: Installed. No version info available.
openai<2.0.0,>=1.68.2: Installed. No version info available.
opentelemetry-api: 1.32.1
opentelemetry-exporter-otlp-proto-http: Installed. No version info available.
opentelemetry-sdk: 1.32.1
orjson: 3.10.16
packaging: 24.2
packaging<25,>=23.2: Installed. No version info available.
pydantic: 2.11.2
pydantic-settings<3.0.0,>=2.4.0: Installed. No version info available.
pydantic<3.0.0,>=2.5.2;: Installed. No version info available.
pydantic<3.0.0,>=2.7.4: Installed. No version info available.
pydantic<3.0.0,>=2.7.4;: Installed. No version info available.
pytest: Installed. No version info available.
PyYAML>=5.3: Installed. No version info available.
requests: 2.32.3
requests-toolbelt: 1.0.0
requests<3,>=2: Installed. No version info available.
rich: 13.9.4
sentence-transformers: 4.1.0
SQLAlchemy<3,>=1.4: Installed. No version info available.
tenacity!=8.4.0,<10,>=8.1.0: Installed. No version info available.
tenacity!=8.4.0,<10.0.0,>=8.1.0: Installed. No version info available.
tiktoken<1,>=0.7: Installed. No version info available.
tokenizers: 0.21.1
transformers: 4.51.3
typing-extensions>=4.7: Installed. No version info available.
zstandard: 0.23.0