
Commit b33104d

release: 0.3.28 (#266)
1 parent 55f8150 commit b33104d

34 files changed: +228, -186 lines

libs/community/Makefile

Lines changed: 0 additions & 6 deletions
@@ -59,12 +59,6 @@ format format_diff:
 	[ "$(PYTHON_FILES)" = "" ] || uv run --group typing --group lint ruff format $(PYTHON_FILES)
 	[ "$(PYTHON_FILES)" = "" ] || uv run --group typing --group lint ruff check --select I --fix $(PYTHON_FILES)
 
-spell_check:
-	uv run --group typing --group lint codespell --toml pyproject.toml
-
-spell_fix:
-	uv run --group typing --group lint codespell --toml pyproject.toml -w
-
 ######################
 # HELP
 ######################
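The spell_check and spell_fix targets are removed outright; codespell itself still works against the same pyproject.toml configuration. A minimal sketch of an equivalent invocation driven from Python, assuming uv and the same dependency groups the Makefile used (the helper name is hypothetical, not part of the repo):

    import subprocess

    def run_codespell(write_fixes: bool = False) -> int:
        # Mirrors the removed Makefile targets: codespell configured
        # via the project's pyproject.toml.
        cmd = [
            "uv", "run", "--group", "typing", "--group", "lint",
            "codespell", "--toml", "pyproject.toml",
        ]
        if write_fixes:
            cmd.append("-w")  # codespell's write-changes flag (spell_fix)
        return subprocess.run(cmd).returncode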

libs/community/langchain_community/cache.py

Lines changed: 2 additions & 2 deletions
@@ -485,7 +485,7 @@ def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
         # Read from a Redis HASH
         try:
             results = self.redis.hgetall(self._key(prompt, llm_string))
-            return self._get_generations(results)  # type: ignore[arg-type]
+            return self._get_generations(results)
         except Exception as e:
             logger.error(f"Redis lookup failed: {e}")
             return None
@@ -558,7 +558,7 @@ async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYP
         """Look up based on prompt and llm_string. Async version."""
         try:
             results = await self.redis.hgetall(self._key(prompt, llm_string))
-            return self._get_generations(results)  # type: ignore[arg-type]
+            return self._get_generations(results)
         except Exception as e:
             logger.error(f"Redis async lookup failed: {e}")
             return None
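Both hunks drop a `# type: ignore[arg-type]` that is no longer needed, leaving the same defensive read path: fetch the HASH, and on any failure log and return None instead of raising. A standalone sketch of that pattern, assuming a plain redis-py client (the function name is illustrative):

    import logging
    from typing import Optional

    import redis

    logger = logging.getLogger(__name__)

    def lookup_hash(client: redis.Redis, key: str) -> Optional[dict]:
        # Read a Redis HASH; errors degrade to a cache miss (None)
        # rather than propagating, matching lookup/alookup above.
        try:
            return client.hgetall(key)
        except Exception as e:
            logger.error(f"Redis lookup failed: {e}")
            return None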

libs/community/langchain_community/chains/pebblo_retrieval/utilities.py

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@ def get_runtime() -> Tuple[Framework, Runtime]:
     """
     runtime_env = get_runtime_environment()
     framework = Framework(
-        name="langchain", version=runtime_env.get("library_version", None)
+        name="langchain", version=runtime_env.get("library_version", "unknown")
     )
     uname = platform.uname()
     runtime = Runtime(
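The default matters because dict.get returns it whenever the key is absent: with None, a missing library_version would flow into Framework's version field (presumably str-typed), while "unknown" keeps the value a string either way. A quick illustration:

    runtime_env = {"platform": "linux"}  # no "library_version" key

    # A None default silently yields None for the missing key.
    assert runtime_env.get("library_version", None) is None
    # A string sentinel keeps the type stable.
    assert runtime_env.get("library_version", "unknown") == "unknown"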

libs/community/langchain_community/chat_loaders/utils.py

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@ def merge_chat_runs_in_session(
             text = ""
             for content in message.content:
                 if isinstance(content, dict):
-                    text += content.get("text", None)
+                    text += content.get("text", "") or ""
                 else:
                     text += content
             message.content = text
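The old expression could raise TypeError (str += None) for any content block without a "text" key. The new one handles both cases: `.get("text", "")` covers a missing key, and the trailing `or ""` covers a key explicitly set to None. A quick check:

    blocks = [{"text": "hi"}, {"text": None}, {"image": "..."}]
    text = ""
    for block in blocks:
        # Missing key -> "" via the default; present-but-None -> "" via `or`.
        text += block.get("text", "") or ""
    assert text == "hi"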

libs/community/langchain_community/chat_models/azureml_endpoint.py

Lines changed: 2 additions & 2 deletions
@@ -311,7 +311,7 @@ def _stream(
         ]
         params = {"stream": True, "stop": stop, "model": None, **kwargs}
 
-        default_chunk_class = AIMessageChunk
+        default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
         for chunk in client.chat.completions.create(messages=message_dicts, **params):
             if not isinstance(chunk, dict):
                 chunk = chunk.dict()
@@ -366,7 +366,7 @@ async def _astream(
         ]
         params = {"stream": True, "stop": stop, "model": None, **kwargs}
 
-        default_chunk_class = AIMessageChunk
+        default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
         async for chunk in await async_client.chat.completions.create(
             messages=message_dicts,
             **params,
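The annotation changes nothing at runtime; it widens what mypy infers. Without it the variable is inferred as Type[AIMessageChunk], so a later reassignment to a sibling chunk class (which these streaming loops perform) fails type checking. A minimal sketch, assuming the langchain_core message classes:

    from typing import Type

    from langchain_core.messages import (
        AIMessageChunk,
        BaseMessageChunk,
        HumanMessageChunk,
    )

    # Widened annotation: the variable may hold any message-chunk class.
    default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
    default_chunk_class = HumanMessageChunk  # ok under the annotation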

libs/community/langchain_community/chat_models/baichuan.py

Lines changed: 2 additions & 2 deletions
@@ -473,7 +473,7 @@ def _stream(
         res = self._chat(messages, stream=True, **kwargs)
         if res.status_code != 200:
             raise ValueError(f"Error from Baichuan api response: {res}")
-        default_chunk_class = AIMessageChunk
+        default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
         for chunk in res.iter_lines():
             chunk = chunk.decode("utf-8").strip("\r\n")
             parts = chunk.split("data: ", 1)
@@ -490,7 +490,7 @@ def _stream(
                 default_chunk_class = chunk.__class__
                 cg_chunk = ChatGenerationChunk(message=chunk)
                 if run_manager:
-                    run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
+                    run_manager.on_llm_new_token(str(chunk.content), chunk=cg_chunk)
                 yield cg_chunk
 
     async def _agenerate(
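The str() wrapper exists because message content is union-typed (a plain string or a list of content blocks) while the callback's token parameter is a plain str. A simplified sketch of the mismatch (the callback below is a stand-in, not the real hook):

    from typing import Dict, List, Union

    def on_llm_new_token(token: str) -> None:
        # Stand-in for the callback; the real method also takes a chunk kwarg.
        print(token, end="")

    content: Union[str, List[Union[str, Dict]]] = "partial text"
    on_llm_new_token(str(content))  # str() satisfies the str-typed parameter

The same coercion is applied in coze.py and hunyuan.py below.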

libs/community/langchain_community/chat_models/coze.py

Lines changed: 1 addition & 1 deletion
@@ -192,7 +192,7 @@ def _stream(
             chunk = _convert_delta_to_message_chunk(response["message"])
             cg_chunk = ChatGenerationChunk(message=chunk)
             if run_manager:
-                run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
+                run_manager.on_llm_new_token(str(chunk.content), chunk=cg_chunk)
             yield cg_chunk
 
     def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response:

libs/community/langchain_community/chat_models/fireworks.py

Lines changed: 3 additions & 3 deletions
@@ -208,7 +208,7 @@ def _stream(
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         message_dicts = self._create_message_dicts(messages)
-        default_chunk_class = AIMessageChunk
+        default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
         params = {
             "model": self.model,
             "messages": message_dicts,
@@ -241,7 +241,7 @@ async def _astream(
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
         message_dicts = self._create_message_dicts(messages)
-        default_chunk_class = AIMessageChunk
+        default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
         params = {
             "model": self.model,
             "messages": message_dicts,
@@ -263,7 +263,7 @@ async def _astream(
                 message=chunk, generation_info=generation_info
             )
             if run_manager:
-                await run_manager.on_llm_new_token(token=chunk.text, chunk=cg_chunk)
+                await run_manager.on_llm_new_token(token=cg_chunk.text, chunk=cg_chunk)
             yield cg_chunk
 
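Sourcing the token from cg_chunk.text instead of chunk.text avoids reading text off the message chunk, whose content may not be a plain string: ChatGenerationChunk carries a plain-str text field derived from its message. A small check, assuming langchain_core's output types:

    from langchain_core.messages import AIMessageChunk
    from langchain_core.outputs import ChatGenerationChunk

    cg_chunk = ChatGenerationChunk(message=AIMessageChunk(content="hi"))
    # .text on a generation chunk is always a str.
    assert cg_chunk.text == "hi"
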
libs/community/langchain_community/chat_models/gpt_router.py

Lines changed: 2 additions & 2 deletions
@@ -334,7 +334,7 @@ def _stream(
 
             if run_manager:
                 run_manager.on_llm_new_token(
-                    token=chunk.message.content, chunk=chunk.message
+                    token=str(chunk.message.content), chunk=chunk
                 )
 
             yield chunk
@@ -367,7 +367,7 @@ async def _astream(
 
             if run_manager:
                 await run_manager.on_llm_new_token(
-                    token=chunk.message.content, chunk=chunk.message
+                    token=str(chunk.message.content), chunk=chunk
                 )
 
             yield chunk
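Two fixes land in one call here: the token is coerced to str, and the chunk keyword now receives the ChatGenerationChunk wrapper rather than the bare message, which is what on_llm_new_token's chunk parameter is typed for. A sketch of the corrected pairing, using langchain_core's types:

    from langchain_core.messages import AIMessageChunk
    from langchain_core.outputs import ChatGenerationChunk

    chunk = ChatGenerationChunk(message=AIMessageChunk(content="hi"))
    token = str(chunk.message.content)  # str token for the callback
    # The wrapper itself (not chunk.message) is passed as the chunk kwarg.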

libs/community/langchain_community/chat_models/hunyuan.py

Lines changed: 2 additions & 2 deletions
@@ -228,7 +228,7 @@ def _stream(
     ) -> Iterator[ChatGenerationChunk]:
         res = self._chat(messages, **kwargs)
 
-        default_chunk_class = AIMessageChunk
+        default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
         for chunk in res:
             chunk = chunk.get("data", "")
             if len(chunk) == 0:
@@ -245,7 +245,7 @@ def _stream(
                 default_chunk_class = chunk.__class__
                 cg_chunk = ChatGenerationChunk(message=chunk)
                 if run_manager:
-                    run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
+                    run_manager.on_llm_new_token(str(chunk.content), chunk=cg_chunk)
                 yield cg_chunk
 
     def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> Any:
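The widened annotation earns its keep a few lines into the loop, where `default_chunk_class = chunk.__class__` swaps in whatever concrete class the delta resolved to; under the narrow inferred type that reassignment would be rejected. A compact illustration, assuming the langchain_core classes:

    from typing import Type

    from langchain_core.messages import AIMessageChunk, BaseMessageChunk

    default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
    chunk: BaseMessageChunk = AIMessageChunk(content="x")
    # chunk.__class__ is Type[BaseMessageChunk] here; only the widened
    # annotation above lets this assignment type-check.
    default_chunk_class = chunk.__class__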
