Skip to content

Commit e26ae54

Browse files
chore(deps): lock file maintenance (#379)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: James Braza <[email protected]>
1 parent 7ea4c28 commit e26ae54

File tree

5 files changed

+413
-314
lines changed

5 files changed

+413
-314
lines changed

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ dev = [
4848
"ipython>=8", # Pin to keep recent
4949
"ldp[monitor,nn,rich,server,typing,visualization]",
5050
"litellm", # Version ranges are enforced by lmi
51-
"mypy>=1.8", # Pin for mutable-override
51+
"mypy>=1.19", # Pin for zip default detection
5252
"prek",
5353
"pydantic~=2.9", # Pydantic 2.9 changed JSON schema exports 'allOf', so ensure tests match
5454
"pylint-pydantic",

src/ldp/graph/ops.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -558,7 +558,7 @@ async def __call__(self, *args, **kwargs) -> OpResult[TOutput_co]:
558558
# Map positional arguments to keyword arguments to make backward pass easier
559559
for i_arg, (arg, param) in enumerate(
560560
# strict=False b/c not all params in _fwd_args will be in args (i.e. defaults and **kwargs)
561-
zip(args, self._fwd_args, strict=False)
561+
zip(args, self._fwd_args, strict=False) # noqa: FURB120
562562
):
563563
# Don't need to check for too many args or collisions with kwargs, since forward()
564564
# will raise an exception anyway

src/ldp/graph/torch_ops.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,7 @@ def store_tensor_inputs(
126126
# See Op.__call__ for some notes on what this is doing.
127127
for i_arg, (arg, param) in enumerate(
128128
# strict=False b/c not all params in _fwd_args will be in args (i.e. defaults and **kwargs)
129-
zip(tensor_args, fwd_args, strict=False)
129+
zip(tensor_args, fwd_args, strict=False) # noqa: FURB120
130130
):
131131
if param.kind == inspect.Parameter.VAR_POSITIONAL:
132132
ctx_args = list(tensor_args[i_arg:])

src/ldp/utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,7 @@ def discounted_returns(
7878
"""
7979
returns = []
8080
r = 0.0
81-
for reward, term in zip(reversed(rewards), reversed(terminated), strict=False):
81+
for reward, term in zip(reversed(rewards), reversed(terminated), strict=True):
8282
# 1 - term is 0 if the episode has terminated
8383
r = reward + discount * r * (1 - term)
8484
returns.append(r)

0 commit comments

Comments (0)