1 parent 83089d0 commit 926e2b8
tests/kernels/test_cache.py
@@ -169,8 +169,8 @@ def test_reshape_and_cache(
         cloned_value_cache[block_idx, :, :, block_offset] = value[i]
 
     if kv_cache_dtype == "fp8_e5m2":
-        assert torch.allclose(result_key_cache, cloned_key_cache, atol=0.01, rtol=0.1)
-        assert torch.allclose(result_value_cache, cloned_value_cache, atol=0.01, rtol=0.1)
+        assert torch.allclose(result_key_cache, cloned_key_cache, atol=0.001, rtol=0.1)
+        assert torch.allclose(result_value_cache, cloned_value_cache, atol=0.001, rtol=0.1)
     else:
         assert torch.allclose(key_cache, cloned_key_cache)
         assert torch.allclose(value_cache, cloned_value_cache)
@@ -279,4 +279,4 @@ def test_fp8_conversion(
     converted_cache = torch.empty_like(cache)
     cache_ops.convert_fp8(cache_fp8, converted_cache)
 
-    assert torch.allclose(cache, converted_cache, atol=0.01, rtol=0.1)
+    assert torch.allclose(cache, converted_cache, atol=0.001, rtol=0.1)
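For context (a minimal illustration, not part of the commit): torch.allclose passes when |input - other| <= atol + rtol * |other| holds elementwise, so tightening atol from 0.01 to 0.001 mainly makes the check stricter for near-zero cache entries, where the rtol term contributes almost nothing. The tensor values below are made up for demonstration.

import torch

ref = torch.tensor([0.0, 1.0])     # hypothetical reference cache values
out = torch.tensor([0.005, 1.05])  # hypothetical values after an fp8_e5m2 roundtrip

# Near-zero entry: |0.005 - 0.0| = 0.005 passes under atol=0.01 but not atol=0.001.
# Larger entry: |1.05 - 1.0| = 0.05 passes either way, since rtol * 1.0 = 0.1 dominates.
print(torch.allclose(out, ref, atol=0.01, rtol=0.1))   # True
print(torch.allclose(out, ref, atol=0.001, rtol=0.1))  # False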