Skip to content

Commit 27fb6fd

Browse files
zero-mAP fix 3 (#9058)
* zero-mAP fix 3 Signed-off-by: Glenn Jocher <[email protected]> * Update torch_utils.py Signed-off-by: Glenn Jocher <[email protected]> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update torch_utils.py Signed-off-by: Glenn Jocher <[email protected]> Signed-off-by: Glenn Jocher <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent 841f312 commit 27fb6fd

File tree

1 file changed

+1
-2
lines changed

1 file changed

+1
-2
lines changed

utils/torch_utils.py

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -412,7 +412,6 @@ def __init__(self, model, decay=0.9999, tau=2000, updates=0):
412412
for p in self.ema.parameters():
413413
p.requires_grad_(False)
414414

415-
@smart_inference_mode()
416415
def update(self, model):
417416
# Update EMA parameters
418417
self.updates += 1
@@ -423,7 +422,7 @@ def update(self, model):
423422
if v.dtype.is_floating_point: # true for FP16 and FP32
424423
v *= d
425424
v += (1 - d) * msd[k].detach()
426-
assert v.dtype == msd[k].dtype == torch.float32, f'EMA {v.dtype} and model {msd[k]} must be updated in FP32'
425+
assert v.dtype == msd[k].detach().dtype == torch.float32, f'EMA {v.dtype} and model {msd[k]} must both be FP32'
427426

428427
def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
429428
# Update EMA attributes

0 commit comments

Comments (0)