@@ -276,7 +276,7 @@ def copy_attr(a, b, include=(), exclude=()):
             setattr(a, k, v)
 
 
-def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, weight_decay=1e-5):
+def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5):
     # YOLOv5 3-param group optimizer: 0) weights with decay, 1) weights no decay, 2) biases no decay
     g = [], [], []  # optimizer parameter groups
     bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # normalization layers, i.e. BatchNorm2d()
@@ -299,10 +299,10 @@ def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, weight_decay=1e-
     else:
         raise NotImplementedError(f'Optimizer {name} not implemented.')
 
-    optimizer.add_param_group({'params': g[0], 'weight_decay': weight_decay})  # add g0 with weight_decay
+    optimizer.add_param_group({'params': g[0], 'weight_decay': decay})  # add g0 with weight_decay
     optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0})  # add g1 (BatchNorm2d weights)
-    LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups "
-                f"{len(g[1])} weight (no decay), {len(g[0])} weight, {len(g[2])} bias")
+    LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups "
+                f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias")
     return optimizer
 
 
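For context, a minimal sketch of how the 3-param-group scheme and the renamed `decay` argument fit together. The grouping loop and the optimizer construction below are illustrative only (the real loop body falls outside the hunks shown), and `smart_optimizer_sketch`, the toy model, and the hyperparameter values are placeholders, not the upstream implementation:

```python
import torch
import torch.nn as nn


def smart_optimizer_sketch(model, name='SGD', lr=0.01, momentum=0.9, decay=1e-5):
    # 3-param-group split: 0) weights with decay, 1) norm-layer weights no decay, 2) biases no decay
    g = [], [], []
    bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # BatchNorm2d, LayerNorm, ...
    for m in model.modules():
        for p_name, p in m.named_parameters(recurse=False):
            if p_name == 'bias':
                g[2].append(p)  # biases: no decay
            elif isinstance(m, bn):
                g[1].append(p)  # norm-layer weights: no decay
            else:
                g[0].append(p)  # conv/linear weights: decay

    if name == 'SGD':
        optimizer = torch.optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True)
    elif name == 'Adam':
        optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999))
    else:
        raise NotImplementedError(f'Optimizer {name} not implemented.')

    optimizer.add_param_group({'params': g[0], 'weight_decay': decay})  # g0: weights with decay
    optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0})    # g1: norm weights, no decay
    return optimizer


# Hypothetical usage on a toy model:
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
opt = smart_optimizer_sketch(model, name='SGD', lr=0.01, momentum=0.9, decay=5e-4)
```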