@@ -276,7 +276,7 @@ def copy_attr(a, b, include=(), exclude=()):
             setattr(a, k, v)


-def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, weight_decay=1e-5):
+def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5):
     # YOLOv5 3-param group optimizer: 0) weights with decay, 1) weights no decay, 2) biases no decay
     g = [], [], []  # optimizer parameter groups
     bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # normalization layers, i.e. BatchNorm2d()
@@ -299,10 +299,10 @@ def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, weight_decay=1e-
     else:
         raise NotImplementedError(f'Optimizer {name} not implemented.')

-    optimizer.add_param_group({'params': g[0], 'weight_decay': weight_decay})  # add g0 with weight_decay
+    optimizer.add_param_group({'params': g[0], 'weight_decay': decay})  # add g0 with weight_decay
     optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0})  # add g1 (BatchNorm2d weights)
-    LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups "
-                f"{len(g[1])} weight (no decay), {len(g[0])} weight, {len(g[2])} bias")
+    LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups "
+                f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias")
     return optimizer


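For context, here is a minimal, self-contained sketch of the 3-param-group scheme the comment in the diff describes (weights with decay, normalization weights without decay, biases without decay). The helper name build_grouped_optimizer is hypothetical, not YOLOv5's actual function; the sketch omits the optimizer-name dispatch and the LOGGER/colorstr logging shown above and always builds an SGD optimizer.

# Sketch only -- not the YOLOv5 implementation. Illustrates the 3-param-group
# split: 0) weights with decay, 1) normalization weights without decay,
# 2) biases without decay.
import torch
import torch.nn as nn

def build_grouped_optimizer(model, lr=0.01, momentum=0.9, decay=1e-5):
    g = [], [], []  # param groups: (decayed weights, norm weights, biases)
    bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # BatchNorm2d, LayerNorm, ...
    for module in model.modules():
        for name, param in module.named_parameters(recurse=False):
            if name == 'bias':  # biases -> no decay
                g[2].append(param)
            elif isinstance(module, bn):  # normalization weights -> no decay
                g[1].append(param)
            else:  # conv/linear weights -> decay
                g[0].append(param)

    optimizer = torch.optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True)
    optimizer.add_param_group({'params': g[0], 'weight_decay': decay})  # decayed weights
    optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0})  # norm weights, no decay
    return optimizer

if __name__ == '__main__':
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU(), nn.Conv2d(8, 4, 1))
    opt = build_grouped_optimizer(model)
    print([len(pg['params']) for pg in opt.param_groups])  # [3, 2, 1]: biases, weights, norm weights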