@@ -78,11 +78,15 @@ enum DataType {
7878 repeated bytes content = 2 ;
7979}
8080
// Learning-rate policy parameters, factored out so every optimizer
// state message can embed one `lr_state` field instead of repeating
// the three scalars.
message LrPolicyState {
  // Current learning rate.
  optional double learning_rate = 1 [default = 1.0];
  // Decay coefficients for the learning-rate schedule.
  // NOTE(review): the exact decay formula is defined by the consumer,
  // not visible here — confirm semantics against the optimizer code.
  optional double lr_decay_a = 2;
  optional double lr_decay_b = 3;
}
87+
8188message SGDOptimizerState {
82- // learning rate policy
83- optional double learning_rate = 101 ;
84- optional double lr_decay_a = 102 ;
85- optional double lr_decay_b = 103 ;
89+ optional LrPolicyState lr_state = 101 ;
8690 optional double num_sample_passed = 104 ;
8791 // state
8892 optional TensorProto parameter = 1 ;
@@ -91,9 +95,7 @@ message SGDOptimizerState {
9195
9296message AdadeltaOptimizerState {
9397 // learning rate policy
94- optional double learning_rate = 101 ;
95- optional double lr_decay_a = 102 ;
96- optional double lr_decay_b = 103 ;
98+ optional LrPolicyState lr_state = 101 ;
9799 optional double num_sample_passed = 104 ;
98100 // state
99101 optional TensorProto parameter = 1 ;
@@ -102,22 +104,17 @@ message AdadeltaOptimizerState {
102104 optional TensorProto update_delta = 4 ;
103105}
104106
107+
// Checkpointed state for the Adagrad optimizer.
message AdagradOptimizerState {
  // learning rate policy
  optional LrPolicyState lr_state = 101;
  // Number of samples processed so far (used by lr decay).
  optional double num_sample_passed = 104;
  // state
  optional TensorProto parameter = 1;
  // Per-element accumulated squared gradients.
  optional TensorProto accum_gradient = 2;
}
115115
116116message AdamOptimizerState {
117- // learning rate policy
118- optional double learning_rate = 101 ;
119- optional double lr_decay_a = 102 ;
120- optional double lr_decay_b = 103 ;
117+ optional LrPolicyState lr_state = 101 ;
121118 optional double num_sample_passed = 104 ;
122119 // state
123120 optional TensorProto parameter = 1 ;
0 commit comments