@@ -16,14 +16,18 @@ def test_sgd_optimizer(self):
             dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
         mul_out = block.create_var(
             dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
         block.append_op(
             type="mul",
             inputs={"X": mul_x,
                     "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1})
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
         sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01)
-        opts = sgd_optimizer.minimize(mul_out, init_program)
+        opts = sgd_optimizer.minimize(mean_out, init_program)
         self.assertEqual(len(opts), 1)
         sgd_op = opts[0]
         self.assertEqual(sgd_op.type, "sgd")
@@ -44,12 +48,16 @@ def test_sgd_optimizer_with_global_step(self):
                     "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1})
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
         global_step = block.create_var(
             dtype="float32", shape=[1], lod_level=0, name="step")
         learning_rate = 0.01
         sgd_optimizer = optimizer.SGDOptimizer(
             learning_rate=learning_rate, global_step=global_step)
-        opts = sgd_optimizer.minimize(mul_out, init_program)
+        opts = sgd_optimizer.minimize(mean_out, init_program)
         self.assertEqual(len(opts), 2)
         sgd_op = opts[0]
         self.assertEqual(sgd_op.type, "sgd")
@@ -90,7 +98,11 @@ def test_vanilla_momentum_optimizer(self):
         learning_rate = 0.01
         momentum_optimizer = self.MockMomentum(
             learning_rate=learning_rate, momentum=0.2)
-        params_grads = append_backward_ops(mul_out)
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
+        params_grads = append_backward_ops(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
         opts = momentum_optimizer.create_optimization_pass(
@@ -132,10 +144,14 @@ def test_nesterov_momentum_optimizer(self):
                     "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1})
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
         learning_rate = 0.01
         momentum_optimizer = self.MockMomentum(
             learning_rate=learning_rate, momentum=0.2, use_nesterov=True)
-        params_grads = append_backward_ops(mul_out)
+        params_grads = append_backward_ops(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
         opts = momentum_optimizer.create_optimization_pass(
@@ -186,10 +202,14 @@ def test_adagrad_optimizer(self):
                     "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1})
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
         learning_rate = 0.01
         adagrad_optimizer = self.MockAdagrad(
             learning_rate=learning_rate, epsilon=1.0e-6)
-        params_grads = append_backward_ops(mul_out)
+        params_grads = append_backward_ops(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(adagrad_optimizer.get_accumulators()), 0)
         opts = adagrad_optimizer.create_optimization_pass(params_grads, mul_out,
@@ -242,10 +262,14 @@ def test_adam_optimizer(self):
                     "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1})
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
         learning_rate = 0.01
         adam_optimizer = self.MockAdam(
             learning_rate=learning_rate, beta1=0.9, beta2=0.999)
-        params_grads = append_backward_ops(mul_out)
+        params_grads = append_backward_ops(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(adam_optimizer.get_accumulators()), 0)
         opts = adam_optimizer.create_optimization_pass(params_grads, mul_out,
@@ -300,10 +324,14 @@ def test_adamax_optimizer(self):
                     "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1})
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
         learning_rate = 0.01
         adamax_optimizer = self.MockAdamax(
             learning_rate=learning_rate, beta1=0.9, beta2=0.999)
-        params_grads = append_backward_ops(mul_out)
+        params_grads = append_backward_ops(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(adamax_optimizer.get_accumulators()), 0)
         opts = adamax_optimizer.create_optimization_pass(params_grads, mul_out,
@@ -355,10 +383,14 @@ def test_decayed_adagrad_optimizer(self):
                     "Y": mul_y},
             outputs={"Out": mul_out},
             attrs={"x_num_col_dims": 1})
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
         learning_rate = 0.01
         decayed_adagrad_optimizer = self.MockDecayedAdagrad(
             learning_rate=learning_rate, decay=0.95, epsilon=1.0e-6)
-        params_grads = append_backward_ops(mul_out)
+        params_grads = append_backward_ops(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(decayed_adagrad_optimizer.get_accumulators()), 0)
         opts = decayed_adagrad_optimizer.create_optimization_pass(
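Every hunk above makes the same change: each test graph gains a "mean" op so it ends in a scalar variable (mean.out), and that scalar, rather than the [5, 8]-shaped mul.out, is what gets passed to minimize / append_backward_ops. A minimal standalone sketch of the resulting pattern, using the SGD case, follows; the import paths and the mul.x parameter setup are assumptions inferred from the shapes shown in the diff (they are not part of the hunks above), so treat this as an illustration of the pattern rather than the exact test code.

import paddle.v2.framework.framework as framework    # assumed import path
import paddle.v2.framework.optimizer as optimizer    # assumed import path

init_program = framework.Program()
program = framework.Program()
block = program.global_block()

# One mul op: [5, 10] x [10, 8] -> [5, 8]. The mul.x parameter is inferred
# from the mul.y / mul.out shapes shown in the diff.
mul_x = block.create_parameter(
    dtype="float32", shape=[5, 10], lod_level=0, name="mul.x")
mul_y = block.create_var(
    dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var(
    dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
# The scalar loss variable this change introduces.
mean_out = block.create_var(
    dtype="float32", shape=[1], lod_level=0, name="mean.out")

block.append_op(
    type="mul",
    inputs={"X": mul_x,
            "Y": mul_y},
    outputs={"Out": mul_out},
    attrs={"x_num_col_dims": 1})
# Reduce to a scalar so the backward pass starts from a one-element loss.
block.append_op(
    type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})

sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01)
# Minimize the scalar mean output instead of the [5, 8] mul output.
opts = sgd_optimizer.minimize(mean_out, init_program)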