@@ -31,6 +31,37 @@ def get_backward_op(scope, op, no_grad_set):
     return backward_op
 
 
+def _reference_testing(x, scale, offset, mean, var, epsilon, data_format):
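+    # NumPy reference for batch-norm inference:
+    #   y = scale * (x - mean) / sqrt(var + epsilon) + offset
+    # where mean and var are the supplied population statistics.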
+    x_shape = x.shape
+    if len(x_shape) == 2:
+        if data_format == "NCHW":
+            x = np.reshape(x, (x.shape[0], x.shape[1], 1, 1))
+        else:
+            x = np.reshape(x, (x.shape[0], 1, 1, x.shape[1]))
+
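+    # NCHW keeps channels on axis 1, so the (c,)-shaped statistics and
+    # parameters are reshaped and tiled to the full (n, c, h, w) extent.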
+    if data_format == "NCHW":
+        n, c, h, w = x.shape
+        mean_tile = np.reshape(mean, (1, c, 1, 1))
+        mean_tile = np.tile(mean_tile, (n, 1, h, w))
+        var_tile = np.reshape(var, (1, c, 1, 1))
+        var_tile = np.tile(var_tile, (n, 1, h, w))
+        normalized = (x - mean_tile) / np.sqrt(var_tile + epsilon)
+        scale_tile = np.reshape(scale, (1, c, 1, 1))
+        scale_tile = np.tile(scale_tile, (n, 1, h, w))
+        offset_tile = np.reshape(offset, (1, c, 1, 1))
+        offset_tile = np.tile(offset_tile, (n, 1, h, w))
+        y = normalized * scale_tile + offset_tile
+    elif data_format == "NHWC":
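+        # NHWC: the trailing channel axis broadcasts directly against the
+        # (c,)-shaped statistics, so no tiling is needed.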
+        normalized = (x - mean) / np.sqrt(var + epsilon)
+        y = normalized * scale + offset
+    else:
+        raise ValueError("Unknown data order.")
+
+    if len(x_shape) == 2:
+        y = np.reshape(y, x_shape)
+    return y
+
+
 def _reference_training(x, scale, offset, epsilon, data_format):
     x_shape = x.shape
     if len(x_shape) == 2:
@@ -155,11 +186,159 @@ def __set_tensor__(name, data=None):
         __set_tensor__(output, data)
 
 
-class TestBatchNormOp(OpTest):
+class TestBatchNormOpInference(OpTest):
+    def setUp(self):
+        self.dtype = np.float32
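+        # Base class covers float32; TestFP16BatchNormOpInference overrides
+        # this dtype to run the same checks in half precision.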
+
     def __assert_close(self, tensor, np_array, msg, atol=1e-4):
         self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
 
-    def test_python(self):
+    def check_with_place(self, place, data_layout, dtype, shape):
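+        # Build inputs in a bare scope, run the batch_norm op with
+        # is_test=True, and compare against the NumPy reference above.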
+        epsilon = 0.00001
+        if len(shape) == 2:
+            x_shape = shape
+            c = x_shape[1]
+        else:
+            n, h, w, c = shape[0], shape[1], shape[2], shape[3]
+            if data_layout == "NHWC":
+                x_shape = [n, h, w, c]
+            elif data_layout == "NCHW":
+                x_shape = [n, c, h, w]
+            else:
+                raise ValueError("Unknown data layout.")
+        scale_shape = [c]
+
+        x_val = np.random.random_sample(x_shape).astype(dtype)
+        scale_val = np.random.random_sample(scale_shape).astype(np.float32)
+        bias_val = np.random.random_sample(scale_shape).astype(np.float32)
+
+        mean = np.zeros(scale_shape).astype(np.float32)
+        variance = np.ones(scale_shape).astype(np.float32)
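+        # Inference consumes the running statistics as-is; zero mean and unit
+        # variance keep the expected output simple.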
+
+        y_out = _reference_testing(x_val, scale_val, bias_val, mean, variance,
+                                   epsilon, data_layout).astype(dtype)
+
+        scope = core.Scope()
+
+        # create input
+        x_tensor = create_or_get_tensor(scope, "x_val",
+                                        OpTest.np_dtype_to_fluid_dtype(x_val),
+                                        place)
+        scale_tensor = create_or_get_tensor(
+            scope, "scale_val",
+            OpTest.np_dtype_to_fluid_dtype(scale_val), place)
+        bias_tensor = create_or_get_tensor(
+            scope, "bias_val", OpTest.np_dtype_to_fluid_dtype(bias_val), place)
+        mean_tensor = create_or_get_tensor(scope, "mean",
+                                           OpTest.np_dtype_to_fluid_dtype(mean),
+                                           place)
+        variance_tensor = create_or_get_tensor(
+            scope, "variance", OpTest.np_dtype_to_fluid_dtype(variance), place)
+
+        # create output
+        y_tensor = create_or_get_tensor(scope, "y_out", None, place)
+        saved_mean_tensor = create_or_get_tensor(scope, "saved_mean", None,
+                                                 place)
+        saved_variance_tensor = create_or_get_tensor(scope, "saved_variance",
+                                                     None, place)
+        mean_out_tensor = mean_tensor
+        variance_out_tensor = variance_tensor
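+        # MeanOut/VarianceOut are wired to the same "mean"/"variance"
+        # variables below, so the running statistics are updated in place.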
+
+        batch_norm_op = Operator(
+            "batch_norm",
+            # inputs
+            X="x_val",
+            Scale="scale_val",
+            Bias="bias_val",
+            Mean="mean",
+            Variance="variance",
+            # outputs
+            Y="y_out",
+            MeanOut="mean",
+            VarianceOut="variance",
+            SavedMean="saved_mean",
+            SavedVariance="saved_variance",
+            # attrs
+            is_test=True,
+            data_layout=data_layout,
+            epsilon=epsilon)
+
+        batch_norm_op.run(scope, place)
+
+        # check inference result
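+        # (atol below is relatively loose so the same check also works for the
+        # float16 subclass)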
+        self.__assert_close(
+            y_tensor,
+            y_out,
+            "inference outputs are different at " + str(place) + ", " +
+            data_layout + ", " + str(np.dtype(dtype)) +
+            str(np.array(y_tensor)) + str(y_out),
+            atol=1e-3)
+
+    def test_check_output(self):
+        places = [core.CPUPlace()]
+        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
+            places.append(core.CUDAPlace(0))
+
+        for place in places:
+            for data_format in ["NCHW", "NHWC"]:
+                self.check_with_place(place, data_format, self.dtype,
+                                      [2, 3, 4, 5])
+                self.check_with_place(place, data_format, self.dtype, [2, 3])
+
+
+class TestFP16BatchNormOpInference(TestBatchNormOpInference):
+    def setUp(self):
+        self.dtype = np.float16
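+        # Everything else is inherited from the float32 inference test.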
+
+    def test_check_output(self):
+        places = []
+        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                places.append(place)
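+        # float16 runs only on CUDA devices that report fp16 support;
+        # CPU places are skipped.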
+
+        for place in places:
+            for data_format in ["NCHW", "NHWC"]:
+                self.check_with_place(place, data_format, self.dtype,
+                                      [2, 3, 4, 5])
+                self.check_with_place(place, data_format, self.dtype, [2, 3])
+
+
+class TestBatchNormOpTraining(OpTest):
+    def __assert_close(self, tensor, np_array, msg, atol=1e-4):
+        self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
+
+    def test_python_testing(self):
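+        # Pure-Python check that the NHWC and NCHW inference references agree
+        # on the same data up to a layout transpose.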
+        data_format = "NHWC"
+        epsilon = 0.00001
+
+        n, h, w, c = 2, 3, 4, 5
+        x_shape = [n, h, w, c]
+        scale_shape = [c]
+
+        x_val = np.random.random_sample(x_shape).astype(np.float32)
+        scale_val = np.random.random_sample(scale_shape).astype(np.float32)
+        bias_val = np.random.random_sample(scale_shape).astype(np.float32)
+
+        mean = np.zeros(scale_shape).astype(np.float32)
+        variance = np.ones(scale_shape).astype(np.float32)
+
+        y_out = _reference_testing(x_val, scale_val, bias_val, mean, variance,
+                                   epsilon, "NHWC")
+
+        # run the same data through the NCHW path; it should produce
+        # identical results
+        x_shape2 = [n, c, h, w]
+        x_val2 = np.transpose(x_val, (0, 3, 1, 2))
+        y_out2 = _reference_testing(x_val2, scale_val, bias_val, mean, variance,
+                                    epsilon, "NCHW")
+
+        # transfer (N, C, H, W) back to (N, H, W, C)
+        y_out2_trans = np.transpose(y_out2, (0, 2, 3, 1))
+        self.__assert_close(y_out, y_out2_trans, "inference output")
+        print 'python: NHWC, NCHW, inference checking passed'
+
+    def test_python_training(self):
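+        # Training-mode counterpart: checks that the NHWC and NCHW forward
+        # passes of the Python reference agree before testing backward.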
         data_format = "NHWC"
         epsilon = 0.00001
         momentum = 0.9
@@ -197,7 +376,7 @@ def test_python(self):
 
         # transfer (N, C, H, W) back to (N, H, W, C)
         y_out2_trans = np.transpose(y_out2, (0, 2, 3, 1))
-        self.__assert_close(y_out, y_out2_trans, "batch variance")
+        self.__assert_close(y_out, y_out2_trans, "batch output")
         print 'python: NHWC, NCHW, forward checking passed'
 
         # test backward now