Skip to content

Commit 0679678

Browse files
authored
add norm 2.0 api, test=develop (#26465)
* add norm 2.0 api, test=develop
1 parent a8b5741 commit 0679678

File tree

8 files changed

+1583
-13
lines changed

8 files changed

+1583
-13
lines changed
Lines changed: 129 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,129 @@
1+
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
import os
16+
import unittest
17+
import numpy as np
18+
import paddle.fluid.core as core
19+
from paddle.fluid.op import Operator
20+
import paddle.fluid as fluid
21+
from op_test import OpTest, _set_use_system_allocator
22+
from paddle.fluid.framework import grad_var_name
23+
import paddle.fluid as fluid
24+
from paddle.fluid import Program, program_guard
25+
import paddle
26+
27+
28+
class TestBatchNorm(unittest.TestCase):
    """Tests for the 2.0 API paddle.nn.BatchNorm1d/2d/3d layers.

    Checks the ``name`` kwarg, rank validation of inputs, and numerical
    agreement with the legacy ``fluid.dygraph.BatchNorm`` in both dygraph
    and static-graph modes.
    """

    def _get_places(self):
        # CPU always; CUDA only when compiled in and batch_norm has a GPU kernel.
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            places.append(fluid.CUDAPlace(0))
        return places

    def test_name(self):
        # Constructing the layer with a custom `name` must not raise.
        for p in self._get_places():
            with fluid.dygraph.guard(p):
                batch_norm1d = paddle.nn.BatchNorm1d(1, name="test")

    def test_error(self):
        # Inputs of the wrong rank must raise ValueError for each variant.
        for p in self._get_places():

            def error1d():
                # BatchNorm1d rejects 4-D input.
                x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
                batch_norm1d = paddle.nn.BatchNorm1d(1)
                batch_norm1d(fluid.dygraph.to_variable(x_data_4))

            def error2d():
                # BatchNorm2d rejects 3-D input.
                x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
                batch_norm2d = paddle.nn.BatchNorm2d(1)
                batch_norm2d(fluid.dygraph.to_variable(x_data_3))

            def error3d():
                # BatchNorm3d rejects 4-D input.
                x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
                batch_norm3d = paddle.nn.BatchNorm3d(1)
                batch_norm3d(fluid.dygraph.to_variable(x_data_4))

            with fluid.dygraph.guard(p):
                self.assertRaises(ValueError, error1d)
                self.assertRaises(ValueError, error2d)
                self.assertRaises(ValueError, error3d)

    def test_dygraph(self):
        # New BatchNorm2d must match legacy fluid BatchNorm in dygraph mode.
        for p in self._get_places():
            shape = [4, 10, 4, 4]

            def compute_v1(x, is_test, trainable_statistics):
                with fluid.dygraph.guard(p):
                    bn = fluid.dygraph.BatchNorm(
                        shape[1],
                        is_test=is_test,
                        trainable_statistics=trainable_statistics)
                    y = bn(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v2(x):
                with fluid.dygraph.guard(p):
                    bn = paddle.nn.BatchNorm2d(shape[1])
                    y = bn(fluid.dygraph.to_variable(x))
                return y.numpy()

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x, False, False)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))

    def test_static(self):
        # New BatchNorm2d must match legacy fluid BatchNorm in static-graph mode.
        for p in self._get_places():
            exe = fluid.Executor(p)
            shape = [4, 10, 16, 16]

            def compute_v1(x_np, is_test, trainable_statistics):
                with program_guard(Program(), Program()):
                    bn = fluid.dygraph.BatchNorm(
                        shape[1],
                        is_test=is_test,
                        trainable_statistics=trainable_statistics)
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = bn(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            def compute_v2(x_np):
                with program_guard(Program(), Program()):
                    bn = paddle.nn.BatchNorm2d(shape[1])
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = bn(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x, False, False)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))
126+
127+
128+
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
1+
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
import os
16+
import unittest
17+
import numpy as np
18+
import paddle.fluid.core as core
19+
from paddle.fluid.op import Operator
20+
import paddle.fluid as fluid
21+
from op_test import OpTest, _set_use_system_allocator
22+
from paddle.fluid.framework import grad_var_name
23+
import paddle.fluid as fluid
24+
from paddle.fluid import Program, program_guard
25+
import paddle
26+
27+
28+
class TestDygraphGroupNormv2(unittest.TestCase):
    """Tests that the 2.0 API paddle.nn.GroupNorm matches the legacy
    ``fluid.dygraph.GroupNorm`` in both dygraph and static-graph modes.
    """

    def _get_places(self):
        # CPU always; CUDA only when compiled in and group_norm has a GPU kernel.
        # (Fixes copy-paste: the original static test queried "layer_norm".)
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"):
            places.append(fluid.CUDAPlace(0))
        return places

    def test_dygraph(self):
        for p in self._get_places():
            shape = [2, 6, 2, 2]

            def compute_v1(x):
                with fluid.dygraph.guard(p):
                    gn = fluid.dygraph.GroupNorm(channels=2, groups=2)
                    y = gn(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v2(x):
                with fluid.dygraph.guard(p):
                    gn = paddle.nn.GroupNorm(num_channels=2, num_groups=2)
                    y = gn(fluid.dygraph.to_variable(x))
                return y.numpy()

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))

    def test_static(self):
        for p in self._get_places():
            exe = fluid.Executor(p)
            shape = [2, 6, 2, 2]

            def compute_v1(x_np):
                with program_guard(Program(), Program()):
                    gn = fluid.dygraph.GroupNorm(channels=2, groups=2)
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = gn(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            def compute_v2(x_np):
                with program_guard(Program(), Program()):
                    gn = paddle.nn.GroupNorm(num_channels=2, num_groups=2)
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = gn(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))
83+
84+
85+
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
Lines changed: 115 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,115 @@
1+
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
import os
16+
import unittest
17+
import numpy as np
18+
import paddle.fluid.core as core
19+
from paddle.fluid.op import Operator
20+
import paddle.fluid as fluid
21+
from op_test import OpTest, _set_use_system_allocator
22+
from paddle.fluid.framework import grad_var_name
23+
import paddle.fluid as fluid
24+
from paddle.fluid import Program, program_guard
25+
import paddle
26+
27+
28+
class TestInstanceNorm(unittest.TestCase):
    """Tests for the 2.0 API paddle.nn.InstanceNorm1d/2d/3d layers.

    Checks rank validation of inputs and numerical agreement with the
    legacy ``fluid.dygraph.InstanceNorm`` in dygraph and static modes.
    """

    def _get_places(self):
        # CPU always; CUDA only when compiled in and instance_norm has a GPU kernel.
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(
                "instance_norm"):
            places.append(fluid.CUDAPlace(0))
        return places

    def test_error(self):
        # Inputs of the wrong rank must raise ValueError for each variant.
        for p in self._get_places():

            def error1d():
                # InstanceNorm1d rejects 4-D input.
                x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
                instance_norm1d = paddle.nn.InstanceNorm1d(1)
                instance_norm1d(fluid.dygraph.to_variable(x_data_4))

            def error2d():
                # InstanceNorm2d rejects 3-D input.
                x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
                instance_norm2d = paddle.nn.InstanceNorm2d(1)
                instance_norm2d(fluid.dygraph.to_variable(x_data_3))

            def error3d():
                # InstanceNorm3d rejects 4-D input.
                # (Fixes copy-paste: the original constructed BatchNorm3d here.)
                x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
                instance_norm3d = paddle.nn.InstanceNorm3d(1)
                instance_norm3d(fluid.dygraph.to_variable(x_data_4))

            with fluid.dygraph.guard(p):
                self.assertRaises(ValueError, error1d)
                self.assertRaises(ValueError, error2d)
                self.assertRaises(ValueError, error3d)

    def test_dygraph(self):
        # New InstanceNorm2d must match legacy fluid InstanceNorm in dygraph mode.
        for p in self._get_places():
            shape = [4, 10, 4, 4]

            def compute_v1(x):
                with fluid.dygraph.guard(p):
                    ins = fluid.dygraph.InstanceNorm(shape[1])
                    y = ins(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v2(x):
                with fluid.dygraph.guard(p):
                    ins = paddle.nn.InstanceNorm2d(shape[1])
                    y = ins(fluid.dygraph.to_variable(x))
                return y.numpy()

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))

    def test_static(self):
        # New InstanceNorm2d must match legacy fluid InstanceNorm in static mode.
        for p in self._get_places():
            exe = fluid.Executor(p)
            shape = [4, 10, 16, 16]

            def compute_v1(x_np):
                with program_guard(Program(), Program()):
                    ins = fluid.dygraph.InstanceNorm(shape[1])
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = ins(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            def compute_v2(x_np):
                with program_guard(Program(), Program()):
                    ins = paddle.nn.InstanceNorm2d(shape[1])
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = ins(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))
112+
113+
114+
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()

0 commit comments

Comments
 (0)