Commit edf5f31

[2.0 API] add paddle.nn.functional.linear and fix paddle.nn.Linear (#26480)

1 parent 2f75465 commit edf5f31
File tree

13 files changed: +275 -32 lines changed
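In short, this commit drops the 1.x-style `dtype` and `act` arguments from `paddle.nn.Linear` and adds a functional form, `paddle.nn.functional.linear`. A minimal sketch of the resulting usage, inferred from the tests in this diff (tensor names are illustrative):

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    paddle.disable_static()

    x = paddle.to_tensor(np.ones((3, 2), dtype="float32"))

    # Layer form: no more `dtype` or `act` arguments.
    linear = paddle.nn.Linear(2, 4)
    y = linear(x)

    # New functional form: y = matmul(x, weight) + bias
    weight = paddle.to_tensor(np.ones((2, 4), dtype="float32"))
    bias = paddle.to_tensor(np.ones((4,), dtype="float32"))
    y2 = F.linear(x, weight, bias)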

python/paddle/fluid/tests/unittests/test_adamax_api.py

Lines changed: 1 addition & 1 deletion

@@ -26,7 +26,7 @@ def test_adamax_api_dygraph(self):
         paddle.disable_static()
         value = np.arange(26).reshape(2, 13).astype("float32")
         a = paddle.to_variable(value)
-        linear = paddle.nn.Linear(13, 5, dtype="float32")
+        linear = paddle.nn.Linear(13, 5)
         adam = paddle.optimizer.Adamax(
             learning_rate=0.01,
             parameters=linear.parameters(),
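The dygraph optimizer tests touched in this commit all follow the same update pattern; a hedged sketch of how such a step typically completes (the `step`/`clear_grad` calls are the 2.0 optimizer API, not shown in this hunk):

    # Sketch of the rest of the training step (assumed, not part of this hunk):
    out = linear(a)     # forward pass with the Linear built above
    out.backward()      # accumulate gradients
    adam.step()         # apply one optimizer update
    adam.clear_grad()   # reset gradients before the next step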

python/paddle/fluid/tests/unittests/test_adamw_op.py

Lines changed: 2 additions & 2 deletions

@@ -23,7 +23,7 @@ def test_adamw_op_dygraph(self):
         paddle.disable_static()
         value = np.arange(26).reshape(2, 13).astype("float32")
         a = paddle.to_variable(value)
-        linear = paddle.nn.Linear(13, 5, dtype="float32")
+        linear = paddle.nn.Linear(13, 5)
         adam = paddle.optimizer.AdamW(
             learning_rate=0.01,
             parameters=linear.parameters(),
@@ -38,7 +38,7 @@ def test_adamw_op_coverage(self):
         paddle.disable_static()
         value = np.arange(26).reshape(2, 13).astype("float32")
         a = paddle.to_variable(value)
-        linear = paddle.nn.Linear(13, 5, dtype="float32")
+        linear = paddle.nn.Linear(13, 5)
         adam = paddle.optimizer.AdamW(
             learning_rate=0.0,
             parameters=linear.parameters(),

python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py

Lines changed: 2 additions & 3 deletions

@@ -40,9 +40,8 @@ def __init__(self, num_classes=10, classifier_activation='softmax'):
         if num_classes > 0:
             self.fc = nn.Sequential(
                 nn.Linear(400, 120),
-                nn.Linear(120, 84),
-                nn.Linear(
-                    84, 10, act=classifier_activation))
+                nn.Linear(120, 84), nn.Linear(84, 10),
+                nn.Softmax()) #Todo: accept any activation
 
     def forward(self, inputs):
         x = self.features(inputs)
Lines changed: 78 additions & 0 deletions

@@ -0,0 +1,78 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+import paddle.fluid.core as core
+from op_test import OpTest
+import paddle
+from paddle import fluid, nn
+import paddle.fluid.dygraph as dg
+import paddle.nn.functional as F
+import paddle.fluid.initializer as I
+
+
+class LinearTestCase(unittest.TestCase):
+    def setUp(self):
+        self.dtype = 'float32'
+        self.input = np.ones((3, 1, 2)).astype(self.dtype)
+        self.weight = np.ones((2, 2)).astype(self.dtype)
+        self.bias = np.ones((2)).astype(self.dtype)
+        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
+        ) else paddle.CPUPlace()
+
+    def functional(self, place):
+        paddle.disable_static(place)
+        input = paddle.to_tensor(self.input)
+        weight = paddle.to_tensor(self.weight)
+        bias = paddle.to_tensor(self.bias)
+        out = F.linear(input, weight, bias)
+        return out.numpy()
+
+    def paddle_nn_layer(self, place):
+        paddle.disable_static(place)
+        input = paddle.to_tensor(self.input)
+        weight_attr = fluid.ParamAttr(
+            name="linear_weight",
+            learning_rate=1.0,
+            trainable=False,
+            regularizer=None,
+            initializer=paddle.fluid.initializer.ConstantInitializer(value=1.0))
+        bias_attr = fluid.ParamAttr(
+            name="linear_bias",
+            learning_rate=1.0,
+            trainable=False,
+            regularizer=None,
+            initializer=paddle.fluid.initializer.ConstantInitializer(value=1.0))
+        linear = paddle.nn.Linear(
+            2, 2, weight_attr=weight_attr, bias_attr=bias_attr)
+        y = linear(input)
+        return y.numpy()
+
+    def numpy_cal(self):
+        res = np.matmul(self.input, self.weight) + self.bias
+        return res
+
+    def test_error(self, place=paddle.CPUPlace()):
+        res_f = self.functional(place)
+        res_nn = self.paddle_nn_layer(place)
+        res_np = self.numpy_cal()
+        np.testing.assert_array_almost_equal(res_f, res_nn)
+        np.testing.assert_array_almost_equal(res_nn, res_np)
+
+
+if __name__ == "__main__":
+    unittest.main()
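As a quick sanity check on what `test_error` asserts: with all-ones inputs, every output element is 1*1 + 1*1 + 1 = 3.0. The same arithmetic in standalone NumPy:

    import numpy as np

    # Same shapes as LinearTestCase; matmul broadcasts over the leading dim.
    x = np.ones((3, 1, 2), dtype="float32")
    w = np.ones((2, 2), dtype="float32")
    b = np.ones((2,), dtype="float32")

    out = np.matmul(x, w) + b   # each element: 1*1 + 1*1 + 1 = 3.0
    assert out.shape == (3, 1, 2)
    assert (out == 3.0).all()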

python/paddle/fluid/tests/unittests/test_rmsprop_op.py

Lines changed: 1 addition & 1 deletion

@@ -228,7 +228,7 @@ def test_rmsprop_dygraph(self):
         paddle.disable_static()
         value = np.arange(26).reshape(2, 13).astype("float32")
         a = paddle.to_tensor(value)
-        linear = paddle.nn.Linear(13, 5, dtype="float32")
+        linear = paddle.nn.Linear(13, 5)
         # This can be any optimizer supported by dygraph.
         adam = paddle.optimizer.RMSProp(
             learning_rate=0.01,

python/paddle/incubate/hapi/tests/test_model.py

Lines changed: 8 additions & 10 deletions

@@ -23,7 +23,7 @@
 import tempfile
 
 from paddle import fluid
-from paddle.nn import Conv2d, Pool2D, Linear, ReLU, Sequential
+from paddle.nn import Conv2d, Pool2D, Linear, ReLU, Sequential, Softmax
 from paddle.fluid.dygraph.base import to_variable
 
 import paddle.incubate.hapi as hapi
@@ -53,10 +53,8 @@ def __init__(self, num_classes=10, classifier_activation=None):
 
         if num_classes > 0:
             self.fc = Sequential(
-                Linear(400, 120),
-                Linear(120, 84),
-                Linear(
-                    84, 10, act=classifier_activation))
+                Linear(400, 120), Linear(120, 84), Linear(84, 10),
+                Softmax()) #Todo: accept any activation
 
     def forward(self, inputs):
         x = self.features(inputs)
@@ -83,10 +81,8 @@ def __init__(self, num_classes=10, classifier_activation=None):
 
         if num_classes > 0:
             self.fc = Sequential(
-                Linear(400, 120),
-                Linear(120, 84),
-                Linear(
-                    84, 10, act=classifier_activation))
+                Linear(400, 120), Linear(120, 84), Linear(84, 10),
+                Softmax()) #Todo: accept any activation
 
     @declarative
     def forward(self, inputs):
@@ -320,10 +316,12 @@ def predict(self, dynamic):
 class MyModel(fluid.dygraph.Layer):
     def __init__(self, classifier_activation='softmax'):
         super(MyModel, self).__init__()
-        self._fc = Linear(20, 10, act=classifier_activation)
+        self._fc = Linear(20, 10)
+        self._act = Softmax() #Todo: accept any activation
 
     def forward(self, x):
         y = self._fc(x)
+        y = self._act(y)
         return y

python/paddle/incubate/hapi/tests/test_uncombined_weight2state_dict.py

Lines changed: 3 additions & 5 deletions

@@ -22,7 +22,7 @@
 import tempfile
 
 from paddle import fluid
-from paddle.nn import Conv2d, Pool2D, Linear, ReLU, Sequential
+from paddle.nn import Conv2d, Pool2D, Linear, ReLU, Sequential, Softmax
 
 from paddle.incubate.hapi.utils import uncombined_weight_to_state_dict
 
@@ -43,10 +43,8 @@ def __init__(self, num_classes=10, classifier_activation='softmax'):
 
         if num_classes > 0:
             self.fc = Sequential(
-                Linear(400, 120),
-                Linear(120, 84),
-                Linear(
-                    84, 10, act=classifier_activation))
+                Linear(400, 120), Linear(120, 84), Linear(84, 10),
+                Softmax()) #Todo: accept any activation
 
     def forward(self, inputs):
         x = self.features(inputs)

python/paddle/incubate/hapi/vision/models/lenet.py

Lines changed: 3 additions & 5 deletions

@@ -13,7 +13,7 @@
 #limitations under the License.
 
 import paddle.fluid as fluid
-from paddle.nn import Conv2d, Pool2D, Linear, ReLU, Sequential
+from paddle.nn import Conv2d, Pool2D, Linear, ReLU, Sequential, Softmax
 
 __all__ = ['LeNet']
 
@@ -50,10 +50,8 @@ def __init__(self, num_classes=10, classifier_activation='softmax'):
 
         if num_classes > 0:
             self.fc = Sequential(
-                Linear(400, 120),
-                Linear(120, 84),
-                Linear(
-                    84, 10, act=classifier_activation))
+                Linear(400, 120), Linear(120, 84), Linear(84, 10),
+                Softmax()) #Todo: accept any activation
 
     def forward(self, inputs):
         x = self.features(inputs)

python/paddle/incubate/hapi/vision/models/vgg.py

Lines changed: 5 additions & 3 deletions

@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import paddle.fluid as fluid
-from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear, ReLU
+from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear, ReLU, Softmax
 from paddle.fluid.dygraph.container import Sequential
 
 from ...download import get_weights_path_from_url
@@ -37,7 +37,8 @@ def __init__(self, num_classes, classifier_activation='softmax'):
         super(Classifier, self).__init__()
         self.linear1 = Linear(512 * 7 * 7, 4096)
         self.linear2 = Linear(4096, 4096)
-        self.linear3 = Linear(4096, num_classes, act=classifier_activation)
+        self.linear3 = Linear(4096, num_classes)
+        self.act = Softmax() #Todo: accept any activation
 
     def forward(self, x):
         x = self.linear1(x)
@@ -46,7 +47,8 @@ def forward(self, x):
         x = self.linear2(x)
         x = fluid.layers.relu(x)
         x = fluid.layers.dropout(x, 0.5)
-        out = self.linear3(x)
+        x = self.linear3(x)
+        out = self.act(x)
         return out

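The hapi changes above all follow one pattern: since `paddle.nn.Linear` no longer accepts `act`, the fused activation becomes an explicit layer applied in `forward`. A minimal before/after sketch (the input shape and `num_classes` are illustrative):

    import numpy as np
    import paddle

    paddle.disable_static()
    x = paddle.to_tensor(np.ones((4, 4096), dtype="float32"))
    num_classes = 10  # illustrative value

    # Before (1.x): activation fused into the layer:
    #     linear3 = Linear(4096, num_classes, act='softmax')

    # After (2.0): the activation is its own layer:
    linear3 = paddle.nn.Linear(4096, num_classes)
    act = paddle.nn.Softmax()
    out = act(linear3(x))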
python/paddle/nn/__init__.py

Lines changed: 1 addition & 0 deletions

@@ -115,6 +115,7 @@
 # from .layer.learning_rate import NoamDecay #DEFINE_ALIAS
 # from .layer.learning_rate import PiecewiseDecay #DEFINE_ALIAS
 # from .layer.learning_rate import PolynomialDecay #DEFINE_ALIAS
+from .layer.common import Linear
 # from .layer.loss import NCELoss #DEFINE_ALIAS
 from .layer.loss import BCEWithLogitsLoss #DEFINE_ALIAS
 from .layer.loss import CrossEntropyLoss #DEFINE_ALIAS
