Skip to content

Commit 856211e

Browse files
committed
[API Compatibility] Add clip_, logsigmoid, _calculate_fan_in_and_fan_out, meshgrid, autocast
1 parent f45380c commit 856211e

File tree

9 files changed

+174
-1
lines changed

9 files changed

+174
-1
lines changed

python/paddle/__init__.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -226,6 +226,7 @@ def new_init(self, *args, **kwargs):
226226
get_autocast_gpu_dtype,
227227
is_autocast_enabled,
228228
)
229+
from .amp.auto_cast import autocast as _autocast
229230
from .autograd import (
230231
enable_grad,
231232
grad,
@@ -970,7 +971,7 @@ def __dir__(self):
970971
manual_seed = seed
971972
sub = subtract
972973
sub_ = subtract_
973-
974+
autocast = _autocast
974975

975976
__all__ = [
976977
'block_diag',
@@ -1481,6 +1482,7 @@ def __dir__(self):
14811482
'conv3d',
14821483
'manual_seed',
14831484
'softmax',
1485+
'autocast',
14841486
]
14851487
import os
14861488

python/paddle/amp/auto_cast.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1078,7 +1078,11 @@ def autocast(
10781078
imperative mode.
10791079
10801080
Args:
1081-
device_type(str, optional): Device type.But because the paddle does not distinguish between devices, this parameter does not work
1082+
device_type(str, optional): Device type. Default is 'gpu'. But this parameter is not used.
10821086
enable(bool, optional): Enable auto-mixed-precision or not. Default is True.
10831087
dtype(str, optional): Whether to use 'float16' or 'bfloat16'. Default is 'float16'.
10841088
cache_enabled(bool, optional): whether to enable cache or not. Default is True. But this parameter is not used

python/paddle/functional.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414
from .compat import split
15+
from .tensor.creation import meshgrid
1516
from .tensor.einsum import einsum
1617
from .tensor.linalg import norm
1718
from .tensor.manipulation import (
@@ -31,4 +32,5 @@
3132
"norm",
3233
'split',
3334
'unique_consecutive',
35+
"meshgrid",
3436
]

python/paddle/nn/functional/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -172,6 +172,7 @@
172172
pixel_unshuffle,
173173
)
174174

175+
logsigmoid = log_sigmoid
175176
__all__ = [
176177
'celu',
177178
'conv1d',
@@ -192,6 +193,7 @@
192193
'leaky_relu',
193194
'leaky_relu_',
194195
'log_sigmoid',
196+
'logsigmoid',
195197
'maxout',
196198
'prelu',
197199
'relu',

python/paddle/nn/functional/activation.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -754,6 +754,7 @@ def relu_(x: Tensor, name: str | None = None) -> Tensor:
754754
return _C_ops.relu_(x)
755755

756756

757+
@param_one_alias(["x", "input"])
757758
def log_sigmoid(x: Tensor, name: str | None = None) -> Tensor:
758759
r"""
759760
log_sigmoid activation.
@@ -764,6 +765,7 @@ def log_sigmoid(x: Tensor, name: str | None = None) -> Tensor:
764765
765766
Parameters:
766767
x (Tensor): The input Tensor with data type float32, float64, complex64, complex128.
768+
Alias: ``input``.
767769
name (str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
768770
769771
Returns:

python/paddle/nn/init.py

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@
1414

1515
from __future__ import annotations
1616

17+
import numpy as np
18+
1719
import paddle
1820

1921
from ..base.framework import in_dygraph_mode, in_pir_mode
@@ -27,6 +29,41 @@
2729
from .initializer.xavier import XavierNormal, XavierUniform
2830

2931

32+
def _calculate_fan_in_and_fan_out(var: paddle.Tensor) -> tuple[int, int]:
33+
"""Compute the fan_in and the fan_out for layers
34+
35+
This method computes the fan_in and the fan_out
36+
for neural network layers, if not specified. It is
37+
not possible to perfectly estimate fan_in and fan_out.
38+
This method will estimate it correctly for matrix multiply and
39+
convolutions.
40+
41+
Args:
42+
var: variable for which fan_in and fan_out have to be computed.
43+
44+
Returns:
45+
tuple of two integers (fan_in, fan_out).
46+
"""
47+
shape = var.shape
48+
if not shape or len(shape) == 0:
49+
fan_in = fan_out = 1
50+
elif len(shape) == 1:
51+
fan_in = fan_out = shape[0]
52+
elif len(shape) == 2:
53+
# This is the case for simple matrix multiply
54+
fan_in = shape[0]
55+
fan_out = shape[1]
56+
else:
57+
# Assume this to be a convolutional kernel
58+
# In PaddlePaddle, the shape of the kernel is like:
59+
# [num_filters, num_filter_channels, ...] where the remaining
60+
# dimensions are the filter_size
61+
receptive_field_size = np.prod(shape[2:])
62+
fan_in = int(shape[1] * receptive_field_size)
63+
fan_out = int(shape[0] * receptive_field_size)
64+
return (fan_in, fan_out)
65+
66+
3067
def kaiming_uniform_(
3168
tensor: paddle.Tensor,
3269
a: float = 0,

test/legacy_test/test_activation_op.py

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -854,6 +854,53 @@ def test_errors(self):
854854
F.log_sigmoid(x_fp16)
855855

856856

857+
class TestLogSigmoidOutAndParaDecorator(unittest.TestCase):
858+
def setUp(self) -> None:
859+
paddle.disable_static()
860+
self.apis = [
861+
paddle.nn.functional.log_sigmoid,
862+
paddle.nn.functional.logsigmoid,
863+
]
864+
self.shape = [3, 4, 5]
865+
self.input_np = np.random.random(self.shape).astype('float32')
866+
867+
def do_test(self, api, test_type):
868+
self.test_types = [
869+
"decorator1",
870+
]
871+
x = paddle.to_tensor(self.input_np, stop_gradient=False)
872+
out = paddle.zeros(self.shape, dtype='float32')
873+
out.stop_gradient = False
874+
if test_type == "raw":
875+
out = paddle.nn.functional.log_sigmoid(x)
876+
out.mean().backward()
877+
return out, x.grad
878+
elif test_type == "decorator1":
879+
res = api(input=x)
880+
loss = res.mean()
881+
loss.backward()
882+
x_grad = x.grad
883+
return res, x_grad
884+
else:
885+
raise NotImplementedError(
886+
f"Test type {test_type} is not implemented."
887+
)
888+
889+
def test_api(self):
890+
out_std, x_grad_std = self.do_test(
891+
paddle.nn.functional.log_sigmoid, "raw"
892+
)
893+
for api in self.apis:
894+
for test_type in self.test_types:
895+
out, x_grad = self.do_test(api, test_type)
896+
np.testing.assert_allclose(
897+
out.numpy(), out_std.numpy(), rtol=1e-20
898+
)
899+
np.testing.assert_allclose(
900+
x_grad.numpy(), x_grad_std.numpy(), rtol=1e-20
901+
)
902+
903+
857904
class TestTanh(TestActivation, TestParameter):
858905
def setUp(self):
859906
self.op_type = "tanh"

test/legacy_test/test_autocast.py

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
import unittest
16+
17+
import paddle
18+
19+
20+
class TestAutoCast(unittest.TestCase):
21+
def init_net(self):
22+
self._conv = paddle.nn.Conv2D(
23+
in_channels=1, out_channels=6, kernel_size=3, bias_attr=False
24+
)
25+
self._linear = paddle.nn.Linear(in_features=4, out_features=4)
26+
27+
def test_autocast(self):
28+
self.init_net()
29+
with paddle.autocast():
30+
out1 = self._conv(paddle.rand(shape=[1, 1, 6, 6], dtype='float32'))
31+
out2 = out1 + paddle.rand(shape=out1.shape, dtype='float16')
32+
out3 = self._linear(out2)
33+
34+
self.assertEqual(out1.dtype, paddle.float16)
35+
self.assertEqual(out2.dtype, paddle.float16)
36+
self.assertEqual(out3.dtype, paddle.float32)
37+
38+
39+
if __name__ == '__main__':
40+
unittest.main()

test/legacy_test/test_nn_init_function.py

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,22 @@ def _calculate_gain(nonlinearity, param):
6262
return recommended_gain[nonlinearity]
6363

6464

65+
def _calculate_fan_in_and_fan_out(var: paddle.Tensor) -> tuple[int, int]:
66+
shape = var.shape
67+
if not shape or len(shape) == 0:
68+
fan_in = fan_out = 1
69+
elif len(shape) == 1:
70+
fan_in = fan_out = shape[0]
71+
elif len(shape) == 2:
72+
fan_in = shape[0]
73+
fan_out = shape[1]
74+
else:
75+
receptive_field_size = np.prod(shape[2:])
76+
fan_in = shape[1] * receptive_field_size
77+
fan_out = shape[0] * receptive_field_size
78+
return (fan_in, fan_out)
79+
80+
6581
class Test_calculate_gain(unittest.TestCase):
6682
def test(self):
6783
for nonlinearity in [
@@ -87,6 +103,27 @@ def test(self):
87103
)
88104

89105

106+
class TestCAlFanINOUT(unittest.TestCase):
107+
def test_cal_fan_in_and_out(self):
108+
x = paddle.tensor.randn([10])
109+
self.assertEqual(
110+
_calculate_fan_in_and_fan_out(x),
111+
paddle.nn.init._calculate_fan_in_and_fan_out(x),
112+
)
113+
114+
y = paddle.tensor.randn([10, 10])
115+
self.assertEqual(
116+
_calculate_fan_in_and_fan_out(y),
117+
paddle.nn.init._calculate_fan_in_and_fan_out(y),
118+
)
119+
120+
z = paddle.randn([10, 10, 10])
121+
self.assertEqual(
122+
_calculate_fan_in_and_fan_out(z),
123+
paddle.nn.init._calculate_fan_in_and_fan_out(z),
124+
)
125+
126+
90127
class Test_kaiming_uniform_(unittest.TestCase):
91128
def check_kaiming_uniform(
92129
self, tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'

0 commit comments

Comments
 (0)