Merged
Changes from 4 commits
42 changes: 42 additions & 0 deletions doc/fluid/dev/src/fc.py
@@ -79,3 +79,45 @@ def fc(input,
data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
fc = fluid.layers.fc(input=data, size=1000, act="tanh")
"""


def lrn(input, n=5, k=2.0, alpha=1e-4, beta=0.75, name=None):
r"""
**Local Response Normalization Operator**

This layer performs a kind of "lateral inhibition" by normalizing each
input value over a local window of adjacent channels at the same spatial
location.
Contributor
Please move the reference so it comes before the parameter descriptions;
also remove the book-title quotation marks and add a link to the paper.
Please add a description of the operator's functionality here.

Contributor Author
done

Contributor
1. Please move the reference to after the formula symbol descriptions and
before the parameter descriptions.
2. Add a description of the operator's functionality.


.. math::

Output(i, x, y) = Input(i, x, y) / \left(
k + \alpha \sum\limits^{\min(C - 1, i + n/2)}_{j = \max(0, i - n/2)}
(Input(j, x, y))^2 \right)^{\beta}

In the above equation:

* :math:`n`: The number of channels to sum over.
* :math:`k`: The offset (usually positive to avoid dividing by 0).
* :math:`\alpha`: The scaling parameter.
* :math:`\beta`: The exponent.
* :math:`C`: The total number of input channels.

Refer to `ImageNet Classification with Deep Convolutional Neural Networks`
(Krizhevsky et al., NIPS 2012).

Args:
input (Variable): The input tensor of this layer. The rank of the input tensor must be 4 and its layout must be 'NCHW'.
n (int, default 5): The number of channels to sum over.
k (float, default 2.0): An offset (usually positive to avoid dividing by 0).
alpha (float, default 1e-4): The scaling parameter.
beta (float, default 0.75): The exponent.
name (str, default None): A name for this operation.
Contributor
L105: Do not use abbreviations such as "dims".
L105~L110: Please add a space between each parameter name and the opening parenthesis.

Contributor Author
done

Raises:
ValueError: If the rank of the input tensor is not 4.

Returns:
A tensor variable storing the transformation result.

Examples:
.. code-block:: python

data = fluid.layers.data(name="data", shape=[3, 112, 112], dtype="float32")
lrn = fluid.layers.lrn(input=data)
"""
68 changes: 68 additions & 0 deletions python/paddle/fluid/layers/nn.py
@@ -73,6 +73,7 @@
'smooth_l1',
'one_hot',
'autoincreased_step_counter',
'lrn',
]


@@ -3292,3 +3293,70 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
counter.stop_gradient = True

return counter


def lrn(input, n=5, k=2.0, alpha=1e-4, beta=0.75, name=None):
r"""
**Local Response Normalization Operator**

This layer performs a kind of "lateral inhibition" by normalizing each
input value over a local window of adjacent channels at the same spatial
location.

.. math::

Output(i, x, y) = Input(i, x, y) / \left(
k + \alpha \sum\limits^{\min(C - 1, i + n/2)}_{j = \max(0, i - n/2)}
(Input(j, x, y))^2 \right)^{\beta}

In the above equation:

* :math:`n`: The number of channels to sum over.
* :math:`k`: The offset (usually positive to avoid dividing by 0).
* :math:`\alpha`: The scaling parameter.
* :math:`\beta`: The exponent.
* :math:`C`: The total number of input channels.

Refer to `ImageNet Classification with Deep Convolutional Neural Networks`
(Krizhevsky et al., NIPS 2012).

Args:
input (Variable): The input tensor of this layer. The rank of the input tensor must be 4 and its layout must be 'NCHW'.
n (int, default 5): The number of channels to sum over.
k (float, default 2.0): An offset (usually positive to avoid dividing by 0).
alpha (float, default 1e-4): The scaling parameter.
beta (float, default 0.75): The exponent.
name (str, default None): A name for this operation.

Raises:
ValueError: If the rank of the input tensor is not 4.

Returns:
A tensor variable storing the transformation result.

Examples:
.. code-block:: python

data = fluid.layers.data(name="data", shape=[3, 112, 112], dtype="float32")
lrn = fluid.layers.lrn(input=data)
"""
helper = LayerHelper('lrn', **locals())
dtype = helper.input_dtype()
input_shape = input.shape
dims = len(input_shape)

if dims != 4:
raise ValueError(
"the rank of the input must be 4 (not %d), and its layout must be NCHW" %
(dims))

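# MidOut holds the intermediate normalization terms computed by the LRN
# kernel; they are reused by the backward pass, so gradients are stopped.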
mid_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True)
lrn_out = helper.create_tmp_variable(dtype)
helper.append_op(
type="lrn",
inputs={"X": input},
outputs={
"Out": lrn_out,
"MidOut": mid_out,
},
attrs={"n": n,
"k": k,
"alpha": alpha,
"beta": beta})

return lrn_out
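
For context, a minimal usage sketch placing LRN after a convolution, in
the AlexNet style the paper describes. It assumes the fluid layers API of
this era (fluid.layers.conv2d plus the new fluid.layers.lrn); the
variable names are illustrative.

.. code-block:: python

import paddle.fluid as fluid

# conv -> LRN: normalize the convolution's feature maps across channels.
data = fluid.layers.data(name="data", shape=[3, 112, 112], dtype="float32")
conv = fluid.layers.conv2d(input=data, num_filters=16, filter_size=3, act="relu")
norm = fluid.layers.lrn(input=conv, n=5, k=2.0, alpha=1e-4, beta=0.75)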
7 changes: 7 additions & 0 deletions python/paddle/fluid/tests/unittests/test_layers.py
@@ -231,6 +231,13 @@ def test_softmax(self):
self.assertIsNotNone(layers.softmax(hid))
print(str(program))

def test_lrn(self):
program = Program()
with program_guard(program):
data = layers.data(name='data', shape=[6, 2, 2], dtype='float32')
self.assertIsNotNone(layers.lrn(data))
print(str(program))
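
The test above only checks graph construction. A numeric check could run
the program and compare the result against a NumPy reference such as the
lrn_ref sketch earlier; the following is a hedged sketch assuming the
fluid Executor API of this era, not part of this PR.

.. code-block:: python

import numpy as np
import paddle.fluid as fluid

program = fluid.Program()
with fluid.program_guard(program):
    data = fluid.layers.data(name="data", shape=[6, 2, 2], dtype="float32")
    out = fluid.layers.lrn(input=data)

# LRN has no parameters, so no startup program is needed before running.
exe = fluid.Executor(fluid.CPUPlace())
x = np.random.rand(1, 6, 2, 2).astype("float32")
res, = exe.run(program, feed={"data": x}, fetch_list=[out])
# res should match the NumPy reference up to floating-point tolerance.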

def test_get_places(self):
program = Program()
with program_guard(program):