Skip to content

Commit 95b3ef2

Browse files
zrr1999 and SigureMo authored
[Typing] update pybind11 stub (PaddlePaddle#72553)
--------- Co-authored-by: SigureMo <sigure.qaq@gmail.com>
1 parent 5883eef commit 95b3ef2

File tree

15 files changed

+79
-84
lines changed

15 files changed

+79
-84
lines changed

python/paddle/base/framework.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1969,7 +1969,7 @@ def backward(self, retain_graph=False):
19691969
>>> import paddle
19701970
>>> paddle.disable_static()
19711971
1972-
>>> x = np.ones([2, 2], np.float32) # type: ignore[var-annotated]
1972+
>>> x = np.ones([2, 2], np.float32)
19731973
>>> inputs = []
19741974
>>> for _ in range(10):
19751975
... tmp = paddle.to_tensor(x)
@@ -2013,7 +2013,7 @@ def gradient(self):
20132013
>>> import numpy as np
20142014
20152015
>>> # example1: return ndarray
2016-
>>> x = np.ones([2, 2], np.float32) # type: ignore[var-annotated]
2016+
>>> x = np.ones([2, 2], np.float32)
20172017
>>> with base.dygraph.guard():
20182018
... inputs2 = []
20192019
... for _ in range(10):
@@ -2062,7 +2062,7 @@ def clear_gradient(self):
20622062
>>> import paddle.base as base
20632063
>>> import numpy as np
20642064
2065-
>>> x = np.ones([2, 2], np.float32) # type: ignore[var-annotated]
2065+
>>> x = np.ones([2, 2], np.float32)
20662066
>>> inputs2 = []
20672067
>>> for _ in range(10):
20682068
>>> tmp = paddle.to_tensor(x)

python/paddle/base/layers/math_op_patch.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -358,7 +358,7 @@ def astype(self, dtype):
358358
>>> import paddle
359359
>>> import numpy as np
360360
361-
>>> x = np.ones([2, 2], np.float32) # type: ignore[var-annotated]
361+
>>> x = np.ones([2, 2], np.float32)
362362
>>> with base.dygraph.guard():
363363
... original_variable = paddle.to_tensor(x)
364364
... print("original var's dtype is: {}, numpy dtype is {}".format(original_variable.dtype, original_variable.numpy().dtype))

python/paddle/distributed/parallel.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -348,7 +348,7 @@ class DataParallel(Layer):
348348
... model = paddle.DataParallel(model)
349349
... opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
350350
... for step in range(10):
351-
... x_data = numpy.random.randn(2, 2).astype(numpy.float32) # type: ignore[var-annotated]
351+
... x_data = numpy.random.randn(2, 2).astype(numpy.float32)
352352
... x = paddle.to_tensor(x_data)
353353
... x.stop_gradient = False
354354
... # step 1 : skip gradient synchronization by 'no_sync'

python/paddle/incubate/optimizer/lbfgs.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -93,10 +93,10 @@ class LBFGS(Optimizer):
9393
9494
>>> paddle.disable_static()
9595
>>> np.random.seed(0)
96-
>>> np_w = np.random.rand(1).astype(np.float32) # type: ignore[var-annotated]
97-
>>> np_x = np.random.rand(1).astype(np.float32) # type: ignore[var-annotated]
96+
>>> np_w = np.random.rand(1).astype(np.float32)
97+
>>> np_x = np.random.rand(1).astype(np.float32)
9898
99-
>>> inputs = [np.random.rand(1).astype(np.float32) for i in range(10)] # type: ignore[var-annotated]
99+
>>> inputs = [np.random.rand(1).astype(np.float32) for i in range(10)]
100100
>>> # y = 2x
101101
>>> targets = [2 * x for x in inputs]
102102

python/paddle/optimizer/lbfgs.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -396,10 +396,10 @@ class LBFGS(Optimizer):
396396
397397
>>> paddle.disable_static()
398398
>>> np.random.seed(0)
399-
>>> np_w = np.random.rand(1).astype(np.float32) # type: ignore[var-annotated]
400-
>>> np_x = np.random.rand(1).astype(np.float32) # type: ignore[var-annotated]
399+
>>> np_w = np.random.rand(1).astype(np.float32)
400+
>>> np_x = np.random.rand(1).astype(np.float32)
401401
402-
>>> inputs = [np.random.rand(1).astype(np.float32) for i in range(10)] # type: ignore[var-annotated]
402+
>>> inputs = [np.random.rand(1).astype(np.float32) for i in range(10)]
403403
>>> # y = 2x
404404
>>> targets = [2 * x for x in inputs]
405405

python/paddle/static/input.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,7 @@ def data(
110110
111111
# In this example, we will feed x and y with np-ndarray "1"
112112
# and fetch z, like implementing "1 + 1 = 2" in PaddlePaddle
113-
>>> feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32) # type: ignore[var-annotated]
113+
>>> feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32)
114114
115115
>>> exe = paddle.static.Executor(paddle.framework.CPUPlace())
116116
>>> out = exe.run(paddle.static.default_main_program(),
@@ -308,7 +308,7 @@ def from_numpy(
308308
>>> import numpy as np
309309
>>> from paddle.static import InputSpec
310310
311-
>>> x = np.ones([2, 2], np.float32) # type: ignore[var-annotated]
311+
>>> x = np.ones([2, 2], np.float32)
312312
>>> x_spec = InputSpec.from_numpy(x, name='x')
313313
>>> print(x_spec)
314314
InputSpec(shape=(2, 2), dtype=paddle.float32, name=x, stop_gradient=False)

python/paddle/static/io.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -900,7 +900,7 @@ def load_inference_model(
900900
901901
>>> [inference_program, feed_target_names, fetch_targets] = (
902902
... paddle.static.load_inference_model(path_prefix, exe))
903-
>>> tensor_img = np.array(np.random.random((64, 784)), dtype=np.float32) # type: ignore[var-annotated]
903+
>>> tensor_img = np.array(np.random.random((64, 784)), dtype=np.float32)
904904
>>> results = exe.run(inference_program,
905905
... feed={feed_target_names[0]: tensor_img},
906906
... fetch_list=fetch_targets)

python/paddle/static/nn/common.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3578,7 +3578,7 @@ def embedding(
35783578
>>> exe = paddle.static.Executor(place)
35793579
>>> exe.run(paddle.static.default_startup_program())
35803580
3581-
>>> x = np.array([[7, 2, 4, 5],[4, 3, 2, 9]], dtype=np.int64) # type: ignore[var-annotated]
3581+
>>> x = np.array([[7, 2, 4, 5],[4, 3, 2, 9]], dtype=np.int64)
35823582
>>> out, = exe.run(paddle.static.default_main_program(), feed={'x':x}, fetch_list=[output])
35833583
>>> print(out)
35843584
[[[1. 1. 1.]

python/paddle/static/nn/static_pylayer.py

Lines changed: 36 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -312,46 +312,42 @@ def static_pylayer(forward_fn, inputs, backward_fn=None, name=None):
312312
Examples:
313313
.. code-block:: python
314314
315-
>>> import paddle
316-
>>> import numpy as np
317-
318-
>>> paddle.enable_static()
319-
320-
>>> def forward_fn(x):
321-
... return paddle.exp(x)
322-
323-
>>> def backward_fn(dy):
324-
... return 2 * paddle.exp(dy)
325-
326-
>>> main_program = paddle.static.Program()
327-
>>> start_program = paddle.static.Program()
328-
329-
>>> place = paddle.CPUPlace()
330-
>>> exe = paddle.static.Executor(place)
331-
>>> with paddle.static.program_guard(main_program, start_program):
332-
... data = paddle.static.data(name="X", shape=[None, 5], dtype="float32")
333-
... data.stop_gradient = False
334-
... ret = paddle.static.nn.static_pylayer(forward_fn, [data], backward_fn)
335-
... data_grad = paddle.static.gradients([ret], data)[0]
336-
337-
>>> exe.run(start_program)
338-
>>> x = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32) # type: ignore[var-annotated]
339-
>>> x, x_grad, y = exe.run(
340-
... main_program,
341-
... feed={"X": x},
342-
... fetch_list=[
343-
... data.name,
344-
... data_grad.name,
345-
... ret.name
346-
... ],
347-
... )
348-
349-
>>> print(x)
350-
[[1. 2. 3. 4. 5.]]
351-
>>> print(x_grad)
352-
[[5.4365635 5.4365635 5.4365635 5.4365635 5.4365635]]
353-
>>> print(y)
354-
[[ 2.7182817 7.389056 20.085537 54.59815 148.41316 ]]
315+
>>> import paddle
316+
>>> import numpy as np
317+
318+
>>> paddle.enable_static()
319+
320+
>>> def forward_fn(x):
321+
... return paddle.exp(x)
322+
323+
>>> def backward_fn(dy):
324+
... return 2 * paddle.exp(dy)
325+
326+
>>> main_program = paddle.static.Program()
327+
>>> start_program = paddle.static.Program()
328+
329+
>>> place = paddle.CPUPlace()
330+
>>> exe = paddle.static.Executor(place)
331+
>>> with paddle.static.program_guard(main_program, start_program):
332+
... data = paddle.static.data(name="X", shape=[None, 5], dtype="float32")
333+
... data.stop_gradient = False
334+
... ret = paddle.static.nn.static_pylayer(forward_fn, [data], backward_fn)
335+
... data_grad = paddle.static.gradients([ret], data)[0]
336+
337+
>>> exe.run(start_program)
338+
>>> x = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)
339+
>>> x, x_grad, y = exe.run(
340+
... main_program,
341+
... feed={"X": x},
342+
... fetch_list=[data, data_grad, ret],
343+
... )
344+
345+
>>> print(x)
346+
[[1. 2. 3. 4. 5.]]
347+
>>> print(x_grad)
348+
[[5.4365635 5.4365635 5.4365635 5.4365635 5.4365635]]
349+
>>> print(y)
350+
[[ 2.7182817 7.389056 20.085537 54.59815 148.41316 ]]
355351
"""
356352
assert (
357353
in_dygraph_mode() is False

python/paddle/static/pir_io.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -840,7 +840,7 @@ def load_inference_model_pir(path_prefix, executor, **kwargs):
840840
841841
>>> [inference_program, feed_target_names, fetch_targets] = (
842842
... paddle.static.load_inference_model(path_prefix, exe))
843-
>>> tensor_img = np.array(np.random.random((64, 784)), dtype=np.float32) # type: ignore[var-annotated]
843+
>>> tensor_img = np.array(np.random.random((64, 784)), dtype=np.float32)
844844
>>> results = exe.run(inference_program,
845845
... feed={feed_target_names[0]: tensor_img},
846846
... fetch_list=fetch_targets)

0 commit comments

Comments (0)