Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions python/paddle/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -698,6 +698,7 @@ def new_init(self, *args, **kwargs):
where,
where_,
)
from .tensor.size import Size
from .tensor.stat import (
mean,
median,
Expand Down Expand Up @@ -995,6 +996,7 @@ def __dir__(self):
'logit',
'logit_',
'LazyGuard',
'Size',
'sign',
'is_empty',
'equal',
Expand Down
6 changes: 6 additions & 0 deletions python/paddle/base/dygraph/tensor_patch_methods.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,7 @@ def _to_static_var(self, to_parameter=False, **kwargs):
'offset',
'__cuda_array_interface__',
'itemsize',
'is_cuda',
]
param_keys = ['stop_gradient', 'trainable']
if isinstance(self, EagerParamBase):
Expand Down Expand Up @@ -1156,6 +1157,10 @@ def cuda(
res.persistable = self.persistable
return res

@property
def is_cuda(self: Tensor) -> bool:
    """Return True when this Tensor lives on a GPU place.

    Delegates to ``self.place.is_gpu_place()``, so any non-GPU place
    (CPU, XPU, pinned memory, ...) yields False.
    """
    return self.place.is_gpu_place()

@framework.dygraph_only
def pin_memory(self: Tensor, blocking: bool = True) -> Tensor:
if (
Expand Down Expand Up @@ -1463,6 +1468,7 @@ def __dlpack__(self, stream=None):
("backward", backward),
("clear_grad", clear_grad),
("inplace_version", inplace_version),
("is_cuda", is_cuda),
("gradient", gradient),
("apply_", apply_),
("apply", apply),
Expand Down
21 changes: 21 additions & 0 deletions python/paddle/pir/math_op_patch.py
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,26 @@ def cuda(self, device_id=None, blocking=True):
# 1 means cuda place, see paddle/phi/kernels/memcpy_kernel.cc
return _C_ops.memcpy(self, 1)

@property
def is_cuda(self):
    """Best-effort ``is_cuda`` for ``Value`` in pir (static graph) mode.

    A ``Value`` has no real 'is_cuda' notion in static graph mode, but
    exposing this interface greatly facilitates dy2static. We therefore
    emit a warning and return a best-effort bool: True if the value's
    own place (when resolvable) is a CUDAPlace, otherwise whether the
    currently expected place is a CUDAPlace.
    """
    warnings.warn(
        "Value do not have 'is_cuda' interface for pir graph mode, try not to use it."
    )
    from paddle import framework

    # Prefer the value's own place when it can be resolved.
    if hasattr(self, 'place') and isinstance(
        self.place, framework.core.CUDAPlace
    ):
        return True
    # Fall back to the global expected place — presumably set via
    # paddle.set_device; NOTE(review): confirm this matches caller intent.
    expected_place = framework._current_expected_place_()
    return isinstance(expected_place, framework.core.CUDAPlace)

@property
def place(self):
"""
Expand Down Expand Up @@ -1415,6 +1435,7 @@ def itemsize(self) -> int:
('cuda', cuda),
('place', place),
('contiguous', contiguous),
('is_cuda', is_cuda),
('is_contiguous', is_contiguous),
('item', _item),
('dim', dim),
Expand Down
103 changes: 103 additions & 0 deletions python/paddle/tensor/size.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from collections.abc import Iterable, Sequence


class Size(tuple):
    """The result type of a call to ``paddle.Tensor.size()``.

    It describes the size of all dimensions of the original tensor. As a
    subclass of tuple, it supports all common sequence operations like
    indexing, slicing, concatenation, etc.

    Args:
        *args: Either a sequence of integers or multiple integer arguments
            representing dimensions.

    Returns:
        Size: A special tuple subclass representing tensor dimensions.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> size = paddle.Size([2, 3, 4])
            >>> print(size)
            paddle.Size([2, 3, 4])
    """

    def __new__(cls, *args, **kwargs):
        # Accept both Size([2, 3, 4]) and Size(2, 3, 4) call styles.
        if len(args) == 1 and isinstance(args[0], Sequence):
            seq = args[0]
        else:
            seq = args

        # A single 1-D array-like (e.g. a 1-D tensor or ndarray) supplies
        # all dimensions at once via its tolist().
        if len(seq) == 1 and hasattr(seq[0], 'ndim') and seq[0].ndim == 1:
            seq = seq[0].tolist()

        converted = []
        for item in seq:
            # __index__ accepts int, bool, numpy integers and 0-D tensors
            # while rejecting floats and strings.
            if hasattr(item, '__index__'):
                converted.append(int(item.__index__()))
            else:
                raise TypeError(
                    f"paddle.Size() takes an iterable of 'int' (got {type(item).__name__})"
                )

        return super().__new__(cls, converted)

    def __repr__(self):
        if not self:
            return "paddle.Size([])"
        return f"paddle.Size([{', '.join(map(str, self))}])"

    def __add__(self, other: tuple):
        # Only tuples (including Size) may be concatenated, mirroring tuple.
        if isinstance(other, tuple):
            return Size(super().__add__(tuple(other)))
        raise TypeError(
            f"can only concatenate tuple (not {type(other).__name__}) to Size"
        )

    def __radd__(self, other: tuple):
        if isinstance(other, tuple):
            return Size(tuple(other).__add__(self))
        raise TypeError(
            f"can only concatenate tuple (not {type(other).__name__}) to Size"
        )

    def __mul__(self, other: int):
        # Repetition only by an int; anything else defers to the other
        # operand (and ultimately raises TypeError).
        if isinstance(other, int):
            return Size(super().__mul__(other))
        return NotImplemented

    __rmul__ = __mul__

    def numel(self):
        """Return the number of elements a tensor of this shape holds.

        The empty shape yields 1 (a 0-D scalar).
        """
        return functools.reduce(lambda x, y: x * y, self, 1)

    def __reduce__(self):
        # Pickle via the public constructor so unpickling rebuilds a Size.
        return (Size, (tuple(self),))

    def __concat__(self, other: Iterable):
        if not isinstance(other, (tuple, Size)):
            raise TypeError(
                f"can only concatenate tuple (not {type(other).__name__}) to paddle.Size"
            )
        return self + other

    def __getitem__(self, key):
        result = super().__getitem__(key)
        # Slicing preserves the Size type; integer indexing returns int.
        if isinstance(key, slice):
            return Size(result)
        return result
34 changes: 34 additions & 0 deletions test/legacy_test/test_eager_tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -1757,6 +1757,40 @@ def test_bump_inplace_version(self):
self.assertEqual(var.inplace_version, 2)


class TestEagerTensorIsCuda(unittest.TestCase):
    def test_dynamic_is_cuda(self):
        """In dygraph mode, is_cuda must reflect the tensor's actual place."""
        paddle.disable_static()
        cpu_tensor = paddle.to_tensor(
            [2, 3], dtype="float32", place=paddle.CPUPlace()
        )
        self.assertFalse(cpu_tensor.is_cuda)

        if paddle.is_compiled_with_cuda():
            gpu_tensor = paddle.to_tensor(
                [2, 3], dtype="float32", place=paddle.CUDAPlace(0)
            )
            self.assertTrue(gpu_tensor.is_cuda)

    def test_static_is_cuda(self):
        """In static (pir) mode, is_cuda on a Value is best-effort True on GPU."""
        paddle.enable_static()
        # try/finally so a failing assertion cannot leak static mode into
        # the tests that run after this one.
        try:
            if paddle.is_compiled_with_cuda():
                with paddle.static.program_guard(paddle.static.Program()):
                    data = paddle.static.data(
                        name='data', shape=[2], dtype='float32'
                    )
                    out = data + 1.0

                    gpu_exe = paddle.static.Executor(paddle.CUDAPlace(0))
                    # Run for its side effect only; the fetched result
                    # itself is not asserted on.
                    gpu_exe.run(
                        feed={'data': np.array([1.0, 2.0], dtype='float32')},
                        fetch_list=[out],
                    )
                    self.assertTrue(data.is_cuda)
        finally:
            paddle.disable_static()


class TestEagerTensorSlice(unittest.TestCase):
def test_slice(self):
paddle.disable_static()
Expand Down
172 changes: 172 additions & 0 deletions test/legacy_test/test_size.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,172 @@
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np

import paddle


class TestPaddleSize(unittest.TestCase):
    """Unit tests for paddle.Size: construction, tuple ops, and protocols."""

    # TODO: enable when paddle.Tensor.size() is implemented
    # NOTE(review): for shape (3, 4, 5), size(dim=1) should presumably be 4,
    # not 3 — verify the expected value before enabling this test.
    # def test_tensor_size(self):
    #     x = paddle.empty(3, 4, 5)
    #     size = x.size()
    #     self.assertEqual(size, (3, 4, 5))
    #     self.assertIsInstance(size, paddle.Size)

    #     int_size = x.size(dim=1)
    #     self.assertEqual(int_size, 3)
    #     self.assertIsInstance(int_size, int)

    def test_creation_size(self):
        """Size accepts no args, lists, tuples, 0-D/1-D tensors, bools, numpy ints."""
        size = paddle.Size()
        self.assertEqual(size, ())
        self.assertIsInstance(size, tuple)
        self.assertIsInstance(size, paddle.Size)

        size = paddle.Size([2, 3, 4])
        self.assertEqual(size, (2, 3, 4))
        self.assertIsInstance(size, paddle.Size)

        size = paddle.Size((2, 3, 4))
        self.assertEqual(size, (2, 3, 4))
        self.assertIsInstance(size, paddle.Size)

        # 0-D tensors are converted through __index__.
        tensor1 = paddle.to_tensor(2)
        tensor2 = paddle.to_tensor(3)
        size = paddle.Size([tensor1, tensor2])
        self.assertEqual(size, (2, 3))
        self.assertIsInstance(size, paddle.Size)

        # A single 1-D tensor supplies all dimensions at once.
        tensor3 = paddle.to_tensor([2, 3])
        size = paddle.Size(tensor3)
        self.assertEqual(size, (2, 3))
        self.assertIsInstance(size, paddle.Size)

        # bools are valid indices: True -> 1, False -> 0.
        size = paddle.Size([True, False])
        self.assertEqual(size, (1, 0))
        self.assertIsInstance(size, paddle.Size)

        size = paddle.Size([np.int64(8), np.int64(8)])
        self.assertEqual(size, (8, 8))
        self.assertIsInstance(size, paddle.Size)

    def test_creation_invalid_type(self):
        """Non-index item types must raise TypeError."""
        with self.assertRaises(TypeError):
            paddle.Size([1.5, 2.5])  # float not allowed
        with self.assertRaises(TypeError):
            paddle.Size(["a", "b"])  # string not allowed

    def test_creation_from_mixed_types(self):
        """ints and 0-D tensors may be mixed in one sequence."""
        size = paddle.Size([1, paddle.to_tensor(2), 3])
        self.assertEqual(size, (1, 2, 3))
        self.assertIsInstance(size, paddle.Size)

    def test_getitem_int(self):
        """Integer indexing returns plain ints."""
        size = paddle.Size([2, 3, 4])
        self.assertEqual(size[0], 2)
        self.assertEqual(size[1], 3)
        self.assertEqual(size[2], 4)
        self.assertIsInstance(size[0], int)

    def test_getitem_slice(self):
        """Slicing preserves the Size type."""
        size = paddle.Size([2, 3, 4, 5])
        sliced = size[1:3]
        self.assertEqual(sliced, (3, 4))
        self.assertIsInstance(sliced, paddle.Size)

    def test_addition(self):
        """Size + tuple concatenates and stays a Size."""
        size1 = paddle.Size([2, 3])
        size2 = (4, 5)
        result = size1 + size2
        self.assertEqual(result, (2, 3, 4, 5))
        self.assertIsInstance(result, paddle.Size)

    def test_raddition(self):
        """tuple + Size concatenates and is promoted to Size."""
        size1 = paddle.Size([2, 3])
        size2 = (4, 5)
        result = size2 + size1
        self.assertEqual(result, (4, 5, 2, 3))
        self.assertIsInstance(result, paddle.Size)

    def test_addition_invalid_type(self):
        """Concatenation with a non-tuple raises TypeError."""
        size = paddle.Size([2, 3])
        with self.assertRaises(TypeError):
            size + "abc"  # string not allowed

    def test_multiplication(self):
        """Size * int repeats the dimensions, like tuple repetition."""
        size = paddle.Size([2, 3])
        result = size * 2
        self.assertEqual(result, (2, 3, 2, 3))
        self.assertIsInstance(result, paddle.Size)

    def test_rmultiplication(self):
        """int * Size works symmetrically via __rmul__."""
        size = paddle.Size([2, 3])
        result = 2 * size
        self.assertEqual(result, (2, 3, 2, 3))
        self.assertIsInstance(result, paddle.Size)

    def test_multiplication_invalid_type(self):
        """Repetition by a non-int raises TypeError."""
        size = paddle.Size([2, 3])
        with self.assertRaises(TypeError):
            size * 2.5  # float not allowed
        with self.assertRaises(TypeError):
            size * "a"  # string not allowed

    def test_repr(self):
        """repr/str use the 'paddle.Size([...])' form, even when empty."""
        size = paddle.Size([2, 3, 4])
        size1 = paddle.Size()
        self.assertEqual(repr(size), "paddle.Size([2, 3, 4])")
        self.assertEqual(str(size), "paddle.Size([2, 3, 4])")
        self.assertEqual(str(size1), "paddle.Size([])")

    def test_numel(self):
        """numel is the product of all dimensions."""
        size = paddle.Size([2, 3, 4])
        self.assertEqual(size.numel(), 24)  # 2*3*4=24

    def test_empty_size_numel(self):
        """The empty shape describes a 0-D scalar, so numel is 1."""
        size = paddle.Size([])
        self.assertEqual(size.numel(), 1)  # Empty size has numel=1

    def test_concat_method(self):
        """__concat__ behaves like + for tuples."""
        size1 = paddle.Size([1, 2])
        size2 = (3, 4)
        result = size1.__concat__(size2)
        self.assertEqual(result, (1, 2, 3, 4))
        self.assertIsInstance(result, paddle.Size)

    def test_concat_invalid_type(self):
        """__concat__ with a non-tuple raises TypeError."""
        size = paddle.Size([1, 2])
        with self.assertRaises(TypeError):
            size.__concat__("invalid")  # string not allowed

    def test_reduce(self):
        """__reduce__ enables pickling and rebuilds an equal Size."""
        size = paddle.Size([2, 3])
        reduced = size.__reduce__()
        self.assertEqual(reduced, (paddle.Size, ((2, 3),)))
        # Test reconstruction
        new_size = reduced[0](*reduced[1])
        self.assertEqual(new_size, size)
        self.assertIsInstance(new_size, paddle.Size)

    def test_count_index(self):
        """count/index are inherited unchanged from tuple."""
        x = paddle.Size([2, 3]).count(2)
        y = paddle.Size([2, 3]).index(3, 0)
        self.assertEqual(x, 1)
        self.assertEqual(y, 1)


# Allow running this test module directly (outside the CI test runner).
if __name__ == "__main__":
    unittest.main()
Loading