Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
62 changes: 59 additions & 3 deletions onnxscript/function_libs/torch_aten/ops/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,13 +13,14 @@

from typing import Any, Optional, Sequence, Union

from onnxscript import BOOL, DOUBLE, FLOAT, INT64
from onnxscript import BOOL, DOUBLE, FLOAT, INT16, INT32, INT64
from onnxscript.function_libs.torch_aten.registration import torch_op
from onnxscript.function_libs.torch_aten.typing import (
TFloat,
TFloatOrBFloat16,
TInt,
TReal,
TRealUnlessFloat16OrInt8,
TRealUnlessInt16OrInt8,
TTensor,
)
Expand Down Expand Up @@ -226,10 +227,65 @@ def aten_any(self: TensorType) -> TensorType:
raise NotImplementedError()


@torch_op("aten::arange")
def aten_arange(end: Union[DOUBLE, FLOAT, INT16, INT32, INT64], dtype: int = -1) -> TensorType:
    # arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
    #
    # ONNX decomposition of aten::arange(end): emits Range(0, end, 1),
    # i.e. the integer-stepped sequence [0, 1, ..., < end].
    # `dtype` is an ONNX element-type code; -1 is the sentinel meaning
    # "dtype not specified" (default arguments cannot be None here).

    # Cast input to double if dtype is specified, because the input dtype may be e.g. bool
    # which Range does not support. The output type is ensured because the output
    # is casted to the specified dtype.
    if dtype != -1:
        end = op.Cast(end, to=DOUBLE.dtype)

    # NOTE(review): the literal 0 and 1 here are int constants while `end` may be
    # DOUBLE after the cast above; ONNX Range requires start/limit/delta to share
    # one element type — confirm the onnxscript converter promotes the literals.
    result = op.Range(0, end, 1)
    if dtype != -1:
        # Cast the Range output to the caller-requested element type.
        result = op.Cast(result, to=dtype)

    return result


@torch_op("aten::arange", overload=True)
def aten_arange_start(
    start: TRealUnlessFloat16OrInt8, end: TRealUnlessFloat16OrInt8, dtype: int = -1
) -> TensorType:
    # arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
    #
    # ONNX decomposition of aten::arange(start, end): Range(start, end, 1).
    # `dtype` is an ONNX element-type code; -1 means "dtype not specified".
    # Per the PR discussion: when dtype is -1, `start`/`end` must already be
    # of a Range-compatible type — callers are expected to cast beforehand.

    # Cast input to double if dtype is specified, because the input dtype may be e.g. bool
    # which Range does not support. The output type is ensured because the output
    # is casted to the specified dtype.
    if dtype != -1:
        start = op.Cast(start, to=DOUBLE.dtype)
        end = op.Cast(end, to=DOUBLE.dtype)

    # NOTE(review): the step literal 1 is an int constant while start/end may be
    # DOUBLE after the casts above — confirm the converter promotes it to match.
    result = op.Range(start, end, 1)
    if dtype != -1:
        # Cast the Range output to the caller-requested element type.
        result = op.Cast(result, to=dtype)

    return result


@torch_op("aten::arange", overload=True)
def aten_arange_start_step(
    start: TRealUnlessFloat16OrInt8,
    end: TRealUnlessFloat16OrInt8,
    step: TRealUnlessFloat16OrInt8,
    dtype: int = -1,
) -> TensorType:
    # arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
    #
    # ONNX decomposition of aten::arange(start, end, step): Range(start, end, step).
    # `dtype` is an ONNX element-type code; -1 means "dtype not specified".
    # When dtype is -1, start/end/step must already share a Range-compatible type
    # (callers cast beforehand — see the single- and two-argument overloads).

    # Cast input to double if dtype is specified, because the input dtype may be e.g. bool
    # which Range does not support. The output type is ensured because the output
    # is casted to the specified dtype.
    if dtype != -1:
        start = op.Cast(start, to=DOUBLE.dtype)
        end = op.Cast(end, to=DOUBLE.dtype)
        step = op.Cast(step, to=DOUBLE.dtype)

    result = op.Range(start, end, step)
    if dtype != -1:
        # Cast the Range output to the caller-requested element type.
        result = op.Cast(result, to=dtype)

    return result


def aten_arccos(self: TensorType) -> TensorType:
Expand Down
7 changes: 5 additions & 2 deletions onnxscript/function_libs/torch_aten/typing.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@
]
_FloatType = Union[FLOAT16, FLOAT, DOUBLE]
_IntType = Union[INT8, INT16, INT32, INT64]
_RealType = Union[
RealType = Union[
BFLOAT16,
FLOAT16,
FLOAT,
Expand All @@ -57,7 +57,10 @@
TFloat = TypeVar("TFloat", bound=_FloatType)
TFloatOrBFloat16 = TypeVar("TFloatOrBFloat16", bound=Union[FLOAT16, FLOAT, DOUBLE, BFLOAT16])
TInt = TypeVar("TInt", bound=_IntType)
TReal = TypeVar("TReal", bound=_RealType)
TReal = TypeVar("TReal", bound=RealType)
TRealUnlessInt16OrInt8 = TypeVar(
"TRealUnlessInt16OrInt8", bound=Union[FLOAT16, FLOAT, DOUBLE, BFLOAT16, INT32, INT64]
)
TRealUnlessFloat16OrInt8 = TypeVar(
"TRealUnlessFloat16OrInt8", bound=Union[DOUBLE, FLOAT, INT16, INT32, INT64]
)
32 changes: 32 additions & 0 deletions onnxscript/test/function_libs/torch_aten/ops_correctness_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,6 +195,9 @@ def _upsample_kwargs_wrangler(kwargs: dict[str, Any]) -> dict[str, Any]:
"addmm": core_ops.aten_addmm,
"amax": (core_ops.aten_amax, _amax_amin_kwargs_wrangler),
"amin": (core_ops.aten_amin, _amax_amin_kwargs_wrangler),
"arange_start_step": core_ops.aten_arange_start_step,
"arange_start": core_ops.aten_arange_start,
"arange": core_ops.aten_arange,
"asin": core_ops.aten_asin,
"asinh": core_ops.aten_asinh,
"atan": core_ops.aten_atan,
Expand Down Expand Up @@ -289,6 +292,26 @@ def _upsample_kwargs_wrangler(kwargs: dict[str, Any]) -> dict[str, Any]:


SKIP_SUBTESTS: tuple[DecorateMeta, ...] = (
skip(
"arange",
matcher=lambda sample: len(sample.args) != 0,
reason="arange overload takes single argument",
),
skip(
"arange",
matcher=lambda sample: sample.kwargs.get("end") is not None,
reason="arange overload does not support positional 'end' argument",
),
skip(
"arange_start",
matcher=lambda sample: len(sample.args) != 1,
reason="arange_start overload takes two arguments (input, start)",
),
skip(
"arange_start_step",
matcher=lambda sample: len(sample.args) != 2,
reason="arange_start_step overload takes three arguments (input, start, step)",
),
skip(
"div",
matcher=lambda sample: sample.kwargs.get("rounding_mode") is not None,
Expand Down Expand Up @@ -343,6 +366,15 @@ def _upsample_kwargs_wrangler(kwargs: dict[str, Any]) -> dict[str, Any]:
),
)

# Clone the "arange" OpInfo under the overload names so each ONNX overload
# (aten_arange_start, aten_arange_start_step) is exercised against the same
# sample inputs; the SKIP_SUBTESTS matchers above route each sample to the
# overload with the matching positional-argument count.
duplicate_opinfo(
    OPS_DB,
    "arange",
    (
        "arange_start",
        "arange_start_step",
    ),
)


# END OF SECTION TO MODIFY #####################################################

Expand Down