Skip to content
Merged
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 29 additions & 2 deletions onnxscript/function_libs/torch_aten/ops/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -240,10 +240,37 @@ def aten_any(self: TensorType) -> TensorType:
raise NotImplementedError()


@torch_op("aten::arange")
def aten_arange(end: TReal, dtype: int = -1) -> TReal:
    """arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

    Returns a 1-D tensor holding the values [0, 1, ..., end) with step 1.
    ``dtype == -1`` is the sentinel for "dtype not provided"; in that case the
    output keeps ``end``'s element type.
    """
    if dtype != -1:
        end = op.Cast(end, to=dtype)

    # ONNX Range requires start/limit/delta to share one element type. Raw
    # Python literals 0 and 1 would be materialized as int constants and
    # mismatch a non-int64 `end` (e.g. after the Cast above), so align them
    # to `end`'s type explicitly.
    zero = op.CastLike(0, end)
    one = op.CastLike(1, end)

    return op.Range(zero, end, one)


@torch_op("aten::arange", overload=True)
def aten_arange_start(start: TReal, end: TReal, dtype: int = -1) -> TReal:
    """arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

    Returns a 1-D tensor holding the values [start, start + 1, ..., end) with
    step 1. ``dtype == -1`` is the sentinel for "dtype not provided"; when it
    is omitted, ``start`` and ``end`` are assumed to already share an element
    type (they must be cast by the caller before entering the function).
    """
    if dtype != -1:
        start = op.Cast(start, to=dtype)
        end = op.Cast(end, to=dtype)

    # ONNX Range requires start/limit/delta to share one element type; a raw
    # Python literal 1 would mismatch a non-int64 start/end, so align the
    # step to `start`'s (possibly just-cast) type.
    one = op.CastLike(1, start)

    return op.Range(start, end, one)


@torch_op("aten::arange", overload=True)
def aten_arange_start_step(start: TReal, end: TReal, step: TReal, dtype: int = -1) -> TReal:
    """arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

    Returns a 1-D tensor holding the values [start, start + step, ...) up to
    but excluding ``end``. ``dtype == -1`` is the sentinel for "dtype not
    provided". NOTE(review): when dtype is omitted, start/end/step are passed
    to Range uncast, so they presumably must already share an element type —
    confirm that callers guarantee this.
    """
    # arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

    if dtype != -1:
        # Cast all three Range inputs so they share the requested element type.
        start = op.Cast(start, to=dtype)
        end = op.Cast(end, to=dtype)
        step = op.Cast(step, to=dtype)

    return op.Range(start, end, step)


def aten_arccos(self: TensorType) -> TensorType:
Expand Down