-
Notifications
You must be signed in to change notification settings - Fork 71
feat(atenlib): arange with overloads #285
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 5 commits
7cfae57
7e24cf9
3962f7a
be2d43f
1d598d4
20bbca1
a10443d
8074704
0fba26f
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -240,10 +240,37 @@ def aten_any(self: TensorType) -> TensorType: | |
raise NotImplementedError() | ||
|
||
|
||
@torch_op("aten::arange")
def aten_arange(end: TReal, dtype: int = -1) -> TReal:
    """ONNX implementation of aten::arange (the single-argument overload).

    arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None,
           Device? device=None, bool? pin_memory=None) -> Tensor

    dtype == -1 is the sentinel for "dtype not specified"; in that case the
    element type of `end` determines the output type.
    """
    if dtype != -1:
        end = op.Cast(end, to=dtype)
    # ONNX Range requires start, limit and delta to share one element type.
    # Bare Python literals would materialize as int64 constants and produce an
    # invalid model when `end` is a float tensor, so match them to `end`.
    zero = op.CastLike(0, end)
    one = op.CastLike(1, end)
    return op.Range(zero, end, one)
|
||
|
||
@torch_op("aten::arange", overload=True)
def aten_arange_start(start: TReal, end: TReal, dtype: int = -1) -> TReal:
    """ONNX implementation of aten::arange.start (start/end overload, step of 1).

    arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None,
                 Layout? layout=None, Device? device=None,
                 bool? pin_memory=None) -> Tensor

    dtype == -1 is the sentinel for "dtype not specified".
    """
    if dtype != -1:
        start = op.Cast(start, to=dtype)
        end = op.Cast(end, to=dtype)
    # NOTE(review): when dtype == -1, `start` and `end` are assumed to already
    # share an element type — per the PR discussion, callers must cast before
    # entering the function. TODO confirm the exporter guarantees this.
    # The step must match the tensor element type for op.Range; a bare Python
    # literal `1` would default to int64 and break float inputs.
    one = op.CastLike(1, end)
    return op.Range(start, end, one)
|
||
|
||
@torch_op("aten::arange", overload=True)
def aten_arange_start_step(start: TReal, end: TReal, step: TReal, dtype: int = -1) -> TReal:
    """ONNX implementation of aten::arange.start_step (start/end/step overload).

    dtype == -1 is the sentinel for "dtype not specified"; the inputs are then
    passed to Range unchanged.
    """
    # arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

    if dtype != -1:
        # Cast all three Range inputs to the requested type so they agree, as
        # ONNX Range requires start/limit/delta to share one element type.
        start = op.Cast(start, to=dtype)
        end = op.Cast(end, to=dtype)
        step = op.Cast(step, to=dtype)

    # NOTE(review): when dtype == -1 this assumes start/end/step already share
    # a dtype — callers are expected to cast beforehand. TODO confirm.
    return op.Range(start, end, step)
|
||
|
||
def aten_arccos(self: TensorType) -> TensorType: | ||
|
Uh oh!
There was an error while loading. Please reload this page.