PSROIPool + Dispatcher + Autocast + Code Cleanup #2926

Merged
merged 9 commits on Oct 30, 2020
2 changes: 1 addition & 1 deletion torchvision/csrc/PSROIAlign.h
@@ -223,4 +223,4 @@ at::Tensor PSROIAlign_backward_autograd(
channels,
height,
width)[0];
}
}
184 changes: 129 additions & 55 deletions torchvision/csrc/PSROIPool.h
@@ -3,62 +3,68 @@
#include "cpu/vision_cpu.h"

#ifdef WITH_CUDA
#include "autocast.h"
#include "cuda/vision_cuda.h"
#endif
#ifdef WITH_HIP
#include "autocast.h"
#include "hip/vision_cuda.h"
#endif

std::tuple<at::Tensor, at::Tensor> PSROIPool_forward(
// TODO: put this stuff in torchvision namespace

std::tuple<at::Tensor, at::Tensor> ps_roi_pool(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width) {
if (input.is_cuda()) {
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width) {
static auto op = c10::Dispatcher::singleton()
.findSchemaOrThrow("torchvision::ps_roi_pool", "")
.typed<decltype(ps_roi_pool)>();
return op.call(input, rois, spatial_scale, pooled_height, pooled_width);
}

#if defined(WITH_CUDA) || defined(WITH_HIP)
return PSROIPool_forward_cuda(
input, rois, spatial_scale, pooled_height, pooled_width);
#else
TORCH_CHECK(false, "Not compiled with GPU support");
#endif
}
return PSROIPool_forward_cpu(
input, rois, spatial_scale, pooled_height, pooled_width);
std::tuple<at::Tensor, at::Tensor> PSROIPool_autocast(
const at::Tensor& input,
const at::Tensor& rois,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width) {
c10::impl::ExcludeDispatchKeyGuard no_autocast(c10::DispatchKey::Autocast);
auto result = ps_roi_pool(
at::autocast::cached_cast(at::kFloat, input),
at::autocast::cached_cast(at::kFloat, rois),
spatial_scale,
pooled_height,
pooled_width);

return std::make_tuple(
std::get<0>(result).to(input.scalar_type()),
std::get<1>(result).to(input.scalar_type()));
}
#endif

at::Tensor PSROIPool_backward(
at::Tensor _ps_roi_pool_backward(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& mapping_channel,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width) {
if (grad.is_cuda()) {
#if defined(WITH_CUDA) || defined(WITH_HIP)
return PSROIPool_backward_cuda(
grad,
rois,
mapping_channel,
spatial_scale,
pooled_height,
pooled_width,
batch_size,
channels,
height,
width);
#else
TORCH_CHECK(false, "Not compiled with GPU support");
#endif
}
return PSROIPool_backward_cpu(
const at::Tensor& channel_mapping,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t batch_size,
int64_t channels,
int64_t height,
int64_t width) {
static auto op =
c10::Dispatcher::singleton()
.findSchemaOrThrow("torchvision::_ps_roi_pool_backward", "")
.typed<decltype(_ps_roi_pool_backward)>();
return op.call(
grad,
rois,
mapping_channel,
channel_mapping,
spatial_scale,
pooled_height,
pooled_width,
@@ -72,33 +78,36 @@ class PSROIPoolFunction : public torch::autograd::Function<PSROIPoolFunction> {
public:
static torch::autograd::variable_list forward(
torch::autograd::AutogradContext* ctx,
torch::autograd::Variable input,
torch::autograd::Variable rois,
const double spatial_scale,
const int64_t pooled_height,
const int64_t pooled_width) {
const torch::autograd::Variable& input,
const torch::autograd::Variable& rois,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width) {
ctx->saved_data["spatial_scale"] = spatial_scale;
ctx->saved_data["pooled_height"] = pooled_height;
ctx->saved_data["pooled_width"] = pooled_width;
ctx->saved_data["input_shape"] = input.sizes();
auto result = PSROIPool_forward(
input, rois, spatial_scale, pooled_height, pooled_width);
at::AutoNonVariableTypeMode g;
auto result =
ps_roi_pool(input, rois, spatial_scale, pooled_height, pooled_width);

auto output = std::get<0>(result);
auto channel_mapping = std::get<1>(result);
ctx->save_for_backward({rois, channel_mapping});
ctx->mark_non_differentiable({channel_mapping});

return {output, channel_mapping};
}

static torch::autograd::variable_list backward(
torch::autograd::AutogradContext* ctx,
torch::autograd::variable_list grad_output) {
const torch::autograd::variable_list& grad_output) {
// Use data saved in forward
auto saved = ctx->get_saved_variables();
auto rois = saved[0];
auto channel_mapping = saved[1];
auto input_shape = ctx->saved_data["input_shape"].toIntList();
auto grad_in = PSROIPool_backward(
auto grad_in = _ps_roi_pool_backward(
grad_output[0],
rois,
channel_mapping,
@@ -109,6 +118,7 @@ class PSROIPoolFunction : public torch::autograd::Function<PSROIPoolFunction> {
input_shape[1],
input_shape[2],
input_shape[3]);

return {grad_in,
torch::autograd::Variable(),
torch::autograd::Variable(),
@@ -117,13 +127,77 @@ class PSROIPoolFunction : public torch::autograd::Function<PSROIPoolFunction> {
}
};

std::tuple<at::Tensor, at::Tensor> ps_roi_pool(
// TODO: There should be an easier way to do this
class PSROIPoolBackwardFunction
: public torch::autograd::Function<PSROIPoolBackwardFunction> {
public:
static torch::autograd::variable_list forward(
torch::autograd::AutogradContext* ctx,
const torch::autograd::Variable& grad,
const torch::autograd::Variable& rois,
const torch::autograd::Variable& channel_mapping,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t batch_size,
int64_t channels,
int64_t height,
int64_t width) {
at::AutoNonVariableTypeMode g;
auto grad_in = _ps_roi_pool_backward(
grad,
rois,
channel_mapping,
spatial_scale,
pooled_height,
pooled_width,
batch_size,
channels,
height,
width);

return {grad_in};
}

static torch::autograd::variable_list backward(
torch::autograd::AutogradContext* ctx,
const torch::autograd::variable_list& grad_output) {
TORCH_CHECK(0, "double backwards on ps_roi_pool not supported");
}
};

std::tuple<at::Tensor, at::Tensor> PSROIPool_autograd(
const at::Tensor& input,
const at::Tensor& rois,
const double spatial_scale,
const int64_t pooled_height,
const int64_t pooled_width) {
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width) {
auto result = PSROIPoolFunction::apply(
input, rois, spatial_scale, pooled_height, pooled_width);
return std::tuple<at::Tensor, at::Tensor>(result[0], result[1]);

return std::make_tuple(result[0], result[1]);
}

at::Tensor PSROIPool_backward_autograd(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& channel_mapping,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t batch_size,
int64_t channels,
int64_t height,
int64_t width) {
return PSROIPoolBackwardFunction::apply(
grad,
rois,
channel_mapping,
spatial_scale,
pooled_height,
pooled_width,
batch_size,
channels,
height,
width)[0];
}
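
Note: this excerpt only shows the new operator entry points; the matching dispatcher registration (normally kept in a central file such as torchvision/csrc/vision.cpp) is not part of the lines shown here. The following is a rough sketch, under that assumption, of how the CPU/CUDA kernels and the autograd/autocast wrappers above would typically be wired up with the standard TORCH_LIBRARY macros; the schema strings and the registration location are illustrative, not taken from this diff.

// Sketch only: assumes a registration translation unit that includes PSROIPool.h.
TORCH_LIBRARY(torchvision, m) {
  m.def(
      "ps_roi_pool(Tensor input, Tensor rois, float spatial_scale, "
      "int pooled_height, int pooled_width) -> (Tensor, Tensor)");
  m.def(
      "_ps_roi_pool_backward(Tensor grad, Tensor rois, Tensor channel_mapping, "
      "float spatial_scale, int pooled_height, int pooled_width, "
      "int batch_size, int channels, int height, int width) -> Tensor");
}

TORCH_LIBRARY_IMPL(torchvision, CPU, m) {
  m.impl("ps_roi_pool", PSROIPool_forward_cpu);
  m.impl("_ps_roi_pool_backward", PSROIPool_backward_cpu);
}

#if defined(WITH_CUDA) || defined(WITH_HIP)
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
  m.impl("ps_roi_pool", PSROIPool_forward_cuda);
  m.impl("_ps_roi_pool_backward", PSROIPool_backward_cuda);
}

TORCH_LIBRARY_IMPL(torchvision, Autocast, m) {
  m.impl("ps_roi_pool", PSROIPool_autocast);
}
#endif

TORCH_LIBRARY_IMPL(torchvision, Autograd, m) {
  m.impl("ps_roi_pool", PSROIPool_autograd);
  m.impl("_ps_roi_pool_backward", PSROIPool_backward_autograd);
}

With the kernels registered this way, the findSchemaOrThrow(...).typed<decltype(...)>().call(...) pattern used by ps_roi_pool and _ps_roi_pool_backward above routes each call to the implementation matching the active dispatch key (CPU, CUDA, Autograd, or Autocast).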
48 changes: 24 additions & 24 deletions torchvision/csrc/cpu/PSROIPool_cpu.cpp
@@ -12,14 +12,14 @@ template <typename T>
void PSROIPoolForward(
const T* input,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
const T* rois,
const int channels_out,
const int num_rois,
int channels_out,
int num_rois,
T* output,
int* channel_mapping) {
for (int n = 0; n < num_rois; ++n) {
@@ -82,14 +82,14 @@ template <typename T>
void PSROIPoolBackward(
const T* grad_output,
const int* channel_mapping,
const int num_rois,
int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int channels_out,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
int channels_out,
T* grad_input,
const T* rois) {
for (int n = 0; n < num_rois; ++n) {
@@ -146,9 +146,9 @@ void PSROIPoolBackward(
std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cpu(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width) {
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width) {
// Check if input tensors are CPU tensors
TORCH_CHECK(input.device().is_cpu(), "input must be a CPU tensor");
TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor");
@@ -204,13 +204,13 @@ at::Tensor PSROIPool_backward_cpu(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& channel_mapping,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width) {
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t batch_size,
int64_t channels,
int64_t height,
int64_t width) {
// Check if input tensors are CPU tensors
TORCH_CHECK(grad.device().is_cpu(), "grad must be a CPU tensor");
TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor");
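
For reference, a small hypothetical snippet showing what the Autocast wrapper buys at call time. It is a sketch only: it assumes a CUDA build, that registrations along the lines sketched after the PSROIPool.h diff are in place, that PSROIPool.h is on the include path, and that the at::autocast C++ API of the PyTorch version this PR targets (set_enabled / clear_cache) is available.

#include <ATen/autocast_mode.h>
#include <torch/torch.h>

#include "PSROIPool.h"

void ps_roi_pool_autocast_demo() {
  // Half-precision feature map on the GPU; 98 channels = 2 * (7 * 7), so the
  // pooled output has 2 channels. Each ROI row is [batch_idx, x1, y1, x2, y2].
  auto input =
      torch::rand({1, 2 * 7 * 7, 32, 32}, torch::device(torch::kCUDA))
          .to(torch::kHalf);
  auto rois =
      torch::tensor({0.f, 0.f, 0.f, 16.f, 16.f}, torch::device(torch::kCUDA))
          .view({1, 5});

  at::autocast::set_enabled(true);
  // With autocast enabled, the dispatcher picks PSROIPool_autocast, which
  // upcasts both arguments to float, runs the regular kernel, and converts
  // the results back to the input dtype (kHalf here).
  auto result = ps_roi_pool(input, rois, /*spatial_scale=*/1.0,
                            /*pooled_height=*/7, /*pooled_width=*/7);
  at::autocast::clear_cache();
  at::autocast::set_enabled(false);

  TORCH_CHECK(std::get<0>(result).scalar_type() == torch::kHalf);
}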