@@ -305,6 +305,8 @@ std::tuple<at::Tensor, at::Tensor> DistanceBackwardCuda(
   at::CheckedFrom c = "DistanceBackwardCuda";
   at::checkAllSameGPU(c, {objects_t, targets_t, idx_objects_t, grad_dists_t});
   at::checkAllSameType(c, {objects_t, targets_t, grad_dists_t});
+  // This is nondeterministic because atomicAdd
+  at::globalContext().alertNotDeterministic("DistanceBackwardCuda");
 
   // Set the device for the kernel launch based on the device of the input
   at::cuda::CUDAGuard device_guard(objects.device());
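
The reason for the alert is stated in the added comment: these backward kernels accumulate gradients with `atomicAdd`, so when several points scatter into the same output slot the additions land in whatever order the hardware schedules them, and floating-point addition is not associative. A minimal sketch of the pattern, using hypothetical names rather than the actual PyTorch3D kernel:

```cuda
#include <cstdint>

// Sketch of an atomicAdd-based backward: each thread scatter-adds its
// point's gradient contribution into a shared per-target buffer. atomicAdd
// keeps the updates race-free, but in no particular order, so the float
// sums can differ in their last bits from run to run.
__global__ void ScatterAddGradKernel(
    const float* grad_dists, // [P] upstream gradient, one per point
    const int64_t* idx,      // [P] index of the target each point matched
    float* grad_targets,     // [T] accumulated gradient per target
    int P) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < P) {
    // Many points may map to the same target; the accumulation order across
    // threads is unspecified, hence the nondeterminism alert.
    atomicAdd(grad_targets + idx[i], grad_dists[i]);
  }
}
```

The results are still correct up to rounding; the alert only tells users who asked for bitwise reproducibility that this op cannot promise it.
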
@@ -624,6 +626,9 @@ std::tuple<at::Tensor, at::Tensor> PointFaceArrayDistanceBackwardCuda(
   at::CheckedFrom c = "PointFaceArrayDistanceBackwardCuda";
   at::checkAllSameGPU(c, {points_t, tris_t, grad_dists_t});
   at::checkAllSameType(c, {points_t, tris_t, grad_dists_t});
+  // This is nondeterministic because atomicAdd
+  at::globalContext().alertNotDeterministic(
+      "PointFaceArrayDistanceBackwardCuda");
 
   // Set the device for the kernel launch based on the device of the input
   at::cuda::CUDAGuard device_guard(points.device());
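
For context, `alertNotDeterministic` is a no-op in the default configuration; it only fires when the caller has opted into deterministic algorithms. A rough host-side sketch of that behavior, assuming the two-argument `setDeterministicAlgorithms(deterministic, warn_only)` overload available in recent PyTorch:

```cuda
#include <ATen/Context.h>

void Example() {
  // Equivalent to torch.use_deterministic_algorithms(True) from Python.
  at::globalContext().setDeterministicAlgorithms(
      /*b=*/true, /*warn_only=*/false);

  // Any op that reaches this call now raises an error naming the op, so a
  // user who relies on determinism finds out instead of silently getting
  // run-to-run differences.
  at::globalContext().alertNotDeterministic("PointFaceArrayDistanceBackwardCuda");
}
```

With `warn_only=true` the same call downgrades to a warning, matching `torch.use_deterministic_algorithms(True, warn_only=True)` on the Python side.
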
@@ -787,6 +792,9 @@ std::tuple<at::Tensor, at::Tensor> PointEdgeArrayDistanceBackwardCuda(
   at::CheckedFrom c = "PointEdgeArrayDistanceBackwardCuda";
   at::checkAllSameGPU(c, {points_t, segms_t, grad_dists_t});
   at::checkAllSameType(c, {points_t, segms_t, grad_dists_t});
+  // This is nondeterministic because atomicAdd
+  at::globalContext().alertNotDeterministic(
+      "PointEdgeArrayDistanceBackwardCuda");
 
   // Set the device for the kernel launch based on the device of the input
   at::cuda::CUDAGuard device_guard(points.device());
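
The change only flags the nondeterminism rather than removing it. For context (not part of this commit), one common way to make such a backward reproducible is to give every point a private output slot and reduce in a fixed order afterwards, at the cost of extra memory and a second pass; a hypothetical sketch:

```cuda
// Context only, not part of this commit: a deterministic variant gives each
// point a private slot and performs the reduction in a fixed order later,
// trading extra memory and a second pass for reproducibility.
__global__ void PerPointGradKernel(
    const float* grad_dists, // [P] upstream gradient per point
    float* per_point_grad,   // [P] private slot per point: no racing writes
    int P) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < P) {
    per_point_grad[i] = grad_dists[i]; // local contribution, no atomics
  }
}
// A second pass then sums per_point_grad into grad_targets in a fixed order,
// e.g. one thread per target walking its points, or a segmented reduction
// over points sorted by target index. Either way the summation order is
// reproducible, unlike the atomicAdd scatter used by these kernels.
```
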