@@ -488,9 +488,10 @@ Tensor& logsumexp_out(Tensor& result, const Tensor& self, DimnameList dims, bool
 static Tensor& norm_out(Tensor &result, const Tensor &self, optional<Scalar> opt_p,
                         IntArrayRef dim, bool keepdim, optional<ScalarType> opt_dtype) {
   auto p = opt_p.value_or(2.0);
-  TORCH_CHECK(self.options().backend() == Backend::CPU || self.options().backend() == Backend::CUDA,
-              "norm only supports CPU AND CUDA backend, got: ", toString(self.options().backend()));
-
+  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
+              "norm only supports CPU AND CUDA device type, got: ", self.device().type());
+  TORCH_CHECK(self.layout() == Layout::Strided,
+              "norm only supports strided layout, got: ", self.layout());
 
   ScalarType scalarType = opt_dtype.has_value() ? opt_dtype.value() : self.scalar_type();
   TORCH_CHECK(
@@ -513,8 +514,10 @@ static inline Tensor _norm(const Tensor &self, Scalar p) {
   if (self.is_sparse()) {
     return at::native_norm(self, p);
   } else {
-    TORCH_CHECK(self.options().backend() == Backend::CPU || self.options().backend() == Backend::CUDA,
-                "norm only supports CPU AND CUDA backend, got: ", toString(self.options().backend()));
+    TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
+                "norm only supports CPU AND CUDA device type, got: ", self.device().type());
+    TORCH_CHECK(self.layout() == Layout::Strided,
+                "norm only supports strided layout, got: ", self.layout());
     TORCH_CHECK(at::isFloatingType(self.scalar_type()) || at::isComplexType(self.scalar_type()),
                 "norm only supports floating-point dtypes");
 
@@ -565,9 +568,10 @@ inline Tensor & _all(Tensor & result, TensorIterator & iter) {
 }
 
 Tensor all(const Tensor& self) {
-  TORCH_CHECK(self.options().backend() == Backend::CPU ||
-              self.options().backend() == Backend::CUDA, "all only supports CPU AND CUDA "
-              "backend, got: ", toString(self.options().backend()));
+  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
+              "all only supports CPU AND CUDA device type, got: ", self.device().type());
+  TORCH_CHECK(self.layout() == Layout::Strided,
+              "all only supports strided layout, got: ", self.layout());
   TORCH_CHECK(self.scalar_type() == at::ScalarType::Byte || self.scalar_type() == at::ScalarType::Bool,
               "all only supports torch.uint8 and torch.bool dtypes");
 
@@ -583,9 +587,10 @@ Tensor all(const Tensor& self, int64_t dim, bool keepdim) {
 }
 
 Tensor &all_out(Tensor &result, const Tensor &self, int64_t dim, bool keepdim) {
-  TORCH_CHECK(self.options().backend() == Backend::CPU ||
-              self.options().backend() == Backend::CUDA, "all only supports CPU AND CUDA "
-              "backend, got: ", toString(self.options().backend()));
+  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
+              "all only supports CPU AND CUDA device type, got: ", self.device().type());
+  TORCH_CHECK(self.layout() == Layout::Strided,
+              "all only supports strided layout, got: ", self.layout());
   TORCH_CHECK(self.scalar_type() == at::ScalarType::Byte || self.scalar_type() == at::ScalarType::Bool,
               "all only supports torch.uint8 and torch.bool dtypes");
   dim = maybe_wrap_dim(dim, self.dim());
@@ -609,11 +614,10 @@ inline Tensor & _any(Tensor & result, TensorIterator & iter) {
 }
 
 Tensor any(const Tensor& self) {
-  TORCH_CHECK(self.options().backend() == Backend::CPU ||
-              self.options().backend() == Backend::CUDA ||
-              self.options().backend() == Backend::SparseCPU ||
-              self.options().backend() == Backend::SparseCUDA, "any only supports CPU, CUDA, "
-              "SparseCPU and SparseCUDA backend, got: ", toString(self.options().backend()));
+  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
+              "any only supports CPU AND CUDA device type, got: ", self.device().type());
+  TORCH_CHECK(self.layout() == Layout::Strided || self.layout() == Layout::Sparse,
+              "any only supports strided AND sparse layout, got: ", self.layout());
   TORCH_CHECK(self.scalar_type() == at::ScalarType::Byte || self.scalar_type() == at::ScalarType::Bool,
               "all only supports torch.uint8 and torch.bool dtypes");
 
@@ -629,9 +633,10 @@ Tensor any(const Tensor& self, int64_t dim, bool keepdim) {
 }
 
 Tensor &any_out(Tensor &result, const Tensor &self, int64_t dim, bool keepdim) {
-  TORCH_CHECK(self.options().backend() == Backend::CPU ||
-              self.options().backend() == Backend::CUDA, "any only supports CPU AND CUDA "
-              "backend, got: ", toString(self.options().backend()));
+  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
+              "any only supports CPU AND CUDA device type, got: ", self.device().type());
+  TORCH_CHECK(self.layout() == Layout::Strided,
+              "any only supports strided layout, got: ", self.layout());
   TORCH_CHECK(self.scalar_type() == at::ScalarType::Byte || self.scalar_type() == at::ScalarType::Bool,
               "all only supports torch.uint8 and torch.bool dtypes");
   dim = maybe_wrap_dim(dim, self.dim());
@@ -730,8 +735,10 @@ Tensor argmin(const Tensor& self, c10::optional<int64_t> dim, bool keepdims) {
 }
 
 static Tensor &std_var_out(Tensor &result, const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim, bool take_sqrt) {
-  TORCH_CHECK(self.options().backend() == Backend::CPU || self.options().backend() == Backend::CUDA,
-              "std and var only support CPU AND CUDA backend, got: ", toString(self.options().backend()));
+  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
+              "std and var only supports CPU AND CUDA device type, got: ", self.device().type());
+  TORCH_CHECK(self.layout() == Layout::Strided,
+              "std and var only supports strided layout, got: ", self.layout());
   TORCH_CHECK(at::isFloatingType(self.scalar_type()) || at::isComplexType(self.scalar_type()),
               "std and var only support floating-point dtypes");
 
@@ -769,8 +776,12 @@ static Tensor &std_var_out(Tensor &result, const Tensor &self, IntArrayRef dim,
 
 static std::tuple<Tensor&,Tensor&> std_var_mean_out(const char* fname, Tensor &result1, Tensor &result2, const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim, bool take_sqrt) {
   AT_ASSERT(result1.defined() && result2.defined());
-  TORCH_CHECK(self.options().backend() == Backend::CPU || self.options().backend() == Backend::CUDA, fname, " only support CPU AND CUDA backend, got: ", toString(self.options().backend()));
-  TORCH_CHECK(at::isFloatingType(self.scalar_type()) || at::isComplexType(self.scalar_type()), fname, " only support floating-point dtypes");
+  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
+              fname, " only supports CPU AND CUDA device type, got: ", self.device().type());
+  TORCH_CHECK(self.layout() == Layout::Strided,
+              fname, " only supports strided layout, got: ", self.layout());
+  TORCH_CHECK(at::isFloatingType(self.scalar_type()) || at::isComplexType(self.scalar_type()),
+              fname, " only support floating-point dtypes");
   TORCH_CHECK(result1.scalar_type() == result2.scalar_type(),
               "provided by result1 dtype must match dtype of result2. Got ",
               toString(result1.scalar_type()),
@@ -856,8 +867,10 @@ std::tuple<Tensor,Tensor> var_mean(const Tensor& self, bool unbiased) {
 }
 
 Tensor var(const Tensor& self, bool unbiased) {
-  TORCH_CHECK(self.options().backend() == Backend::CPU || self.options().backend() == Backend::CUDA,
-              "var only supports CPU AND CUDA backend, got: ", toString(self.options().backend()));
+  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
+              "var only supports CPU AND CUDA device type, got: ", self.device().type());
+  TORCH_CHECK(self.layout() == Layout::Strided,
+              "var only supports strided layout, got: ", self.layout());
   TORCH_CHECK(at::isFloatingType(self.scalar_type()) || at::isComplexType(self.scalar_type()),
               "var only supports floating-point dtypes");
   auto trivial_return = _allreduce_return_trivial(self, std::numeric_limits<double>::quiet_NaN());
@@ -874,8 +887,10 @@ Tensor &var_out(Tensor &result, const Tensor &self, IntArrayRef dim, bool unbias
 }
 
 Tensor std(const Tensor& self, bool unbiased) {
-  TORCH_CHECK(self.options().backend() == Backend::CPU || self.options().backend() == Backend::CUDA,
-              "std only supports CPU AND CUDA backend, got: ", toString(self.options().backend()));
+  TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
+              "std only supports CPU AND CUDA device type, got: ", self.device().type());
+  TORCH_CHECK(self.layout() == Layout::Strided,
+              "std only supports strided layout, got: ", self.layout());
   TORCH_CHECK(at::isFloatingType(self.scalar_type()) || at::isComplexType(self.scalar_type()),
               "std only supports floating-point dtypes");
   auto trivial_return = _allreduce_return_trivial(self, std::numeric_limits<double>::quiet_NaN());
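
Note: the same pair of device-type and layout checks now appears in every reduction entry point touched above. A minimal sketch of a consolidating helper follows, using only APIs that already appear in the diff; the name check_reduction_input and its allow_sparse parameter are hypothetical and not part of this change.

// Hypothetical helper (not in this diff): factors out the repeated
// device-type and layout checks used by the reduction entry points above.
static void check_reduction_input(const Tensor& self, const char* name,
                                  bool allow_sparse = false) {
  // Device check: identical in every function touched by this diff.
  TORCH_CHECK(self.device().type() == DeviceType::CPU ||
              self.device().type() == DeviceType::CUDA,
              name, " only supports CPU AND CUDA device type, got: ",
              self.device().type());
  // Layout check: `any` also admits sparse tensors; the rest are strided-only.
  TORCH_CHECK(self.layout() == Layout::Strided ||
              (allow_sparse && self.layout() == Layout::Sparse),
              name, " only supports strided layout, got: ", self.layout());
}

Call sites would then shrink to check_reduction_input(self, "norm") or, for `any`, check_reduction_input(self, "any", /*allow_sparse=*/true).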
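For context on the change itself: a Backend value such as Backend::SparseCUDA bundles a device type and a layout, so the old single check is decomposed here into two independent ones. A small sketch of that decomposition, assuming an ordinary ATen translation unit (the function demo is illustrative only):

#include <ATen/ATen.h>

// Sketch: the two properties that Backend used to bundle can be
// inspected independently on any Tensor.
void demo() {
  at::Tensor dense  = at::ones({2, 2});   // CPU device, strided layout
  at::Tensor sparse = dense.to_sparse();  // CPU device, sparse layout

  // Old style collapsed both properties into one enum comparison:
  //   self.options().backend() == Backend::SparseCPU
  // New style tests each property on its own, as the diff above does:
  TORCH_INTERNAL_ASSERT(sparse.device().type() == at::DeviceType::CPU);
  TORCH_INTERNAL_ASSERT(sparse.layout() == at::Layout::Sparse);
}

This is also why `any` gains an explicit Layout::Sparse allowance while dropping the SparseCPU/SparseCUDA backend cases: those backends were just the two device types crossed with the sparse layout.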