@@ -29,9 +29,7 @@ namespace framework {
 
 class Tensor {
  public:
-  Tensor() : numel_(0), offset_(0) {}
-
-  Tensor& operator=(const Tensor& src) = delete;
+  Tensor() : offset_(0) {}
 
   template <typename T>
   const T* data() const {
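Note on the hunk above: dropping the deleted copy assignment makes Tensor copy-assignable again, and because holder_ is a std::shared_ptr the compiler-generated assignment shares the buffer instead of duplicating it. A minimal sketch of the resulting aliasing behavior (hypothetical caller code; the include path and namespaces are assumed from this file):

```cpp
#include "paddle/framework/tensor.h"

void alias_example() {
  paddle::framework::Tensor a;
  a.mutable_data<float>(paddle::framework::make_ddim({2, 2}),
                        paddle::platform::CPUPlace());

  paddle::framework::Tensor b;
  b = a;  // shallow: b shares a's holder_; use CopyFrom<T>() for a deep copy.
}
```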
@@ -48,34 +46,33 @@ class Tensor {
   }
 
   template <typename T>
-  T* mutable_data(DDim dims, paddle::platform::Place place) {
+  T* mutable_data(DDim dims, platform::Place place) {
     set_dims(dims);
     return mutable_data<T>(place);
   }
 
   template <typename T>
-  T* mutable_data(paddle::platform::Place place) {
-    PADDLE_ENFORCE(numel_ > 0,
-                   "Tensor::numel_ must be larger than zero to call "
+  T* mutable_data(platform::Place place) {
+    PADDLE_ENFORCE(product(dims_) > 0,
+                   "Tensor's numel must be larger than zero to call "
                    "Tensor::mutable_data. Call Tensor::set_dim first.");
     if (holder_ == nullptr ||
         !(holder_->place() ==
           place) /* some versions of boost::variant don't have operator!= */
-        || holder_->size() < numel_ * sizeof(T) + offset_) {
+        || holder_->size() < product(dims_) * sizeof(T) + offset_) {
       if (platform::is_cpu_place(place)) {
         holder_.reset(new PlaceholderImpl<T, platform::CPUPlace>(
-            boost::get<platform::CPUPlace>(place), numel_ * sizeof(T)));
-      }
+            boost::get<platform::CPUPlace>(place), product(dims_) * sizeof(T)));
+      } else if (platform::is_gpu_place(place)) {
 #ifdef __CUDACC__
-      else if (platform::is_gpu_place(place)) {
         holder_.reset(new PlaceholderImpl<T, platform::GPUPlace>(
-            boost::get<platform::GPUPlace>(place), numel_ * sizeof(T)));
-      }
+            boost::get<platform::GPUPlace>(place), product(dims_) * sizeof(T)));
 #else
-      else if (platform::is_gpu_place(place)) {
-        PADDLE_ENFORCE(true, "GPU not support!");
-      }
+        PADDLE_ENFORCE(true, "'GPUPlace' is not supported in CPU only device.");
 #endif
+      } else {
+        PADDLE_ENFORCE(true, "Unknown 'place'.");
+      }
       offset_ = 0;
     }
     return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
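Since the byte count is now derived from product(dims_) rather than the cached numel_, a set_dims() call is picked up by the size check on the next mutable_data(). A short sketch of that lazy re-allocation path (hypothetical caller, CPU place only):

```cpp
paddle::framework::Tensor t;

// First call: holder_ is nullptr, so 2 * 3 * sizeof(float) bytes are
// allocated lazily on the CPU.
float* p = t.mutable_data<float>(paddle::framework::make_ddim({2, 3}),
                                 paddle::platform::CPUPlace());

// Growing dims_ makes holder_->size() < product(dims_) * sizeof(float),
// so the next mutable_data() re-allocates and resets offset_ to 0.
t.set_dims(paddle::framework::make_ddim({4, 3}));
p = t.mutable_data<float>(paddle::platform::CPUPlace());
```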
@@ -98,7 +95,7 @@ class Tensor {
   // flat to rank = 1
   template <typename T>
   typename TTypes<T>::Flat flat() {
-    return shaped<T, 1>(make_ddim({static_cast<int>(numel_)}));
+    return shaped<T, 1>(make_ddim({static_cast<int>(product(dims_))}));
   }
 
   // to TensorType Vec
@@ -129,7 +126,7 @@ class Tensor {
 
   template <typename T>
   typename TTypes<T>::ConstFlat flat() const {
-    return shaped<T, 1>(make_ddim({static_cast<int>(numel_)}));
+    return shaped<T, 1>(make_ddim({static_cast<int>(product(dims_))}));
   }
 
   template <typename T>
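Both flat() overloads likewise size their rank-1 view from product(dims_). A sketch of writing through the flattened view (hypothetical caller, and assuming TTypes<T>::Flat is the usual Eigen::TensorMap typedef):

```cpp
paddle::framework::Tensor t;
t.mutable_data<float>(paddle::framework::make_ddim({2, 3}),
                      paddle::platform::CPUPlace());

auto v = t.flat<float>();  // rank-1 Eigen view of length product(dims_) == 6
v.setConstant(1.0f);       // writes land in the tensor's underlying buffer
```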
@@ -151,12 +148,12 @@ class Tensor {
   }
 
   template <typename T>
-  void CopyFrom(const Tensor& src, paddle::platform::Place dst_place) {
+  void CopyFrom(const Tensor& src, platform::Place dst_place) {
     PADDLE_ENFORCE(platform::is_cpu_place(src.holder_->place()) &&
                        platform::is_cpu_place(dst_place),
                    "Tensor::CopyFrom only support CPU now.");
     src.CheckDims<T>();
-    size_t size = src.numel_ * sizeof(T);
+    size_t size = product(src.dims_) * sizeof(T);
     set_dims(src.dims());
     const void* src_ptr = static_cast<const void*>(src.data<T>());
     void* dst_ptr = static_cast<void*>(mutable_data<T>(dst_place));
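A usage sketch of the updated CopyFrom, which is CPU-to-CPU only per the enforce above (hypothetical caller):

```cpp
paddle::framework::Tensor src;
float* sp = src.mutable_data<float>(paddle::framework::make_ddim({3}),
                                    paddle::platform::CPUPlace());
sp[0] = 1.0f;
sp[1] = 2.0f;
sp[2] = 3.0f;

// Deep copy: dst gets its own buffer of product(src.dims_) * sizeof(float)
// bytes, unlike plain assignment, which would only share src's holder_.
paddle::framework::Tensor dst;
dst.CopyFrom<float>(src, paddle::platform::CPUPlace());
```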
@@ -190,7 +187,6 @@ class Tensor {
       return;
     }
     dims_ = dims;
-    numel_ = product(dims_);
   }
 
   DDim dims() const { return dims_; }
@@ -201,7 +197,7 @@ class Tensor {
   struct Placeholder {
     virtual ~Placeholder() {}
     virtual void* ptr() const = 0;
-    virtual paddle::platform::Place place() const = 0;
+    virtual platform::Place place() const = 0;
     virtual size_t size() const = 0;
   };
 
@@ -212,42 +208,39 @@ class Tensor {
     class Deleter {
      public:
       Deleter(PType place) : place_(place) {}
-      void operator()(T* ptr) {
-        paddle::memory::Free(place_, static_cast<void*>(ptr));
-      }
+      void operator()(T* ptr) { memory::Free(place_, static_cast<void*>(ptr)); }
 
      private:
       PType place_;
     };
 
    public:
     PlaceholderImpl(PlaceType place, size_t size)
-        : ptr_(static_cast<T*>(paddle::memory::Alloc(place, size)),
+        : ptr_(static_cast<T*>(memory::Alloc(place, size)),
                Deleter<PlaceType>(place)),
           place_(place),
           size_(size) {}
 
     virtual void* ptr() const { return static_cast<void*>(ptr_.get()); }
     virtual size_t size() const { return size_; }
-    virtual paddle::platform::Place place() const { return place_; }
+    virtual platform::Place place() const { return place_; }
 
     std::unique_ptr<T, Deleter<PlaceType>> ptr_;
-    paddle::platform::Place place_;  // record the place of ptr_.
-    size_t size_;                    // size of the memory block.
+    platform::Place place_;  // record the place of ptr_.
+    size_t size_;            // size of the memory block.
   };
 
   template <typename T>
   inline void CheckDims() const {
     PADDLE_ENFORCE(holder_ != nullptr,
                    "Tenosr holds no memory. Call Tensor::mutable_data first.");
-    PADDLE_ENFORCE(holder_->size() >= numel_ * sizeof(T) + offset_,
+    PADDLE_ENFORCE(holder_->size() >= product(dims_) * sizeof(T) + offset_,
                    "Tensor's dims_ is out of bound. Call Tensor::mutable_data "
                    "first to re-allocate memory.");
   }
 
   std::shared_ptr<Placeholder> holder_;  // holds the memory block if allocated.
   DDim dims_;
-  size_t numel_;  // cache of `product(dims_)`
   size_t offset_;  // marks the begin of tensor data area.
 };
 
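For context, the Deleter/std::unique_ptr pairing inside PlaceholderImpl is the standard custom-deleter pattern: the deleter captures where the block was allocated so the matching free routine is called on release. A self-contained sketch of the same idea with plain malloc/free (illustrative stand-ins, not Paddle APIs):

```cpp
#include <cstdlib>
#include <memory>

// Stand-in for Deleter<PType>: remembers enough context to release the block
// the same way it was acquired (here, plain std::free).
struct FreeDeleter {
  void operator()(void* ptr) const { std::free(ptr); }
};

int main() {
  // Stand-in for ptr_: unique ownership of a raw block, released through the
  // custom deleter, just as PlaceholderImpl releases through memory::Free.
  std::unique_ptr<void, FreeDeleter> block(std::malloc(64));
  return block ? 0 : 1;
}
```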