@@ -235,11 +235,8 @@ Qnn_QuantizeParams_t ToQuantizeParam(const tensor_type& tensor) {
 flatbuffers::Offset<qcir::Tensor> ToTensor(
     const Qnn_Tensor_t& tensor,
+    const uint64_t data_offset,
     flatbuffers::FlatBufferBuilder* builder) {
-  std::vector<uint8_t> buffer(
-      static_cast<uint8_t*>(QNN_VER_PTR(tensor)->clientBuf.data),
-      static_cast<uint8_t*>(QNN_VER_PTR(tensor)->clientBuf.data) +
-          QNN_VER_PTR(tensor)->clientBuf.dataSize);
   std::vector<uint32_t> shape(
       QNN_VER_PTR(tensor)->dimensions,
       QNN_VER_PTR(tensor)->dimensions + QNN_VER_PTR(tensor)->rank);
@@ -251,10 +248,11 @@ flatbuffers::Offset<qcir::Tensor> ToTensor(
       ToTensorType(QNN_VER_PTR(tensor)->type),
       ToDataType(QNN_VER_PTR(tensor)->dataType),
       ToQuantizeParam(tensor, builder),
-      &buffer);
+      QNN_VER_PTR(tensor)->clientBuf.dataSize,
+      data_offset);
 }
 
-Qnn_Tensor_t ToTensor(const tensor_type& tensor) {
+Qnn_Tensor_t ToTensor(const tensor_type& tensor, const uint8_t* data_ptr) {
   auto is_io_tensor = [](Qnn_TensorType_t type) {
     return type < QNN_TENSOR_TYPE_STATIC;
   };
@@ -266,10 +264,10 @@ Qnn_Tensor_t ToTensor(const tensor_type& tensor) {
   QNN_VER_PTR(t)->quantizeParams = ToQuantizeParam(tensor);
   QNN_VER_PTR(t)->rank = tensor->shape()->size();
   QNN_VER_PTR(t)->dimensions = const_cast<uint32_t*>(tensor->shape()->data());
-  QNN_VER_PTR(t)->clientBuf.dataSize = tensor->data()->size();
+  QNN_VER_PTR(t)->clientBuf.dataSize = tensor->size();
   QNN_VER_PTR(t)->clientBuf.data = is_io_tensor(QNN_VER_PTR(t)->type)
       ? nullptr
-      : static_cast<void*>(const_cast<uint8_t*>(tensor->data()->Data()));
+      : static_cast<void*>(const_cast<uint8_t*>(data_ptr));
   return t;
 }
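
For reference, a minimal caller-side sketch of how the reworked ToTensor overloads might be wired up, assuming the tensor payloads now live in a side buffer that the caller manages rather than inside the FlatBuffer. The names qnn_tensor, flatbuffer_tensor, tensor_data_buffer, and stored_offset are illustrative placeholders, not part of this change:

// Hypothetical usage sketch (not part of the diff): payloads are kept outside
// the FlatBuffer, so serialization records an offset and deserialization
// resolves that offset back into a pointer.
flatbuffers::FlatBufferBuilder builder;
std::vector<uint8_t> tensor_data_buffer;  // caller-managed side buffer (assumed)

// Serialize: append this tensor's bytes to the side buffer and pass only the
// starting offset; ToTensor now stores dataSize and data_offset instead of
// copying the bytes into the FlatBuffer.
uint64_t data_offset = tensor_data_buffer.size();
auto* src = static_cast<const uint8_t*>(QNN_VER_PTR(qnn_tensor)->clientBuf.data);
tensor_data_buffer.insert(
    tensor_data_buffer.end(),
    src,
    src + QNN_VER_PTR(qnn_tensor)->clientBuf.dataSize);
auto qcir_tensor = ToTensor(qnn_tensor, data_offset, &builder);

// Deserialize: resolve the stored offset (however it is read back from the
// qcir tensor) into a pointer before rebuilding the Qnn_Tensor_t.
const uint8_t* data_ptr = tensor_data_buffer.data() + stored_offset;
Qnn_Tensor_t restored = ToTensor(flatbuffer_tensor, data_ptr);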