From 0d41d0ff37418b857aeb7d6617a831462704e74a Mon Sep 17 00:00:00 2001
From: Maksim Doronin
Date: Tue, 26 May 2020 14:12:29 +0300
Subject: [PATCH 1/3] [IE VPU] OutShapeOfReshape per-layer tests

---
 .../operations/out_shape_of_reshape.hpp |   3 +
 .../operations/out_shape_of_reshape.cpp | 197 ++++++++++++++++++
 .../out_shape_of_reshape.cpp            | 120 +++++++++++
 3 files changed, 320 insertions(+)
 create mode 100644 inference-engine/tests/functional/plugin/myriad/single_layer_tests/out_shape_of_reshape.cpp

diff --git a/inference-engine/src/vpu/common/include/vpu/ngraph/operations/out_shape_of_reshape.hpp b/inference-engine/src/vpu/common/include/vpu/ngraph/operations/out_shape_of_reshape.hpp
index 6bb56c16f023be..89cc9f0fd55810 100644
--- a/inference-engine/src/vpu/common/include/vpu/ngraph/operations/out_shape_of_reshape.hpp
+++ b/inference-engine/src/vpu/common/include/vpu/ngraph/operations/out_shape_of_reshape.hpp
@@ -6,6 +6,7 @@
 
 #include
 #include
+#include "ngraph/runtime/host_tensor.hpp"
 
 namespace ngraph { namespace vpu { namespace op {
@@ -28,6 +29,8 @@ class OutShapeOfReshape : public ngraph::op::Op {
     bool getSpecialZero() const { return m_specialZero; }
     void setSpecialZero(bool special_zero) { m_specialZero = special_zero; }
 
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) override;
+
 private:
     bool m_specialZero;
 };
diff --git a/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp b/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp
index 4688e7c18522ed..07439cf5c738b3 100644
--- a/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp
+++ b/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp
@@ -2,6 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include
 #include "vpu/ngraph/operations/out_shape_of_reshape.hpp"
 
 namespace ngraph { namespace vpu { namespace op {
@@ -66,6 +67,202 @@ bool OutShapeOfReshape::visit_attributes(ngraph::AttributeVisitor& visitor) {
     return true;
 }
 
+namespace {
+
+template <element::Type_t ET>
+bool getShapeFromHostTensorData(const HostTensorPtr& data, Shape& result) {
+    using T = typename element_type_traits<ET>::value_type;
+    T* dataPtr = data->get_data_ptr<ET>();
+    if (!dataPtr) {
+        return false;
+    }
+    size_t outputRank = data->get_shape()[0];
+
+    for (int i = 0; i < outputRank; i++) {
+        result.push_back(dataPtr[i]);
+    }
+
+    return true;
+}
+
+template <element::Type_t ET>
+bool setShapeToHostTensorData(const HostTensorPtr& data, const Shape& shape) {
+    using T = typename element_type_traits<ET>::value_type;
+    T* dataPtr = data->get_data_ptr<ET>();
+    if (!dataPtr) {
+        return false;
+    }
+    size_t outputRank = data->get_shape()[0];
+
+    for (int i = 0; i < outputRank; i++) {
+        dataPtr[i] = shape[i];
+    }
+    return true;
+}
+
+bool evaluateOutShapeOfReshape(
+        const HostTensorPtr& inDataShapeTensor,
+        const HostTensorPtr& outShapeDescriptorTensor,
+        bool specialZero,
+        const HostTensorPtr& outShapeTensor) {
+    if (!inDataShapeTensor || !outShapeDescriptorTensor || !outShapeTensor) {
+        return false;
+    }
+    Shape inputShape;
+    Shape outputShape;
+
+    switch (inDataShapeTensor->get_element_type()) {
+        case element::Type_t::i8:
+            if (!getShapeFromHostTensorData<element::Type_t::i8>(inDataShapeTensor, inputShape)) return false;
+            break;
+        case element::Type_t::i16:
+            if (!getShapeFromHostTensorData<element::Type_t::i16>(inDataShapeTensor, inputShape)) return false;
+            break;
+        case element::Type_t::i32:
+            if (!getShapeFromHostTensorData<element::Type_t::i32>(inDataShapeTensor, inputShape)) return false;
+            break;
+        case element::Type_t::i64:
+            if (!getShapeFromHostTensorData<element::Type_t::i64>(inDataShapeTensor, inputShape)) return false;
+            break;
+        case element::Type_t::u8:
+            if (!getShapeFromHostTensorData<element::Type_t::u8>(inDataShapeTensor, inputShape)) return false;
+            break;
+        case element::Type_t::u16:
+            if (!getShapeFromHostTensorData<element::Type_t::u16>(inDataShapeTensor, inputShape)) return false;
+            break;
+        case element::Type_t::u32:
+            if (!getShapeFromHostTensorData<element::Type_t::u32>(inDataShapeTensor, inputShape)) return false;
+            break;
+        case element::Type_t::u64:
+            if (!getShapeFromHostTensorData<element::Type_t::u64>(inDataShapeTensor, inputShape)) return false;
+            break;
+        default: return false;
+    }
+
+    switch (outShapeDescriptorTensor->get_element_type()) {
+        case element::Type_t::i8:
+            if (!getShapeFromHostTensorData<element::Type_t::i8>(outShapeDescriptorTensor, outputShape)) return false;
+            break;
+        case element::Type_t::i16:
+            if (!getShapeFromHostTensorData<element::Type_t::i16>(outShapeDescriptorTensor, outputShape)) return false;
+            break;
+        case element::Type_t::i32:
+            if (!getShapeFromHostTensorData<element::Type_t::i32>(outShapeDescriptorTensor, outputShape)) return false;
+            break;
+        case element::Type_t::i64:
+            if (!getShapeFromHostTensorData<element::Type_t::i64>(outShapeDescriptorTensor, outputShape)) return false;
+            break;
+        case element::Type_t::u8:
+            if (!getShapeFromHostTensorData<element::Type_t::u8>(outShapeDescriptorTensor, outputShape)) return false;
+            break;
+        case element::Type_t::u16:
+            if (!getShapeFromHostTensorData<element::Type_t::u16>(outShapeDescriptorTensor, outputShape)) return false;
+            break;
+        case element::Type_t::u32:
+            if (!getShapeFromHostTensorData<element::Type_t::u32>(outShapeDescriptorTensor, outputShape)) return false;
+            break;
+        case element::Type_t::u64:
+            if (!getShapeFromHostTensorData<element::Type_t::u64>(outShapeDescriptorTensor, outputShape)) return false;
+            break;
+        default: return false;
+    }
+
+    if (std::any_of(outputShape.begin(), outputShape.end(), [](int64_t value) { return value < -1; })) {
+        return false;
+    }
+
+    int zeroDimsCount = std::count_if(outputShape.begin(), outputShape.end(),
+                                      [](int64_t value) { return value == 0; });
+    int negativeDimsCount = std::count_if(outputShape.begin(), outputShape.end(),
+                                          [](int64_t value) { return value == -1; });
+    if (negativeDimsCount > 1) {
+        return false;
+    }
+
+    size_t outputRank = outputShape.size();
+
+    if (!(zeroDimsCount && specialZero) && !negativeDimsCount) {
+        if (shape_size(inputShape) != shape_size(outputShape)) {
+            return false;
+        }
+    } else {
+        int negativeDimIdx = -1;
+
+        size_t inputTotalDimCount = shape_size(inputShape);
+        size_t outputTotalDimCount = 1;
+
+
+        // compute the output shape
+        for (size_t i = 0; i < outputRank; i++) {
+            if (outputShape[i] == 0 && specialZero) {
+                // Copy input_shape[i] for zero values
+                if (i > inputShape.size() - 1) {
+                    return false;
+                }
+                outputShape[i] = inputShape[i];
+                outputTotalDimCount *= inputShape[i];
+            } else if (outputShape[i] == -1) {
+                negativeDimIdx = i;
+            } else {
+                outputTotalDimCount *= outputShape[i];
+            }
+        }
+
+        if (negativeDimIdx != -1) {
+            // Infer size such that number of output elements matches
+            // input elements
+            if (outputTotalDimCount == 0) {
+                if (inputTotalDimCount != 0) {
+                    return false;
+                }
+                outputShape[negativeDimIdx] = 0;
+            } else {
+                if (inputTotalDimCount % outputTotalDimCount != 0) {
+                    return false;
+                }
+                outputShape[negativeDimIdx] = inputTotalDimCount / outputTotalDimCount;
+            }
+        }
+    }
+
+    switch (outShapeTensor->get_element_type()) {
+        case element::Type_t::i8:
+            if (!setShapeToHostTensorData<element::Type_t::i8>(outShapeTensor, outputShape)) return false;
+            break;
+        case element::Type_t::i16:
+            if (!setShapeToHostTensorData<element::Type_t::i16>(outShapeTensor, outputShape)) return false;
+            break;
+        case element::Type_t::i32:
+            if (!setShapeToHostTensorData<element::Type_t::i32>(outShapeTensor, outputShape)) return false;
+            break;
+        case element::Type_t::i64:
+            if (!setShapeToHostTensorData<element::Type_t::i64>(outShapeTensor, outputShape)) return false;
+            break;
+        case element::Type_t::u8:
+            if (!setShapeToHostTensorData<element::Type_t::u8>(outShapeTensor, outputShape)) return false;
+            break;
+        case element::Type_t::u16:
+            if (!setShapeToHostTensorData<element::Type_t::u16>(outShapeTensor, outputShape)) return false;
+            break;
+        case element::Type_t::u32:
+            if (!setShapeToHostTensorData<element::Type_t::u32>(outShapeTensor, outputShape)) return false;
+            break;
+        case element::Type_t::u64:
+            if (!setShapeToHostTensorData<element::Type_t::u64>(outShapeTensor, outputShape)) return false;
+            break;
+        default: return false;
+    }
+
+    return true;
+}
+
+}  // namespace
+
+bool OutShapeOfReshape::evaluate(const HostTensorVector& outputs,
+                                 const HostTensorVector& inputs) {
+    return evaluateOutShapeOfReshape(inputs[0], inputs[1], m_specialZero, outputs[0]);
+}
+
 }  // namespace op
 }  // namespace vpu
 }  // namespace ngraph
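For reference, the shape-inference rule that the evaluate() path above implements can be reproduced in isolation. The following is a minimal, self-contained sketch (all names are illustrative and not part of the patch); it covers only the special-zero copy and the -1 inference, not every validation branch of the real implementation:

#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Simplified stand-in for evaluateOutShapeOfReshape(): resolves 0 (with
// special_zero) and -1 entries of the descriptor against the input shape.
bool inferReshapeOutShape(const std::vector<int64_t>& inShape,
                          std::vector<int64_t> descriptor,
                          bool specialZero,
                          std::vector<int64_t>& outShape) {
    const int64_t inTotal = std::accumulate(inShape.begin(), inShape.end(),
                                            int64_t{1}, std::multiplies<int64_t>());
    int64_t knownTotal = 1;
    int negativeIdx = -1;
    for (size_t i = 0; i < descriptor.size(); ++i) {
        if (descriptor[i] == 0 && specialZero) {
            if (i >= inShape.size()) return false;
            descriptor[i] = inShape[i];           // 0 copies the input dimension
            knownTotal *= descriptor[i];
        } else if (descriptor[i] == -1) {
            if (negativeIdx != -1) return false;  // at most one -1 is allowed
            negativeIdx = static_cast<int>(i);
        } else if (descriptor[i] < -1) {
            return false;
        } else {
            knownTotal *= descriptor[i];
        }
    }
    if (negativeIdx != -1) {
        if (knownTotal == 0 || inTotal % knownTotal != 0) return false;
        descriptor[negativeIdx] = inTotal / knownTotal;  // -1 is inferred
    }
    outShape = descriptor;
    return true;
}

int main() {
    std::vector<int64_t> out;
    // One of the cases from the test plan below: {2, 3, 128, 256} reshaped by
    // {2, -1, 0} with special_zero=true resolves to {2, 768, 128}.
    if (inferReshapeOutShape({2, 3, 128, 256}, {2, -1, 0}, true, out)) {
        for (auto d : out) std::cout << d << ' ';  // prints: 2 768 128
        std::cout << '\n';
    }
    return 0;
}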
diff --git a/inference-engine/tests/functional/plugin/myriad/single_layer_tests/out_shape_of_reshape.cpp b/inference-engine/tests/functional/plugin/myriad/single_layer_tests/out_shape_of_reshape.cpp
new file mode 100644
index 00000000000000..30d45ab25d64c8
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/myriad/single_layer_tests/out_shape_of_reshape.cpp
@@ -0,0 +1,120 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu/ngraph/operations/out_shape_of_reshape.hpp"
+
+#include "vpu/private_plugin_config.hpp"
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+using InputShape = InferenceEngine::SizeVector;
+using ShapeDescriptor = std::vector<int32_t>;
+
+using OutShapeOfReshapeParam = std::tuple<
+        InputShape,       // Input shape
+        ShapeDescriptor,  // out shape descriptor
+        bool>;            // Special zero
+
+using OutShapeOfReshapeTestParam = std::tuple<
+        OutShapeOfReshapeParam,          // Shape params
+        LayerTestsUtils::TargetDevice>;  // Device name
+
+
+namespace LayerTestsDefinitions {
+
+class OutShapeOfReshapeLayerTest : public testing::WithParamInterface<OutShapeOfReshapeTestParam>,
+                                   public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<OutShapeOfReshapeTestParam>& obj) {
+        OutShapeOfReshapeParam shapesParam;
+        std::string targetDevice;
+        std::tie(shapesParam, targetDevice) = obj.param;
+
+        const auto& inputShape = std::get<0>(shapesParam);
+        const auto& outShapeDescriptor = std::get<1>(shapesParam);
+        const auto& specialZero = std::get<2>(shapesParam);
+
+        std::ostringstream result;
+        result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
+        result << "OSD=" << CommonTestUtils::vec2str(outShapeDescriptor) << "_";
+        result << "SZ=" << std::to_string(specialZero) << "_";
+        result << "targetDevice=" << targetDevice;
+        return result.str();
+    }
+
+protected:
+    void SetUp() override {
+        SetRefMode(LayerTestsUtils::RefMode::INTERPRETER);
+        configuration[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+        configuration[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
+
+        OutShapeOfReshapeParam shapesParam;
+        std::tie(shapesParam, targetDevice) = this->GetParam();
+        inPrc = InferenceEngine::Precision::I32;
+        outPrc = InferenceEngine::Precision::I32;
+
+        const auto& inputShape = std::get<0>(shapesParam);
+        const auto& outShapeDescriptor = std::get<1>(shapesParam);
+        const auto& specialZero = std::get<2>(shapesParam);
+
+        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrc);
+
+        const auto inputShapeParam = std::make_shared<ngraph::opset3::Parameter>(
+                ngPrc, ngraph::Shape{inputShape.size()});
+        const auto outShapeDescriptorConst = std::make_shared<ngraph::opset3::Constant>(
+                ngPrc, ngraph::Shape{outShapeDescriptor.size()}, outShapeDescriptor);
+
+        const auto outShapeOfReshape = std::make_shared<ngraph::vpu::op::OutShapeOfReshape>(
+                inputShapeParam, outShapeDescriptorConst, specialZero);
+        ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(outShapeOfReshape)};
+        function = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{inputShapeParam});
+    }
+
+    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override {
+        OutShapeOfReshapeParam shapesParam;
+        std::string targetDevice;
+        std::tie(shapesParam, targetDevice) = this->GetParam();
+        const auto& inputShape = std::get<0>(shapesParam);
+
+        InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc());
+        blob->allocate();
+
+        auto dataPtr = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob)->rwmap().as<int32_t*>();
+        for (size_t i = 0; i < blob->size(); ++i) {
+            dataPtr[i] = inputShape[i];
+        }
+
+        return blob;
+    }
+};
+
+TEST_P(OutShapeOfReshapeLayerTest, accuracy) {
+    Run();
+}
+
+std::vector<OutShapeOfReshapeParam> shapeParams = {
+        std::make_tuple(InputShape{ 2, 3, 128, 256 }, ShapeDescriptor{ 0, 0, 64, 512 }, true),
+        std::make_tuple(InputShape{ 2, 3, 128, 256 }, ShapeDescriptor{ 3, 2, 64, 512 }, false),
+        std::make_tuple(InputShape{ 2, 3, 0, 256 }, ShapeDescriptor{ 3, 8, 0, 512 }, false),
+        std::make_tuple(InputShape{ 2, 3, 128, 256 }, ShapeDescriptor{ 2, 3, -1, 64 }, false),
+        std::make_tuple(InputShape{ 2, 3, 128, 256 }, ShapeDescriptor{ 2, -1, 0 }, true),
+        std::make_tuple(InputShape{ 2, 5, 5, 24 }, ShapeDescriptor{ 0, -1, 4 }, true),
+        std::make_tuple(InputShape{ 2, 5, 5, 0 }, ShapeDescriptor{ 0, 4 }, false),
+};
+
+INSTANTIATE_TEST_CASE_P(accuracy, OutShapeOfReshapeLayerTest,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(shapeParams),
+                                ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
+                        OutShapeOfReshapeLayerTest::getTestCaseName);
+
+}  // namespace LayerTestsDefinitions
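Note: the single-layer test above compares the Myriad plugin result against the INTERPRETER reference mode, which in turn exercises the new evaluate() overload added by this patch. A hypothetical direct unit check of that overload could look roughly like the sketch below (illustrative only; the test name, tensor setup helper, and opset3 usage are assumptions rather than part of this patch series):

#include <algorithm>
#include <memory>
#include <vector>

#include <gtest/gtest.h>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset3.hpp>

#include "vpu/ngraph/operations/out_shape_of_reshape.hpp"

TEST(OutShapeOfReshapeEvaluate, InfersMinusOneAndSpecialZero) {
    using namespace ngraph;

    // The op's inputs are shapes themselves: a 4-element input shape and a
    // 3-element reshape descriptor.
    auto inShapeParam = std::make_shared<opset3::Parameter>(element::i32, Shape{4});
    auto descriptorConst = opset3::Constant::create(element::i32, Shape{3}, {2, -1, 0});
    auto op = std::make_shared<vpu::op::OutShapeOfReshape>(inShapeParam, descriptorConst, true);

    // Helper (assumed, not from the patch): builds an i32 host tensor from values.
    auto makeI32Tensor = [](const std::vector<int32_t>& values) {
        auto tensor = std::make_shared<runtime::HostTensor>(element::i32, Shape{values.size()});
        std::copy(values.begin(), values.end(), tensor->get_data_ptr<element::Type_t::i32>());
        return tensor;
    };

    auto inTensor = makeI32Tensor({2, 3, 128, 256});
    auto descTensor = makeI32Tensor({2, -1, 0});
    auto outTensor = std::make_shared<runtime::HostTensor>(element::i32, Shape{3});

    ASSERT_TRUE(op->evaluate({outTensor}, {inTensor, descTensor}));

    auto* out = outTensor->get_data_ptr<element::Type_t::i32>();
    EXPECT_EQ(out[0], 2);
    EXPECT_EQ(out[1], 768);  // 2*3*128*256 / (2*128)
    EXPECT_EQ(out[2], 128);  // special zero copies the input dimension
}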
From bdbb5c6477d174730a64758d82bee6157605f068 Mon Sep 17 00:00:00 2001
From: Maksim Doronin
Date: Thu, 28 May 2020 11:48:05 +0300
Subject: [PATCH 2/3] [IE VPU] Update firmware

---
 inference-engine/cmake/vpu_dependencies.cmake | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/inference-engine/cmake/vpu_dependencies.cmake b/inference-engine/cmake/vpu_dependencies.cmake
index 6db2057e8fd513..10744c60942f18 100644
--- a/inference-engine/cmake/vpu_dependencies.cmake
+++ b/inference-engine/cmake/vpu_dependencies.cmake
@@ -19,7 +19,7 @@ set(VPU_SUPPORTED_FIRMWARES usb-ma2450 usb-ma2x8x pcie-ma248x)
 # Default packages
 #
 
-set(FIRMWARE_PACKAGE_VERSION 1169)
+set(FIRMWARE_PACKAGE_VERSION 1176)
 
 set(VPU_CLC_MA2X8X_VERSION "movi-cltools-20.02.0")
 #

From 5d7c32cf5fb049c6b84f2c184a41a44abb0b50f3 Mon Sep 17 00:00:00 2001
From: Maksim Doronin
Date: Thu, 28 May 2020 12:15:47 +0300
Subject: [PATCH 3/3] [IE VPU] OutShapeOfReshape: get rid of code duplication

---
 .../operations/out_shape_of_reshape.cpp | 108 +++++++++---------
 1 file changed, 51 insertions(+), 57 deletions(-)

diff --git a/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp b/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp
index 07439cf5c738b3..14a7c27903000b 100644
--- a/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp
+++ b/inference-engine/src/vpu/common/src/ngraph/operations/out_shape_of_reshape.cpp
@@ -93,6 +93,9 @@ bool setShapeToHostTensorData(const HostTensorPtr& data, const Shape& shape) {
         return false;
     }
     size_t outputRank = data->get_shape()[0];
+    if (shape.size() != outputRank) {
+        return false;
+    }
 
     for (int i = 0; i < outputRank; i++) {
         dataPtr[i] = shape[i];
@@ -100,71 +103,86 @@ bool setShapeToHostTensorData(const HostTensorPtr& data, const Shape& shape) {
     return true;
 }
 
-bool evaluateOutShapeOfReshape(
-        const HostTensorPtr& inDataShapeTensor,
-        const HostTensorPtr& outShapeDescriptorTensor,
-        bool specialZero,
-        const HostTensorPtr& outShapeTensor) {
-    if (!inDataShapeTensor || !outShapeDescriptorTensor || !outShapeTensor) {
-        return false;
-    }
-    Shape inputShape;
-    Shape outputShape;
-
-    switch (inDataShapeTensor->get_element_type()) {
+bool getShapeFromHostTensorData(const HostTensorPtr& data, Shape& shape) {
+    bool rc = false;
+    switch (data->get_element_type()) {
         case element::Type_t::i8:
-            if (!getShapeFromHostTensorData<element::Type_t::i8>(inDataShapeTensor, inputShape)) return false;
+            rc = getShapeFromHostTensorData<element::Type_t::i8>(data, shape);
             break;
         case element::Type_t::i16:
-            if (!getShapeFromHostTensorData<element::Type_t::i16>(inDataShapeTensor, inputShape)) return false;
+            rc = getShapeFromHostTensorData<element::Type_t::i16>(data, shape);
             break;
         case element::Type_t::i32:
-            if (!getShapeFromHostTensorData<element::Type_t::i32>(inDataShapeTensor, inputShape)) return false;
+            rc = getShapeFromHostTensorData<element::Type_t::i32>(data, shape);
             break;
         case element::Type_t::i64:
-            if (!getShapeFromHostTensorData<element::Type_t::i64>(inDataShapeTensor, inputShape)) return false;
+            rc = getShapeFromHostTensorData<element::Type_t::i64>(data, shape);
             break;
         case element::Type_t::u8:
-            if (!getShapeFromHostTensorData<element::Type_t::u8>(inDataShapeTensor, inputShape)) return false;
+            rc = getShapeFromHostTensorData<element::Type_t::u8>(data, shape);
             break;
        case element::Type_t::u16:
-            if (!getShapeFromHostTensorData<element::Type_t::u16>(inDataShapeTensor, inputShape)) return false;
+            rc = getShapeFromHostTensorData<element::Type_t::u16>(data, shape);
             break;
        case element::Type_t::u32:
-            if (!getShapeFromHostTensorData<element::Type_t::u32>(inDataShapeTensor, inputShape)) return false;
+            rc = getShapeFromHostTensorData<element::Type_t::u32>(data, shape);
             break;
        case element::Type_t::u64:
-            if (!getShapeFromHostTensorData<element::Type_t::u64>(inDataShapeTensor, inputShape)) return false;
+            rc = getShapeFromHostTensorData<element::Type_t::u64>(data, shape);
             break;
-        default: return false;
+        default: rc = false;
     }
+    return rc;
+}
 
-    switch (outShapeDescriptorTensor->get_element_type()) {
+bool setShapeToHostTensorData(const HostTensorPtr& data, const Shape& shape) {
+    bool rc = false;
+    switch (data->get_element_type()) {
         case element::Type_t::i8:
-            if (!getShapeFromHostTensorData<element::Type_t::i8>(outShapeDescriptorTensor, outputShape)) return false;
+            rc = setShapeToHostTensorData<element::Type_t::i8>(data, shape);
             break;
         case element::Type_t::i16:
-            if (!getShapeFromHostTensorData<element::Type_t::i16>(outShapeDescriptorTensor, outputShape)) return false;
+            rc = setShapeToHostTensorData<element::Type_t::i16>(data, shape);
             break;
         case element::Type_t::i32:
-            if (!getShapeFromHostTensorData<element::Type_t::i32>(outShapeDescriptorTensor, outputShape)) return false;
+            rc = setShapeToHostTensorData<element::Type_t::i32>(data, shape);
             break;
         case element::Type_t::i64:
-            if (!getShapeFromHostTensorData<element::Type_t::i64>(outShapeDescriptorTensor, outputShape)) return false;
+            rc = setShapeToHostTensorData<element::Type_t::i64>(data, shape);
             break;
         case element::Type_t::u8:
-            if (!getShapeFromHostTensorData<element::Type_t::u8>(outShapeDescriptorTensor, outputShape)) return false;
+            rc = setShapeToHostTensorData<element::Type_t::u8>(data, shape);
             break;
         case element::Type_t::u16:
-            if (!getShapeFromHostTensorData<element::Type_t::u16>(outShapeDescriptorTensor, outputShape)) return false;
+            rc = setShapeToHostTensorData<element::Type_t::u16>(data, shape);
             break;
         case element::Type_t::u32:
-            if (!getShapeFromHostTensorData<element::Type_t::u32>(outShapeDescriptorTensor, outputShape)) return false;
+            rc = setShapeToHostTensorData<element::Type_t::u32>(data, shape);
             break;
         case element::Type_t::u64:
-            if (!getShapeFromHostTensorData<element::Type_t::u64>(outShapeDescriptorTensor, outputShape)) return false;
+            rc = setShapeToHostTensorData<element::Type_t::u64>(data, shape);
             break;
-        default: return false;
+        default: rc = false;
+    }
+    return rc;
+}
+
+bool evaluateOutShapeOfReshape(
+        const HostTensorPtr& inDataShapeTensor,
+        const HostTensorPtr& outShapeDescriptorTensor,
+        bool specialZero,
+        const HostTensorPtr& outShapeTensor) {
+    if (!inDataShapeTensor || !outShapeDescriptorTensor || !outShapeTensor) {
+        return false;
+    }
+    Shape inputShape;
+    Shape outputShape;
+
+    if (!getShapeFromHostTensorData(inDataShapeTensor, inputShape)) {
+        return false;
+    }
+    if (!getShapeFromHostTensorData(outShapeDescriptorTensor, outputShape)) {
+        return false;
     }
 
     if (std::any_of(outputShape.begin(), outputShape.end(), [](int64_t value) { return value < -1; })) {
@@ -225,32 +243,8 @@ bool evaluateOutShapeOfReshape(
         }
     }
 
-    switch (outShapeTensor->get_element_type()) {
-        case element::Type_t::i8:
-            if (!setShapeToHostTensorData<element::Type_t::i8>(outShapeTensor, outputShape)) return false;
-            break;
-        case element::Type_t::i16:
-            if (!setShapeToHostTensorData<element::Type_t::i16>(outShapeTensor, outputShape)) return false;
-            break;
-        case element::Type_t::i32:
-            if (!setShapeToHostTensorData<element::Type_t::i32>(outShapeTensor, outputShape)) return false;
-            break;
-        case element::Type_t::i64:
-            if (!setShapeToHostTensorData<element::Type_t::i64>(outShapeTensor, outputShape)) return false;
-            break;
-        case element::Type_t::u8:
-            if (!setShapeToHostTensorData<element::Type_t::u8>(outShapeTensor, outputShape)) return false;
-            break;
-        case element::Type_t::u16:
-            if (!setShapeToHostTensorData<element::Type_t::u16>(outShapeTensor, outputShape)) return false;
-            break;
-        case element::Type_t::u32:
-            if (!setShapeToHostTensorData<element::Type_t::u32>(outShapeTensor, outputShape)) return false;
-            break;
-        case element::Type_t::u64:
-            if (!setShapeToHostTensorData<element::Type_t::u64>(outShapeTensor, outputShape)) return false;
-            break;
-        default: return false;
+    if (!setShapeToHostTensorData(outShapeTensor, outputShape)) {
+        return false;
     }
 
     return true;
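Taken together, PATCH 3/3 replaces three copies of an eight-way switch with two small dispatchers: the switch converts the runtime element type into a compile-time template argument once, and every caller goes through the non-template overload. A condensed, standalone illustration of that dispatch pattern (types and names here are illustrative only, not part of the patch):

#include <cstdint>
#include <iostream>
#include <vector>

enum class ElemType { i32, i64 };

// Compile-time typed reader (stands in for getShapeFromHostTensorData<ET>).
template <typename T>
bool readShapeTyped(const void* raw, size_t count, std::vector<int64_t>& shape) {
    const T* data = static_cast<const T*>(raw);
    if (!data) return false;
    shape.assign(data, data + count);
    return true;
}

// Runtime dispatcher (stands in for the new non-template overload): the single
// switch picks the template instantiation, so callers never repeat it.
bool readShape(ElemType type, const void* raw, size_t count, std::vector<int64_t>& shape) {
    switch (type) {
        case ElemType::i32: return readShapeTyped<int32_t>(raw, count, shape);
        case ElemType::i64: return readShapeTyped<int64_t>(raw, count, shape);
    }
    return false;
}

int main() {
    const int32_t buffer[] = {2, 3, 128, 256};
    std::vector<int64_t> shape;
    if (readShape(ElemType::i32, buffer, 4, shape)) {
        for (auto d : shape) std::cout << d << ' ';  // prints: 2 3 128 256
        std::cout << '\n';
    }
    return 0;
}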