Skip to content

[IE VPU] OutShapeOfReshape per-layer tests #631

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion inference-engine/cmake/vpu_dependencies.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ set(VPU_SUPPORTED_FIRMWARES usb-ma2450 usb-ma2x8x pcie-ma248x)
# Default packages
#

set(FIRMWARE_PACKAGE_VERSION 1169)
set(FIRMWARE_PACKAGE_VERSION 1176)
set(VPU_CLC_MA2X8X_VERSION "movi-cltools-20.02.0")

#
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@

#include <ngraph/node.hpp>
#include <ngraph/op/op.hpp>
#include "ngraph/runtime/host_tensor.hpp"

namespace ngraph { namespace vpu { namespace op {

Expand All @@ -28,6 +29,8 @@ class OutShapeOfReshape : public ngraph::op::Op {
/// Returns whether a 0 in the reshape pattern copies the corresponding
/// input dimension (Reshape "special zero" semantics).
bool getSpecialZero() const { return m_specialZero; }
/// Sets the special-zero flag (see getSpecialZero()).
void setSpecialZero(bool special_zero) { m_specialZero = special_zero; }

/// Constant-folding entry point: computes the resolved output shape on
/// host tensors. outputs[0] receives the result; returns false on
/// invalid inputs.
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) override;

private:
bool m_specialZero;
};
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <vpu/utils/error.hpp>
#include "vpu/ngraph/operations/out_shape_of_reshape.hpp"

namespace ngraph { namespace vpu { namespace op {
Expand Down Expand Up @@ -66,6 +67,196 @@ bool OutShapeOfReshape::visit_attributes(ngraph::AttributeVisitor& visitor) {
return true;
}

namespace {

template<element::Type_t ET>
bool getShapeFromHostTensorData(const HostTensorPtr& data, Shape& result) {
using T = typename element_type_traits<ET>::value_type;
T* dataPtr = data->get_data_ptr<ET>();
if (!dataPtr) {
return false;
}
size_t outputRank = data->get_shape()[0];

for (int i = 0; i < outputRank; i++) {
result.push_back(dataPtr[i]);
}

return true;
}

template<element::Type_t ET>
bool setShapeToHostTensorData(const HostTensorPtr& data, const Shape& shape) {
using T = typename element_type_traits<ET>::value_type;
T* dataPtr = data->get_data_ptr<ET>();
if (!dataPtr) {
return false;
}
size_t outputRank = data->get_shape()[0];
if (shape.size() != outputRank) {
return false;
}

for (int i = 0; i < outputRank; i++) {
dataPtr[i] = shape[i];
}
return true;
}

// Type-dispatching front end: extracts a shape from `data` using the
// template overload matching the tensor's integral element type.
// Non-integral element types are rejected.
bool getShapeFromHostTensorData(const HostTensorPtr& data, Shape& shape) {
    switch (data->get_element_type()) {
        case element::Type_t::i8:
            return getShapeFromHostTensorData<element::Type_t::i8>(data, shape);
        case element::Type_t::i16:
            return getShapeFromHostTensorData<element::Type_t::i16>(data, shape);
        case element::Type_t::i32:
            return getShapeFromHostTensorData<element::Type_t::i32>(data, shape);
        case element::Type_t::i64:
            return getShapeFromHostTensorData<element::Type_t::i64>(data, shape);
        case element::Type_t::u8:
            return getShapeFromHostTensorData<element::Type_t::u8>(data, shape);
        case element::Type_t::u16:
            return getShapeFromHostTensorData<element::Type_t::u16>(data, shape);
        case element::Type_t::u32:
            return getShapeFromHostTensorData<element::Type_t::u32>(data, shape);
        case element::Type_t::u64:
            return getShapeFromHostTensorData<element::Type_t::u64>(data, shape);
        default:
            return false;
    }
}

// Type-dispatching front end: writes `shape` into `data` using the
// template overload matching the tensor's integral element type.
// Non-integral element types are rejected.
bool setShapeToHostTensorData(const HostTensorPtr& data, const Shape& shape) {
    switch (data->get_element_type()) {
        case element::Type_t::i8:
            return setShapeToHostTensorData<element::Type_t::i8>(data, shape);
        case element::Type_t::i16:
            return setShapeToHostTensorData<element::Type_t::i16>(data, shape);
        case element::Type_t::i32:
            return setShapeToHostTensorData<element::Type_t::i32>(data, shape);
        case element::Type_t::i64:
            return setShapeToHostTensorData<element::Type_t::i64>(data, shape);
        case element::Type_t::u8:
            return setShapeToHostTensorData<element::Type_t::u8>(data, shape);
        case element::Type_t::u16:
            return setShapeToHostTensorData<element::Type_t::u16>(data, shape);
        case element::Type_t::u32:
            return setShapeToHostTensorData<element::Type_t::u32>(data, shape);
        case element::Type_t::u64:
            return setShapeToHostTensorData<element::Type_t::u64>(data, shape);
        default:
            return false;
    }
}

// Resolves the output shape of a Reshape given the input data shape and
// the reshape descriptor, following Reshape-1 semantics:
//  * a 0 in the descriptor copies the corresponding input dimension when
//    specialZero is set;
//  * at most one -1 dimension is inferred so the output element count
//    matches the input element count.
// The resolved shape is written to outShapeTensor. Returns false on any
// malformed input instead of throwing.
bool evaluateOutShapeOfReshape(
        const HostTensorPtr& inDataShapeTensor,
        const HostTensorPtr& outShapeDescriptorTensor,
        bool specialZero,
        const HostTensorPtr& outShapeTensor) {
    if (!inDataShapeTensor || !outShapeDescriptorTensor || !outShapeTensor) {
        return false;
    }
    Shape inputShape;
    Shape outputShape;

    if (!getShapeFromHostTensorData(inDataShapeTensor, inputShape)) {
        return false;
    }
    if (!getShapeFromHostTensorData(outShapeDescriptorTensor, outputShape)) {
        return false;
    }

    // Descriptor values below -1 are invalid. (Shape stores unsigned
    // values; the int64_t lambda parameter reinterprets the bit pattern, so
    // a -1 written by the producer compares equal to -1 here.)
    if (std::any_of(outputShape.begin(), outputShape.end(), [](int64_t value) { return value < -1; })) {
        return false;
    }

    int zeroDimsCount = std::count_if(outputShape.begin(), outputShape.end(),
                                      [](int64_t value) { return value == 0; });
    int negativeDimsCount = std::count_if(outputShape.begin(), outputShape.end(),
                                          [](int64_t value) { return value == -1; });
    // Only one dimension may be inferred.
    if (negativeDimsCount > 1) {
        return false;
    }

    size_t outputRank = outputShape.size();

    if (!(zeroDimsCount && specialZero) && !negativeDimsCount) {
        // No special dimensions: the element counts must already agree.
        if (shape_size(inputShape) != shape_size(outputShape)) {
            return false;
        }
    } else {
        int negativeDimIdx = -1;

        size_t inputTotalDimCount = shape_size(inputShape);
        size_t outputTotalDimCount = 1;

        // Compute the output shape: resolve special-zero dimensions and
        // locate the (single) dimension to be inferred.
        for (size_t i = 0; i < outputRank; i++) {
            if (outputShape[i] == 0 && specialZero) {
                // Copy inputShape[i] for zero values.
                // Bug fix: the original `i > inputShape.size() - 1` check
                // underflows for an empty input shape (size_t 0 - 1 ==
                // SIZE_MAX), letting an out-of-bounds read through.
                if (i >= inputShape.size()) {
                    return false;
                }
                outputShape[i] = inputShape[i];
                outputTotalDimCount *= inputShape[i];
            } else if (outputShape[i] == -1) {
                negativeDimIdx = static_cast<int>(i);
            } else {
                outputTotalDimCount *= outputShape[i];
            }
        }

        if (negativeDimIdx != -1) {
            // Infer the -1 dimension so output element count == input
            // element count.
            if (outputTotalDimCount == 0) {
                // The inferred size can only be 0, and only consistently so
                // when the input is empty as well (avoids division by zero).
                if (inputTotalDimCount != 0) {
                    return false;
                }
                outputShape[negativeDimIdx] = 0;
            } else {
                if (inputTotalDimCount % outputTotalDimCount != 0) {
                    return false;
                }
                outputShape[negativeDimIdx] = inputTotalDimCount / outputTotalDimCount;
            }
        }
    }

    if (!setShapeToHostTensorData(outShapeTensor, outputShape)) {
        return false;
    }

    return true;
}

} // namespace

bool OutShapeOfReshape::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) {
return evaluateOutShapeOfReshape(inputs[0], inputs[1], m_specialZero, outputs[0]);
}


} // namespace op
} // namespace vpu
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "vpu/ngraph/operations/out_shape_of_reshape.hpp"

#include "vpu/private_plugin_config.hpp"

#include <functional_test_utils/layer_test_utils.hpp>
#include <functional_test_utils/blob_utils.hpp>
#include <precision_utils.h>
#include <ngraph/opsets/opset3.hpp>

#include <tuple>
#include <vector>
#include <string>
#include <memory>

using InputShape = InferenceEngine::SizeVector;
using ShapeDescriptor = std::vector<int>;

using OutShapeOfReshapeParam = std::tuple<
InputShape, // Input shape
ShapeDescriptor, // out shape descriptor
bool>; // Special zero

using OutShapeOfReshapeTestParam = std::tuple<
OutShapeOfReshapeParam, // Shape params
LayerTestsUtils::TargetDevice>; // Device name


namespace LayerTestsDefinitions {

class OutShapeOfReshapeLayerTest : public testing::WithParamInterface<OutShapeOfReshapeTestParam>,
                                   public LayerTestsUtils::LayerTestsCommon {
public:
    // Produces a readable, unique test-case name from the parameters.
    static std::string getTestCaseName(const testing::TestParamInfo<OutShapeOfReshapeTestParam>& obj) {
        OutShapeOfReshapeParam shapeParam;
        std::string device;
        std::tie(shapeParam, device) = obj.param;

        std::ostringstream name;
        name << "IS=" << CommonTestUtils::vec2str(std::get<0>(shapeParam)) << "_"
             << "OSD=" << CommonTestUtils::vec2str(std::get<1>(shapeParam)) << "_"
             << "SZ=" << std::to_string(std::get<2>(shapeParam)) << "_"
             << "targetDevice=" << device;
        return name.str();
    }

protected:
    // Builds a single-node ngraph function:
    // Parameter (1-D shape data) -> OutShapeOfReshape(descriptor const) -> Result.
    void SetUp() override {
        SetRefMode(LayerTestsUtils::RefMode::INTERPRETER);
        configuration[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
        configuration[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);

        OutShapeOfReshapeParam shapeParam;
        std::tie(shapeParam, targetDevice) = this->GetParam();
        inPrc = InferenceEngine::Precision::I32;
        outPrc = InferenceEngine::Precision::I32;

        const auto& inShape = std::get<0>(shapeParam);
        const auto& descriptor = std::get<1>(shapeParam);
        const bool specialZero = std::get<2>(shapeParam);

        const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrc);

        // The first input carries the input shape *as data*, so the
        // parameter is a 1-D tensor whose length is the input shape's rank.
        const auto shapeDataParam = std::make_shared<ngraph::opset3::Parameter>(
                ngPrc, ngraph::Shape{inShape.size()});
        const auto descriptorConst = std::make_shared<ngraph::opset3::Constant>(
                ngPrc, ngraph::Shape{descriptor.size()}, descriptor);

        const auto node = std::make_shared<ngraph::vpu::op::OutShapeOfReshape>(
                shapeDataParam, descriptorConst, specialZero);
        function = std::make_shared<ngraph::Function>(
                ngraph::ResultVector{std::make_shared<ngraph::opset3::Result>(node)},
                ngraph::ParameterVector{shapeDataParam});
    }

    // Fills the input blob with the dimensions of the test input shape.
    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override {
        OutShapeOfReshapeParam shapeParam;
        std::string device;
        std::tie(shapeParam, device) = this->GetParam();
        const auto& inShape = std::get<0>(shapeParam);

        InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc());
        blob->allocate();

        auto dataPtr = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob)->rwmap().as<int32_t*>();
        for (size_t idx = 0; idx < blob->size(); ++idx) {
            dataPtr[idx] = inShape[idx];
        }

        return blob;
    }
};

// Runs the layer on the target device and compares the result against the
// INTERPRETER reference implementation (selected via SetRefMode in SetUp).
TEST_P(OutShapeOfReshapeLayerTest, accuracy) {
    Run();
}

// Test cases: (input shape, reshape descriptor, specialZero).
// The descriptors exercise special-zero copies (0 with specialZero=true),
// plain reshapes, inferred dimensions (-1), and empty (zero-sized) inputs.
std::vector<OutShapeOfReshapeParam> shapeParams = {
    std::make_tuple(InputShape{ 2, 3, 128, 256 }, ShapeDescriptor{ 0, 0, 64, 512 }, true),
    std::make_tuple(InputShape{ 2, 3, 128, 256 }, ShapeDescriptor{ 3, 2, 64, 512 }, false),
    std::make_tuple(InputShape{ 2, 3, 0, 256 }, ShapeDescriptor{ 3, 8, 0, 512 }, false),
    std::make_tuple(InputShape{ 2, 3, 128, 256 }, ShapeDescriptor{ 2, 3, -1, 64 }, false),
    std::make_tuple(InputShape{ 2, 3, 128, 256 }, ShapeDescriptor{ 2, -1, 0 }, true),
    std::make_tuple(InputShape{ 2, 5, 5, 24 }, ShapeDescriptor{ 0, -1, 4 }, true),
    std::make_tuple(InputShape{ 2, 5, 5, 0 }, ShapeDescriptor{ 0, 4 }, false),
};

// Instantiates the accuracy suite: every shape case, MYRIAD device only.
INSTANTIATE_TEST_CASE_P(accuracy, OutShapeOfReshapeLayerTest,
                        ::testing::Combine(
                            ::testing::ValuesIn(shapeParams),
                            ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
                        OutShapeOfReshapeLayerTest::getTestCaseName);

} // namespace LayerTestsDefinitions