Merged (119 commits; changes shown from 113 commits)
9bb19d9
add conv test
tiger100256-hu Jul 4, 2023
b8e8758
save code
tiger100256-hu Jul 12, 2023
4dc2838
save code
tiger100256-hu Jul 17, 2023
36a3ecc
fix fc and matmul test
tiger100256-hu Jul 20, 2023
5788f25
save code
tiger100256-hu Jul 26, 2023
d27ee6a
fix rebase
tiger100256-hu Aug 18, 2023
e225975
save code
tiger100256-hu Aug 29, 2023
2730276
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Aug 29, 2023
dfb062c
use ov::test::utils
tiger100256-hu Aug 29, 2023
9fc36ea
add fp16 config
tiger100256-hu Aug 29, 2023
2459054
add pooling test
tiger100256-hu Aug 29, 2023
b35147b
add softmax test
tiger100256-hu Aug 29, 2023
a618739
use fp16 instead of f16 in function names
tiger100256-hu Aug 31, 2023
25c10bb
fix compile issue
tiger100256-hu Aug 31, 2023
b094142
fix test issue on avx512 machine
tiger100256-hu Sep 1, 2023
531aee6
save code
tiger100256-hu Sep 1, 2023
f57dc09
fix issue
tiger100256-hu Sep 4, 2023
15d1157
fix test code issue
tiger100256-hu Sep 6, 2023
7f46088
save code
tiger100256-hu Sep 7, 2023
9bf95da
fix test issue
tiger100256-hu Sep 11, 2023
66ca130
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Oct 8, 2023
99c55c1
fix merge master issue
tiger100256-hu Oct 8, 2023
116e26f
remove debug log
tiger100256-hu Oct 8, 2023
51e02d0
enable disabled pooling test case
tiger100256-hu Oct 8, 2023
94d6029
fix mistake made when merging master
tiger100256-hu Oct 9, 2023
4455e75
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Oct 9, 2023
52c616e
fix test case issue on amx fp16
tiger100256-hu Oct 10, 2023
1d5ecba
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Oct 11, 2023
b7cb9ca
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Oct 11, 2023
9a4c4e2
fix RISC-v build issue
tiger100256-hu Oct 11, 2023
f15cc36
also disable nightly test
tiger100256-hu Oct 12, 2023
caffe86
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Oct 27, 2023
5dce730
fix conflict with master issue
tiger100256-hu Oct 30, 2023
74d76b2
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Oct 31, 2023
14baefb
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Nov 10, 2023
aa73fe7
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Nov 16, 2023
4ebc41c
fix the skip names
tiger100256-hu Nov 16, 2023
6c82efa
fix compile issue
tiger100256-hu Nov 16, 2023
400afbc
fix reviews
tiger100256-hu Nov 22, 2023
e2f75dc
add skip for tests that can't pass
tiger100256-hu Nov 23, 2023
3cfded2
fix selected type issue
tiger100256-hu Nov 24, 2023
bd2956d
fix the compile issue on RISCV64
tiger100256-hu Nov 24, 2023
7b317b5
remove comment code
tiger100256-hu Nov 27, 2023
c615bce
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Nov 27, 2023
ea67ae4
fix compilation issue
tiger100256-hu Nov 27, 2023
9020e88
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Nov 29, 2023
d51bdc8
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Nov 30, 2023
019d620
use ov::element::f16 instead "f16"
tiger100256-hu Nov 30, 2023
f52655f
fix test case failed issue
tiger100256-hu Dec 1, 2023
519881a
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Dec 4, 2023
75b61ff
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Dec 7, 2023
c1764f2
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Dec 11, 2023
a10c71e
use ov::element::f16
tiger100256-hu Dec 13, 2023
9e5a698
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Dec 20, 2023
8559343
docs/_static/images/ci/check_results.png: convert to Git LFS
tiger100256-hu Dec 28, 2023
667c4fa
docs/_static/images/ci/completed_job_list.png: convert to Git LFS
tiger100256-hu Dec 28, 2023
42cf2e0
docs/_static/images/ci/pipeline_artefacts.png: convert to Git LFS
tiger100256-hu Dec 28, 2023
3f8a981
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Dec 28, 2023
a85142a
fix softmax issue
tiger100256-hu Jan 5, 2024
7bb9383
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Jan 9, 2024
a520225
disable SubgraphWithBlockedFormat.smoke_CompareWithRefs_FP16
tiger100256-hu Jan 11, 2024
82b57cc
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Jan 16, 2024
5f25904
fix threshold
tiger100256-hu Jan 22, 2024
06d0ef5
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Jan 22, 2024
fcf6d7c
move function to intel cpu test binary
tiger100256-hu Jan 26, 2024
3326b53
remove i4 nf4 test case
tiger100256-hu Jan 26, 2024
89c2a5b
only add fp16 test where bf16 test exists in subgraph dir
tiger100256-hu Jan 30, 2024
3d48da6
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Feb 1, 2024
4b22d16
fix compile issue
tiger100256-hu Feb 1, 2024
6121b1b
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Feb 1, 2024
7e4edee
fix conflict with master
tiger100256-hu Feb 1, 2024
e247d18
fix a failed test issue and compile issue on Arm
tiger100256-hu Feb 1, 2024
be56fb6
remove extra line in ov_subgraph.cpp
tiger100256-hu Feb 2, 2024
7a57406
check whether an f16 instance is added wherever a bf16 instance exists in SL
tiger100256-hu Feb 2, 2024
852a7fd
remove redundant code and fix mistake
tiger100256-hu Feb 4, 2024
281d66d
revert f16 test case in MatmulWeightsDecompression
tiger100256-hu Feb 4, 2024
b12699d
use cvs-131632 to track failed test case on gnr
tiger100256-hu Feb 4, 2024
8e6b9d1
fix build issue on RISCV
tiger100256-hu Feb 5, 2024
f5bea42
remove debug comment in CMakeList.txt
tiger100256-hu Feb 6, 2024
144e2d6
fix comments
tiger100256-hu Mar 6, 2024
80ba6a1
fix some review issue
tiger100256-hu Mar 13, 2024
84c69dc
fix code in matmul test
tiger100256-hu Mar 13, 2024
274cc67
fix testcase failed issue
tiger100256-hu Mar 14, 2024
b22135a
remove RISCV64/filter_cpu_info.cpp
tiger100256-hu Mar 14, 2024
92eaa7e
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Mar 14, 2024
75bdd4a
limit the test case of RISCV64
tiger100256-hu Mar 15, 2024
b0ec214
fix comment in subgraph test
tiger100256-hu Mar 15, 2024
216db9c
refactor the CMakeList.txt
tiger100256-hu Mar 19, 2024
f2ee522
fix testcase failed issue
tiger100256-hu Mar 28, 2024
5f18742
fix comment in subgraph test
tiger100256-hu Mar 28, 2024
5477aeb
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Apr 8, 2024
53332e1
try to fix riscv build issue
tiger100256-hu Apr 8, 2024
d1ae5e0
skip some test case in smoke_Conv_Sum_Broadcast_FP16
tiger100256-hu Apr 8, 2024
43340cc
fix logic error in filter function
tiger100256-hu Apr 8, 2024
32e190d
add skip on failed test
tiger100256-hu Apr 9, 2024
97990a3
fix comment
tiger100256-hu Apr 23, 2024
c3bd325
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Apr 25, 2024
99dfef5
remove the Optimized FP16 configure test in softmax
tiger100256-hu Apr 25, 2024
a45d625
make filterCPUInfoforDeviceFp16 common so no need to modify relate co…
tiger100256-hu May 6, 2024
7a1dc9d
will filter the fp16 test case in skip_tests_config.cpp
tiger100256-hu May 8, 2024
dda70b4
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu May 8, 2024
416b265
fix logic error in filterCpuInfoForDeviceWithFP16
tiger100256-hu May 8, 2024
5856353
fix testcase failed in matmul_decompress
tiger100256-hu May 10, 2024
c13b554
fix comment
tiger100256-hu May 30, 2024
aa21695
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu May 31, 2024
44172ff
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Jun 3, 2024
21650df
update logic of get_default_imp_precision_type
tiger100256-hu Jun 3, 2024
ec33e99
remove some skip
tiger100256-hu Jun 3, 2024
8580c05
remove dedicated class ConvSumInPlaceTest_FP16
tiger100256-hu Jun 6, 2024
3e1b210
fix comment
tiger100256-hu Jun 12, 2024
71c65fb
add CVS-143852 for disabled test case
tiger100256-hu Jun 12, 2024
aa406be
fix reorder num issue
tiger100256-hu Jun 12, 2024
561f7bd
remove skip test case on GNR, the test cases already pass
tiger100256-hu Jun 13, 2024
e8fcc91
fix comment
tiger100256-hu Jun 20, 2024
3f79081
use CamelCase instead of s_n_a_k_e in function
tiger100256-hu Jun 20, 2024
204e301
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Jun 20, 2024
5eb10de
fix format issue
tiger100256-hu Jun 21, 2024
8df8874
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Jun 28, 2024
e0d246a
Merge branch 'master' into huyuan/fp16_test
tiger100256-hu Jun 30, 2024
7 changes: 7 additions & 0 deletions src/inference/dev_api/openvino/runtime/system_conf.hpp
@@ -146,6 +146,13 @@ OPENVINO_RUNTIME_API bool with_cpu_x86_avx512_core_amx_int8();
*/
OPENVINO_RUNTIME_API bool with_cpu_x86_avx512_core_amx_bf16();

/**
* @brief Checks whether CPU supports AMX fp16 capability
* @ingroup ov_dev_api_system_conf
* @return `True` if tAMX_FP16 instructions are available, `false` otherwise
*/
OPENVINO_RUNTIME_API bool with_cpu_x86_avx512_core_amx_fp16();

/**
* @brief Checks whether CPU supports AMX capability
* @ingroup ov_dev_api_system_conf
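The new with_cpu_x86_avx512_core_amx_fp16() entry point mirrors the existing int8/bf16 checks. A minimal usage sketch, assuming the dev-API include path and ov namespace implied by this header; this is not code from the PR:

```cpp
#include <iostream>
#include "openvino/runtime/system_conf.hpp"  // dev API header edited above

int main() {
    // Gate FP16-specific behaviour on the new capability check.
    if (ov::with_cpu_x86_avx512_core_amx_fp16()) {
        std::cout << "tAMX_FP16 available: native FP16 kernels can be exercised\n";
    } else {
        std::cout << "no tAMX_FP16: FP16 cases run emulated or are skipped\n";
    }
}
```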
7 changes: 7 additions & 0 deletions src/inference/src/system_conf.cpp
@@ -92,6 +92,10 @@ bool with_cpu_x86_avx512_core_amx_bf16() {
return get_cpu_info().has(Xbyak::util::Cpu::tAMX_BF16);
}

bool with_cpu_x86_avx512_core_amx_fp16() {
return get_cpu_info().has(Xbyak::util::Cpu::tAMX_FP16);
}

bool with_cpu_x86_avx512_core_amx() {
return with_cpu_x86_avx512_core_amx_int8() || with_cpu_x86_avx512_core_amx_bf16();
}
@@ -131,6 +135,9 @@ bool with_cpu_x86_avx512_core_amx_int8() {
bool with_cpu_x86_avx512_core_amx_bf16() {
return false;
}
bool with_cpu_x86_avx512_core_amx_fp16() {
return false;
}
bool with_cpu_x86_avx512_core_amx() {
return false;
}
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/graph.cpp
@@ -1831,7 +1831,7 @@ void Graph::EnforceInferencePrecision() {

for (size_t i = 0; i < node->getOriginalInputsNumber(); i++) {
auto keepOriginalInputPrecisionAtPort = [](const NodePtr& node, const size_t inPort) {
// keep non-float precisions
// keep non-float32 precisions
if (node->getOriginalInputPrecisionAtPort(inPort) != ov::element::f32)
return true;

@@ -1875,7 +1875,7 @@ void Graph::EnforceInferencePrecision() {
}

for (size_t i = 0; i < node->getOriginalOutputsNumber(); i++) {
// keep non-float precisions
// keep non-float32 precisions
if (node->getOriginalOutputPrecisionAtPort(i) != ov::element::f32)
continue;

@@ -160,11 +160,16 @@ void ConvolutionLayerCPUTest::SetUp() {
init_input_shapes({inputShape});

auto it = configuration.find(ov::hint::inference_precision.name());
if (it != configuration.end() && it->second.as<ov::element::Type>() == ov::element::bf16) {
ov::element::Type inference_precision = (it != configuration.end()) ?
it->second.as<ov::element::Type>() : ov::element::undefined;
if (inference_precision == ov::element::bf16) {
selectedType += "_BF16";
rel_threshold = 1e-2f;
if (selectedType == "jit_gemm_BF16")
rel_threshold = 0.05f;
} else if (inference_precision == ov::element::f16) {
selectedType += "_FP16";
rel_threshold = 0.00125f;
} else {
selectedType = makeSelectedTypeStr(selectedType, netType);
}
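The convolution test now branches on the ov::hint::inference_precision entry of its configuration: bf16 appends "_BF16" and relaxes the threshold to 1e-2 (0.05 for jit_gemm), while f16 appends "_FP16" with a 0.00125 threshold. A hedged sketch of how an FP16 instance is presumably configured; the map name is illustrative and not taken from the PR:

```cpp
#include "openvino/core/any.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/properties.hpp"

// Passed as the additional config that SetUp() merges into `configuration`;
// it drives the "_FP16" selectedType suffix and the relaxed threshold above.
const ov::AnyMap cpu_fp16_plugin_config = {
    {ov::hint::inference_precision.name(), ov::element::f16}
};
```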
@@ -196,9 +196,15 @@ void DeconvolutionLayerCPUTest::SetUp() {

std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = basicParamsSet;

if (additionalConfig[ov::hint::inference_precision.name()] == ov::element::bf16) {
auto it = configuration.find(ov::hint::inference_precision.name());
ov::element::Type inference_precision = (it != configuration.end()) ?
it->second.as<ov::element::Type>() : ov::element::undefined;
if (inference_precision == ov::element::bf16) {
inType = outType = prec = ElementType::bf16;
rel_threshold = 1e-2f;
} else if (inference_precision == ov::element::f16) {
inType = outType = prec = ElementType::f16;
rel_threshold = 0.00125f;
} else {
inType = outType = prec;
}
@@ -118,17 +118,22 @@ void MatMulLayerCPUTest::SetUp() {
configuration.insert(additionalConfig.begin(), additionalConfig.end());

auto it = additionalConfig.find(ov::hint::inference_precision.name());
if (it != additionalConfig.end() && it->second.as<ov::element::Type>() == ov::element::bf16) {
ov::element::Type inference_precision = (it != additionalConfig.end()) ?
it->second.as<ov::element::Type>() : ov::element::undefined;
if (inference_precision == ov::element::bf16) {
inType = outType = netType = ElementType::bf16;
rel_threshold = abs_threshold = 1e-2f;
} else if (inference_precision == ov::element::f16) {
inType = outType = netType = ElementType::f16;
rel_threshold = abs_threshold = 1e-4f;
} else {
inType = outType = netType;
rel_threshold = 1e-4f;
abs_threshold = 5e-4f;
}

cpuNodeType = nodeType == MatMulNodeType::MatMul ? "MatMul" : "FullyConnected";
selectedType = makeSelectedTypeStr(selectedType, outType);
selectedType = makeSelectedTypeStr(selectedType, get_default_imp_precision_type(outType, configuration));

ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(netType, inShapeA)};

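selectedType is now built from get_default_imp_precision_type(outType, configuration), a helper added elsewhere in this PR whose body is not shown in this hunk. A plausible sketch of its logic, inferred only from how it is used here; details such as hardware-support checks may differ in the real helper:

```cpp
#include "openvino/core/any.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/properties.hpp"

// Sketch only: pick the precision the primitive is expected to run in, given
// the original element type and the test's plugin configuration.
ov::element::Type get_default_imp_precision_type_sketch(ov::element::Type prc, const ov::AnyMap& config) {
    const auto it = config.find(ov::hint::inference_precision.name());
    if (it == config.end() || prc != ov::element::f32)
        return prc;  // explicit low precisions and unconfigured runs keep their type
    const auto hint = it->second.as<ov::element::Type>();
    if (hint == ov::element::bf16 || hint == ov::element::f16)
        return hint;  // enforced inference precision shows up in the primitive name
    return prc;
}
```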
@@ -19,7 +19,8 @@ std::string PoolingLayerCPUTest::getTestCaseName(const testing::TestParamInfo<po
bool isInt8;
CPUSpecificParams cpuParams;
fusingSpecificParams fusingParams;
std::tie(basicParamsSet, inputShapes, inPrc, isInt8, cpuParams, fusingParams) = obj.param;
ov::AnyMap additionalConfig;
std::tie(basicParamsSet, inputShapes, inPrc, isInt8, cpuParams, fusingParams, additionalConfig) = obj.param;

utils::PoolingTypes poolType;
std::vector<size_t> kernel, stride;
@@ -53,6 +54,12 @@ std::string PoolingLayerCPUTest::getTestCaseName(const testing::TestParamInfo<po
results << "Rounding=" << roundingType << "_";
results << "AutoPad=" << padType << "_";
results << "INT8=" << isInt8 << "_";
if (!additionalConfig.empty()) {
results << "_PluginConf";
for (auto& item : additionalConfig) {
results << "_" << item.first << "=" << item.second.as<std::string>();
}
}

results << CPUTestsBase::getTestCaseName(cpuParams);
results << CpuTestWithFusing::getTestCaseName(fusingParams);
@@ -68,7 +75,9 @@ void PoolingLayerCPUTest::SetUp() {
bool isInt8;
CPUSpecificParams cpuParams;
fusingSpecificParams fusingParams;
std::tie(basicParamsSet, inputShapes, inPrc, isInt8, cpuParams, fusingParams) = this->GetParam();
ov::AnyMap additionalConfig;
std::tie(basicParamsSet, inputShapes, inPrc, isInt8, cpuParams, fusingParams, additionalConfig) = this->GetParam();
configuration.insert(additionalConfig.begin(), additionalConfig.end());

utils::PoolingTypes poolType;
std::vector<size_t> kernel, stride;
@@ -87,7 +96,7 @@
if (isInt8)
selectedType = selectedType + "_I8";
else
selectedType = makeSelectedTypeStr(selectedType, inPrc);
selectedType = makeSelectedTypeStr(selectedType, get_default_imp_precision_type(inPrc, configuration));

init_input_shapes({inputShapes});

@@ -119,7 +128,8 @@ std::string MaxPoolingV8LayerCPUTest::getTestCaseName(
InputShape inputShapes;
ElementType inPrc;
CPUSpecificParams cpuParams;
std::tie(basicParamsSet, inputShapes, inPrc, cpuParams) = obj.param;
ov::AnyMap additionalConfig;
std::tie(basicParamsSet, inputShapes, inPrc, cpuParams, additionalConfig) = obj.param;

std::vector<size_t> kernel, stride, dilation;
std::vector<size_t> padBegin, padEnd;
@@ -146,6 +156,12 @@
results << "PE" << ov::test::utils::vec2str(padEnd) << "_";
results << "Rounding=" << roundingType << "_";
results << "AutoPad=" << padType << "_";
if (!additionalConfig.empty()) {
results << "_PluginConf";
for (auto& item : additionalConfig) {
results << "_" << item.first << "=" << item.second.as<std::string>();
}
}

results << CPUTestsBase::getTestCaseName(cpuParams);
return results.str();
@@ -158,7 +174,9 @@
InputShape inputShapes;
ElementType inPrc;
CPUSpecificParams cpuParams;
std::tie(basicParamsSet, inputShapes, inPrc, cpuParams) = this->GetParam();
ov::AnyMap additionalConfig;
std::tie(basicParamsSet, inputShapes, inPrc, cpuParams, additionalConfig) = this->GetParam();
configuration.insert(additionalConfig.begin(), additionalConfig.end());

std::vector<size_t> kernel, stride, dilation;
std::vector<size_t> padBegin, padEnd;
@@ -172,7 +190,7 @@
if (selectedType.empty()) {
selectedType = getPrimitiveType();
}
selectedType = makeSelectedTypeStr(selectedType, inPrc);
selectedType = makeSelectedTypeStr(selectedType, get_default_imp_precision_type(inPrc, configuration));

init_input_shapes({inputShapes});

@@ -19,12 +19,15 @@ using poolLayerCpuTestParamsSet = std::tuple<poolSpecificParams,
ElementType, //inPrc
bool, // isInt8
CPUSpecificParams,
fusingSpecificParams>;
fusingSpecificParams,
ov::AnyMap>;


using maxPoolV8LayerCpuTestParamsSet = std::tuple<maxPoolV8SpecificParams,
InputShape,
ElementType,
CPUSpecificParams>;
CPUSpecificParams,
ov::AnyMap>;

class PoolingLayerCPUTest : public testing::WithParamInterface<poolLayerCpuTestParamsSet>,
virtual public SubgraphBaseTest, public CpuTestWithFusing {
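Both pooling parameter tuples gain a trailing ov::AnyMap, so every instantiation of these suites has to supply one more generator. A hedged illustration of the typical value set (the vector name is illustrative, not from the PR); it would be appended to the existing ::testing::Combine(...) calls via ::testing::ValuesIn:

```cpp
#include <vector>
#include "openvino/core/any.hpp"
#include "openvino/runtime/properties.hpp"

// Appended as the last Combine() generator so each pooling case runs once with
// the default plugin config and once with the FP16 inference-precision hint.
const std::vector<ov::AnyMap> additional_configs = {
    {},                                                           // default configuration
    {{ov::hint::inference_precision.name(), ov::element::f16}},   // FP16 configuration
};
```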
@@ -16,7 +16,8 @@ std::string SoftMaxLayerCPUTest::getTestCaseName(const testing::TestParamInfo<so
ElementType inType;
SoftMaxConfig config;
std::string targetDevice;
std::tie(inType, config, targetDevice, cpuParams) = obj.param;
ov::AnyMap additionalConfig;
std::tie(inType, config, targetDevice, cpuParams, additionalConfig) = obj.param;

std::ostringstream result;
result << "netPRC=" << inType << "_";
@@ -30,6 +31,12 @@
result << "axis=" << config.axis << "_";
result << "trgDev=" << targetDevice;
result << CPUTestsBase::getTestCaseName(cpuParams);
if (!additionalConfig.empty()) {
result << "_PluginConf";
for (auto& item : additionalConfig) {
result << "_" << item.first << "=" << item.second.as<std::string>();
}
}

return result.str();
}
@@ -38,7 +45,9 @@ void SoftMaxLayerCPUTest::SetUp() {
ElementType inType;
SoftMaxConfig config;
CPUSpecificParams cpuParams;
std::tie(inType, config, targetDevice, cpuParams) = this->GetParam();
ov::AnyMap additionalConfig;
std::tie(inType, config, targetDevice, cpuParams, additionalConfig) = this->GetParam();
configuration.insert(additionalConfig.begin(), additionalConfig.end());

std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
if (selectedType.empty()) {
Expand All @@ -47,8 +56,10 @@ void SoftMaxLayerCPUTest::SetUp() {

if (inType == ElementType::bf16) {
rel_threshold = 2e-2f;
} else if (inType == ElementType::f16) {
rel_threshold = 0.0025f;
}
selectedType = makeSelectedTypeStr(selectedType, inType);
selectedType = makeSelectedTypeStr(selectedType, get_default_imp_precision_type(inType, configuration));
init_input_shapes({config.inputShape});
ov::ParameterVector params;
for (auto&& shape : inputDynamicShapes)
@@ -18,7 +18,8 @@ struct SoftMaxConfig {
typedef std::tuple<ElementType, // netPrecision
SoftMaxConfig, // softmaxTestConfig
std::string, // targetDevice
CPUSpecificParams>
CPUSpecificParams,
ov::AnyMap> //device_config
softmaxCPUTestParams;

class SoftMaxLayerCPUTest : public testing::WithParamInterface<softmaxCPUTestParams>,
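The commit log mentions filtering FP16 cases in skip_tests_config.cpp and a filterCPUInfoForDeviceWithFP16 helper rather than hard-coding per-suite skips. A hypothetical sketch of such a runtime filter, assuming the new AMX-FP16 check is the gate; the real condition likely also covers other ISA paths, and the regexes below are only illustrative of the suffixes added by the hunks above:

```cpp
#include <string>
#include <vector>
#include "openvino/runtime/system_conf.hpp"

// Sketch only: patterns returned here would be appended to the disabled-test
// list when the host cannot run FP16 primitives natively.
std::vector<std::string> fp16_skip_patterns_sketch() {
    std::vector<std::string> patterns;
    if (!ov::with_cpu_x86_avx512_core_amx_fp16()) {
        patterns.push_back(R"(.*_FP16.*)");                         // selectedType suffix added above
        patterns.push_back(R"(.*INFERENCE_PRECISION_HINT=f16.*)");  // _PluginConf suffix from getTestCaseName
    }
    return patterns;
}
```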