diff --git a/src/core/include/openvino/core/model.hpp b/src/core/include/openvino/core/model.hpp index 1b300aa5304375..3a17b354364387 100644 --- a/src/core/include/openvino/core/model.hpp +++ b/src/core/include/openvino/core/model.hpp @@ -418,7 +418,7 @@ class OPENVINO_API Model : public std::enable_shared_from_this { */ template void set_rt_info(const T& argument, Args... args) { - ov::Any& arg = get_rt_arg(m_rt_info, args...); + ov::Any& arg = get_rt_arg(m_rt_info, std::move(args)...); arg = argument; } diff --git a/src/core/include/openvino/pass/pattern/op/label.hpp b/src/core/include/openvino/pass/pattern/op/label.hpp index 82fa50a6a5067c..bbfa626abc0a76 100644 --- a/src/core/include/openvino/pass/pattern/op/label.hpp +++ b/src/core/include/openvino/pass/pattern/op/label.hpp @@ -54,13 +54,13 @@ class OPENVINO_API Label : public Pattern { OutputVector()) {} Label(const element::Type& type, const PartialShape& s, ValuePredicate pred) - : Label(type, s, pred, OutputVector{}) {} + : Label(type, s, std::move(pred), OutputVector{}) {} Label(const element::Type& type, const PartialShape& s, NodePredicate pred) - : Label(type, s, as_value_predicate(pred), OutputVector{}) {} + : Label(type, s, as_value_predicate(std::move(pred)), OutputVector{}) {} - Label(const element::Type& type, const PartialShape& s, const NodePredicate pred, const NodeVector& wrapped_values) - : Label(type, s, as_value_predicate(pred), as_output_vector(wrapped_values)) {} + Label(const element::Type& type, const PartialShape& s, NodePredicate pred, const NodeVector& wrapped_values) + : Label(type, s, as_value_predicate(std::move(pred)), as_output_vector(wrapped_values)) {} /// \brief creates a Label node containing a sub-pattern described by the type and /// shape of \sa node. 
diff --git a/src/core/include/openvino/pass/pattern/op/pattern.hpp b/src/core/include/openvino/pass/pattern/op/pattern.hpp index 9af075f07e140b..ff78b5e93d4dd1 100644 --- a/src/core/include/openvino/pass/pattern/op/pattern.hpp +++ b/src/core/include/openvino/pass/pattern/op/pattern.hpp @@ -85,13 +85,7 @@ class OPENVINO_API Pattern : public Node { public: /// \brief \p a base class for \sa Skip and \sa Label /// - Pattern(const OutputVector& patterns, ValuePredicate pred) : Node(patterns), m_predicate(pred) { - if (!m_predicate) { - m_predicate = [](const Output&) { - return true; - }; - } - } + Pattern(const OutputVector& patterns, ValuePredicate pred); Pattern(const OutputVector& patterns) : Pattern(patterns, nullptr) {} diff --git a/src/core/reference/include/openvino/reference/ctc_loss.hpp b/src/core/reference/include/openvino/reference/ctc_loss.hpp index 3a253ee1745964..26591e9de4f6bb 100644 --- a/src/core/reference/include/openvino/reference/ctc_loss.hpp +++ b/src/core/reference/include/openvino/reference/ctc_loss.hpp @@ -121,12 +121,12 @@ void CTCLoss(const T* logits, for (size_t pos = start; pos < end; pos++) { newLogProb = prevLogProb; for (size_t bl = start; bl < pos; bl++) { - newLogProb += logProbabilities[bl].find(blankIndex)->second; + newLogProb += logProbabilities[bl][blankIndex]; } - newLogProb += logProbabilities[pos].find(targetD[targetIdx])->second; + newLogProb += logProbabilities[pos][targetD[targetIdx]]; if (end == static_cast(actualLogitLen)) { for (size_t ble = pos + 1; ble < static_cast(actualLogitLen); ble++) { - newLogProb += logProbabilities[ble].find(blankIndex)->second; + newLogProb += logProbabilities[ble][blankIndex]; } } findPaths(nextIdx, pos + 1, end + 1, newLogProb); @@ -136,21 +136,21 @@ void CTCLoss(const T* logits, newLogProb = prevLogProb; size_t next_start = pos + 1; for (size_t bl = start; bl < pos; bl++) { - newLogProb += logProbabilities[bl].find(blankIndex)->second; + newLogProb += logProbabilities[bl][blankIndex]; } if 
(end == static_cast(actualLogitLen)) { for (int64_t ble = pos + 1; ble < actualLogitLen; ble++) { - newLogProb += logProbabilities[ble].find(blankIndex)->second; + newLogProb += logProbabilities[ble][blankIndex]; } } if (targetIdx < decodedTargetLen - 1 && targetD[targetIdx] == targetD[targetIdx + 1]) { - newLogProb += logProbabilities[next_start++].find(blankIndex)->second; + newLogProb += logProbabilities[next_start++][blankIndex]; } for (int64_t bl = pos; bl >= st64; bl--) { - newLogProb += logProbabilities[bl].find(targetD[targetIdx])->second; + newLogProb += logProbabilities[bl][targetD[targetIdx]]; findPaths(nextIdx, next_start, end + 1, newLogProb); if (bl > 0) - newLogProb -= logProbabilities[bl - 1].find(blankIndex)->second; + newLogProb -= logProbabilities[bl - 1][blankIndex]; } } } diff --git a/src/core/reference/include/openvino/reference/detection_output.hpp b/src/core/reference/include/openvino/reference/detection_output.hpp index f0160880e4ef01..fc5f8890465357 100644 --- a/src/core/reference/include/openvino/reference/detection_output.hpp +++ b/src/core/reference/include/openvino/reference/detection_output.hpp @@ -256,7 +256,7 @@ class referenceDetectionOutput { if (attrs.background_label_id > -1 && label == attrs.background_label_id) { continue; } - const std::vector& labelLocPreds = locPreds[i].find(label)->second; + const auto& labelLocPreds = locPreds[i].at(label); DecodeBBoxes(currPrBbox, currPrVar, labelLocPreds, decodeBboxesImage[label]); } } @@ -277,10 +277,10 @@ class referenceDetectionOutput { if (attrs.background_label_id > -1 && label == attrs.background_label_id) { continue; } - const std::vector& labelArmLocPreds = armLocPreds[i].find(label)->second; + const auto& labelArmLocPreds = armLocPreds[i].at(label); std::vector decodePriorBboxes; DecodeBBoxes(currPrBbox, currPrVar, labelArmLocPreds, decodePriorBboxes); - const std::vector& labelLocPreds = locPreds[i].find(label)->second; + const auto& labelLocPreds = locPreds[i].at(label); 
DecodeBBoxes(decodePriorBboxes, currPrVar, labelLocPreds, decodeBboxesImage[label]); } } @@ -491,14 +491,16 @@ class referenceDetectionOutput { if (c == attrs.background_label_id) { continue; } - if (confScores.find(c) == confScores.end()) + const auto conf_score = confScores.find(c); + if (conf_score == confScores.end()) continue; - const std::vector& scores = confScores.find(c)->second; + const std::vector& scores = conf_score->second; int label = attrs.share_location ? -1 : c; - if (decodeBboxesImage.find(label) == decodeBboxesImage.end()) + const auto decode_bboxes = decodeBboxesImage.find(label); + if (decode_bboxes == decodeBboxesImage.end()) continue; - const std::vector& bboxes = decodeBboxesImage.find(label)->second; + const std::vector& bboxes = decode_bboxes->second; caffeNMS(bboxes, scores, indices[c]); numDet += static_cast(indices[c].size()); } @@ -513,9 +515,10 @@ class referenceDetectionOutput { for (auto it = indices.begin(); it != indices.end(); ++it) { int label = it->first; const std::vector& labelIndices = it->second; - if (confScores.find(label) == confScores.end()) + const auto conf_score = confScores.find(label); + if (conf_score == confScores.end()) continue; - const std::vector& scores = confScores.find(label)->second; + const std::vector& scores = conf_score->second; for (size_t j = 0; j < labelIndices.size(); ++j) { int idx = labelIndices[j]; scoreIndexPairs.push_back(std::make_pair(scores[idx], std::make_pair(label, idx))); @@ -547,11 +550,12 @@ class referenceDetectionOutput { const LabelBBox& decodeBboxesImage = decodeBboxes[i]; for (auto it = allIndices[i].begin(); it != allIndices[i].end(); ++it) { int label = it->first; - const std::vector& scores = confScores.find(label)->second; + const std::vector& scores = confScores.at(label); int loc_label = attrs.share_location ? 
-1 : label; - if (decodeBboxesImage.find(loc_label) == decodeBboxesImage.end()) + const auto decode_bboxes = decodeBboxesImage.find(loc_label); + if (decode_bboxes == decodeBboxesImage.end()) continue; - const std::vector& bboxes = decodeBboxesImage.find(loc_label)->second; + const std::vector& bboxes = decode_bboxes->second; std::vector& indices = it->second; for (size_t j = 0; j < indices.size(); ++j) { int idx = indices[j]; diff --git a/src/core/src/axis_vector.cpp b/src/core/src/axis_vector.cpp index 753af77926ef4b..39694bf3d3f271 100644 --- a/src/core/src/axis_vector.cpp +++ b/src/core/src/axis_vector.cpp @@ -31,6 +31,6 @@ ov::AxisVector& ov::AxisVector::operator=(const AxisVector& v) { } ov::AxisVector& ov::AxisVector::operator=(AxisVector&& v) noexcept { - static_cast*>(this)->operator=(v); + static_cast*>(this)->operator=(std::move(v)); return *this; } diff --git a/src/core/src/constant_fold_utils.cpp b/src/core/src/constant_fold_utils.cpp index f8da2e3b53e5c6..f8d81bd287631e 100644 --- a/src/core/src/constant_fold_utils.cpp +++ b/src/core/src/constant_fold_utils.cpp @@ -179,16 +179,17 @@ std::shared_ptr ov::util::convert_to_supported_precision(Node* const n type_relaxed->set_origin_input_type(origin_input_types[i], i); } - auto cloned_type_relaxed = std::dynamic_pointer_cast(cloned_node); - // Override TypeRelaxed types - for (size_t i = 0; i < num_inputs; i++) { - if (ov::util::is_type_unsupported(cloned_type_relaxed->get_origin_input_type(i))) { - cloned_type_relaxed->set_origin_input_type(cloned_node->get_input_element_type(i), i); + if (auto cloned_type_relaxed = std::dynamic_pointer_cast(cloned_node)) { + // Override TypeRelaxed types + for (size_t i = 0; i < num_inputs; i++) { + if (ov::util::is_type_unsupported(cloned_type_relaxed->get_origin_input_type(i))) { + cloned_type_relaxed->set_origin_input_type(cloned_node->get_input_element_type(i), i); + } } - } - for (size_t i = 0; i < cloned_node->get_output_size(); i++) { - if 
(ov::util::is_type_unsupported(cloned_node->get_output_element_type(i))) { - cloned_type_relaxed->set_overridden_output_type(element::f32, i); + for (size_t i = 0; i < cloned_node->get_output_size(); i++) { + if (ov::util::is_type_unsupported(cloned_node->get_output_element_type(i))) { + cloned_type_relaxed->set_overridden_output_type(element::f32, i); + } } } cloned_node->validate_and_infer_types(); diff --git a/src/core/src/graph_util.cpp b/src/core/src/graph_util.cpp index dd46ad5151e9ad..5078f9cf3b073b 100644 --- a/src/core/src/graph_util.cpp +++ b/src/core/src/graph_util.cpp @@ -341,7 +341,7 @@ void save_model(const std::shared_ptr& m, const std::string& ou ov::pass::Manager manager; manager.register_pass(); manager.register_pass(output_model, ""); - manager.run_passes(cloned); + manager.run_passes(std::move(cloned)); } bool is_used(Node* node); diff --git a/src/core/src/opsets/opset.cpp b/src/core/src/opsets/opset.cpp index 590cf12e7c170b..1013a4f353fb80 100644 --- a/src/core/src/opsets/opset.cpp +++ b/src/core/src/opsets/opset.cpp @@ -41,7 +41,7 @@ ov::Node* ov::OpSet::create(const std::string& name) const { return nullptr; } REGISTER_OP(m_name, name); - return m_factory_registry.find(type_info_it->second)->second(); + return m_factory_registry.at(type_info_it->second)(); } ov::Node* ov::OpSet::create_insensitive(const std::string& name) const { @@ -51,7 +51,7 @@ ov::Node* ov::OpSet::create_insensitive(const std::string& name) const { return nullptr; } REGISTER_OP(m_name, name); - return m_factory_registry.find(type_info_it->second)->second(); + return m_factory_registry.at(type_info_it->second)(); } bool ov::OpSet::contains_type(const ov::NodeTypeInfo& type_info) const { diff --git a/src/core/src/pass/graph_rewrite.cpp b/src/core/src/pass/graph_rewrite.cpp index 7c1942a7f25664..4d7de2c7e60a93 100644 --- a/src/core/src/pass/graph_rewrite.cpp +++ b/src/core/src/pass/graph_rewrite.cpp @@ -150,7 +150,7 @@ bool 
ov::pass::GraphRewrite::apply_matcher_passes(std::shared_ptr f, // Apply MatcherPass. In case if it returns true no other MatcherPasses will apply // to this node - bool status = m_pass->apply(node); + bool status = m_pass->apply(std::move(node)); // In case if MatcherPass registered nodes they will be added to the beginning of execution // queue diff --git a/src/core/src/pass/low_latency.cpp b/src/core/src/pass/low_latency.cpp index 2dd75aa072e4fb..d3ccd79769e59c 100644 --- a/src/core/src/pass/low_latency.cpp +++ b/src/core/src/pass/low_latency.cpp @@ -74,7 +74,7 @@ void unroll_single_iteration(const std::shared_ptr& su } outer_f->add_sinks(sub_graph_op->get_function()->get_sinks()); ov::copy_runtime_info(sub_graph_op, sub_graph_op->get_function()->get_ops()); - ov::copy_runtime_info(sub_graph_op, new_ops); + ov::copy_runtime_info(sub_graph_op, std::move(new_ops)); } ov::Output create_init_subgraph(const ov::Output& in_node, ov::pass::NodeRegistry& to) { diff --git a/src/core/src/pass/make_stateful.cpp b/src/core/src/pass/make_stateful.cpp index 8a39e667e8d19a..4b6c26e59c8a8e 100644 --- a/src/core/src/pass/make_stateful.cpp +++ b/src/core/src/pass/make_stateful.cpp @@ -100,7 +100,7 @@ bool ov::pass::MakeStateful::run_on_model(const std::shared_ptr& f) { const auto& res = m_param_res_pairs[i].second; // Create Variable - std::string var_name = variable_names[i]; + const auto& var_name = variable_names[i]; auto variable = std::make_shared( ov::op::util::VariableInfo{param->get_partial_shape(), param->get_element_type(), var_name}); variables.push_back(variable); diff --git a/src/core/src/pass/sdpa_to_paged_attention.cpp b/src/core/src/pass/sdpa_to_paged_attention.cpp index 0d71c6a4b0d8dc..ace48f53f68a55 100644 --- a/src/core/src/pass/sdpa_to_paged_attention.cpp +++ b/src/core/src/pass/sdpa_to_paged_attention.cpp @@ -139,7 +139,7 @@ bool ov::pass::SDPAToPagedAttention::run_on_model(const std::shared_ptradd_parameters(kv_parameters); 
model->add_parameters(model_remaining_params); - model->add_parameters({max_context_len}); + model->add_parameters({std::move(max_context_len)}); model->validate_nodes_and_infer_types(); return true; } diff --git a/src/core/src/pass/serialize.cpp b/src/core/src/pass/serialize.cpp index c45d14f3bb59aa..b4ae8f09c98d82 100644 --- a/src/core/src/pass/serialize.cpp +++ b/src/core/src/pass/serialize.cpp @@ -943,7 +943,7 @@ void ngfunction_2_ir(pugi::xml_node& netXml, for (const auto& res : model.get_results()) { result.emplace_back(res); } - sorted_ops = result; + sorted_ops = std::move(result); } for (const auto& n : sorted_ops) { diff --git a/src/core/src/pattern/op/pattern.cpp b/src/core/src/pattern/op/pattern.cpp index e0e0810deb5e59..93187026f5677c 100644 --- a/src/core/src/pattern/op/pattern.cpp +++ b/src/core/src/pattern/op/pattern.cpp @@ -11,19 +11,34 @@ namespace ov { namespace pass { namespace pattern { namespace op { +namespace { +constexpr bool node_value_true_predicate(const Output&) { + return true; +} +} // namespace + +struct NodeValuePredicate { + bool operator()(const Output& value) const { + return pred(value.get_node_shared_ptr()); + } + + NodePredicate pred; +}; + +Pattern::Pattern(const OutputVector& patterns, ValuePredicate pred) + : Node(patterns), + m_predicate(pred ? 
std::move(pred) : node_value_true_predicate) {} + // The symbols are required to be in cpp file to workaround RTTI issue on Android LLVM ValuePredicate Pattern::get_predicate() const { return m_predicate; } + ValuePredicate as_value_predicate(NodePredicate pred) { - if (pred == nullptr) { - return [](const Output&) { - return true; - }; + if (pred) { + return NodeValuePredicate{std::move(pred)}; } else { - return [pred](const Output& value) { - return pred(value.get_node_shared_ptr()); - }; + return node_value_true_predicate; } } } // namespace op diff --git a/src/core/src/preprocess/preprocess_impls.cpp b/src/core/src/preprocess/preprocess_impls.cpp index a3a3c36291fc22..13a4c6f1353312 100644 --- a/src/core/src/preprocess/preprocess_impls.cpp +++ b/src/core/src/preprocess/preprocess_impls.cpp @@ -69,7 +69,7 @@ InputInfo::InputInfoImpl::InputInfoData InputInfo::InputInfoImpl::create_new_par [&](int64_t v) -> const Dimension& { return new_param_shape[v]; }); - new_param_shape = PartialShape(dims); + new_param_shape = PartialShape(std::move(dims)); } } else { Layout new_layout; @@ -77,7 +77,7 @@ InputInfo::InputInfoImpl::InputInfoData InputInfo::InputInfoImpl::create_new_par get_preprocess()->calculate_param_shape(new_param_shape, res.m_model_layout); if (res.m_tensor_layout.empty()) { // Reusing param's layout according to converted calculated layout - res.m_tensor_layout = new_layout; + res.m_tensor_layout = std::move(new_layout); } } diff --git a/src/core/src/preprocess/preprocess_steps_impl.cpp b/src/core/src/preprocess/preprocess_steps_impl.cpp index 522e545b714509..e494b1112d9fd9 100644 --- a/src/core/src/preprocess/preprocess_steps_impl.cpp +++ b/src/core/src/preprocess/preprocess_steps_impl.cpp @@ -349,7 +349,7 @@ void PreStepsList::add_convert_layout_impl(const Layout& layout) { auto perm_constant = op::v0::Constant::create(element::i64, Shape{permutation.size()}, permutation); auto transpose = std::make_shared(node, perm_constant); - context.layout() = 
dst_layout; // Update context's current layout + context.layout() = std::move(dst_layout); // Update context's current layout // return false to avoid excess function revalidations as layout conversion // doesn't require shape or type propagation. return std::make_tuple(std::vector>{transpose}, false); @@ -552,7 +552,7 @@ void PreStepsList::add_convert_color_impl(const ColorFormat& dst_format) { ov::Strides(weights_shape.size() - 2, 1)); if (is_converted) { - // Round values according to OpenCV rule before converting to integral values + // Round values according to OpenCV rule before converting to integral values auto round_val = std::make_shared(node, ov::op::v5::Round::RoundMode::HALF_TO_EVEN); node = std::make_shared(round_val, elem_type); @@ -565,7 +565,7 @@ void PreStepsList::add_convert_color_impl(const ColorFormat& dst_format) { node = std::make_shared(node, perm_constant); } context.color_format() = dst_format; - return std::make_tuple(std::vector>{node}, true); + return std::make_tuple(std::vector>{std::move(node)}, true); } if (context.color_format() == ColorFormat::RGBX) { if (dst_format == ColorFormat::RGB) { @@ -722,7 +722,7 @@ void PostStepsList::add_convert_layout_impl(const Layout& layout) { auto perm_constant = op::v0::Constant::create(element::i64, Shape{permutation.size()}, permutation); auto transpose = std::make_shared(node, perm_constant); - context.layout() = dst_layout; // Update context's current layout + context.layout() = std::move(dst_layout); // Update context's current layout return std::make_tuple(Output(transpose), true); }, "convert layout " + layout.to_string()); diff --git a/src/core/src/runtime/itensor.cpp b/src/core/src/runtime/itensor.cpp index e7ce33c28dfd19..203297c671d401 100644 --- a/src/core/src/runtime/itensor.cpp +++ b/src/core/src/runtime/itensor.cpp @@ -147,8 +147,8 @@ void ITensor::copy_to(const std::shared_ptr& dst) const { max_pos[inverted_idx] = shape[inverted_idx]; cur_pos[inverted_idx] = 0; } - src_strides = 
src_str; - dst_strides = dst_str; + src_strides = std::move(src_str); + dst_strides = std::move(dst_str); } const auto update_index = [](const ov::Shape& pos, const ov::Shape& shape, const ov::Strides& strides) { diff --git a/src/core/src/strides.cpp b/src/core/src/strides.cpp index c9e92b18fef74f..e09b99c3cb5e9e 100644 --- a/src/core/src/strides.cpp +++ b/src/core/src/strides.cpp @@ -29,6 +29,6 @@ ov::Strides& ov::Strides::operator=(const Strides& v) { } ov::Strides& ov::Strides::operator=(Strides&& v) noexcept { - static_cast*>(this)->operator=(v); + static_cast*>(this)->operator=(std::move(v)); return *this; } diff --git a/src/core/src/symbol.cpp b/src/core/src/symbol.cpp index a17fc62d5d2045..48c913bfb53881 100644 --- a/src/core/src/symbol.cpp +++ b/src/core/src/symbol.cpp @@ -26,5 +26,5 @@ void ov::symbol::set_equal(const std::shared_ptr& lhs, const std::shared auto lhs_root = ov::symbol::ancestor_of(lhs), rhs_root = ov::symbol::ancestor_of(rhs); if (lhs_root.get() == rhs_root.get()) return; // already are equal - lhs_root->m_parent = rhs_root; + lhs_root->m_parent = std::move(rhs_root); } diff --git a/src/core/src/tensor_util.cpp b/src/core/src/tensor_util.cpp index a0cb1909f26fed..394f342f8af994 100644 --- a/src/core/src/tensor_util.cpp +++ b/src/core/src/tensor_util.cpp @@ -10,10 +10,9 @@ ov::Tensor ov::util::greater_equal(const ov::Tensor& lhs, const ov::Tensor& rhs) { if (!lhs || !rhs) return {}; - Tensor result(element::boolean, {}); - TensorVector outputs = {result}; + TensorVector outputs{{element::boolean, {}}}; if (ov::op::v1::GreaterEqual().evaluate(outputs, {lhs, rhs})) - return outputs[0]; + return std::move(outputs[0]); else return {}; } @@ -25,7 +24,7 @@ bool ov::util::reduce_and(const ov::Tensor& t) { auto outputs = TensorVector{{element::boolean, Shape{}}}; auto axes = Tensor(element::i64, Shape{t.get_shape().size()}); std::iota(axes.data(), axes.data() + t.get_shape().size(), 0); - if (!ov::op::v1::ReduceLogicalAnd().evaluate(outputs, {t, 
axes})) + if (!ov::op::v1::ReduceLogicalAnd().evaluate(outputs, {t, std::move(axes)})) return false; return outputs[0].data(); } diff --git a/src/core/src/validation_util.cpp b/src/core/src/validation_util.cpp index 777f03e82b587e..6410f019f3edbf 100644 --- a/src/core/src/validation_util.cpp +++ b/src/core/src/validation_util.cpp @@ -287,7 +287,7 @@ bool evaluate_as_partial_shape(const ov::Output& output, ov::PartialSh const TensorSymbol& symbols = output.get_tensor().get_value_symbol(); OPENVINO_ASSERT(symbols.empty() || lower_bound.size() == symbols.size()); - std::vector resulting_pshape(lower_bound.size()); + pshape.resize(lower_bound.size()); for (size_t i = 0; i < lower_bound.size(); ++i) { auto low = lower_bound[i], up = upper_bound[i]; OPENVINO_ASSERT(low >= 0 && up >= 0, "Value for partial shape evaluation can't be lower than zero."); @@ -297,11 +297,10 @@ bool evaluate_as_partial_shape(const ov::Output& output, ov::PartialSh if (low == std::numeric_limits::max()) low = std::numeric_limits::max(); } - resulting_pshape[i] = {low, up}; + pshape[i] = {low, up}; if (!symbols.empty()) - resulting_pshape[i].set_symbol(symbols[i]); + pshape[i].set_symbol(symbols[i]); } - pshape = ov::PartialShape(resulting_pshape); shape_defined = true; } return shape_defined; diff --git a/src/core/tests/graph_rewrite.cpp b/src/core/tests/graph_rewrite.cpp index 587fc5bf3d114f..02369738741dcc 100644 --- a/src/core/tests/graph_rewrite.cpp +++ b/src/core/tests/graph_rewrite.cpp @@ -410,7 +410,7 @@ class CheckConsumers : public ov::pass::MatcherPass { * 4. Some GraphRewrite facilities */ auto cnt = consumers(node.get()); - if (node.use_count() != cnt + 7) { + if (node.use_count() != cnt + 6) { OPENVINO_THROW("Wrong number of consumers"); } @@ -440,5 +440,5 @@ TEST(GraphRewriteTest, nodes_use_count) { auto f = get_model(); pass::Manager m; m.register_pass(); - ASSERT_NO_THROW(m.run_passes(f)); + OV_ASSERT_NO_THROW(m.run_passes(f)); }