@@ -802,6 +802,78 @@ std::set<std::vector<element::Type>> jit_hswish_emitter::get_supported_precision
return {{element::f32}};
}

/// IS_FINITE ///
jit_is_finite_emitter::jit_is_finite_emitter(dnnl::impl::cpu::aarch64::jit_generator* host,
dnnl::impl::cpu::aarch64::cpu_isa_t host_isa,
const std::shared_ptr<ov::Node>& node)
: jit_emitter(host, host_isa, node, get_arithmetic_binary_exec_precision(node)) {
auto isFinite = ov::as_type_ptr<ov::op::v10::IsFinite>(node);
if (isFinite == nullptr) {
OV_CPU_JIT_EMITTER_THROW("Can't cast to ov::op::v10::IsFinite");
}

prepare_table();
}

jit_is_finite_emitter::jit_is_finite_emitter(dnnl::impl::cpu::aarch64::jit_generator* host,
dnnl::impl::cpu::aarch64::cpu_isa_t host_isa,
const ov::element::Type exec_prc)
: jit_emitter(host, host_isa, exec_prc) {
prepare_table();
}

size_t jit_is_finite_emitter::get_inputs_count() const { return 1; }

size_t jit_is_finite_emitter::get_aux_vecs_count() const { return 2; }

size_t jit_is_finite_emitter::get_aux_gprs_count() const { return 1; }

std::set<std::vector<element::Type>> jit_is_finite_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
return {{element::f32}};
}

void jit_is_finite_emitter::emit_impl(const std::vector<size_t>& in_vec_idxs, const std::vector<size_t>& out_vec_idxs) const {
if (host_isa_ == dnnl::impl::cpu::aarch64::asimd) {
emit_isa<dnnl::impl::cpu::aarch64::asimd>(in_vec_idxs, out_vec_idxs);
} else {
OV_CPU_JIT_EMITTER_THROW("Can't create jit eltwise kernel");
}
}

template <dnnl::impl::cpu::aarch64::cpu_isa_t isa>
void jit_is_finite_emitter::emit_isa(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const {
OV_CPU_JIT_EMITTER_ASSERT(exec_prc_ == ov::element::f32, "unsupported precision: " + exec_prc_.to_string());

using TReg = typename dnnl::impl::cpu::aarch64::cpu_isa_traits<isa>::TReg;

TReg src = TReg(in_vec_idxs[0]);
TReg dst = TReg(out_vec_idxs[0]);
TReg aux0 = TReg(aux_vec_idxs[0]);
TReg aux1 = TReg(aux_vec_idxs[1]);

// IEEE 754: NaN is the only value that compares unequal to itself,
// so (x == x) is false exactly for NaN lanes.
h->fcmeq(aux0.s, src.s, src.s);
h->not_(aux0.b16, aux0.b16); // aux0 = NaN mask

// |x| == +inf catches both infinities.
h->fabs(src.s, src.s);
h->ld1r(aux1.s, table_val2("inf"));
h->fcmeq(src.s, src.s, aux1.s); // src = inf mask

// finite = !(NaN || inf); mask with 1.0f to produce 1.0/0.0 per lane.
h->orr(dst.b16, aux0.b16, src.b16);
h->not_(dst.b16, dst.b16);
h->ld1r(aux0.s, table_val2("one"));
h->and_(dst.b16, dst.b16, aux0.b16);
}
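
For reference, a minimal scalar sketch of the per-lane computation the NEON sequence above performs; is_finite_lane is a hypothetical helper written for illustration, not part of the emitter:

#include <cmath>
#include <limits>

// Hypothetical scalar model of one f32 lane of emit_isa above.
inline float is_finite_lane(float x) {
    const bool is_nan = !(x == x);  // fcmeq(src, src) + not_ -> NaN mask
    const bool is_inf = std::fabs(x) == std::numeric_limits<float>::infinity();  // fabs + fcmeq with "inf"
    return (is_nan || is_inf) ? 0.0f : 1.0f;  // orr + not_ + and_ with "one"
}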

void jit_is_finite_emitter::register_table_entries() {
// IEEE 754 single-precision bit patterns used by emit_isa.
push_arg_entry_of("one", 0x3f800000, true);   // 1.0f
push_arg_entry_of("zero", 0x00000000, true);  // 0.0f (not referenced by emit_isa at present)
push_arg_entry_of("inf", 0x7F800000, true);   // +inf
}
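
These bit patterns can be sanity-checked against the corresponding float values; a standalone sketch (assumes C++11 or later, not part of the change):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

int main() {
    // Reinterpret a float's bits as uint32_t without violating aliasing rules.
    auto bits = [](float f) { uint32_t u; std::memcpy(&u, &f, sizeof(u)); return u; };
    assert(bits(1.0f) == 0x3f800000u);
    assert(bits(0.0f) == 0x00000000u);
    assert(bits(std::numeric_limits<float>::infinity()) == 0x7F800000u);
}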

/// IS_INF ///

jit_is_inf_emitter::jit_is_inf_emitter(dnnl::impl::cpu::aarch64::jit_generator* host,
@@ -305,6 +305,33 @@ class jit_hswish_emitter : public jit_emitter {
void emit_isa(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const;
};

class jit_is_finite_emitter : public jit_emitter {
public:
jit_is_finite_emitter(dnnl::impl::cpu::aarch64::jit_generator *host,
dnnl::impl::cpu::aarch64::cpu_isa_t host_isa,
const ov::element::Type exec_prc = ov::element::f32);

jit_is_finite_emitter(dnnl::impl::cpu::aarch64::jit_generator *host,
dnnl::impl::cpu::aarch64::cpu_isa_t host_isa,
const std::shared_ptr<ov::Node>& node);

size_t get_inputs_count() const override;

size_t get_aux_vecs_count() const override;

size_t get_aux_gprs_count() const override;

static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);

private:
void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;

template <dnnl::impl::cpu::aarch64::cpu_isa_t isa>
void emit_isa(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const;

void register_table_entries() override;
};

class jit_is_nan_emitter : public jit_emitter {
public:
jit_is_nan_emitter(dnnl::impl::cpu::aarch64::jit_generator *host,
@@ -29,6 +29,7 @@ bool JitEltwiseExecutor::isSupported(
Algorithm::EltwiseGeluErf,
Algorithm::EltwiseGeluTanh,
Algorithm::EltwiseHswish,
Algorithm::EltwiseIsFinite,
Algorithm::EltwiseIsInf,
Algorithm::EltwiseIsNaN,
Algorithm::EltwiseMaximum,
@@ -649,6 +649,7 @@ std::shared_ptr<jit_emitter> jit_uni_eltwise_generic<isa>::create_eltwise_emitte
OV_CASE(Algorithm::EltwiseExp, ov::intel_cpu::aarch64::jit_exp_emitter),
OV_CASE(Algorithm::EltwiseFloor, ov::intel_cpu::aarch64::jit_floor_emitter),
OV_CASE(Algorithm::EltwiseHswish, ov::intel_cpu::aarch64::jit_hswish_emitter),
OV_CASE(Algorithm::EltwiseIsFinite, ov::intel_cpu::aarch64::jit_is_finite_emitter),
OV_CASE(Algorithm::EltwiseIsInf, ov::intel_cpu::aarch64::jit_is_inf_emitter),
OV_CASE(Algorithm::EltwiseIsNaN, ov::intel_cpu::aarch64::jit_is_nan_emitter),
OV_CASE(Algorithm::EltwiseMaximum, ov::intel_cpu::aarch64::jit_maximum_emitter),
@@ -823,6 +824,7 @@ std::set<std::vector<element::Type>> eltwise_precision_helper::get_supported_pre
OV_CASE(Algorithm::EltwiseGeluErf, jit_gelu_erf_emitter),
OV_CASE(Algorithm::EltwiseGeluTanh, jit_gelu_tanh_emitter),
OV_CASE(Algorithm::EltwiseHswish, jit_hswish_emitter),
OV_CASE(Algorithm::EltwiseIsFinite, jit_is_finite_emitter),
OV_CASE(Algorithm::EltwiseIsInf, jit_is_inf_emitter),
OV_CASE(Algorithm::EltwiseIsNaN, jit_is_nan_emitter),
OV_CASE(Algorithm::EltwiseMaximum, jit_maximum_emitter),
@@ -95,6 +95,12 @@ void ActivationLayerCPUTest::generate_inputs(const std::vector<ov::Shape>& targe
// cover Sign NAN test case
if ((activationType == utils::ActivationTypes::Sign) && funcInput.get_element_type() == ov::element::f32) {
static_cast<float*>(tensor.data())[0] = std::numeric_limits<float>::quiet_NaN();
} else if ((activationType == utils::ActivationTypes::IsFinite) && funcInput.get_element_type() == ov::element::f32 && tensor.get_size() >= 5) {
static_cast<float*>(tensor.data())[0] = std::numeric_limits<float>::quiet_NaN();      // NaN
static_cast<float*>(tensor.data())[1] = std::numeric_limits<float>::signaling_NaN();  // signaling NaN
static_cast<float*>(tensor.data())[2] = std::sqrt(-1.0f);                             // -NaN
static_cast<float*>(tensor.data())[3] = std::numeric_limits<float>::infinity();       // +inf
static_cast<float*>(tensor.data())[4] = -std::numeric_limits<float>::infinity();      // -inf
}
} else {
tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]);
@@ -175,6 +181,7 @@ std::string ActivationLayerCPUTest::getPrimitiveType(const utils::ActivationType
(activation_type == utils::ActivationTypes::HSwish) ||
(activation_type == utils::ActivationTypes::IsInf) ||
(activation_type == utils::ActivationTypes::HardSigmoid) ||
(activation_type == utils::ActivationTypes::IsFinite) ||
(activation_type == utils::ActivationTypes::IsNaN) ||
(activation_type == utils::ActivationTypes::Mish) ||
(activation_type == utils::ActivationTypes::GeluErf) ||
@@ -192,7 +199,8 @@ std::string ActivationLayerCPUTest::getPrimitiveType(const utils::ActivationType
}
#endif
if ((activation_type == utils::ActivationTypes::Floor) ||
(activation_type == utils::ActivationTypes::IsNaN)) {
(activation_type == utils::ActivationTypes::IsNaN) ||
(activation_type == utils::ActivationTypes::IsFinite)) {
return "ref";
}
return "acl";
@@ -229,6 +237,7 @@ const std::map<utils::ActivationTypes, std::vector<std::vector<float>>>& activat
{GeluTanh, {{}}},
{SoftSign, {{}}},
{SoftPlus, {{}}},
{IsFinite, {{}}},
{IsNaN, {{}}},
};
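
For the special values seeded in generate_inputs above, IsFinite should agree with std::isfinite; a minimal standalone sketch of the expected results (illustration only):

#include <cassert>
#include <cmath>
#include <limits>

int main() {
    assert(!std::isfinite(std::numeric_limits<float>::quiet_NaN()));      // NaN -> false
    assert(!std::isfinite(std::numeric_limits<float>::signaling_NaN()));  // signaling NaN -> false
    assert(!std::isfinite(std::sqrt(-1.0f)));                             // -NaN -> false
    assert(!std::isfinite(std::numeric_limits<float>::infinity()));       // +inf -> false
    assert(!std::isfinite(-std::numeric_limits<float>::infinity()));      // -inf -> false
    assert(std::isfinite(0.0f));                                          // ordinary values -> true
}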

@@ -55,6 +55,7 @@ const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes
{ActivationTypes::GeluErf, {}},
{ActivationTypes::GeluTanh, {}},
{ActivationTypes::Swish, {{0.4f}}},
{ActivationTypes::IsFinite, {}},
{ActivationTypes::IsInf, {}},
{ActivationTypes::IsNaN, {{}}},
};
@@ -68,6 +68,7 @@ static std::map<ActivationTypes, std::string> activationNames = {
{ActivationTypes::GeluTanh, "GeluTanh"},
{ActivationTypes::SoftSign, "SoftSign"},
{ActivationTypes::IsInf, "IsInf"},
{ActivationTypes::IsFinite, "IsFinite"},
{ActivationTypes::IsNaN, "IsNaN"},
};

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,7 @@ enum ActivationTypes {
GeluErf,
GeluTanh,
SoftSign,
IsFinite,
IsInf,
IsNaN,
};
@@ -24,6 +24,7 @@
#include "openvino/op/hard_sigmoid.hpp"
#include "openvino/op/hsigmoid.hpp"
#include "openvino/op/hswish.hpp"
#include "openvino/op/is_finite.hpp"
#include "openvino/op/is_inf.hpp"
#include "openvino/op/is_nan.hpp"
#include "openvino/op/log.hpp"
@@ -146,6 +147,8 @@ std::shared_ptr<ov::Node> make_activation(const ov::Output<Node>& in,
return std::make_shared<ov::op::v7::Gelu>(in, ov::op::GeluApproximationMode::TANH);
case ov::test::utils::ActivationTypes::SoftSign:
return std::make_shared<ov::op::v9::SoftSign>(in);
case ov::test::utils::ActivationTypes::IsFinite:
return std::make_shared<ov::op::v10::IsFinite>(in);
case ov::test::utils::ActivationTypes::IsInf:
return std::make_shared<ov::op::v10::IsInf>(in);
case ov::test::utils::ActivationTypes::IsNaN: