From 4ed8bd6c3e8143f1b74bc0ce8489b0ce6e217c0d Mon Sep 17 00:00:00 2001 From: dijopaul <87994875+dijopaul@users.noreply.github.com> Date: Thu, 19 Sep 2024 12:20:03 +0530 Subject: [PATCH 1/9] Main backup (#12) * Add nnlib as submodule * Adding nnlib submodule * Integrated nnlib API unde backends/cadence/hifi * Fix review comments on PR#3 * Add nnlib as submodule * Adding nnlib submodule * Integrated nnlib API unde backends/cadence/hifi * Fix review comments on PR#3 * Incorporated feedback from Meta team. * lint errors fixed * Adding Sub operator optimized version * Add optimization for add, mul operators * Adding Div operator * Modified div mod to cover truncate and floor modes --------- Co-authored-by: cad-audio <86048415+cad-audio@users.noreply.github.com> Co-authored-by: cad-audio --- backends/cadence/hifi/kernels/CMakeLists.txt | 4 + backends/cadence/hifi/kernels/kernels.h | 40 ++ .../cadence/hifi/operators/CMakeLists.txt | 26 +- backends/cadence/hifi/operators/op_add.cpp | 123 ++++ backends/cadence/hifi/operators/op_div.cpp | 370 ++++++++++ backends/cadence/hifi/operators/op_mul.cpp | 216 ++++++ backends/cadence/hifi/operators/op_sub.cpp | 257 +++++++ .../nnlib/xa_nn_elm_add_f32_broadcast.c | 428 ++++++++++++ .../nnlib/xa_nn_elm_div_f32_broadcast.c | 419 ++++++++++++ .../nnlib/xa_nn_elm_div_mode_f32_broadcast.c | 644 ++++++++++++++++++ .../nnlib/xa_nn_elm_mul_f32_broadcast.c | 360 ++++++++++ 11 files changed, 2874 insertions(+), 13 deletions(-) create mode 100644 backends/cadence/hifi/operators/op_add.cpp create mode 100644 backends/cadence/hifi/operators/op_div.cpp create mode 100644 backends/cadence/hifi/operators/op_mul.cpp create mode 100644 backends/cadence/hifi/operators/op_sub.cpp create mode 100644 backends/cadence/hifi/third-party/nnlib/xa_nn_elm_add_f32_broadcast.c create mode 100644 backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_f32_broadcast.c create mode 100644 backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_mode_f32_broadcast.c create mode 100644 backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c diff --git a/backends/cadence/hifi/kernels/CMakeLists.txt b/backends/cadence/hifi/kernels/CMakeLists.txt index 15d1a4ddd52..0ff3d1fde60 100644 --- a/backends/cadence/hifi/kernels/CMakeLists.txt +++ b/backends/cadence/hifi/kernels/CMakeLists.txt @@ -9,6 +9,10 @@ add_library( cadence_kernels kernels.cpp ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/matmul_asym8uxasym8u_asym8u.cpp + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_add_f32_broadcast.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_f32_broadcast.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_mode_f32_broadcast.c + ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c ) target_include_directories( diff --git a/backends/cadence/hifi/kernels/kernels.h b/backends/cadence/hifi/kernels/kernels.h index b5659824615..8faf06713b7 100644 --- a/backends/cadence/hifi/kernels/kernels.h +++ b/backends/cadence/hifi/kernels/kernels.h @@ -12,6 +12,46 @@ #include #include +/* For NNLIB APIs */ +#include "xa_nnlib_kernels_api.h" + +/* Potential NNLIB function/APIs */ +extern "C" WORD32 xa_nn_elm_add_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const WORD32 *const p_out_shape, + const FLOAT32 * __restrict__ p_inp1, + const WORD32 *const p_inp1_shape, + const FLOAT32 * __restrict__ p_inp2, + const WORD32 *const p_inp2_shape); + +extern "C" WORD32 
xa_nn_elm_div_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const WORD32 *const p_out_shape, + const FLOAT32 * __restrict__ p_inp1, + const WORD32 *const p_inp1_shape, + const FLOAT32 * __restrict__ p_inp2, + const WORD32 *const p_inp2_shape); + +extern "C" WORD32 xa_nn_elm_div_mode_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm, + WORD32 mode); + +extern "C" WORD32 xa_nn_elm_div_mode_broadcast_4D_f32xf32_f32( + FLOAT32 * __restrict__ p_out, + const WORD32 *const p_out_shape, + const FLOAT32 * __restrict__ p_inp1, + const WORD32 *const p_inp1_shape, + const FLOAT32 * __restrict__ p_inp2, + const WORD32 *const p_inp2_shape, + WORD32 mode); + +extern "C" WORD32 xa_nn_elm_mul_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const WORD32 *const p_out_shape, + const FLOAT32 * __restrict__ p_inp1, + const WORD32 *const p_inp1_shape, + const FLOAT32 * __restrict__ p_inp2, + const WORD32 *const p_inp2_shape); + namespace impl { namespace HiFi { namespace kernels { diff --git a/backends/cadence/hifi/operators/CMakeLists.txt b/backends/cadence/hifi/operators/CMakeLists.txt index 8da6169cda1..d56d19fc37f 100644 --- a/backends/cadence/hifi/operators/CMakeLists.txt +++ b/backends/cadence/hifi/operators/CMakeLists.txt @@ -20,32 +20,32 @@ endif() # ATen compliant ops that are needed to run this model. set(_aten_ops__srcs - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/activation_ops_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/copy_ops_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/broadcast_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/index_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/kernel_ops_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/matmul_ops_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/reduce_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/repeat_util.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_add.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_add.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_div.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mul.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sub.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_bmm.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_cat.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_clone.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_div.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_embedding.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_full.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_mul.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_permute_copy.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_sigmoid.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_slice_copy.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_softmax.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_split_with_sizes_copy.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_sub.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_to_copy.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_view_copy.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_where.cpp" -) + "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/activation_ops_util.cpp" + "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/broadcast_util.cpp" + "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/copy_ops_util.cpp" + "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/index_util.cpp" + 
"${EXECUTORCH_ROOT}/kernels/portable/cpu/util/kernel_ops_util.cpp" + "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/matmul_ops_util.cpp" + "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/reduce_util.cpp" + "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/repeat_util.cpp" + ) add_library(aten_ops_cadence ${_aten_ops__srcs}) target_link_libraries(aten_ops_cadence PUBLIC executorch) target_link_libraries(aten_ops_cadence PRIVATE cadence_kernels) diff --git a/backends/cadence/hifi/operators/op_add.cpp b/backends/cadence/hifi/operators/op_add.cpp new file mode 100644 index 00000000000..15ba5c250e7 --- /dev/null +++ b/backends/cadence/hifi/operators/op_add.cpp @@ -0,0 +1,123 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include "kernels.h" + +namespace torch { +namespace executor { +namespace native { + +#define NNLIB_MAX_DIM 4 /* Add fallback if broadcast and dim > 4 */ + +Tensor& add_out( + RuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + const Scalar& alpha, + Tensor& out) { + (void)ctx; + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType common_type = promoteTypes(a_type, b_type); + ScalarType out_type = out.scalar_type(); + + ET_CHECK_MSG(a_type == ScalarType::Float, "Input tensor not a float.\n"); + ET_CHECK_MSG(b_type == ScalarType::Float, "Input tensor not a float.\n"); + ET_CHECK_MSG(out_type == ScalarType::Float, "Output tensor not a float.\n"); + + ET_CHECK(canCast(common_type, out_type)); + + using CTYPE_A = float; + using CTYPE_B = float; + using CTYPE_IN = float; + using CTYPE_OUT = float; + CTYPE_IN alpha_val; + ET_EXTRACT_SCALAR(alpha, alpha_val); + + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + int fall_back = 0; + /*find broadcast*/ + const int a_is_broadcasted = !out.sizes().equals(a.sizes()); + const int b_is_broadcasted = !out.sizes().equals(b.sizes()); + const int broadcast = (a_is_broadcasted || b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + if( (out_type != ScalarType::Float) || (alpha_val != 1.0)) + fall_back = 1; + + if( (a_dim == 0) || (b_dim == 0) ) + fall_back = 1; + + if((broadcast == 1) && (max_dim > NNLIB_MAX_DIM)) + fall_back = 1; + + + if (!fall_back) + { + const float* const a_data = a.const_data_ptr(); + const float* const b_data = b.const_data_ptr(); + float* const out_data = out.mutable_data_ptr(); + if(broadcast == 1) + { + int out_shape[NNLIB_MAX_DIM]; + int inp1_shape[NNLIB_MAX_DIM]; + int inp2_shape[NNLIB_MAX_DIM]; + + for(int i = 0; i < NNLIB_MAX_DIM; i++) + { + out_shape[i] = 1; + inp1_shape[i] = 1; + inp2_shape[i] = 1; + } + + int off_o = NNLIB_MAX_DIM - out.dim(); + int off_a = NNLIB_MAX_DIM - a.dim(); + int off_b = NNLIB_MAX_DIM - b.dim(); + + for(int i = 0; i < out.dim(); i++) + out_shape[i+off_o] = out.size(i); + for(int i = 0; i < a.dim(); i++) + inp1_shape[i+off_a] = a.size(i); + for(int i = 0; i < b.dim(); i++) + inp2_shape[i+off_b] = b.size(i); + + xa_nn_elm_add_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape, + b_data, inp2_shape); + } + else + xa_nn_elm_add_f32xf32_f32(out_data, a_data, b_data, out.numel()); + + } + else + { + apply_binary_elementwise_fn( + [alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = a_casted + alpha_val * b_casted; + + return static_cast(value); + }, + a, + b, + out); + } + + return out; +} + +} // namespace native +} // namespace executor +} // namespace torch diff --git a/backends/cadence/hifi/operators/op_div.cpp b/backends/cadence/hifi/operators/op_div.cpp new file mode 100644 index 00000000000..dc6a22ea4de --- /dev/null +++ b/backends/cadence/hifi/operators/op_div.cpp @@ -0,0 +1,370 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "kernels.h" + +namespace torch { +namespace executor { +namespace native { + +#define NNLIB_MAX_DIM 4 /* Add fallback if broadcast and dim > 4 */ + +namespace { + +ScalarType get_compute_type(ScalarType a_type, ScalarType b_type) { + if (isFloatingType(a_type) && isFloatingType(b_type)) { + return promoteTypes(a_type, b_type); + } else if (isFloatingType(a_type)) { + return a_type; + } else if (isFloatingType(b_type)) { + return b_type; + } + return ScalarType::Float; +} + +} // namespace + +Tensor& +div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + + ET_KERNEL_CHECK( + ctx, + !isComplexType(a_type) && !isQIntType(a_type) && !isBitsType(a_type), + InvalidArgument, + out); + ET_KERNEL_CHECK( + ctx, + !isComplexType(b_type) && !isQIntType(b_type) && !isBitsType(b_type), + InvalidArgument, + out); + + ET_KERNEL_CHECK(ctx, tensor_is_real_type(out), InvalidArgument, out); + + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + int fall_back = 0; + /*find broadcast*/ + const int a_is_broadcasted = !out.sizes().equals(a.sizes()); + const int b_is_broadcasted = !out.sizes().equals(b.sizes()); + const int broadcast = (a_is_broadcasted || b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? 
a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? out.dim() : max_dim; + + if((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) + fall_back = 1; + + if( (a_dim == 0) || (b_dim == 0) ) + fall_back = 1; + + if((broadcast == 1) && (max_dim > NNLIB_MAX_DIM)) + fall_back = 1; + + if(!fall_back) + { + float* a_data = a.mutable_data_ptr(); + float* b_data = b.mutable_data_ptr(); + float* out_data = out.mutable_data_ptr(); + + if(broadcast == 1) + { + + int out_shape[NNLIB_MAX_DIM]; + int inp1_shape[NNLIB_MAX_DIM]; + int inp2_shape[NNLIB_MAX_DIM]; + + for(int i = 0; i < NNLIB_MAX_DIM; i++) + { + out_shape[i] = 1; + inp1_shape[i] = 1; + inp2_shape[i] = 1; + } + + int off_o = NNLIB_MAX_DIM - out.dim(); + int off_a = NNLIB_MAX_DIM - a.dim(); + int off_b = NNLIB_MAX_DIM - b.dim(); + for(int i = 0; i < out.dim(); i++) + out_shape[i+off_o] = out.size(i); + for(int i = 0; i < a.dim(); i++) + inp1_shape[i+off_a] = a.size(i); + for(int i = 0; i < b.dim(); i++) + inp2_shape[i+off_b] = b.size(i); + + xa_nn_elm_div_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); + } + else + { + + xa_nn_elm_div_f32xf32_f32(out_data, a_data, b_data, out.numel()); + } + } + else + { + ScalarType common_type = get_compute_type(a_type, b_type); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.out", CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out", CTYPE_B, [&]() { + ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out", CTYPE_IN, [&]() { + ET_SWITCH_FLOAT_TYPES(out_type, ctx, "div.out", CTYPE_OUT, [&]() { + apply_binary_elementwise_fn( + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = a_casted / b_casted; + + return static_cast(value); + }, + a, + b, + out); + }); + }); + }); + }); + } + + return out; +} + +Tensor& div_out_mode( + RuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + exec_aten::optional mode, + Tensor& out) { + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType common_type = get_compute_type(a_type, b_type); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, tensor_is_real_type(out), InvalidArgument, out); + + // Allow casting float -> integral here + // non-bool -> bool is still disallowed + ET_KERNEL_CHECK( + ctx, + !(common_type != ScalarType::Bool && out_type == ScalarType::Bool), + InvalidArgument, + out); + + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + int fall_back = 0; + /*find broadcast*/ + const int a_is_broadcasted = !out.sizes().equals(a.sizes()); + const int b_is_broadcasted = !out.sizes().equals(b.sizes()); + const int broadcast = (a_is_broadcasted || b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + if((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) + fall_back = 1; + + if( (a_dim == 0) || (b_dim == 0) ) + fall_back = 1; + + if((broadcast == 1) && (max_dim > NNLIB_MAX_DIM)) + fall_back = 1; + int mode_val = -1; + if (mode.has_value() && mode.value() == "trunc") + mode_val = 0; + else if (mode.has_value() && mode.value() == "floor") + mode_val = 1; + else + fall_back = 1; + + if(!fall_back) + { + float* a_data = a.mutable_data_ptr(); + float* b_data = b.mutable_data_ptr(); + float* out_data = out.mutable_data_ptr(); + + if(broadcast) + { + int out_shape[NNLIB_MAX_DIM]; + int inp1_shape[NNLIB_MAX_DIM]; + int inp2_shape[NNLIB_MAX_DIM]; + + for(int i = 0; i < NNLIB_MAX_DIM; i++) + { + inp1_shape[i] = 1; + inp2_shape[i] = 1; + out_shape[i] = 1; + } + + int off_o = NNLIB_MAX_DIM - out.dim(); + int off_a = NNLIB_MAX_DIM - a.dim(); + int off_b = NNLIB_MAX_DIM - b.dim(); + + for(int i = 0; i < out.dim(); i++) + out_shape[i+off_o] = out.size(i); + for(int i = 0; i < a.dim(); i++) + inp1_shape[i+off_a] = a.size(i); + for(int i = 0; i < b.dim(); i++) + inp2_shape[i+off_b] = b.size(i); + + xa_nn_elm_div_mode_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape, mode_val); + } + else + { + xa_nn_elm_div_mode_f32xf32_f32(out_data, a_data, b_data, out.numel(), mode_val); + } + } + else + { + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.out_mode", CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out_mode", CTYPE_B, [&]() { + ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out_mode", CTYPE_IN, [&]() { + ET_SWITCH_REAL_TYPES(out_type, ctx, "div.out_mode", CTYPE_OUT, [&]() { + apply_binary_elementwise_fn( + [mode](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = a_casted / b_casted; + if (mode.has_value() && mode.value() == "trunc") { + value = std::trunc(value); + } else if (mode.has_value() && mode.value() == "floor") { + value = std::floor(value); + } + return static_cast(value); + }, + a, + b, + out); + }); + }); + }); + }); + } + + return out; +} + +Tensor& div_scalar_out( + RuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + Tensor& out) { + (void)ctx; + + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, a.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = utils::get_scalar_dtype(b); + ScalarType common_type = isFloatingType(a_type) ? 
a_type : ScalarType::Float; + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.Scalar_out", CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, "div.Scalar_out", CTYPE_B, [&]() { + ET_SWITCH_FLOAT_TYPES(out_type, ctx, "div.Scalar_out", CTYPE, [&]() { + CTYPE_B b_val; + utils::extract_scalar(b, &b_val); + CTYPE b_casted = static_cast(b_val); + + apply_unary_map_fn( + [b_casted](const CTYPE_A val_a) { + CTYPE a_casted = static_cast(val_a); + CTYPE value = a_casted / b_casted; + return static_cast(value); + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); + }); + }); + }); + + return out; +} + +Tensor& div_scalar_mode_out( + RuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + exec_aten::optional mode, + Tensor& out) { + (void)ctx; + + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, a.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = utils::get_scalar_dtype(b); + ScalarType common_type = utils::promote_type_with_scalar(a_type, b); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); + + constexpr auto name = "div.Scalar_mode_out"; + + ET_SWITCH_REALB_TYPES(a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + ET_SWITCH_REAL_TYPES(out_type, ctx, name, CTYPE, [&]() { + CTYPE_B b_val; + utils::extract_scalar(b, &b_val); + CTYPE b_casted = static_cast(b_val); + + apply_unary_map_fn( + [b_casted, mode](const CTYPE_A val_a) { + CTYPE a_casted = static_cast(val_a); + CTYPE value = a_casted / b_casted; + if (mode.has_value() && mode.value() == "trunc") { + value = std::trunc(value); + } else if (mode.has_value() && mode.value() == "floor") { + value = utils::floor_divide(a_casted, b_casted); + } + return value; + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); + }); + }); + }); + + return out; +} + +} // namespace native +} // namespace executor +} // namespace torch diff --git a/backends/cadence/hifi/operators/op_mul.cpp b/backends/cadence/hifi/operators/op_mul.cpp new file mode 100644 index 00000000000..c430bfa0740 --- /dev/null +++ b/backends/cadence/hifi/operators/op_mul.cpp @@ -0,0 +1,216 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include +#include +#include +#include +#include "kernels.h" + +#define NNLIB_MAX_DIM 4 /* Add fallback if broadcast and dim > 4 */ + +namespace torch { +namespace executor { +namespace native { + +namespace { +template < + bool can_cast, + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct MulInner; + +template < + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct MulInner { + static void run(const Tensor& a, const Tensor& b, Tensor& out) { + apply_binary_elementwise_fn( + // NOLINTNEXTLINE(facebook-hte-ConstantArgumentPassByValue) + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = a_casted * b_casted; + + return static_cast(value); + }, + a, + b, + out); + } +}; + +struct ReportCanCastBug { + static void run(const Tensor&, const Tensor&, Tensor&) { + ET_DCHECK_MSG(false, "BUG: canCast should have been checked above"); + } +}; + +template < + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct MulInner + : public ReportCanCastBug {}; +} // namespace + +Tensor& +mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ET_KERNEL_CHECK(ctx, tensor_is_realhb_type(out), InvalidArgument, out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); + ScalarType out_type = out.scalar_type(); + + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + int fall_back = 0; + /*find broadcast*/ + const int a_is_broadcasted = !out.sizes().equals(a.sizes()); + const int b_is_broadcasted = !out.sizes().equals(b.sizes()); + const int broadcast = (a_is_broadcasted || b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + + if((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) + fall_back = 1; + + if( (a_dim == 0) || (b_dim == 0) ) + fall_back = 1; + + if((broadcast == 1) && (max_dim > NNLIB_MAX_DIM)) + fall_back = 1; + + if(!fall_back) + { + float* a_data = a.mutable_data_ptr(); + float* b_data = b.mutable_data_ptr(); + float* out_data = out.mutable_data_ptr(); + + if(broadcast == 1) + { + int out_shape[NNLIB_MAX_DIM]; + int inp1_shape[NNLIB_MAX_DIM]; + int inp2_shape[NNLIB_MAX_DIM]; + for(int i = 0; i < NNLIB_MAX_DIM; i++) + { + out_shape[i] = 1; + inp1_shape[i] = 1; + inp2_shape[i] = 1; + } + int off_o = NNLIB_MAX_DIM - out.dim(); + int off_a = NNLIB_MAX_DIM - a.dim(); + int off_b = NNLIB_MAX_DIM - b.dim(); + for(int i = 0; i < out.dim(); i++){ + out_shape[i+off_o] = out.size(i);} + for(int i = 0; i < a.dim(); i++) + inp1_shape[i+off_a] = a.size(i); + for(int i = 0; i < b.dim(); i++) + inp2_shape[i+off_b] = b.size(i); + + xa_nn_elm_mul_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); + } + else + { + xa_nn_elm_mul_f32xf32_f32(out_data, a_data, b_data, out.numel()); + } + } + else + { + ET_SWITCH_REALHB_TYPES(a_type, ctx, "mul.out", CTYPE_A, [&]() { + ET_SWITCH_REALHB_TYPES(b_type, ctx, "mul.out", CTYPE_B, [&]() { + using CTYPE_IN = typename torch::executor:: + promote_types::type; + ET_DCHECK(CppTypeToScalarType::value == common_type); + ET_SWITCH_REALHB_TYPES(out_type, ctx, "mul.out", CTYPE_OUT, [&]() { + MulInner< + can_cast::value, + CTYPE_A, + CTYPE_B, + CTYPE_IN, + CTYPE_OUT>::run(a, b, out); + }); + }); + }); + } + + return out; +} + +Tensor& mul_scalar_out( + RuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + Tensor& out) { + (void)ctx; + + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, a.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + ET_KERNEL_CHECK(ctx, tensor_is_realhb_type(out), InvalidArgument, out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = utils::get_scalar_dtype(b); + ScalarType common_type = + utils::promote_type_with_scalar(a_type, b, /*half_to_float*/ false); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); + + if (common_type == ScalarType::Half) { + common_type = ScalarType::Float; + } + + ET_SWITCH_REALHB_TYPES(a_type, ctx, "mul.Scalar_out", CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, "mul.Scalar_out", CTYPE_B, [&]() { + ET_SWITCH_REALB_TYPES( + common_type, ctx, "mul.Scalar_out", CTYPE_IN, [&]() { + ET_SWITCH_REALHB_TYPES( + out_type, ctx, "mul.Scalar_out", CTYPE_OUT, [&]() { + CTYPE_B b_val; + utils::extract_scalar(b, &b_val); + CTYPE_IN b_casted = static_cast(b_val); + + apply_unary_map_fn( + [b_casted](const CTYPE_A val_a) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN value = a_casted * b_casted; + return static_cast(value); + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); + }); + }); + }); + }); + + return out; +} + +} // namespace native +} // namespace executor +} // namespace torch diff --git a/backends/cadence/hifi/operators/op_sub.cpp b/backends/cadence/hifi/operators/op_sub.cpp new file mode 100644 index 00000000000..a98bb7c0da2 --- /dev/null +++ b/backends/cadence/hifi/operators/op_sub.cpp @@ -0,0 +1,257 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include "kernels.h" + +#define NNLIB_MAX_DIM 4 /* Add fallback if broadcast and dim > 4 */ + +namespace torch { +namespace executor { +namespace native { +namespace { + +template < + bool can_cast, + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct SubInner; + +template < + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct SubInner { + static void + run(const Tensor& a, const Tensor& b, CTYPE_IN alpha_val, Tensor& out) { + apply_binary_elementwise_fn( + // NOLINTNEXTLINE(facebook-hte-ConstantArgumentPassByValue) + [alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = a_casted - alpha_val * b_casted; + + return static_cast(value); + }, + a, + b, + out); + } +}; + +template +struct ReportCanCastBug { + static void run(const Tensor&, const Tensor&, CTYPE_IN, Tensor&) { + ET_DCHECK_MSG(false, "BUG: canCast should have been checked above"); + } +}; + +template < + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct SubInner + : public ReportCanCastBug {}; + +} // namespace + +Tensor& sub_out( + RuntimeContext& ctx, + const Tensor& a, + const Tensor& b, + const Scalar& alpha, + Tensor& out) { + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, + out); + + ET_KERNEL_CHECK(ctx, tensor_is_realh_type(out), InvalidArgument, out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); + ScalarType alpha_type = utils::get_scalar_dtype(alpha); + ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + ET_KERNEL_CHECK( + ctx, check_alpha_type(alpha_type, common_type), InvalidArgument, out); + + float alpha_val; + utils::extract_scalar(alpha, &alpha_val); + + constexpr auto name = "sub.out"; + + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + int fall_back = 0; + /*find broadcast*/ + const int a_is_broadcasted = !out.sizes().equals(a.sizes()); + const int b_is_broadcasted = !out.sizes().equals(b.sizes()); + const int broadcast = (a_is_broadcasted || b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + + if( (out_type != ScalarType::Float) || (alpha_val != 1.0)) + fall_back = 1; + + if( (a_dim == 0) || (b_dim == 0) ) + fall_back = 1; + + if((broadcast == 1) && (max_dim > NNLIB_MAX_DIM)) + fall_back = 1; + + + if(!fall_back) + { + /*logic to find broadcast*/ + const int a_is_broadcasted = !out.sizes().equals(a.sizes()); + const int b_is_broadcasted = !out.sizes().equals(b.sizes()); + const int broadcast = (a_is_broadcasted || b_is_broadcasted); + + const float* const a_data = a.const_data_ptr(); + const float* const b_data = b.const_data_ptr(); + float* const out_data = out.mutable_data_ptr(); + if(broadcast == 1) + { + int out_shape[NNLIB_MAX_DIM]; + int inp1_shape[NNLIB_MAX_DIM]; + int inp2_shape[NNLIB_MAX_DIM]; + + for(int i = 0; i < NNLIB_MAX_DIM; i++) + { + out_shape[i] = 1; + inp1_shape[i] = 1; + inp2_shape[i] = 1; + } + + int off_o = NNLIB_MAX_DIM - out_dim; + int off_a = NNLIB_MAX_DIM - a_dim; + int off_b = NNLIB_MAX_DIM - b_dim; + for(int i = 0; i < out_dim; i++) + out_shape[i+off_o] = out.size(i); + for(int i = 0; i < a_dim; i++) + inp1_shape[i+off_a] = a.size(i); + for(int i = 0; i < b_dim; i++) + inp2_shape[i+off_b] = b.size(i); + + xa_nn_elm_sub_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape,b_data, inp2_shape); + } + else + { + xa_nn_elm_sub_f32xf32_f32(out_data, a_data, b_data, out.numel()); + } + + } + else + { + + ET_SWITCH_REALH_TYPES(a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_REALH_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + using CTYPE_IN = typename torch::executor:: + promote_types::type; + ET_DCHECK(CppTypeToScalarType::value == common_type); + CTYPE_IN alpha_val; + utils::extract_scalar(alpha, &alpha_val); + ET_SWITCH_REALH_TYPES(out_type, ctx, name, CTYPE_OUT, [&]() { + SubInner< + can_cast::value, + CTYPE_A, + CTYPE_B, + CTYPE_IN, + CTYPE_OUT>::run(a, b, alpha_val, out); + }); + }); + }); + } + + return out; +} + +Tensor& sub_scalar_out( + RuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + const Scalar& alpha, + Tensor& out) { + (void)ctx; + + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, a.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + ET_KERNEL_CHECK(ctx, tensor_is_realh_type(out), InvalidArgument, out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = utils::get_scalar_dtype(b); + ScalarType alpha_type = utils::get_scalar_dtype(alpha); + ScalarType common_type = + utils::promote_type_with_scalar(a_type, b, /*half_to_float*/ false); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); + ET_KERNEL_CHECK(ctx, canCast(alpha_type, common_type), InvalidArgument, out); + + if (common_type == ScalarType::Half) { + common_type = ScalarType::Float; + } + + constexpr auto name = "sub.Scalar_out"; + + ET_SWITCH_REALH_TYPES(a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_REAL_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + using CTYPE_IN = typename utils::promote_type_with_scalar_type< + CTYPE_A, + CTYPE_B, + /*half_to_float*/ true>::type; + ET_DCHECK(CppTypeToScalarType::value == common_type); + + CTYPE_B b_val; + utils::extract_scalar(b, &b_val); + CTYPE_IN b_casted = static_cast(b_val); + + CTYPE_IN alpha_val; + utils::extract_scalar(alpha, &alpha_val); + + using CTYPE_OUT = typename std::conditional< + std::is_same::value, + internal::F2, + CTYPE_IN>::type; + + apply_unary_map_fn( + [b_casted, alpha_val](const CTYPE_A val_a) { + CTYPE_IN a_casted = 
static_cast(val_a); + CTYPE_IN value = a_casted - alpha_val * b_casted; + return static_cast(value); + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); + }); + }); + + return out; +} + +} // namespace native +} // namespace executor +} // namespace torch diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_add_f32_broadcast.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_add_f32_broadcast.c new file mode 100644 index 00000000000..9eab22b05b7 --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_add_f32_broadcast.c @@ -0,0 +1,428 @@ +/******************************************************************************* +* Copyright (c) 2018-2024 Cadence Design Systems, Inc. +* +* Permission is hereby granted, free of charge, to any person obtaining +* a copy of this software and associated documentation files (the +* "Software"), to use this Software with Cadence processor cores only and +* not with any other processors and platforms, subject to +* the following conditions: +* +* The above copyright notice and this permission notice shall be included +* in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +******************************************************************************/ +#include "xa_type_def.h" +#include "xa_nnlib_common_fpu.h" +#include "xa_nn_common.h" +#include "xa_nnlib_err_chk.h" +#include "xa_nnlib_kernels_api.h" + + +#if HAVE_VFPU +static void internal_elm_add_broadcast_2D_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 out_lc, + WORD32 in_lc, + xtbool sign_flag) +{ + int i, j; + + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + int num_simd2_ops; + int num_scalar_ops; + + if(out_lc) + { + num_simd2_ops = in_lc >> 1; + num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 x1, x2, y; + xtfloat a0, b0, c0; + + /* For computing inp2 + inp1 */ + if(sign_flag){ + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_ADD_SX2(x2, x1); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_ADD_SX2(x2, x1); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_ADD_S(b0, a0); + XT_SSI(c0, (xtfloat *)p_c, 
0); + } + } + } + /* For computing inp1 + inp2 */ + else + { + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_ADD_SX2(x1, x2); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_ADD_SX2(x1, x2); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_ADD_S(a0, b0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } +} + +static void internal_elm_add_broadcast_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm, + xtbool sign_flag) +{ + int i; + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + const int num_simd2_ops = num_elm >> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out; + xtfloatx2 x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + + /* For computing inp2 + inp1 */ + if(sign_flag){ + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i p_inp2_shape[i] ? p_inp1_shape[i] : p_inp2_shape[i]))) + { + return -1; + } + } + + WORD32 inp1_strides[4], inp2_strides[4]; + inp1_strides[3] = 1; + inp2_strides[3] = 1; + for(i = 2; i >= 0; i--) + { + ae_int32x2 d_str, d_shape; + d_str = AE_MOVDA32X2(inp1_strides[i + 1], inp2_strides[i + 1]); + d_shape = AE_MOVDA32X2(p_inp1_shape[i + 1], p_inp2_shape[i + 1]); + d_str = AE_MULP32X2(d_str, d_shape); + inp1_strides[i] = AE_MOVAD32_H(d_str); + inp2_strides[i] = AE_MOVAD32_L(d_str); + } + + int need_broadcast = 0; + int inp1_const = 1, inp2_const = 1; + for(i = 0; i < 4; i++) + { + if(p_inp1_shape[i] != p_inp2_shape[i]) + { + if(p_inp1_shape[i] == 1) + inp1_strides[i] = 0; + else + inp2_strides[i] = 0; + + need_broadcast = 1; + } + if(p_inp1_shape[i] != 1) + inp1_const &= 0; + if(p_inp2_shape[i] != 1) + inp2_const &= 0; + } + int itr0, itr1, itr2; + + FLOAT32 *p_out_tmp = p_out; + const FLOAT32 *__restrict__ p_inp1_tmp = p_inp1; + const FLOAT32 *__restrict__ p_inp2_tmp = p_inp2; + if(need_broadcast == 0) + { + sign_flag = 0; + internal_elm_add_broadcast_2D_f32xf32_f32( + p_out, + p_inp1, + p_inp2, + 1, + p_out_shape[0] * inp1_strides[0], + sign_flag); + } + else if(inp1_strides[3] == inp2_strides[3]) + { + WORD32 in_lc, out_lc; + sign_flag = 0; + in_lc = p_out_shape[2] * p_out_shape[3]; + out_lc = 1; + if(inp1_strides[2] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[2]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + else if(inp2_strides[2] == 0) + { + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + + for(itr0 
= 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + internal_elm_add_broadcast_2D_f32xf32_f32( + p_out_tmp, + p_inp1_tmp0, + p_inp2_tmp0, + out_lc, + in_lc, + sign_flag); + p_out_tmp += in_lc * out_lc; + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + else if(inp1_const == 1 || inp2_const == 1) + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + sign_flag = 1; + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + } + internal_elm_add_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp, + p_inp2_tmp, + p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3], + sign_flag); + } + else + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[3]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + tmp_strides[2] = inp1_strides[2]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + inp1_strides[2] = inp2_strides[2]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + inp2_strides[2] = tmp_strides[2]; + } + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + const FLOAT32 *__restrict__ p_inp1_tmp1 = p_inp1_tmp0; + const FLOAT32 *__restrict__ p_inp2_tmp1 = p_inp2_tmp0; + for(itr2 = 0; itr2 < p_out_shape[2]; itr2++) + { + { + internal_elm_add_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp1, + p_inp2_tmp1, + p_out_shape[3], + sign_flag); + } + p_out_tmp += p_out_shape[3]; + p_inp1_tmp1 += inp1_strides[2]; + p_inp2_tmp1 += inp2_strides[2]; + } + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + return 0; + +} + diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_f32_broadcast.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_f32_broadcast.c new file mode 100644 index 00000000000..03b8d625186 --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_f32_broadcast.c @@ -0,0 +1,419 @@ +#include "xa_type_def.h" +#include "xa_nnlib_common_fpu.h" +#include "xa_nn_common.h" +#include "xa_nnlib_err_chk.h" +//#include "xa_nn_basic_state.h" +#include "xa_nnlib_kernels_api.h" + +#if HAVE_VFPU +static void internal_elm_div_broadcast_2D_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 out_lc, + WORD32 in_lc, + xtbool sign_flag) +{ + int i, j; + + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + int num_simd2_ops; + int num_scalar_ops; + + if(out_lc) + { + num_simd2_ops = in_lc >> 1; + num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 x1, x2, y; + xtfloat a0, b0, c0; + + /* For computing inp2 - inp1 */ + if(sign_flag){ + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + 
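+ /* Alignment check: when p_a, p_b and p_c are all 8-byte aligned, the aligned xtfloatx2 load/store (XT_LSX2IP/XT_SSX2IP) path is taken; otherwise the ae_valign-based unaligned path below handles the SIMD iterations, and any leftover element is processed by the scalar tail. */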
if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_DIV_SX2(x2, x1); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_DIV_SX2(x2, x1); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_DIV_S(b0, a0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } + /* For computing inp1 - inp2 */ + else + { + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_DIV_SX2(x1, x2); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_DIV_SX2(x1, x2); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_DIV_S(a0, b0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } +} + +static void internal_elm_div_broadcast_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm, + xtbool sign_flag) +{ + int i; + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + const int num_simd2_ops = num_elm >> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out; + xtfloatx2 x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + + /* For computing inp2 - inp1 */ + if(sign_flag){ + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i p_inp2_shape[i] ? 
p_inp1_shape[i] : p_inp2_shape[i]))) + { + return -1; + } + } + + WORD32 inp1_strides[4], inp2_strides[4]; + inp1_strides[3] = 1; + inp2_strides[3] = 1; + for(i = 2; i >= 0; i--) + { + ae_int32x2 d_str, d_shape; + d_str = AE_MOVDA32X2(inp1_strides[i + 1], inp2_strides[i + 1]); + d_shape = AE_MOVDA32X2(p_inp1_shape[i + 1], p_inp2_shape[i + 1]); + d_str = AE_MULP32X2(d_str, d_shape); + inp1_strides[i] = AE_MOVAD32_H(d_str); + inp2_strides[i] = AE_MOVAD32_L(d_str); + } + + int need_broadcast = 0; + int inp1_const = 1, inp2_const = 1; + for(i = 0; i < 4; i++) + { + if(p_inp1_shape[i] != p_inp2_shape[i]) + { + if(p_inp1_shape[i] == 1) + inp1_strides[i] = 0; + else + inp2_strides[i] = 0; + + need_broadcast = 1; + } + if(p_inp1_shape[i] != 1) + inp1_const &= 0; + if(p_inp2_shape[i] != 1) + inp2_const &= 0; + } + int itr0, itr1, itr2; + + FLOAT32 *p_out_tmp = p_out; + const FLOAT32 *__restrict__ p_inp1_tmp = p_inp1; + const FLOAT32 *__restrict__ p_inp2_tmp = p_inp2; + if(need_broadcast == 0) + { + sign_flag = 0; + internal_elm_div_broadcast_2D_f32xf32_f32( + p_out, + p_inp1, + p_inp2, + 1, + p_out_shape[0] * inp1_strides[0], + sign_flag); + } + else if(inp1_strides[3] == inp2_strides[3]) + { + WORD32 in_lc, out_lc; + sign_flag = 0; + in_lc = p_out_shape[2] * p_out_shape[3]; + out_lc = 1; + if(inp1_strides[2] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[2]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + else if(inp2_strides[2] == 0) + { + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + internal_elm_div_broadcast_2D_f32xf32_f32( + p_out_tmp, + p_inp1_tmp0, + p_inp2_tmp0, + out_lc, + in_lc, + sign_flag); + p_out_tmp += in_lc * out_lc; + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + else if(inp1_const == 1 || inp2_const == 1) + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + sign_flag = 1; + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + } + internal_elm_div_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp, + p_inp2_tmp, + p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3], + sign_flag); + } + else + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[3]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + tmp_strides[2] = inp1_strides[2]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + inp1_strides[2] = inp2_strides[2]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + inp2_strides[2] = tmp_strides[2]; + } + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + const FLOAT32 *__restrict__ p_inp1_tmp1 = p_inp1_tmp0; + const FLOAT32 *__restrict__ p_inp2_tmp1 = 
p_inp2_tmp0; + for(itr2 = 0; itr2 < p_out_shape[2]; itr2++) + { + { + internal_elm_div_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp1, + p_inp2_tmp1, + p_out_shape[3], + sign_flag); + } + p_out_tmp += p_out_shape[3]; + p_inp1_tmp1 += inp1_strides[2]; + p_inp2_tmp1 += inp2_strides[2]; + } + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + return 0; +} +#endif diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_mode_f32_broadcast.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_mode_f32_broadcast.c new file mode 100644 index 00000000000..95b449f43f7 --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_mode_f32_broadcast.c @@ -0,0 +1,644 @@ +#include "xa_type_def.h" +#include "xa_nnlib_common_fpu.h" +#include "xa_nn_common.h" +#include "xa_nnlib_err_chk.h" +//#include "xa_nn_basic_state.h" +#include "xa_nnlib_kernels_api.h" + +#if !HAVE_VFPU + DISCARD_FUN_FOR_NONVOID_RETURN( + WORD32, xa_nn_elm_div_mode_f32xf32_f32, + ( + FLOAT32 *p_out, + const FLOAT32 *p_inp1, + const FLOAT32 *p_inp2, + WORD32 num_elm, + WORD32 mode + ) + ) +#else +WORD32 xa_nn_elm_div_mode_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm, + WORD32 mode) +{ + /* NULL pointer checks */ + XA_NNLIB_ARG_CHK_PTR(p_out, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp1, -1); + XA_NNLIB_ARG_CHK_PTR(p_inp2, -1); + /* Pointer alignment checks */ + XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp1, sizeof(FLOAT32), -1); + XA_NNLIB_ARG_CHK_ALIGN(p_inp2, sizeof(FLOAT32), -1); + /* Basic Parameter checks */ + XA_NNLIB_ARG_CHK_COND((num_elm <= 0), -1); + XA_NNLIB_ARG_CHK_COND(((mode != 0) && (mode != 1)), -1); + + int i; + xtfloatx2 *inp1 = (xtfloatx2 *)p_inp1; + xtfloatx2 *inp2 = (xtfloatx2 *)p_inp2; + xtfloatx2 *out = (xtfloatx2 *)p_out; + xtfloatx2 x1, x2, y; + ae_valign inp1_a, inp2_a, out_a; + + inp1_a = XT_LASX2PP(inp1); + inp2_a = XT_LASX2PP(inp2); + out_a = AE_ZALIGN64(); + /* Each iteration of loop is independent so safe to use concurrent pragma */ + if(mode == 0) + { +#pragma concurrent /* Each iteration of loop is independent so safe to use concurrent pragma */ + for(i=0;i < num_elm>>1;i++) + { + XT_LASX2IP(x1, inp1_a, inp1); + XT_LASX2IP(x2, inp2_a, inp2); + y = XT_DIV_SX2(x1, x2); + y = FITRUNC_SX2(y); + XT_SASX2IP(y, out_a, out); + } + } + else + { +#pragma concurrent + for(i=0;i < num_elm>>1;i++) + { + XT_LASX2IP(x1, inp1_a, inp1); + XT_LASX2IP(x2, inp2_a, inp2); + y = XT_DIV_SX2(x1, x2); + y = FIFLOOR_SX2(y); + XT_SASX2IP(y, out_a, out); + } + } + XT_SASX2POSFP(out_a, out); + + // Remainder Loop + if (num_elm & 1) + { + xtfloat a1, a2, a; + XT_LSIP(a1, (xtfloat *)inp1, 0); + XT_LSIP(a2, (xtfloat *)inp2, 0); + a = XT_DIV_S(a1, a2); + if(mode == 0) + a = FITRUNC_S(a); + else + a = FIFLOOR_S(a); + XT_SSI(a, (xtfloat *)out, 0); + } + + return 0; +} +#endif + +#if HAVE_VFPU +static void internal_elm_div_mode_broadcast_2D_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 out_lc, + WORD32 in_lc, + xtbool sign_flag, + WORD32 mode) +{ + int i, j; + + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + int num_simd2_ops; + int num_scalar_ops; + + if(out_lc) + { + num_simd2_ops = in_lc >> 1; + 
num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 x1, x2, y; + xtfloat a0, b0, c0; + + /* For computing inp2 - inp1 */ + if(sign_flag){ + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + if(mode == 0) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_DIV_SX2(x2, x1); + y = FITRUNC_SX2(y); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_DIV_SX2(x2, x1); + y = FIFLOOR_SX2(y); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + if(mode == 0) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_DIV_SX2(x2, x1); + y = FITRUNC_SX2(y); + XT_SASX2IP(y, out_a, p_c); + } + } + else + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_DIV_SX2(x2, x1); + y = FIFLOOR_SX2(y); + XT_SASX2IP(y, out_a, p_c); + } + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_DIV_S(b0, a0); + if(mode == 0) + c0 = FITRUNC_S(c0); + else + c0 = FIFLOOR_S(c0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } + /* For computing inp1 - inp2 */ + else + { + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + if(mode == 0) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_DIV_SX2(x1, x2); + y = FITRUNC_SX2(y); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_DIV_SX2(x1, x2); + y = FIFLOOR_SX2(y); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + }/* if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0))*/ + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + if(mode == 0) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_DIV_SX2(x1, x2); + y = FITRUNC_SX2(y); + XT_SASX2IP(y, out_a, p_c); + } + } + else + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_DIV_SX2(x1, x2); + y = FIFLOOR_SX2(y); + XT_SASX2IP(y, out_a, p_c); + } + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_DIV_S(a0, b0); + if(mode == 0) + c0 = FITRUNC_S(c0); + else + c0 = FIFLOOR_S(c0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } + } +} + +static void internal_elm_div_mode_broadcast_f32xf32_f32(FLOAT32 * __restrict__ p_out, + 
const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm, + xtbool sign_flag, + WORD32 mode) +{ + int i; + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + const int num_simd2_ops = num_elm >> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out; + xtfloatx2 x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + + /* For computing inp2 - inp1 */ + if(sign_flag){ + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + if(mode == 0) + { + for(i=0; i p_inp2_shape[i] ? p_inp1_shape[i] : p_inp2_shape[i]))) + { + return -1; + } + } + XA_NNLIB_ARG_CHK_COND(((mode != 0) && (mode != 1)), -1); + + WORD32 inp1_strides[4], inp2_strides[4]; + inp1_strides[3] = 1; + inp2_strides[3] = 1; + for(i = 2; i >= 0; i--) + { + ae_int32x2 d_str, d_shape; + d_str = AE_MOVDA32X2(inp1_strides[i + 1], inp2_strides[i + 1]); + d_shape = AE_MOVDA32X2(p_inp1_shape[i + 1], p_inp2_shape[i + 1]); + d_str = AE_MULP32X2(d_str, d_shape); + inp1_strides[i] = AE_MOVAD32_H(d_str); + inp2_strides[i] = AE_MOVAD32_L(d_str); + } + + int need_broadcast = 0; + int inp1_const = 1, inp2_const = 1; + for(i = 0; i < 4; i++) + { + if(p_inp1_shape[i] != p_inp2_shape[i]) + { + if(p_inp1_shape[i] == 1) + inp1_strides[i] = 0; + else + inp2_strides[i] = 0; + + need_broadcast = 1; + } + if(p_inp1_shape[i] != 1) + inp1_const &= 0; + if(p_inp2_shape[i] != 1) + inp2_const &= 0; + } + int itr0, itr1, itr2; + + FLOAT32 *p_out_tmp = p_out; + const FLOAT32 *__restrict__ p_inp1_tmp = p_inp1; + const FLOAT32 *__restrict__ p_inp2_tmp = p_inp2; + if(need_broadcast == 0) + { + sign_flag = 0; + internal_elm_div_mode_broadcast_2D_f32xf32_f32( + p_out, + p_inp1, + p_inp2, + 1, + p_out_shape[0] * inp1_strides[0], + sign_flag, + mode); + } + else if(inp1_strides[3] == inp2_strides[3]) + { + WORD32 in_lc, out_lc; + sign_flag = 0; + in_lc = p_out_shape[2] * p_out_shape[3]; + out_lc = 1; + if(inp1_strides[2] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[2]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + else if(inp2_strides[2] == 0) + { + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + internal_elm_div_mode_broadcast_2D_f32xf32_f32( + p_out_tmp, + p_inp1_tmp0, + p_inp2_tmp0, + out_lc, + in_lc, + sign_flag, + mode); + p_out_tmp += in_lc * out_lc; + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + else if(inp1_const == 1 || inp2_const == 1) + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + sign_flag = 1; + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + } + internal_elm_div_mode_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp, + p_inp2_tmp, + p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3], + sign_flag, + mode); + } + else + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; 
p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[3]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + tmp_strides[2] = inp1_strides[2]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + inp1_strides[2] = inp2_strides[2]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + inp2_strides[2] = tmp_strides[2]; + } + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + const FLOAT32 *__restrict__ p_inp1_tmp1 = p_inp1_tmp0; + const FLOAT32 *__restrict__ p_inp2_tmp1 = p_inp2_tmp0; + for(itr2 = 0; itr2 < p_out_shape[2]; itr2++) + { + { + internal_elm_div_mode_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp1, + p_inp2_tmp1, + p_out_shape[3], + sign_flag, + mode); + } + p_out_tmp += p_out_shape[3]; + p_inp1_tmp1 += inp1_strides[2]; + p_inp2_tmp1 += inp2_strides[2]; + } + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + return 0; +} +#endif diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c new file mode 100644 index 00000000000..b9aa102a15f --- /dev/null +++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c @@ -0,0 +1,360 @@ +/******************************************************************************* +* Copyright (c) 2018-2024 Cadence Design Systems, Inc. +* +* Permission is hereby granted, free of charge, to any person obtaining +* a copy of this software and associated documentation files (the +* "Software"), to use this Software with Cadence processor cores only and +* not with any other processors and platforms, subject to +* the following conditions: +* +* The above copyright notice and this permission notice shall be included +* in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
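
The trunc/floor division dispatcher that concludes above derives row-major strides for both 4D inputs and zeroes the stride of any size-1 dimension, so that operand is simply reused along the broadcast axis; the mode argument then only chooses FITRUNC_SX2 (mode 0, truncate) or FIFLOOR_SX2 (mode 1, floor) after the divide. A minimal sketch of that stride setup in plain C, assuming ordinary int arithmetic in place of the WORD32/AE_MOVDA32X2 packed-stride idiom, with hypothetical helper names:

/* Illustrative sketch of the 4D broadcast stride setup used by the
 * dispatchers in these kernels (plain C, hypothetical names). */
#include <stdio.h>

static void compute_broadcast_strides(const int out_shape[4],
                                      const int in1_shape[4],
                                      const int in2_shape[4],
                                      int in1_strides[4],
                                      int in2_strides[4],
                                      int *need_broadcast) {
  /* Innermost dimension is contiguous. */
  in1_strides[3] = 1;
  in2_strides[3] = 1;
  for (int i = 2; i >= 0; i--) {
    in1_strides[i] = in1_strides[i + 1] * in1_shape[i + 1];
    in2_strides[i] = in2_strides[i + 1] * in2_shape[i + 1];
  }
  *need_broadcast = 0;
  for (int i = 0; i < 4; i++) {
    if (in1_shape[i] != in2_shape[i]) {
      /* A size-1 dimension is broadcast by freezing its stride at 0. */
      if (in1_shape[i] == 1)
        in1_strides[i] = 0;
      else
        in2_strides[i] = 0;
      *need_broadcast = 1;
    }
  }
  (void)out_shape; /* shape compatibility is assumed to be checked by the caller */
}

int main(void) {
  const int out[4] = {2, 3, 4, 5};
  const int a[4] = {2, 3, 4, 5};
  const int b[4] = {1, 3, 1, 5}; /* broadcast along dims 0 and 2 */
  int sa[4], sb[4], bc;
  compute_broadcast_strides(out, a, b, sa, sb, &bc);
  printf("need_broadcast=%d a_strides=%d,%d,%d,%d b_strides=%d,%d,%d,%d\n",
         bc, sa[0], sa[1], sa[2], sa[3], sb[0], sb[1], sb[2], sb[3]);
  return 0;
}

With a stride of 0 on a broadcast dimension, the inner kernels never need to know which operand was expanded; each pointer simply advances by its own stride.
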
+ +******************************************************************************/ +#include "xa_type_def.h" +#include "nnlib-hifi4/xa_nnlib/algo/common/include/xa_nnlib_common_fpu.h" +#include "nnlib-hifi4/xa_nnlib/algo/common/include/xa_nn_common.h" +#include "nnlib-hifi4/xa_nnlib/algo/common/include/xa_nnlib_err_chk.h" +#include "nnlib-hifi4/xa_nnlib/algo/kernels/basic/hifi4/xa_nn_basic_state.h" +#include "nnlib-hifi4/xa_nnlib/include/nnlib/xa_nnlib_kernels_api.h" + +#if HAVE_VFPU +static void internal_elm_mul_broadcast_2D_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 out_lc, + WORD32 in_lc, + xtbool sign_flag) +{ + int i, j; + + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + int num_simd2_ops; + int num_scalar_ops; + + if(out_lc) + { + num_simd2_ops = in_lc >> 1; + num_scalar_ops = in_lc & 1; + } + else + { + num_simd2_ops = (in_lc >> 2) << 1; + num_scalar_ops = in_lc & 3; + } + + xtfloatx2 x1, x2, y; + xtfloat a0, b0, c0; + + for(i = 0; i < out_lc; i++) + { + p_a = (xtfloatx2 *)&p_inp1[i * in_lc]; + p_b = (xtfloatx2 *)p_inp2; + p_c = (xtfloatx2 *)&p_out[i * in_lc]; + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_b)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(j = 0; j < num_simd2_ops; j++) + { + XT_LSX2IP(x1, p_a, 2 * sizeof(FLOAT32)); + XT_LSX2IP(x2, p_b, 2 * sizeof(FLOAT32)); + y = XT_MUL_SX2(x2, x1); + XT_SSX2IP(y, p_c, 2 * sizeof(FLOAT32)); + } + } + else + { + ae_valign vinp1, vinp2, out_a = AE_ZALIGN64(); + vinp1 = XT_LASX2PP(p_a); + vinp2 = XT_LASX2PP(p_b); + for(j = 0; j < num_simd2_ops; j++) + { + XT_LASX2IP(x1, vinp1, p_a); + XT_LASX2IP(x2, vinp2, p_b); + y = XT_MUL_SX2(x2, x1); + XT_SASX2IP(y, out_a, p_c); + } + XT_SASX2POSFP(out_a, (xtfloatx2 *)p_c); + } + if(num_scalar_ops !=0) + { + XT_LSIP(a0, (xtfloat *)p_a, sizeof(FLOAT32)); + XT_LSIP(b0, (xtfloat *)p_b, sizeof(FLOAT32)); + c0 = XT_MUL_S(b0, a0); + XT_SSI(c0, (xtfloat *)p_c, 0); + } + } +} + +static void internal_elm_mul_broadcast_f32xf32_f32(FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm, + xtbool sign_flag) +{ + int i; + xtfloatx2 * __restrict__ p_a = (xtfloatx2 *)p_inp1; + xtfloatx2 * __restrict__ p_b = (xtfloatx2 *)p_inp2; + xtfloatx2 *__restrict__ p_c = (xtfloatx2 *)p_out; + + const int num_simd2_ops = num_elm >> 1; + const int num_scalar_ops = num_elm & 1; + + xtfloat a0_7, out; + xtfloatx2 x1, x2, y; + x2 = XT_LSI((xtfloat *)p_b, 0); + + if(((((unsigned)p_a)&7) == 0) && ((((unsigned)p_c)&7) == 0)) + { + for(i=0; i p_inp2_shape[i] ? 
p_inp1_shape[i] : p_inp2_shape[i]))) + { + return -1; + } + } + + WORD32 inp1_strides[4], inp2_strides[4]; + inp1_strides[3] = 1; + inp2_strides[3] = 1; + for(i = 2; i >= 0; i--) + { + ae_int32x2 d_str, d_shape; + d_str = AE_MOVDA32X2(inp1_strides[i + 1], inp2_strides[i + 1]); + d_shape = AE_MOVDA32X2(p_inp1_shape[i + 1], p_inp2_shape[i + 1]); + d_str = AE_MULP32X2(d_str, d_shape); + inp1_strides[i] = AE_MOVAD32_H(d_str); + inp2_strides[i] = AE_MOVAD32_L(d_str); + } + + int need_broadcast = 0; + int inp1_const = 1, inp2_const = 1; + for(i = 0; i < 4; i++) + { + if(p_inp1_shape[i] != p_inp2_shape[i]) + { + if(p_inp1_shape[i] == 1) + inp1_strides[i] = 0; + else + inp2_strides[i] = 0; + + need_broadcast = 1; + } + if(p_inp1_shape[i] != 1) + inp1_const &= 0; + if(p_inp2_shape[i] != 1) + inp2_const &= 0; + } + int itr0, itr1, itr2; + + FLOAT32 *p_out_tmp = p_out; + const FLOAT32 *__restrict__ p_inp1_tmp = p_inp1; + const FLOAT32 *__restrict__ p_inp2_tmp = p_inp2; + if(need_broadcast == 0) + { + sign_flag = 0; + internal_elm_mul_broadcast_2D_f32xf32_f32( + p_out, + p_inp1, + p_inp2, + 1, + p_out_shape[0] * inp1_strides[0], + sign_flag); + } + else if(inp1_strides[3] == inp2_strides[3]) + { + WORD32 in_lc, out_lc; + sign_flag = 0; + in_lc = p_out_shape[2] * p_out_shape[3]; + out_lc = 1; + if(inp1_strides[2] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[2]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + else if(inp2_strides[2] == 0) + { + in_lc = p_out_shape[3]; + out_lc = p_out_shape[2]; + } + + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + internal_elm_mul_broadcast_2D_f32xf32_f32( + p_out_tmp, + p_inp1_tmp0, + p_inp2_tmp0, + out_lc, + in_lc, + sign_flag); + p_out_tmp += in_lc * out_lc; + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + else if(inp1_const == 1 || inp2_const == 1) + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + sign_flag = 1; + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + } + internal_elm_mul_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp, + p_inp2_tmp, + p_out_shape[0] * p_out_shape[1] * p_out_shape[2] * p_out_shape[3], + sign_flag); + } + else + { + sign_flag = 0; + if(inp1_strides[3] == 0) + { + const FLOAT32 *tmp; + tmp = p_inp1_tmp; p_inp1_tmp = p_inp2_tmp; p_inp2_tmp = tmp; + sign_flag = 1; + int tmp_strides[3]; + tmp_strides[0] = inp1_strides[0]; + tmp_strides[1] = inp1_strides[1]; + tmp_strides[2] = inp1_strides[2]; + + inp1_strides[0] = inp2_strides[0]; + inp1_strides[1] = inp2_strides[1]; + inp1_strides[2] = inp2_strides[2]; + + inp2_strides[0] = tmp_strides[0]; + inp2_strides[1] = tmp_strides[1]; + inp2_strides[2] = tmp_strides[2]; + } + for(itr0 = 0; itr0 < p_out_shape[0]; itr0++) + { + const FLOAT32 *__restrict__ p_inp1_tmp0 = p_inp1_tmp; + const FLOAT32 *__restrict__ p_inp2_tmp0 = p_inp2_tmp; + for(itr1 = 0; itr1 < p_out_shape[1]; itr1++) + { + const FLOAT32 *__restrict__ p_inp1_tmp1 = p_inp1_tmp0; + const FLOAT32 *__restrict__ p_inp2_tmp1 = 
p_inp2_tmp0; + for(itr2 = 0; itr2 < p_out_shape[2]; itr2++) + { + { + internal_elm_mul_broadcast_f32xf32_f32( + p_out_tmp, + p_inp1_tmp1, + p_inp2_tmp1, + p_out_shape[3], + sign_flag); + } + p_out_tmp += p_out_shape[3]; + p_inp1_tmp1 += inp1_strides[2]; + p_inp2_tmp1 += inp2_strides[2]; + } + p_inp1_tmp0 += inp1_strides[1]; + p_inp2_tmp0 += inp2_strides[1]; + } + p_inp1_tmp += inp1_strides[0]; + p_inp2_tmp += inp2_strides[0]; + } + } + return 0; +} +#endif From 6ad490aac289d080ecbdad00c4038021937111e1 Mon Sep 17 00:00:00 2001 From: dijopaul Date: Wed, 11 Sep 2024 06:19:38 -0700 Subject: [PATCH 2/9] Adding sigmoid optimizations --- .../cadence/hifi/operators/CMakeLists.txt | 2 +- .../cadence/hifi/operators/op_sigmoid.cpp | 72 +++++++++++++++++++ 2 files changed, 73 insertions(+), 1 deletion(-) create mode 100644 backends/cadence/hifi/operators/op_sigmoid.cpp diff --git a/backends/cadence/hifi/operators/CMakeLists.txt b/backends/cadence/hifi/operators/CMakeLists.txt index d56d19fc37f..509b7d1447b 100644 --- a/backends/cadence/hifi/operators/CMakeLists.txt +++ b/backends/cadence/hifi/operators/CMakeLists.txt @@ -23,6 +23,7 @@ set(_aten_ops__srcs "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_add.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_div.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mul.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sigmoid.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sub.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_bmm.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_cat.cpp" @@ -30,7 +31,6 @@ set(_aten_ops__srcs "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_embedding.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_full.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_permute_copy.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_sigmoid.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_slice_copy.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_softmax.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_split_with_sizes_copy.cpp" diff --git a/backends/cadence/hifi/operators/op_sigmoid.cpp b/backends/cadence/hifi/operators/op_sigmoid.cpp new file mode 100644 index 00000000000..0b7a72bcc73 --- /dev/null +++ b/backends/cadence/hifi/operators/op_sigmoid.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
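
The multiply dispatcher that concludes above picks between the flat loop, the 2D kernel, and the constant-operand kernel purely from these per-dimension strides, with a stride of 0 marking a broadcast dimension. A scalar reference of the same contract can be handy when checking the vectorized paths; the sketch below is a plain-C model under that assumption, with hypothetical names rather than the library's own API:

/* Illustrative scalar model of a 4D broadcast multiply driven by
 * per-dimension strides (stride 0 on a broadcast dimension). */
#include <stddef.h>
#include <stdio.h>

static void ref_mul_broadcast_4d(float *out, const int out_shape[4],
                                 const float *a, const int a_strides[4],
                                 const float *b, const int b_strides[4]) {
  size_t o = 0;
  for (int i0 = 0; i0 < out_shape[0]; i0++)
    for (int i1 = 0; i1 < out_shape[1]; i1++)
      for (int i2 = 0; i2 < out_shape[2]; i2++)
        for (int i3 = 0; i3 < out_shape[3]; i3++) {
          size_t ia = (size_t)i0 * a_strides[0] + (size_t)i1 * a_strides[1] +
                      (size_t)i2 * a_strides[2] + (size_t)i3 * a_strides[3];
          size_t ib = (size_t)i0 * b_strides[0] + (size_t)i1 * b_strides[1] +
                      (size_t)i2 * b_strides[2] + (size_t)i3 * b_strides[3];
          out[o++] = a[ia] * b[ib];
        }
}

int main(void) {
  const int out_shape[4] = {1, 1, 2, 3};
  const float a[6] = {1, 2, 3, 4, 5, 6};   /* shape 1x1x2x3 */
  const float b[3] = {10, 20, 30};         /* shape 1x1x1x3, broadcast on dim 2 */
  const int a_strides[4] = {6, 6, 3, 1};
  const int b_strides[4] = {3, 3, 0, 1};   /* stride 0 on the broadcast dim */
  float out[6];
  ref_mul_broadcast_4d(out, out_shape, a, a_strides, b, b_strides);
  for (int i = 0; i < 6; i++) printf("%.0f ", out[i]); /* 10 40 90 40 100 180 */
  printf("\n");
  return 0;
}

Because the operation is a per-element multiply with no reduction, the optimized kernels should match such a reference element for element for float inputs.
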
+ */ + +#include + +#include +#include +#include "kernels.h" + +namespace torch { +namespace executor { +namespace native { + +using Tensor = exec_aten::Tensor; + +Tensor& sigmoid_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { + (void)ctx; + + ET_KERNEL_CHECK( + ctx, in.scalar_type() != ScalarType::Bool, InvalidArgument, out); + ET_KERNEL_CHECK(ctx, tensor_is_floating_type(out), InvalidArgument, out); + + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, in.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + ScalarType in_type = in.scalar_type(); + ScalarType out_type = out.scalar_type(); + + int fall_back = 0; + if((in_type != ScalarType::Float) || (out_type != ScalarType::Float)) + fall_back = 1; + + if(!fall_back) + { + float* data_in = in.mutable_data_ptr(); + float* data_out = out.mutable_data_ptr(); + xa_nn_vec_sigmoid_f32_f32(data_out, data_in, in.numel()); + } + else + { + ET_SWITCH_REALHB_TYPES(in_type, ctx, "sigmoid.out", CTYPE_IN, [&]() { + ET_SWITCH_FLOATH_TYPES(out_type, ctx, "sigmoid.out", CTYPE_OUT, [&]() { + apply_unary_map_fn( + [](const CTYPE_IN val_in) { + // perform math in double to preserve precision + double in_casted = static_cast(val_in); + double out_val = 1.0 / (1.0 + exp(-in_casted)); + return static_cast(out_val); + }, + in.const_data_ptr(), + out.mutable_data_ptr(), + in.numel()); + }); + }); + } + + return out; +} + +} // namespace native +} // namespace executor +} // namespace torch From c0b10052a4f47991f6e3340b851747eb4b58a443 Mon Sep 17 00:00:00 2001 From: dijopaul Date: Thu, 12 Sep 2024 04:48:42 -0700 Subject: [PATCH 3/9] Adding tanh optimizations --- backends/cadence/aot/functions_hifi.yaml | 5 +++ .../cadence/hifi/operators/CMakeLists.txt | 2 + backends/cadence/hifi/operators/op_tanh.cpp | 40 +++++++++++++++++++ 3 files changed, 47 insertions(+) create mode 100644 backends/cadence/hifi/operators/op_tanh.cpp diff --git a/backends/cadence/aot/functions_hifi.yaml b/backends/cadence/aot/functions_hifi.yaml index 729db66850a..bab47eccff4 100644 --- a/backends/cadence/aot/functions_hifi.yaml +++ b/backends/cadence/aot/functions_hifi.yaml @@ -92,6 +92,11 @@ - arg_meta: null kernel_name: torch::executor::sub_out +- op: tanh.out + kernels: + - arg_meta: null + kernel_name: torch::executor::tanh_out + - op: view_copy.out kernels: - arg_meta: null diff --git a/backends/cadence/hifi/operators/CMakeLists.txt b/backends/cadence/hifi/operators/CMakeLists.txt index 509b7d1447b..15e6c280ef6 100644 --- a/backends/cadence/hifi/operators/CMakeLists.txt +++ b/backends/cadence/hifi/operators/CMakeLists.txt @@ -25,6 +25,7 @@ set(_aten_ops__srcs "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mul.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sigmoid.cpp" "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sub.cpp" + "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_tanh.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_bmm.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_cat.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_clone.cpp" @@ -37,6 +38,7 @@ set(_aten_ops__srcs "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_to_copy.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_view_copy.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_where.cpp" + "${EXECUTORCH_ROOT}/kernels/portable/cpu/pattern/unary_ufunc_realhb_to_floath.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/activation_ops_util.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/broadcast_util.cpp" 
"${EXECUTORCH_ROOT}/kernels/portable/cpu/util/copy_ops_util.cpp" diff --git a/backends/cadence/hifi/operators/op_tanh.cpp b/backends/cadence/hifi/operators/op_tanh.cpp new file mode 100644 index 00000000000..47a057fe774 --- /dev/null +++ b/backends/cadence/hifi/operators/op_tanh.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include "kernels.h" + +namespace torch { +namespace executor { +namespace native { + +Tensor& tanh_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { + + int fall_back = 0; + if((in.scalar_type() != ScalarType::Float) || (out.scalar_type() != ScalarType::Float)) + fall_back = 1; + + if(!fall_back) + { + float* data_in = in.mutable_data_ptr(); + float* data_out = out.mutable_data_ptr(); + xa_nn_vec_tanh_f32_f32(data_out, data_in, (int)in.numel()); + return out; + } + else + { + return internal::unary_ufunc_realhb_to_floath(std::tanh, ctx, in, out); + } + +} + +} // namespace native +} // namespace executor +} // namespace torch From a8c4f667f1c8d197fceeee95e7cb184136d6ac74 Mon Sep 17 00:00:00 2001 From: dijopaul Date: Thu, 26 Sep 2024 01:02:14 -0700 Subject: [PATCH 4/9] Fixing review comments in 5483 --- backends/cadence/hifi/operators/op_add.cpp | 239 ++++++++++++++---- backends/cadence/hifi/operators/op_div.cpp | 167 ++++++------ backends/cadence/hifi/operators/op_mul.cpp | 75 +++--- .../cadence/hifi/operators/op_sigmoid.cpp | 42 +-- backends/cadence/hifi/operators/op_sub.cpp | 84 +++--- backends/cadence/hifi/operators/op_tanh.cpp | 14 +- 6 files changed, 380 insertions(+), 241 deletions(-) diff --git a/backends/cadence/hifi/operators/op_add.cpp b/backends/cadence/hifi/operators/op_add.cpp index 15ba5c250e7..38585b40055 100644 --- a/backends/cadence/hifi/operators/op_add.cpp +++ b/backends/cadence/hifi/operators/op_add.cpp @@ -9,82 +9,140 @@ #include #include #include +#include #include #include -#include "kernels.h" +#include namespace torch { namespace executor { namespace native { +namespace { -#define NNLIB_MAX_DIM 4 /* Add fallback if broadcast and dim > 4 */ +template < + bool can_cast, + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct AddInner; + +template < + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct AddInner { + static void + run(const Tensor& a, const Tensor& b, CTYPE_IN alpha_val, Tensor& out) { + apply_binary_elementwise_fn( + // NOLINTNEXTLINE(facebook-hte-ConstantArgumentPassByValue) + [alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = a_casted + alpha_val * b_casted; + + return static_cast(value); + }, + a, + b, + out); + } +}; + +template +struct ReportCanCastBug { + static void run(const Tensor&, const Tensor&, CTYPE_IN, Tensor&) { + ET_DCHECK_MSG(false, "BUG: canCast should have been checked above"); + } +}; + +template < + typename CTYPE_A, + typename CTYPE_B, + typename CTYPE_IN, + typename CTYPE_OUT> +struct AddInner + : public ReportCanCastBug {}; + +} // namespace Tensor& add_out( - RuntimeContext& ctx, + KernelRuntimeContext& ctx, const Tensor& a, const Tensor& b, const Scalar& alpha, Tensor& out) { - (void)ctx; + ET_KERNEL_CHECK( + ctx, + resize_to_broadcast_target_size(a, b, out) == Error::Ok, + InvalidArgument, 
+ out); + + ET_KERNEL_CHECK( + ctx, + executorch::runtime::tensor_is_realhbbf16_type(out), + InvalidArgument, + out); + ET_KERNEL_CHECK( + ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out); ScalarType a_type = a.scalar_type(); ScalarType b_type = b.scalar_type(); - ScalarType common_type = promoteTypes(a_type, b_type); + ScalarType alpha_type = utils::get_scalar_dtype(alpha); + ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); ScalarType out_type = out.scalar_type(); - ET_CHECK_MSG(a_type == ScalarType::Float, "Input tensor not a float.\n"); - ET_CHECK_MSG(b_type == ScalarType::Float, "Input tensor not a float.\n"); - ET_CHECK_MSG(out_type == ScalarType::Float, "Output tensor not a float.\n"); - - ET_CHECK(canCast(common_type, out_type)); - - using CTYPE_A = float; - using CTYPE_B = float; - using CTYPE_IN = float; - using CTYPE_OUT = float; - CTYPE_IN alpha_val; - ET_EXTRACT_SCALAR(alpha, alpha_val); + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + ET_KERNEL_CHECK( + ctx, check_alpha_type(alpha_type, common_type), InvalidArgument, out); + + float alpha_val; + utils::extract_scalar(alpha, &alpha_val); + constexpr auto name = "add.out"; + constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); - int fall_back = 0; + bool optimized = 1; /*find broadcast*/ - const int a_is_broadcasted = !out.sizes().equals(a.sizes()); - const int b_is_broadcasted = !out.sizes().equals(b.sizes()); - const int broadcast = (a_is_broadcasted || b_is_broadcasted); + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted || b_is_broadcasted); int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; - if( (out_type != ScalarType::Float) || (alpha_val != 1.0)) - fall_back = 1; + if((out_type != ScalarType::Float) || (alpha_val != 1.0)) + optimized = 0; - if( (a_dim == 0) || (b_dim == 0) ) - fall_back = 1; + if((a_dim == 0) || (b_dim == 0) ) + optimized = 0; - if((broadcast == 1) && (max_dim > NNLIB_MAX_DIM)) - fall_back = 1; + if((broadcast == 1) && (max_dim > kNnlibMaxDim)) + optimized = 0; - if (!fall_back) + if(optimized) { const float* const a_data = a.const_data_ptr(); const float* const b_data = b.const_data_ptr(); float* const out_data = out.mutable_data_ptr(); if(broadcast == 1) { - int out_shape[NNLIB_MAX_DIM]; - int inp1_shape[NNLIB_MAX_DIM]; - int inp2_shape[NNLIB_MAX_DIM]; + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; - for(int i = 0; i < NNLIB_MAX_DIM; i++) + for(int i = 0; i < kNnlibMaxDim; i++) { out_shape[i] = 1; inp1_shape[i] = 1; inp2_shape[i] = 1; } - int off_o = NNLIB_MAX_DIM - out.dim(); - int off_a = NNLIB_MAX_DIM - a.dim(); - int off_b = NNLIB_MAX_DIM - b.dim(); + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); for(int i = 0; i < out.dim(); i++) out_shape[i+off_o] = out.size(i); @@ -97,24 +155,109 @@ Tensor& add_out( b_data, inp2_shape); } else + { xa_nn_elm_add_f32xf32_f32(out_data, a_data, b_data, out.numel()); - + } + + return out; } - else - { - apply_binary_elementwise_fn( - [alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) { - CTYPE_IN a_casted = static_cast(val_a); - CTYPE_IN b_casted = static_cast(val_b); - CTYPE_IN value = a_casted + alpha_val * b_casted; - - return static_cast(value); - }, - a, - b, + + ET_SWITCH_REALHBBF16_TYPES(a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_REALHBBF16_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + using CTYPE_IN = typename torch::executor:: + promote_types::type; + ET_DCHECK(CppTypeToScalarType::value == common_type); + CTYPE_IN alpha_val; + utils::extract_scalar(alpha, &alpha_val); + + ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&]() { + AddInner< + can_cast::value, + CTYPE_A, + CTYPE_B, + CTYPE_IN, + CTYPE_OUT>::run(a, b, alpha_val, out); + }); + }); + }); + + return out; +} + +Tensor& add_scalar_out( + KernelRuntimeContext& ctx, + const Tensor& a, + const Scalar& b, + const Scalar& alpha, + Tensor& out) { + + // Resize for dynamic shape + ET_KERNEL_CHECK_MSG( + ctx, + resize_tensor(out, a.sizes()) == Error::Ok, + InvalidArgument, + out, + "Failed to resize output tensor."); + + ET_KERNEL_CHECK( + ctx, + executorch::runtime::tensor_is_realhbbf16_type(out), + InvalidArgument, out); + ET_KERNEL_CHECK( + ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out); + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = utils::get_scalar_dtype(b); + ScalarType alpha_type = utils::get_scalar_dtype(alpha); + ScalarType common_type = + utils::promote_type_with_scalar(a_type, b, /*half_to_float*/ false); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); + ET_KERNEL_CHECK( + ctx, check_alpha_type(alpha_type, common_type), InvalidArgument, out); + + /*When Half first compute the result in float precision + and then downcast to half*/ + if (common_type == ScalarType::Half) { + common_type = ScalarType::Float; } + constexpr auto name = "add.Scalar_out"; + + ET_SWITCH_REALHBBF16_TYPES(a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + using CTYPE_IN = 
typename utils::promote_type_with_scalar_type< + CTYPE_A, + CTYPE_B, + /*half_to_float*/ true>::type; + ET_DCHECK(CppTypeToScalarType::value == common_type); + + CTYPE_B b_val; + utils::extract_scalar(b, &b_val); + CTYPE_IN b_casted = static_cast(b_val); + + CTYPE_IN alpha_val; + utils::extract_scalar(alpha, &alpha_val); + + using CTYPE_OUT = typename std::conditional< + std::is_same::value, + internal::F2, + CTYPE_IN>::type; + + apply_unary_map_fn( + [b_casted, alpha_val](const CTYPE_A val_a) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN value = a_casted + alpha_val * b_casted; + return static_cast(value); + }, + a.const_data_ptr(), + out.mutable_data_ptr(), + out.numel()); + }); + }); + return out; } diff --git a/backends/cadence/hifi/operators/op_div.cpp b/backends/cadence/hifi/operators/op_div.cpp index dc6a22ea4de..057132e7bc7 100644 --- a/backends/cadence/hifi/operators/op_div.cpp +++ b/backends/cadence/hifi/operators/op_div.cpp @@ -13,14 +13,11 @@ #include #include #include - -#include "kernels.h" +#include namespace torch { namespace executor { namespace native { - -#define NNLIB_MAX_DIM 4 /* Add fallback if broadcast and dim > 4 */ namespace { @@ -61,25 +58,26 @@ div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { ET_KERNEL_CHECK(ctx, tensor_is_real_type(out), InvalidArgument, out); + constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); - int fall_back = 0; + bool optimized = 1; /*find broadcast*/ - const int a_is_broadcasted = !out.sizes().equals(a.sizes()); - const int b_is_broadcasted = !out.sizes().equals(b.sizes()); - const int broadcast = (a_is_broadcasted || b_is_broadcasted); + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted || b_is_broadcasted); int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; if((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) - fall_back = 1; + optimized = 0; - if( (a_dim == 0) || (b_dim == 0) ) - fall_back = 1; + if((a_dim == 0) || (b_dim == 0) ) + optimized = 0; - if((broadcast == 1) && (max_dim > NNLIB_MAX_DIM)) - fall_back = 1; + if((broadcast == 1) && (max_dim > kNnlibMaxDim)) + optimized = 0; - if(!fall_back) + if(optimized) { float* a_data = a.mutable_data_ptr(); float* b_data = b.mutable_data_ptr(); @@ -88,20 +86,20 @@ div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { if(broadcast == 1) { - int out_shape[NNLIB_MAX_DIM]; - int inp1_shape[NNLIB_MAX_DIM]; - int inp2_shape[NNLIB_MAX_DIM]; + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; - for(int i = 0; i < NNLIB_MAX_DIM; i++) + for(int i = 0; i < kNnlibMaxDim; i++) { out_shape[i] = 1; inp1_shape[i] = 1; inp2_shape[i] = 1; } - int off_o = NNLIB_MAX_DIM - out.dim(); - int off_a = NNLIB_MAX_DIM - a.dim(); - int off_b = NNLIB_MAX_DIM - b.dim(); + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); for(int i = 0; i < out.dim(); i++) out_shape[i+off_o] = out.size(i); for(int i = 0; i < a.dim(); i++) @@ -116,34 +114,34 @@ div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { xa_nn_elm_div_f32xf32_f32(out_data, a_data, b_data, out.numel()); } + + return out; } - else - { - ScalarType common_type = get_compute_type(a_type, b_type); - ScalarType out_type = out.scalar_type(); - ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + ScalarType common_type = get_compute_type(a_type, b_type); + ScalarType out_type = out.scalar_type(); - ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.out", CTYPE_A, [&]() { - ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out", CTYPE_B, [&]() { - ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out", CTYPE_IN, [&]() { - ET_SWITCH_FLOAT_TYPES(out_type, ctx, "div.out", CTYPE_OUT, [&]() { - apply_binary_elementwise_fn( - [](const CTYPE_A val_a, const CTYPE_B val_b) { - CTYPE_IN a_casted = static_cast(val_a); - CTYPE_IN b_casted = static_cast(val_b); - CTYPE_IN value = a_casted / b_casted; + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); - return static_cast(value); - }, - a, - b, - out); - }); + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.out", CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out", CTYPE_B, [&]() { + ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out", CTYPE_IN, [&]() { + ET_SWITCH_FLOAT_TYPES(out_type, ctx, "div.out", CTYPE_OUT, [&]() { + apply_binary_elementwise_fn( + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = a_casted / b_casted; + + return static_cast(value); + }, + a, + b, + out); }); }); }); - } + }); return out; } @@ -174,33 +172,33 @@ Tensor& div_out_mode( !(common_type != ScalarType::Bool && out_type == ScalarType::Bool), InvalidArgument, out); - + constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); - int fall_back = 0; + bool optimized = 1; /*find broadcast*/ - const int a_is_broadcasted = !out.sizes().equals(a.sizes()); - const int b_is_broadcasted = !out.sizes().equals(b.sizes()); - const int broadcast = (a_is_broadcasted || b_is_broadcasted); + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const 
bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted || b_is_broadcasted); int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); max_dim = out.dim() > max_dim ? out.dim() : max_dim; if((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) - fall_back = 1; + optimized = 0; - if( (a_dim == 0) || (b_dim == 0) ) - fall_back = 1; + if((a_dim == 0) || (b_dim == 0)) + optimized = 0; - if((broadcast == 1) && (max_dim > NNLIB_MAX_DIM)) - fall_back = 1; + if((broadcast == 1) && (max_dim > kNnlibMaxDim)) + optimized = 0; int mode_val = -1; if (mode.has_value() && mode.value() == "trunc") mode_val = 0; else if (mode.has_value() && mode.value() == "floor") mode_val = 1; else - fall_back = 1; + optimized = 0; - if(!fall_back) + if(optimized) { float* a_data = a.mutable_data_ptr(); float* b_data = b.mutable_data_ptr(); @@ -208,20 +206,20 @@ Tensor& div_out_mode( if(broadcast) { - int out_shape[NNLIB_MAX_DIM]; - int inp1_shape[NNLIB_MAX_DIM]; - int inp2_shape[NNLIB_MAX_DIM]; + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; - for(int i = 0; i < NNLIB_MAX_DIM; i++) + for(int i = 0; i < kNnlibMaxDim; i++) { inp1_shape[i] = 1; inp2_shape[i] = 1; out_shape[i] = 1; } - int off_o = NNLIB_MAX_DIM - out.dim(); - int off_a = NNLIB_MAX_DIM - a.dim(); - int off_b = NNLIB_MAX_DIM - b.dim(); + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); for(int i = 0; i < out.dim(); i++) out_shape[i+off_o] = out.size(i); @@ -236,33 +234,33 @@ Tensor& div_out_mode( { xa_nn_elm_div_mode_f32xf32_f32(out_data, a_data, b_data, out.numel(), mode_val); } + + return out; } - else - { - ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.out_mode", CTYPE_A, [&]() { - ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out_mode", CTYPE_B, [&]() { - ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out_mode", CTYPE_IN, [&]() { - ET_SWITCH_REAL_TYPES(out_type, ctx, "div.out_mode", CTYPE_OUT, [&]() { - apply_binary_elementwise_fn( - [mode](const CTYPE_A val_a, const CTYPE_B val_b) { - CTYPE_IN a_casted = static_cast(val_a); - CTYPE_IN b_casted = static_cast(val_b); - CTYPE_IN value = a_casted / b_casted; - if (mode.has_value() && mode.value() == "trunc") { - value = std::trunc(value); - } else if (mode.has_value() && mode.value() == "floor") { - value = std::floor(value); - } - return static_cast(value); - }, - a, - b, - out); - }); + + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.out_mode", CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out_mode", CTYPE_B, [&]() { + ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out_mode", CTYPE_IN, [&]() { + ET_SWITCH_REAL_TYPES(out_type, ctx, "div.out_mode", CTYPE_OUT, [&]() { + apply_binary_elementwise_fn( + [mode](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = a_casted / b_casted; + if (mode.has_value() && mode.value() == "trunc") { + value = std::trunc(value); + } else if (mode.has_value() && mode.value() == "floor") { + value = std::floor(value); + } + return static_cast(value); + }, + a, + b, + out); }); }); }); - } + }); return out; } @@ -318,7 +316,6 @@ Tensor& div_scalar_mode_out( const Scalar& b, exec_aten::optional mode, Tensor& out) { - (void)ctx; // Resize for dynamic shape ET_KERNEL_CHECK_MSG( diff --git a/backends/cadence/hifi/operators/op_mul.cpp b/backends/cadence/hifi/operators/op_mul.cpp index 
c430bfa0740..05173e831c6 100644 --- a/backends/cadence/hifi/operators/op_mul.cpp +++ b/backends/cadence/hifi/operators/op_mul.cpp @@ -11,9 +11,8 @@ #include #include #include -#include "kernels.h" +#include -#define NNLIB_MAX_DIM 4 /* Add fallback if broadcast and dim > 4 */ namespace torch { namespace executor { @@ -79,27 +78,28 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { ScalarType b_type = b.scalar_type(); ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); ScalarType out_type = out.scalar_type(); + constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); - int fall_back = 0; + bool optimized = 1; /*find broadcast*/ - const int a_is_broadcasted = !out.sizes().equals(a.sizes()); - const int b_is_broadcasted = !out.sizes().equals(b.sizes()); - const int broadcast = (a_is_broadcasted || b_is_broadcasted); + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted || b_is_broadcasted); int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); max_dim = out.dim() > max_dim ? out.dim() : max_dim; if((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) - fall_back = 1; + optimized = 0; if( (a_dim == 0) || (b_dim == 0) ) - fall_back = 1; + optimized = 0; - if((broadcast == 1) && (max_dim > NNLIB_MAX_DIM)) - fall_back = 1; + if((broadcast == 1) && (max_dim > kNnlibMaxDim)) + optimized = 0; - if(!fall_back) + if(optimized) { float* a_data = a.mutable_data_ptr(); float* b_data = b.mutable_data_ptr(); @@ -107,18 +107,18 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { if(broadcast == 1) { - int out_shape[NNLIB_MAX_DIM]; - int inp1_shape[NNLIB_MAX_DIM]; - int inp2_shape[NNLIB_MAX_DIM]; - for(int i = 0; i < NNLIB_MAX_DIM; i++) + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; + for(int i = 0; i < kNnlibMaxDim; i++) { out_shape[i] = 1; inp1_shape[i] = 1; inp2_shape[i] = 1; } - int off_o = NNLIB_MAX_DIM - out.dim(); - int off_a = NNLIB_MAX_DIM - a.dim(); - int off_b = NNLIB_MAX_DIM - b.dim(); + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); for(int i = 0; i < out.dim(); i++){ out_shape[i+off_o] = out.size(i);} for(int i = 0; i < a.dim(); i++) @@ -132,26 +132,26 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { { xa_nn_elm_mul_f32xf32_f32(out_data, a_data, b_data, out.numel()); } - } - else - { - ET_SWITCH_REALHB_TYPES(a_type, ctx, "mul.out", CTYPE_A, [&]() { - ET_SWITCH_REALHB_TYPES(b_type, ctx, "mul.out", CTYPE_B, [&]() { - using CTYPE_IN = typename torch::executor:: - promote_types::type; - ET_DCHECK(CppTypeToScalarType::value == common_type); - ET_SWITCH_REALHB_TYPES(out_type, ctx, "mul.out", CTYPE_OUT, [&]() { - MulInner< - can_cast::value, - CTYPE_A, - CTYPE_B, - CTYPE_IN, - CTYPE_OUT>::run(a, b, out); - }); - }); - }); + + return out; } + ET_SWITCH_REALHB_TYPES(a_type, ctx, "mul.out", CTYPE_A, [&]() { + ET_SWITCH_REALHB_TYPES(b_type, ctx, "mul.out", CTYPE_B, [&]() { + using CTYPE_IN = typename torch::executor:: + promote_types::type; + ET_DCHECK(CppTypeToScalarType::value == common_type); + ET_SWITCH_REALHB_TYPES(out_type, ctx, "mul.out", CTYPE_OUT, [&]() { + MulInner< + can_cast::value, + CTYPE_A, + CTYPE_B, + CTYPE_IN, + CTYPE_OUT>::run(a, b, out); + }); + }); + }); + 
return out; } @@ -160,7 +160,6 @@ Tensor& mul_scalar_out( const Tensor& a, const Scalar& b, Tensor& out) { - (void)ctx; // Resize for dynamic shape ET_KERNEL_CHECK_MSG( @@ -180,6 +179,8 @@ Tensor& mul_scalar_out( ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); + /*When Half first compute the result in float precision + and then downcast to half*/ if (common_type == ScalarType::Half) { common_type = ScalarType::Float; } diff --git a/backends/cadence/hifi/operators/op_sigmoid.cpp b/backends/cadence/hifi/operators/op_sigmoid.cpp index 0b7a72bcc73..6c54e053f9c 100644 --- a/backends/cadence/hifi/operators/op_sigmoid.cpp +++ b/backends/cadence/hifi/operators/op_sigmoid.cpp @@ -10,7 +10,7 @@ #include #include -#include "kernels.h" +#include namespace torch { namespace executor { @@ -36,33 +36,33 @@ Tensor& sigmoid_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { ScalarType in_type = in.scalar_type(); ScalarType out_type = out.scalar_type(); - int fall_back = 0; + bool optimized = 1; if((in_type != ScalarType::Float) || (out_type != ScalarType::Float)) - fall_back = 1; + optimized = 0; - if(!fall_back) + if(optimized) { float* data_in = in.mutable_data_ptr(); float* data_out = out.mutable_data_ptr(); xa_nn_vec_sigmoid_f32_f32(data_out, data_in, in.numel()); + + return out; } - else - { - ET_SWITCH_REALHB_TYPES(in_type, ctx, "sigmoid.out", CTYPE_IN, [&]() { - ET_SWITCH_FLOATH_TYPES(out_type, ctx, "sigmoid.out", CTYPE_OUT, [&]() { - apply_unary_map_fn( - [](const CTYPE_IN val_in) { - // perform math in double to preserve precision - double in_casted = static_cast(val_in); - double out_val = 1.0 / (1.0 + exp(-in_casted)); - return static_cast(out_val); - }, - in.const_data_ptr(), - out.mutable_data_ptr(), - in.numel()); - }); - }); - } + + ET_SWITCH_REALHB_TYPES(in_type, ctx, "sigmoid.out", CTYPE_IN, [&]() { + ET_SWITCH_FLOATH_TYPES(out_type, ctx, "sigmoid.out", CTYPE_OUT, [&]() { + apply_unary_map_fn( + [](const CTYPE_IN val_in) { + // perform math in double to preserve precision + double in_casted = static_cast(val_in); + double out_val = 1.0 / (1.0 + exp(-in_casted)); + return static_cast(out_val); + }, + in.const_data_ptr(), + out.mutable_data_ptr(), + in.numel()); + }); + }); return out; } diff --git a/backends/cadence/hifi/operators/op_sub.cpp b/backends/cadence/hifi/operators/op_sub.cpp index a98bb7c0da2..d297bc0c699 100644 --- a/backends/cadence/hifi/operators/op_sub.cpp +++ b/backends/cadence/hifi/operators/op_sub.cpp @@ -12,9 +12,7 @@ #include #include #include -#include "kernels.h" - -#define NNLIB_MAX_DIM 4 /* Add fallback if broadcast and dim > 4 */ +#include namespace torch { namespace executor { @@ -97,27 +95,28 @@ Tensor& sub_out( utils::extract_scalar(alpha, &alpha_val); constexpr auto name = "sub.out"; + constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); - int fall_back = 0; + bool optimized = 1; /*find broadcast*/ - const int a_is_broadcasted = !out.sizes().equals(a.sizes()); - const int b_is_broadcasted = !out.sizes().equals(b.sizes()); - const int broadcast = (a_is_broadcasted || b_is_broadcasted); + const bool a_is_broadcasted = !out.sizes().equals(a.sizes()); + const bool b_is_broadcasted = !out.sizes().equals(b.sizes()); + const bool broadcast = (a_is_broadcasted || b_is_broadcasted); int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; - if( (out_type != ScalarType::Float) || (alpha_val != 1.0)) - fall_back = 1; + if((out_type != ScalarType::Float) || (alpha_val != 1.0)) + optimized = 0; - if( (a_dim == 0) || (b_dim == 0) ) - fall_back = 1; + if((a_dim == 0) || (b_dim == 0)) + optimized = 0; - if((broadcast == 1) && (max_dim > NNLIB_MAX_DIM)) - fall_back = 1; + if((broadcast == 1) && (max_dim > kNnlibMaxDim)) + optimized = 0; - if(!fall_back) + if(optimized) { /*logic to find broadcast*/ const int a_is_broadcasted = !out.sizes().equals(a.sizes()); @@ -129,20 +128,20 @@ Tensor& sub_out( float* const out_data = out.mutable_data_ptr(); if(broadcast == 1) { - int out_shape[NNLIB_MAX_DIM]; - int inp1_shape[NNLIB_MAX_DIM]; - int inp2_shape[NNLIB_MAX_DIM]; + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; - for(int i = 0; i < NNLIB_MAX_DIM; i++) + for(int i = 0; i < kNnlibMaxDim; i++) { out_shape[i] = 1; inp1_shape[i] = 1; inp2_shape[i] = 1; } - int off_o = NNLIB_MAX_DIM - out_dim; - int off_a = NNLIB_MAX_DIM - a_dim; - int off_b = NNLIB_MAX_DIM - b_dim; + int off_o = kNnlibMaxDim - out_dim; + int off_a = kNnlibMaxDim - a_dim; + int off_b = kNnlibMaxDim - b_dim; for(int i = 0; i < out_dim; i++) out_shape[i+off_o] = out.size(i); for(int i = 0; i < a_dim; i++) @@ -156,29 +155,28 @@ Tensor& sub_out( { xa_nn_elm_sub_f32xf32_f32(out_data, a_data, b_data, out.numel()); } - + + return out; } - else - { + - ET_SWITCH_REALH_TYPES(a_type, ctx, name, CTYPE_A, [&]() { - ET_SWITCH_REALH_TYPES(b_type, ctx, name, CTYPE_B, [&]() { - using CTYPE_IN = typename torch::executor:: - promote_types::type; - ET_DCHECK(CppTypeToScalarType::value == common_type); - CTYPE_IN alpha_val; - utils::extract_scalar(alpha, &alpha_val); - ET_SWITCH_REALH_TYPES(out_type, ctx, name, CTYPE_OUT, [&]() { - SubInner< - can_cast::value, - CTYPE_A, - CTYPE_B, - CTYPE_IN, - CTYPE_OUT>::run(a, b, alpha_val, out); - }); - }); - }); - } + ET_SWITCH_REALH_TYPES(a_type, ctx, name, CTYPE_A, [&]() { + ET_SWITCH_REALH_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + using CTYPE_IN = typename torch::executor:: + promote_types::type; + ET_DCHECK(CppTypeToScalarType::value == common_type); + CTYPE_IN alpha_val; + utils::extract_scalar(alpha, &alpha_val); + ET_SWITCH_REALH_TYPES(out_type, ctx, name, CTYPE_OUT, [&]() { + SubInner< + can_cast::value, + CTYPE_A, + CTYPE_B, + CTYPE_IN, + CTYPE_OUT>::run(a, b, alpha_val, out); + }); + }); + }); return out; } @@ -211,6 +209,8 @@ Tensor& sub_scalar_out( ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); ET_KERNEL_CHECK(ctx, canCast(alpha_type, common_type), InvalidArgument, out); + /*When Half first compute the result in float precision + and then downcast to half*/ if (common_type == ScalarType::Half) { common_type = ScalarType::Float; } diff --git a/backends/cadence/hifi/operators/op_tanh.cpp b/backends/cadence/hifi/operators/op_tanh.cpp index 47a057fe774..f06b57a3688 100644 --- a/backends/cadence/hifi/operators/op_tanh.cpp +++ b/backends/cadence/hifi/operators/op_tanh.cpp @@ -9,7 +9,7 @@ #include #include #include -#include "kernels.h" +#include namespace torch { namespace executor { @@ -17,21 +17,19 @@ namespace native { Tensor& tanh_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { - int fall_back = 0; + bool optimized = 1; if((in.scalar_type() != ScalarType::Float) || (out.scalar_type() != ScalarType::Float)) - fall_back = 1; + optimized = 0; - if(!fall_back) + if(optimized) { float* data_in = in.mutable_data_ptr(); float* data_out = 
out.mutable_data_ptr(); xa_nn_vec_tanh_f32_f32(data_out, data_in, (int)in.numel()); return out; } - else - { - return internal::unary_ufunc_realhb_to_floath(std::tanh, ctx, in, out); - } + + return internal::unary_ufunc_realhb_to_floath(std::tanh, ctx, in, out); } From 99a772ccbd1a5dda1cde0415a8bd37e32ad86c74 Mon Sep 17 00:00:00 2001 From: dijopaul Date: Tue, 1 Oct 2024 09:01:32 -0700 Subject: [PATCH 5/9] Adding cflags to prevent compilation halts --- backends/cadence/cadence.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backends/cadence/cadence.cmake b/backends/cadence/cadence.cmake index 25f241f205c..2ad95746ced 100644 --- a/backends/cadence/cadence.cmake +++ b/backends/cadence/cadence.cmake @@ -41,8 +41,8 @@ set(CMAKE_CROSSCOMPILING TRUE) set(CMAKE_C_COMPILER ${TOOLCHAIN_HOME}/bin/${CROSS_COMPILE_TARGET}-clang) set(CMAKE_CXX_COMPILER ${TOOLCHAIN_HOME}/bin/${CROSS_COMPILE_TARGET}-clang++) -set(CMAKE_C_FLAGS_INIT "-stdlib=libc++ -mtext-section-literals -mlongcalls") -set(CMAKE_CXX_FLAGS_INIT "-stdlib=libc++ -mtext-section-literals -mlongcalls") +set(CMAKE_C_FLAGS_INIT "-stdlib=libc++ -mtext-section-literals -mlongcalls -fno-strict-aliasing") +set(CMAKE_CXX_FLAGS_INIT "-stdlib=libc++ -mtext-section-literals -mlongcalls -fno-strict-aliasing") set(CMAKE_SYSROOT ${TOOLCHAIN_HOME}/${SYSROOT_TARGET}) set(CMAKE_LINKER ${TOOLCHAIN_HOME}/bin/xt-ld) add_link_options(-lm -stdlib=libc++ -Wl,--no-as-needed -static) From 95436222f26a13db60853c01307790120bfa048f Mon Sep 17 00:00:00 2001 From: dijopaul Date: Fri, 4 Oct 2024 05:33:51 -0700 Subject: [PATCH 6/9] Adding cflags to prevent compilation halts --- backends/cadence/cadence.cmake | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/backends/cadence/cadence.cmake b/backends/cadence/cadence.cmake index 2ad95746ced..cb6a25315ea 100644 --- a/backends/cadence/cadence.cmake +++ b/backends/cadence/cadence.cmake @@ -41,8 +41,11 @@ set(CMAKE_CROSSCOMPILING TRUE) set(CMAKE_C_COMPILER ${TOOLCHAIN_HOME}/bin/${CROSS_COMPILE_TARGET}-clang) set(CMAKE_CXX_COMPILER ${TOOLCHAIN_HOME}/bin/${CROSS_COMPILE_TARGET}-clang++) -set(CMAKE_C_FLAGS_INIT "-stdlib=libc++ -mtext-section-literals -mlongcalls -fno-strict-aliasing") -set(CMAKE_CXX_FLAGS_INIT "-stdlib=libc++ -mtext-section-literals -mlongcalls -fno-strict-aliasing") +set(CMAKE_C_FLAGS_INIT "-stdlib=libc++ -mtext-section-literals -mlongcalls") +set(CMAKE_CXX_FLAGS_INIT "-stdlib=libc++ -mtext-section-literals -mlongcalls") +#workaround for larger compilation time +SET(CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS_INIT} -fno-strict-aliasing") + set(CMAKE_SYSROOT ${TOOLCHAIN_HOME}/${SYSROOT_TARGET}) set(CMAKE_LINKER ${TOOLCHAIN_HOME}/bin/xt-ld) add_link_options(-lm -stdlib=libc++ -Wl,--no-as-needed -static) From d45e25d7c8ad3d0917ffdfccaf66c1ebd426b55a Mon Sep 17 00:00:00 2001 From: dijopaul Date: Mon, 7 Oct 2024 05:15:25 -0700 Subject: [PATCH 7/9] Changing name space of optimized ops; Remove unused ops from file --- backends/cadence/aot/functions_hifi.yaml | 14 +-- backends/cadence/hifi/operators/op_add.cpp | 102 +++------------- backends/cadence/hifi/operators/op_div.cpp | 114 ++---------------- backends/cadence/hifi/operators/op_mul.cpp | 76 ++---------- .../cadence/hifi/operators/op_sigmoid.cpp | 15 ++- backends/cadence/hifi/operators/op_sub.cpp | 98 +++------------ backends/cadence/hifi/operators/op_tanh.cpp | 16 ++- 7 files changed, 88 insertions(+), 347 deletions(-) diff --git a/backends/cadence/aot/functions_hifi.yaml b/backends/cadence/aot/functions_hifi.yaml index 
bab47eccff4..3a274adacaf 100644 --- a/backends/cadence/aot/functions_hifi.yaml +++ b/backends/cadence/aot/functions_hifi.yaml @@ -25,7 +25,7 @@ - op: add.out kernels: - arg_meta: null - kernel_name: torch::executor::add_out + kernel_name: impl::HiFi::add_out - op: bmm.out kernels: @@ -45,12 +45,12 @@ - op: div.out kernels: - arg_meta: null - kernel_name: torch::executor::div_out + kernel_name: impl::HiFi::div_out - op: div.out_mode kernels: - arg_meta: null - kernel_name: torch::executor::div_out_mode + kernel_name: impl::HiFi::div_out_mode - op: embedding.out kernels: @@ -65,7 +65,7 @@ - op: mul.out kernels: - arg_meta: null - kernel_name: torch::executor::mul_out + kernel_name: impl::HiFi::mul_out - op: permute_copy.out kernels: @@ -75,7 +75,7 @@ - op: sigmoid.out kernels: - arg_meta: null - kernel_name: torch::executor::sigmoid_out + kernel_name: impl::HiFi::sigmoid_out - op: slice_copy.Tensor_out kernels: @@ -90,12 +90,12 @@ - op: sub.out kernels: - arg_meta: null - kernel_name: torch::executor::sub_out + kernel_name: impl::HiFi::sub_out - op: tanh.out kernels: - arg_meta: null - kernel_name: torch::executor::tanh_out + kernel_name: impl::HiFi::tanh_out - op: view_copy.out kernels: diff --git a/backends/cadence/hifi/operators/op_add.cpp b/backends/cadence/hifi/operators/op_add.cpp index 38585b40055..883cc74d6ca 100644 --- a/backends/cadence/hifi/operators/op_add.cpp +++ b/backends/cadence/hifi/operators/op_add.cpp @@ -14,11 +14,19 @@ #include #include -namespace torch { -namespace executor { +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::runtime::can_cast; +using executorch::runtime::CppTypeToScalarType; +using executorch::runtime::KernelRuntimeContext; +using torch::executor::Error; + +namespace impl { +namespace HiFi { namespace native { -namespace { +namespace { template < bool can_cast, typename CTYPE_A, @@ -35,7 +43,7 @@ template < struct AddInner { static void run(const Tensor& a, const Tensor& b, CTYPE_IN alpha_val, Tensor& out) { - apply_binary_elementwise_fn( + torch::executor::apply_binary_elementwise_fn( // NOLINTNEXTLINE(facebook-hte-ConstantArgumentPassByValue) [alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) { CTYPE_IN a_casted = static_cast(val_a); @@ -89,7 +97,7 @@ Tensor& add_out( ScalarType a_type = a.scalar_type(); ScalarType b_type = b.scalar_type(); - ScalarType alpha_type = utils::get_scalar_dtype(alpha); + ScalarType alpha_type = torch::executor::native::utils::get_scalar_dtype(alpha); ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); ScalarType out_type = out.scalar_type(); @@ -98,7 +106,7 @@ Tensor& add_out( ctx, check_alpha_type(alpha_type, common_type), InvalidArgument, out); float alpha_val; - utils::extract_scalar(alpha, &alpha_val); + torch::executor::native::utils::extract_scalar(alpha, &alpha_val); constexpr auto name = "add.out"; constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ @@ -168,7 +176,7 @@ Tensor& add_out( promote_types::type; ET_DCHECK(CppTypeToScalarType::value == common_type); CTYPE_IN alpha_val; - utils::extract_scalar(alpha, &alpha_val); + torch::executor::native::utils::extract_scalar(alpha, &alpha_val); ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&]() { AddInner< @@ -184,83 +192,7 @@ Tensor& add_out( return out; } -Tensor& add_scalar_out( - KernelRuntimeContext& ctx, - const Tensor& a, - const Scalar& b, - const Scalar& alpha, - Tensor& out) { - - // Resize for dynamic shape - ET_KERNEL_CHECK_MSG( - ctx, - 
resize_tensor(out, a.sizes()) == Error::Ok, - InvalidArgument, - out, - "Failed to resize output tensor."); - - ET_KERNEL_CHECK( - ctx, - executorch::runtime::tensor_is_realhbbf16_type(out), - InvalidArgument, - out); - ET_KERNEL_CHECK( - ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out); - - ScalarType a_type = a.scalar_type(); - ScalarType b_type = utils::get_scalar_dtype(b); - ScalarType alpha_type = utils::get_scalar_dtype(alpha); - ScalarType common_type = - utils::promote_type_with_scalar(a_type, b, /*half_to_float*/ false); - ScalarType out_type = out.scalar_type(); - - ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); - ET_KERNEL_CHECK( - ctx, check_alpha_type(alpha_type, common_type), InvalidArgument, out); - - /*When Half first compute the result in float precision - and then downcast to half*/ - if (common_type == ScalarType::Half) { - common_type = ScalarType::Float; - } - - constexpr auto name = "add.Scalar_out"; - - ET_SWITCH_REALHBBF16_TYPES(a_type, ctx, name, CTYPE_A, [&]() { - ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, name, CTYPE_B, [&]() { - using CTYPE_IN = typename utils::promote_type_with_scalar_type< - CTYPE_A, - CTYPE_B, - /*half_to_float*/ true>::type; - ET_DCHECK(CppTypeToScalarType::value == common_type); - - CTYPE_B b_val; - utils::extract_scalar(b, &b_val); - CTYPE_IN b_casted = static_cast(b_val); - - CTYPE_IN alpha_val; - utils::extract_scalar(alpha, &alpha_val); - - using CTYPE_OUT = typename std::conditional< - std::is_same::value, - internal::F2, - CTYPE_IN>::type; - - apply_unary_map_fn( - [b_casted, alpha_val](const CTYPE_A val_a) { - CTYPE_IN a_casted = static_cast(val_a); - CTYPE_IN value = a_casted + alpha_val * b_casted; - return static_cast(value); - }, - a.const_data_ptr(), - out.mutable_data_ptr(), - out.numel()); - }); - }); - - return out; -} +} // namespace impl +} // namespace HiFi } // namespace native -} // namespace executor -} // namespace torch diff --git a/backends/cadence/hifi/operators/op_div.cpp b/backends/cadence/hifi/operators/op_div.cpp index 057132e7bc7..41220e5d0b7 100644 --- a/backends/cadence/hifi/operators/op_div.cpp +++ b/backends/cadence/hifi/operators/op_div.cpp @@ -15,8 +15,14 @@ #include #include -namespace torch { -namespace executor { +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::aten::RuntimeContext; +using torch::executor::Error; + +namespace impl { +namespace HiFi { namespace native { namespace { @@ -127,7 +133,7 @@ div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out", CTYPE_B, [&]() { ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out", CTYPE_IN, [&]() { ET_SWITCH_FLOAT_TYPES(out_type, ctx, "div.out", CTYPE_OUT, [&]() { - apply_binary_elementwise_fn( + torch::executor::apply_binary_elementwise_fn( [](const CTYPE_A val_a, const CTYPE_B val_b) { CTYPE_IN a_casted = static_cast(val_a); CTYPE_IN b_casted = static_cast(val_b); @@ -242,7 +248,7 @@ Tensor& div_out_mode( ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out_mode", CTYPE_B, [&]() { ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out_mode", CTYPE_IN, [&]() { ET_SWITCH_REAL_TYPES(out_type, ctx, "div.out_mode", CTYPE_OUT, [&]() { - apply_binary_elementwise_fn( + torch::executor::apply_binary_elementwise_fn( [mode](const CTYPE_A val_a, const CTYPE_B val_b) { CTYPE_IN a_casted = static_cast(val_a); CTYPE_IN b_casted = static_cast(val_b); @@ -265,103 +271,7 @@ Tensor& div_out_mode( return out; } -Tensor& 
div_scalar_out( - RuntimeContext& ctx, - const Tensor& a, - const Scalar& b, - Tensor& out) { - (void)ctx; - - // Resize for dynamic shape - ET_KERNEL_CHECK_MSG( - ctx, - resize_tensor(out, a.sizes()) == Error::Ok, - InvalidArgument, - out, - "Failed to resize output tensor."); - - ScalarType a_type = a.scalar_type(); - ScalarType b_type = utils::get_scalar_dtype(b); - ScalarType common_type = isFloatingType(a_type) ? a_type : ScalarType::Float; - ScalarType out_type = out.scalar_type(); - - ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); - - ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.Scalar_out", CTYPE_A, [&]() { - ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, "div.Scalar_out", CTYPE_B, [&]() { - ET_SWITCH_FLOAT_TYPES(out_type, ctx, "div.Scalar_out", CTYPE, [&]() { - CTYPE_B b_val; - utils::extract_scalar(b, &b_val); - CTYPE b_casted = static_cast(b_val); - - apply_unary_map_fn( - [b_casted](const CTYPE_A val_a) { - CTYPE a_casted = static_cast(val_a); - CTYPE value = a_casted / b_casted; - return static_cast(value); - }, - a.const_data_ptr(), - out.mutable_data_ptr(), - out.numel()); - }); - }); - }); - - return out; -} - -Tensor& div_scalar_mode_out( - RuntimeContext& ctx, - const Tensor& a, - const Scalar& b, - exec_aten::optional mode, - Tensor& out) { - - // Resize for dynamic shape - ET_KERNEL_CHECK_MSG( - ctx, - resize_tensor(out, a.sizes()) == Error::Ok, - InvalidArgument, - out, - "Failed to resize output tensor."); - - ScalarType a_type = a.scalar_type(); - ScalarType b_type = utils::get_scalar_dtype(b); - ScalarType common_type = utils::promote_type_with_scalar(a_type, b); - ScalarType out_type = out.scalar_type(); - - ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); - - constexpr auto name = "div.Scalar_mode_out"; - - ET_SWITCH_REALB_TYPES(a_type, ctx, name, CTYPE_A, [&]() { - ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, name, CTYPE_B, [&]() { - ET_SWITCH_REAL_TYPES(out_type, ctx, name, CTYPE, [&]() { - CTYPE_B b_val; - utils::extract_scalar(b, &b_val); - CTYPE b_casted = static_cast(b_val); - - apply_unary_map_fn( - [b_casted, mode](const CTYPE_A val_a) { - CTYPE a_casted = static_cast(val_a); - CTYPE value = a_casted / b_casted; - if (mode.has_value() && mode.value() == "trunc") { - value = std::trunc(value); - } else if (mode.has_value() && mode.value() == "floor") { - value = utils::floor_divide(a_casted, b_casted); - } - return value; - }, - a.const_data_ptr(), - out.mutable_data_ptr(), - out.numel()); - }); - }); - }); - - return out; -} +} // namespace impl +} // namespace HiFi } // namespace native -} // namespace executor -} // namespace torch diff --git a/backends/cadence/hifi/operators/op_mul.cpp b/backends/cadence/hifi/operators/op_mul.cpp index 05173e831c6..9200d9802e1 100644 --- a/backends/cadence/hifi/operators/op_mul.cpp +++ b/backends/cadence/hifi/operators/op_mul.cpp @@ -13,9 +13,16 @@ #include #include - -namespace torch { -namespace executor { +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::aten::RuntimeContext; +using executorch::runtime::can_cast; +using executorch::runtime::CppTypeToScalarType; +using torch::executor::Error; + +namespace impl { +namespace HiFi { namespace native { namespace { @@ -34,7 +41,7 @@ template < typename CTYPE_OUT> struct MulInner { static void run(const Tensor& a, const Tensor& b, Tensor& out) { - apply_binary_elementwise_fn( + torch::executor::apply_binary_elementwise_fn( // NOLINTNEXTLINE(facebook-hte-ConstantArgumentPassByValue) [](const 
CTYPE_A val_a, const CTYPE_B val_b) { CTYPE_IN a_casted = static_cast(val_a); @@ -155,63 +162,6 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { return out; } -Tensor& mul_scalar_out( - RuntimeContext& ctx, - const Tensor& a, - const Scalar& b, - Tensor& out) { - - // Resize for dynamic shape - ET_KERNEL_CHECK_MSG( - ctx, - resize_tensor(out, a.sizes()) == Error::Ok, - InvalidArgument, - out, - "Failed to resize output tensor."); - - ET_KERNEL_CHECK(ctx, tensor_is_realhb_type(out), InvalidArgument, out); - - ScalarType a_type = a.scalar_type(); - ScalarType b_type = utils::get_scalar_dtype(b); - ScalarType common_type = - utils::promote_type_with_scalar(a_type, b, /*half_to_float*/ false); - ScalarType out_type = out.scalar_type(); - - ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); - - /*When Half first compute the result in float precision - and then downcast to half*/ - if (common_type == ScalarType::Half) { - common_type = ScalarType::Float; - } - - ET_SWITCH_REALHB_TYPES(a_type, ctx, "mul.Scalar_out", CTYPE_A, [&]() { - ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, "mul.Scalar_out", CTYPE_B, [&]() { - ET_SWITCH_REALB_TYPES( - common_type, ctx, "mul.Scalar_out", CTYPE_IN, [&]() { - ET_SWITCH_REALHB_TYPES( - out_type, ctx, "mul.Scalar_out", CTYPE_OUT, [&]() { - CTYPE_B b_val; - utils::extract_scalar(b, &b_val); - CTYPE_IN b_casted = static_cast(b_val); - - apply_unary_map_fn( - [b_casted](const CTYPE_A val_a) { - CTYPE_IN a_casted = static_cast(val_a); - CTYPE_IN value = a_casted * b_casted; - return static_cast(value); - }, - a.const_data_ptr(), - out.mutable_data_ptr(), - out.numel()); - }); - }); - }); - }); - - return out; -} - +} // namespace impl +} // namespace HiFi } // namespace native -} // namespace executor -} // namespace torch diff --git a/backends/cadence/hifi/operators/op_sigmoid.cpp b/backends/cadence/hifi/operators/op_sigmoid.cpp index 6c54e053f9c..fa408d4b0a0 100644 --- a/backends/cadence/hifi/operators/op_sigmoid.cpp +++ b/backends/cadence/hifi/operators/op_sigmoid.cpp @@ -12,8 +12,13 @@ #include #include -namespace torch { -namespace executor { +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::aten::RuntimeContext; +using torch::executor::Error; + +namespace impl { +namespace HiFi { namespace native { using Tensor = exec_aten::Tensor; @@ -51,7 +56,7 @@ Tensor& sigmoid_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { ET_SWITCH_REALHB_TYPES(in_type, ctx, "sigmoid.out", CTYPE_IN, [&]() { ET_SWITCH_FLOATH_TYPES(out_type, ctx, "sigmoid.out", CTYPE_OUT, [&]() { - apply_unary_map_fn( + torch::executor::apply_unary_map_fn( [](const CTYPE_IN val_in) { // perform math in double to preserve precision double in_casted = static_cast(val_in); @@ -67,6 +72,6 @@ Tensor& sigmoid_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { return out; } +} // namespace impl +} // namespace HiFi } // namespace native -} // namespace executor -} // namespace torch diff --git a/backends/cadence/hifi/operators/op_sub.cpp b/backends/cadence/hifi/operators/op_sub.cpp index d297bc0c699..b9f35cafddb 100644 --- a/backends/cadence/hifi/operators/op_sub.cpp +++ b/backends/cadence/hifi/operators/op_sub.cpp @@ -14,11 +14,20 @@ #include #include -namespace torch { -namespace executor { +using exec_aten::Scalar; +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::runtime::can_cast; +using executorch::runtime::CppTypeToScalarType; +using executorch::aten::RuntimeContext; +using 
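// The sigmoid fallback kept above does its math in double "to preserve
// precision" before casting back to the output dtype; the expression itself
// is clipped in this hunk, so the sketch below only restates the standard
// logistic form that fallback computes (sigmoid_ref is an illustrative name,
// not part of the patch):
#include <cmath>

inline float sigmoid_ref(float x) {
  double xd = static_cast<double>(x);     // widen first, as the hunk's comment says
  double y = 1.0 / (1.0 + std::exp(-xd)); // standard logistic function
  return static_cast<float>(y);           // narrow back to the output dtype
}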
torch::executor::Error; + + +namespace impl { +namespace HiFi { namespace native { -namespace { +namespace { template < bool can_cast, typename CTYPE_A, @@ -35,7 +44,7 @@ template < struct SubInner { static void run(const Tensor& a, const Tensor& b, CTYPE_IN alpha_val, Tensor& out) { - apply_binary_elementwise_fn( + torch::executor::apply_binary_elementwise_fn( // NOLINTNEXTLINE(facebook-hte-ConstantArgumentPassByValue) [alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) { CTYPE_IN a_casted = static_cast(val_a); @@ -83,7 +92,7 @@ Tensor& sub_out( ScalarType a_type = a.scalar_type(); ScalarType b_type = b.scalar_type(); - ScalarType alpha_type = utils::get_scalar_dtype(alpha); + ScalarType alpha_type = torch::executor::native::utils::get_scalar_dtype(alpha); ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); ScalarType out_type = out.scalar_type(); @@ -92,7 +101,7 @@ Tensor& sub_out( ctx, check_alpha_type(alpha_type, common_type), InvalidArgument, out); float alpha_val; - utils::extract_scalar(alpha, &alpha_val); + torch::executor::native::utils::extract_scalar(alpha, &alpha_val); constexpr auto name = "sub.out"; constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ @@ -166,7 +175,7 @@ Tensor& sub_out( promote_types::type; ET_DCHECK(CppTypeToScalarType::value == common_type); CTYPE_IN alpha_val; - utils::extract_scalar(alpha, &alpha_val); + torch::executor::native::utils::extract_scalar(alpha, &alpha_val); ET_SWITCH_REALH_TYPES(out_type, ctx, name, CTYPE_OUT, [&]() { SubInner< can_cast::value, @@ -181,77 +190,6 @@ Tensor& sub_out( return out; } -Tensor& sub_scalar_out( - RuntimeContext& ctx, - const Tensor& a, - const Scalar& b, - const Scalar& alpha, - Tensor& out) { - (void)ctx; - - // Resize for dynamic shape - ET_KERNEL_CHECK_MSG( - ctx, - resize_tensor(out, a.sizes()) == Error::Ok, - InvalidArgument, - out, - "Failed to resize output tensor."); - - ET_KERNEL_CHECK(ctx, tensor_is_realh_type(out), InvalidArgument, out); - - ScalarType a_type = a.scalar_type(); - ScalarType b_type = utils::get_scalar_dtype(b); - ScalarType alpha_type = utils::get_scalar_dtype(alpha); - ScalarType common_type = - utils::promote_type_with_scalar(a_type, b, /*half_to_float*/ false); - ScalarType out_type = out.scalar_type(); - - ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out); - ET_KERNEL_CHECK(ctx, canCast(alpha_type, common_type), InvalidArgument, out); - - /*When Half first compute the result in float precision - and then downcast to half*/ - if (common_type == ScalarType::Half) { - common_type = ScalarType::Float; - } - - constexpr auto name = "sub.Scalar_out"; - - ET_SWITCH_REALH_TYPES(a_type, ctx, name, CTYPE_A, [&]() { - ET_SWITCH_SCALAR_OBJ_REAL_TYPES(b_type, ctx, name, CTYPE_B, [&]() { - using CTYPE_IN = typename utils::promote_type_with_scalar_type< - CTYPE_A, - CTYPE_B, - /*half_to_float*/ true>::type; - ET_DCHECK(CppTypeToScalarType::value == common_type); - - CTYPE_B b_val; - utils::extract_scalar(b, &b_val); - CTYPE_IN b_casted = static_cast(b_val); - - CTYPE_IN alpha_val; - utils::extract_scalar(alpha, &alpha_val); - - using CTYPE_OUT = typename std::conditional< - std::is_same::value, - internal::F2, - CTYPE_IN>::type; - - apply_unary_map_fn( - [b_casted, alpha_val](const CTYPE_A val_a) { - CTYPE_IN a_casted = static_cast(val_a); - CTYPE_IN value = a_casted - alpha_val * b_casted; - return static_cast(value); - }, - a.const_data_ptr(), - out.mutable_data_ptr(), - out.numel()); - }); - }); - - return out; -} - +} // namespace 
impl +} // namespace HiFi } // namespace native -} // namespace executor -} // namespace torch diff --git a/backends/cadence/hifi/operators/op_tanh.cpp b/backends/cadence/hifi/operators/op_tanh.cpp index f06b57a3688..a80450b8d8d 100644 --- a/backends/cadence/hifi/operators/op_tanh.cpp +++ b/backends/cadence/hifi/operators/op_tanh.cpp @@ -11,10 +11,16 @@ #include #include -namespace torch { -namespace executor { +using exec_aten::ScalarType; +using exec_aten::Tensor; +using executorch::aten::RuntimeContext; +using torch::executor::Error; + +namespace impl { +namespace HiFi { namespace native { + Tensor& tanh_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { bool optimized = 1; @@ -29,10 +35,10 @@ Tensor& tanh_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { return out; } - return internal::unary_ufunc_realhb_to_floath(std::tanh, ctx, in, out); + return torch::executor::native::internal::unary_ufunc_realhb_to_floath(std::tanh, ctx, in, out); } +} // namespace impl +} // namespace HiFi } // namespace native -} // namespace executor -} // namespace torch From 4e89e2a0549fddd13335fd59542ef5043b3ecb5f Mon Sep 17 00:00:00 2001 From: cad-audio Date: Thu, 17 Oct 2024 16:57:29 -0700 Subject: [PATCH 8/9] fixed lint issues. --- 1.txt | 676 ++++++++++++++++++ backends/cadence/cadence.cmake | 2 +- backends/cadence/hifi/kernels/kernels.h | 12 +- backends/cadence/hifi/operators/op_add.cpp | 43 +- backends/cadence/hifi/operators/op_div.cpp | 73 +- backends/cadence/hifi/operators/op_mul.cpp | 36 +- .../cadence/hifi/operators/op_sigmoid.cpp | 11 +- backends/cadence/hifi/operators/op_sub.cpp | 37 +- backends/cadence/hifi/operators/op_tanh.cpp | 19 +- 9 files changed, 792 insertions(+), 117 deletions(-) create mode 100644 1.txt diff --git a/1.txt b/1.txt new file mode 100644 index 00000000000..96745dd0afa --- /dev/null +++ b/1.txt @@ -0,0 +1,676 @@ +diff --git a/backends/cadence/cadence.cmake b/backends/cadence/cadence.cmake +index cb6a2531..0fa55c6a 100644 +--- a/backends/cadence/cadence.cmake ++++ b/backends/cadence/cadence.cmake +@@ -44,7 +44,7 @@ set(CMAKE_CXX_COMPILER ${TOOLCHAIN_HOME}/bin/${CROSS_COMPILE_TARGET}-clang++) + set(CMAKE_C_FLAGS_INIT "-stdlib=libc++ -mtext-section-literals -mlongcalls") + set(CMAKE_CXX_FLAGS_INIT "-stdlib=libc++ -mtext-section-literals -mlongcalls") + #workaround for larger compilation time +-SET(CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS_INIT} -fno-strict-aliasing") ++set(CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS_INIT} -fno-strict-aliasing") + + set(CMAKE_SYSROOT ${TOOLCHAIN_HOME}/${SYSROOT_TARGET}) + set(CMAKE_LINKER ${TOOLCHAIN_HOME}/bin/xt-ld) +diff --git a/backends/cadence/hifi/kernels/kernels.h b/backends/cadence/hifi/kernels/kernels.h +index 8faf0671..a08144d9 100644 +--- a/backends/cadence/hifi/kernels/kernels.h ++++ b/backends/cadence/hifi/kernels/kernels.h +@@ -16,21 +16,24 @@ + #include "xa_nnlib_kernels_api.h" + + /* Potential NNLIB function/APIs */ +-extern "C" WORD32 xa_nn_elm_add_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out, ++extern "C" WORD32 xa_nn_elm_add_broadcast_4D_f32xf32_f32( ++ FLOAT32 * __restrict__ p_out, + const WORD32 *const p_out_shape, + const FLOAT32 * __restrict__ p_inp1, + const WORD32 *const p_inp1_shape, + const FLOAT32 * __restrict__ p_inp2, + const WORD32 *const p_inp2_shape); + +-extern "C" WORD32 xa_nn_elm_div_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out, ++extern "C" WORD32 xa_nn_elm_div_broadcast_4D_f32xf32_f32( ++ FLOAT32 * __restrict__ p_out, + const WORD32 *const p_out_shape, + const FLOAT32 * 
__restrict__ p_inp1, + const WORD32 *const p_inp1_shape, + const FLOAT32 * __restrict__ p_inp2, + const WORD32 *const p_inp2_shape); + +-extern "C" WORD32 xa_nn_elm_div_mode_f32xf32_f32(FLOAT32 * __restrict__ p_out, ++extern "C" WORD32 xa_nn_elm_div_mode_f32xf32_f32( ++ FLOAT32 * __restrict__ p_out, + const FLOAT32 * __restrict__ p_inp1, + const FLOAT32 * __restrict__ p_inp2, + WORD32 num_elm, +@@ -45,7 +48,8 @@ extern "C" WORD32 xa_nn_elm_div_mode_broadcast_4D_f32xf32_f32( + const WORD32 *const p_inp2_shape, + WORD32 mode); + +-extern "C" WORD32 xa_nn_elm_mul_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out, ++extern "C" WORD32 xa_nn_elm_mul_broadcast_4D_f32xf32_f32( ++ FLOAT32 * __restrict__ p_out, + const WORD32 *const p_out_shape, + const FLOAT32 * __restrict__ p_inp1, + const WORD32 *const p_inp1_shape, +diff --git a/backends/cadence/hifi/operators/op_add.cpp b/backends/cadence/hifi/operators/op_add.cpp +index 883cc74d..56adab71 100644 +--- a/backends/cadence/hifi/operators/op_add.cpp ++++ b/backends/cadence/hifi/operators/op_add.cpp +@@ -6,13 +6,13 @@ + * LICENSE file in the root directory of this source tree. + */ + ++#include + #include + #include + #include + #include + #include + #include +-#include + + using exec_aten::Scalar; + using exec_aten::ScalarType; +@@ -23,7 +23,7 @@ using executorch::runtime::KernelRuntimeContext; + using torch::executor::Error; + + namespace impl { +-namespace HiFi { ++namespace HiFi { + namespace native { + + namespace { +@@ -97,14 +97,15 @@ Tensor& add_out( + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); +- ScalarType alpha_type = torch::executor::native::utils::get_scalar_dtype(alpha); ++ ScalarType alpha_type = ++ torch::executor::native::utils::get_scalar_dtype(alpha); + ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); + ScalarType out_type = out.scalar_type(); + + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + ET_KERNEL_CHECK( + ctx, check_alpha_type(alpha_type, common_type), InvalidArgument, out); +- ++ + float alpha_val; + torch::executor::native::utils::extract_scalar(alpha, &alpha_val); + +@@ -119,30 +120,28 @@ Tensor& add_out( + const bool broadcast = (a_is_broadcasted || b_is_broadcasted); + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; +- +- if((out_type != ScalarType::Float) || (alpha_val != 1.0)) ++ ++ if ((out_type != ScalarType::Float) || (alpha_val != 1.0)) + optimized = 0; +- +- if((a_dim == 0) || (b_dim == 0) ) ++ ++ if ((a_dim == 0) || (b_dim == 0) ) + optimized = 0; + +- if((broadcast == 1) && (max_dim > kNnlibMaxDim)) ++ if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) + optimized = 0; + + +- if(optimized) +- { ++ if (optimized) { + const float* const a_data = a.const_data_ptr(); + const float* const b_data = b.const_data_ptr(); + float* const out_data = out.mutable_data_ptr(); +- if(broadcast == 1) +- { ++ ++ if(broadcast == 1) { + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; + +- for(int i = 0; i < kNnlibMaxDim; i++) +- { ++ for (int i = 0; i < kNnlibMaxDim; i++) { + out_shape[i] = 1; + inp1_shape[i] = 1; + inp2_shape[i] = 1; +@@ -152,15 +151,15 @@ Tensor& add_out( + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); + +- for(int i = 0; i < out.dim(); i++) ++ for (int i = 0; i < out.dim(); i++) + out_shape[i+off_o] = out.size(i); +- for(int i = 0; i < a.dim(); i++) ++ for (int i = 0; i < a.dim(); i++) + inp1_shape[i+off_a] = a.size(i); +- for(int i = 0; i < b.dim(); i++) ++ for (int i = 0; i < b.dim(); i++) + inp2_shape[i+off_b] = b.size(i); + +- xa_nn_elm_add_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape, +- b_data, inp2_shape); ++ xa_nn_elm_add_broadcast_4D_f32xf32_f32( ++ out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); + } + else + { +@@ -193,6 +192,6 @@ Tensor& add_out( + } + + +-} // namespace impl +-} // namespace HiFi + } // namespace native ++} // namespace HiFi ++} // namespace impl +diff --git a/backends/cadence/hifi/operators/op_div.cpp b/backends/cadence/hifi/operators/op_div.cpp +index 41220e5d..e887e8b5 100644 +--- a/backends/cadence/hifi/operators/op_div.cpp ++++ b/backends/cadence/hifi/operators/op_div.cpp +@@ -6,6 +6,7 @@ + * LICENSE file in the root directory of this source tree. + */ + ++#include + #include + #include + #include +@@ -13,7 +14,6 @@ + #include + #include + #include +-#include + + using exec_aten::Scalar; + using exec_aten::ScalarType; +@@ -22,7 +22,7 @@ using executorch::aten::RuntimeContext; + using torch::executor::Error; + + namespace impl { +-namespace HiFi { ++namespace HiFi { + namespace native { + + namespace { +@@ -74,29 +74,27 @@ div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
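// Every optimized binary op in these hunks gates the NNLIB fast path the same
// way: float32 operands/output, no zero-dimensional inputs, and at most
// kNnlibMaxDim dimensions when broadcasting; anything else keeps the portable
// ET_SWITCH fallback. A minimal standalone sketch of that gate, with plain
// flags standing in for the Tensor/dtype queries used in the diff
// (use_nnlib_fast_path is an illustrative name, not a function in the patch):
#include <algorithm>

constexpr int kNnlibMaxDim = 4; // fallback if broadcast and dim > 4

bool use_nnlib_fast_path(
    bool all_float32, // a/b/out pass the ScalarType::Float checks
    bool extra_ok,    // op-specific condition, e.g. alpha == 1.0 for add/sub
    int a_dim,
    int b_dim,
    int out_dim,
    bool broadcast) {
  if (!all_float32 || !extra_ok)
    return false; // portable kernel handles these cases
  if (a_dim == 0 || b_dim == 0)
    return false; // zero-dim tensors take the fallback
  int max_dim = std::max({a_dim, b_dim, out_dim});
  if (broadcast && max_dim > kNnlibMaxDim)
    return false; // the NNLIB broadcast kernels are 4D only
  return true;
}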
out.dim() : max_dim; + +- if((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) ++ if ((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) + optimized = 0; + +- if((a_dim == 0) || (b_dim == 0) ) ++ if ((a_dim == 0) || (b_dim == 0) ) + optimized = 0; + +- if((broadcast == 1) && (max_dim > kNnlibMaxDim)) ++ if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) + optimized = 0; + +- if(optimized) +- { ++ if (optimized) { + float* a_data = a.mutable_data_ptr(); + float* b_data = b.mutable_data_ptr(); + float* out_data = out.mutable_data_ptr(); + +- if(broadcast == 1) +- { ++ if (broadcast == 1) { + + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; + +- for(int i = 0; i < kNnlibMaxDim; i++) ++ for (int i = 0; i < kNnlibMaxDim; i++) + { + out_shape[i] = 1; + inp1_shape[i] = 1; +@@ -106,34 +104,35 @@ div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); +- for(int i = 0; i < out.dim(); i++) ++ for (int i = 0; i < out.dim(); i++) + out_shape[i+off_o] = out.size(i); +- for(int i = 0; i < a.dim(); i++) ++ for (int i = 0; i < a.dim(); i++) + inp1_shape[i+off_a] = a.size(i); +- for(int i = 0; i < b.dim(); i++) ++ for (int i = 0; i < b.dim(); i++) + inp2_shape[i+off_b] = b.size(i); + +- xa_nn_elm_div_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); ++ xa_nn_elm_div_broadcast_4D_f32xf32_f32( ++ out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); + } + else + { +- + xa_nn_elm_div_f32xf32_f32(out_data, a_data, b_data, out.numel()); + } +- ++ + return out; + } +- ++ + ScalarType common_type = get_compute_type(a_type, b_type); + ScalarType out_type = out.scalar_type(); +- ++ + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); +- ++ + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.out", CTYPE_A, [&]() { + ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out", CTYPE_B, [&]() { + ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out", CTYPE_IN, [&]() { + ET_SWITCH_FLOAT_TYPES(out_type, ctx, "div.out", CTYPE_OUT, [&]() { +- torch::executor::apply_binary_elementwise_fn( ++ torch::executor:: ++ apply_binary_elementwise_fn( + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); +@@ -188,13 +187,13 @@ Tensor& div_out_mode( + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; + +- if((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) ++ if ((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) + optimized = 0; + +- if((a_dim == 0) || (b_dim == 0)) ++ if ((a_dim == 0) || (b_dim == 0)) + optimized = 0; + +- if((broadcast == 1) && (max_dim > kNnlibMaxDim)) ++ if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) + optimized = 0; + int mode_val = -1; + if (mode.has_value() && mode.value() == "trunc") +@@ -204,20 +203,17 @@ Tensor& div_out_mode( + else + optimized = 0; + +- if(optimized) +- { ++ if (optimized) { + float* a_data = a.mutable_data_ptr(); + float* b_data = b.mutable_data_ptr(); + float* out_data = out.mutable_data_ptr(); + +- if(broadcast) +- { ++ if (broadcast) { + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; + +- for(int i = 0; i < kNnlibMaxDim; i++) +- { ++ for (int i = 0; i < kNnlibMaxDim; i++) { + inp1_shape[i] = 1; + inp2_shape[i] = 1; + out_shape[i] = 1; +@@ -227,18 +223,20 @@ Tensor& div_out_mode( + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); + +- for(int i = 0; i < out.dim(); i++) ++ for (int i = 0; i < out.dim(); i++) + out_shape[i+off_o] = out.size(i); +- for(int i = 0; i < a.dim(); i++) ++ for (int i = 0; i < a.dim(); i++) + inp1_shape[i+off_a] = a.size(i); +- for(int i = 0; i < b.dim(); i++) ++ for (int i = 0; i < b.dim(); i++) + inp2_shape[i+off_b] = b.size(i); + +- xa_nn_elm_div_mode_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape, mode_val); ++ xa_nn_elm_div_mode_broadcast_4D_f32xf32_f32( ++ out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape, mode_val); + } + else + { +- xa_nn_elm_div_mode_f32xf32_f32(out_data, a_data, b_data, out.numel(), mode_val); ++ xa_nn_elm_div_mode_f32xf32_f32( ++ out_data, a_data, b_data, out.numel(), mode_val); + } + + return out; +@@ -248,7 +246,8 @@ Tensor& div_out_mode( + ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out_mode", CTYPE_B, [&]() { + ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out_mode", CTYPE_IN, [&]() { + ET_SWITCH_REAL_TYPES(out_type, ctx, "div.out_mode", CTYPE_OUT, [&]() { +- torch::executor::apply_binary_elementwise_fn( ++ torch::executor:: ++ apply_binary_elementwise_fn( + [mode](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); +@@ -272,6 +271,6 @@ Tensor& div_out_mode( + } + + +-} // namespace impl +-} // namespace HiFi + } // namespace native ++} // namespace HiFi ++} // namespace impl +diff --git a/backends/cadence/hifi/operators/op_mul.cpp b/backends/cadence/hifi/operators/op_mul.cpp +index 9200d980..1b2e62cd 100644 +--- a/backends/cadence/hifi/operators/op_mul.cpp ++++ b/backends/cadence/hifi/operators/op_mul.cpp +@@ -6,12 +6,12 @@ + * LICENSE file in the root directory of this source tree. 
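// div_out_mode forwards an integer mode flag to the
// xa_nn_elm_div_mode_*_f32xf32_f32 kernels for "trunc" and "floor" and drops
// to the portable path for any other mode; the exact flag values are not
// visible in this hunk. The sketch below only restates the rounding semantics
// the two modes stand for (std::floor stands in for the utils::floor_divide
// helper used by the fallback):
#include <cmath>
#include <optional>
#include <string>

inline float div_with_mode(
    float a, float b, const std::optional<std::string>& mode) {
  float q = a / b;
  if (mode.has_value() && *mode == "trunc")
    return std::trunc(q); // round toward zero:  7 / -2 -> -3
  if (mode.has_value() && *mode == "floor")
    return std::floor(q); // round toward -inf:  7 / -2 -> -4
  return q;               // no mode: true division
}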
+ */ + ++#include + #include + #include + #include + #include + #include +-#include + + using exec_aten::Scalar; + using exec_aten::ScalarType; +@@ -86,7 +86,7 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { + ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); + ScalarType out_type = out.scalar_type(); + constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ +- ++ + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); + bool optimized = 1; + /*find broadcast*/ +@@ -97,28 +97,25 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { + max_dim = out.dim() > max_dim ? out.dim() : max_dim; + + +- if((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) ++ if ((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) + optimized = 0; + +- if( (a_dim == 0) || (b_dim == 0) ) ++ if ((a_dim == 0) || (b_dim == 0) ) + optimized = 0; + +- if((broadcast == 1) && (max_dim > kNnlibMaxDim)) ++ if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) + optimized = 0; + +- if(optimized) +- { ++ if (optimized) { + float* a_data = a.mutable_data_ptr(); + float* b_data = b.mutable_data_ptr(); + float* out_data = out.mutable_data_ptr(); + +- if(broadcast == 1) +- { ++ if (broadcast == 1) { + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; +- for(int i = 0; i < kNnlibMaxDim; i++) +- { ++ for (int i = 0; i < kNnlibMaxDim; i++) { + out_shape[i] = 1; + inp1_shape[i] = 1; + inp2_shape[i] = 1; +@@ -126,14 +123,15 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); +- for(int i = 0; i < out.dim(); i++){ +- out_shape[i+off_o] = out.size(i);} +- for(int i = 0; i < a.dim(); i++) ++ for (int i = 0; i < out.dim(); i++) ++ out_shape[i+off_o] = out.size(i); ++ for (int i = 0; i < a.dim(); i++) + inp1_shape[i+off_a] = a.size(i); +- for(int i = 0; i < b.dim(); i++) ++ for (int i = 0; i < b.dim(); i++) + inp2_shape[i+off_b] = b.size(i); + +- xa_nn_elm_mul_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); ++ xa_nn_elm_mul_broadcast_4D_f32xf32_f32( ++ out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); + } + else + { +@@ -154,7 +152,7 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { + CTYPE_A, + CTYPE_B, + CTYPE_IN, +- CTYPE_OUT>::run(a, b, out); ++ CTYPE_OUT>::run(a, b, out); + }); + }); + }); +@@ -162,6 +160,6 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { + return out; + } + +-} // namespace impl +-} // namespace HiFi + } // namespace native ++} // namespace HiFi ++} // namespace impl +diff --git a/backends/cadence/hifi/operators/op_sigmoid.cpp b/backends/cadence/hifi/operators/op_sigmoid.cpp +index fa408d4b..1ed89880 100644 +--- a/backends/cadence/hifi/operators/op_sigmoid.cpp ++++ b/backends/cadence/hifi/operators/op_sigmoid.cpp +@@ -8,9 +8,9 @@ + + #include + ++#include + #include + #include +-#include + + using exec_aten::ScalarType; + using exec_aten::Tensor; +@@ -18,7 +18,7 @@ using executorch::aten::RuntimeContext; + using torch::executor::Error; + + namespace impl { +-namespace HiFi { ++namespace HiFi { + namespace native { + + using Tensor = exec_aten::Tensor; +@@ -40,13 +40,12 @@ Tensor& sigmoid_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { + + ScalarType in_type = in.scalar_type(); + 
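// To reuse the 4D NNLIB broadcast kernels, each fast path right-aligns the
// real sizes into a kNnlibMaxDim-long shape array padded with leading 1s,
// exactly as the out_shape/inp1_shape/inp2_shape loops above do. A standalone
// sketch of that padding step (pad_shape_to_4d is an illustrative helper, not
// a function in the patch; it assumes sizes.size() <= 4, which the dim gate
// guarantees):
#include <array>
#include <vector>

std::array<int, 4> pad_shape_to_4d(const std::vector<int>& sizes) {
  std::array<int, 4> shape{1, 1, 1, 1};         // unused leading dims stay 1
  int off = 4 - static_cast<int>(sizes.size()); // right-align the real dims
  for (int i = 0; i < static_cast<int>(sizes.size()); i++)
    shape[i + off] = sizes[i];                  // mirrors out_shape[i + off_o] = out.size(i)
  return shape;
}
// e.g. a {8, 3} tensor becomes {1, 1, 8, 3}, which the 4D kernel can
// broadcast against a {2, 5, 8, 3} operand.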
ScalarType out_type = out.scalar_type(); +- ++ + bool optimized = 1; +- if((in_type != ScalarType::Float) || (out_type != ScalarType::Float)) ++ if ((in_type != ScalarType::Float) || (out_type != ScalarType::Float)) + optimized = 0; + +- if(optimized) +- { ++ if (optimized) { + float* data_in = in.mutable_data_ptr(); + float* data_out = out.mutable_data_ptr(); + xa_nn_vec_sigmoid_f32_f32(data_out, data_in, in.numel()); +diff --git a/backends/cadence/hifi/operators/op_sub.cpp b/backends/cadence/hifi/operators/op_sub.cpp +index b9f35caf..d9958bf8 100644 +--- a/backends/cadence/hifi/operators/op_sub.cpp ++++ b/backends/cadence/hifi/operators/op_sub.cpp +@@ -6,25 +6,25 @@ + * LICENSE file in the root directory of this source tree. + */ + ++#include + #include + #include + #include + #include + #include + #include +-#include + + using exec_aten::Scalar; + using exec_aten::ScalarType; + using exec_aten::Tensor; ++using executorch::aten::RuntimeContext; + using executorch::runtime::can_cast; + using executorch::runtime::CppTypeToScalarType; +-using executorch::aten::RuntimeContext; + using torch::executor::Error; + + + namespace impl { +-namespace HiFi { ++namespace HiFi { + namespace native { + + namespace { +@@ -92,7 +92,8 @@ Tensor& sub_out( + + ScalarType a_type = a.scalar_type(); + ScalarType b_type = b.scalar_type(); +- ScalarType alpha_type = torch::executor::native::utils::get_scalar_dtype(alpha); ++ ScalarType alpha_type = ++ torch::executor::native::utils::get_scalar_dtype(alpha); + ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); + ScalarType out_type = out.scalar_type(); + +@@ -115,18 +116,17 @@ Tensor& sub_out( + int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); + max_dim = out.dim() > max_dim ? out.dim() : max_dim; + +- if((out_type != ScalarType::Float) || (alpha_val != 1.0)) ++ if ((out_type != ScalarType::Float) || (alpha_val != 1.0)) + optimized = 0; + +- if((a_dim == 0) || (b_dim == 0)) ++ if ((a_dim == 0) || (b_dim == 0)) + optimized = 0; + +- if((broadcast == 1) && (max_dim > kNnlibMaxDim)) ++ if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) + optimized = 0; + + +- if(optimized) +- { ++ if (optimized) { + /*logic to find broadcast*/ + const int a_is_broadcasted = !out.sizes().equals(a.sizes()); + const int b_is_broadcasted = !out.sizes().equals(b.sizes()); +@@ -135,14 +135,12 @@ Tensor& sub_out( + const float* const a_data = a.const_data_ptr(); + const float* const b_data = b.const_data_ptr(); + float* const out_data = out.mutable_data_ptr(); +- if(broadcast == 1) +- { ++ if (broadcast == 1) { + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; + +- for(int i = 0; i < kNnlibMaxDim; i++) +- { ++ for (int i = 0; i < kNnlibMaxDim; i++) { + out_shape[i] = 1; + inp1_shape[i] = 1; + inp2_shape[i] = 1; +@@ -151,14 +149,15 @@ Tensor& sub_out( + int off_o = kNnlibMaxDim - out_dim; + int off_a = kNnlibMaxDim - a_dim; + int off_b = kNnlibMaxDim - b_dim; +- for(int i = 0; i < out_dim; i++) ++ for (int i = 0; i < out_dim; i++) + out_shape[i+off_o] = out.size(i); +- for(int i = 0; i < a_dim; i++) ++ for (int i = 0; i < a_dim; i++) + inp1_shape[i+off_a] = a.size(i); +- for(int i = 0; i < b_dim; i++) ++ for (int i = 0; i < b_dim; i++) + inp2_shape[i+off_b] = b.size(i); + +- xa_nn_elm_sub_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape,b_data, inp2_shape); ++ xa_nn_elm_sub_broadcast_4D_f32xf32_f32( ++ out_data, out_shape, a_data, inp1_shape,b_data, inp2_shape); + } + else + { +@@ -190,6 +189,6 @@ 
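// add.out/sub.out only take the NNLIB path when alpha == 1.0, because the
// float kernels are called without an alpha argument; otherwise the portable
// fallback folds alpha in, as the SubInner lambda above does. Reference
// semantics of the two ops (plain floats stand in for the casted CTYPE_IN
// values):
inline float add_with_alpha(float a, float b, float alpha) {
  return a + alpha * b; // alpha == 1.0f matches what the NNLIB fast path computes
}

inline float sub_with_alpha(float a, float b, float alpha) {
  return a - alpha * b; // same shape as a_casted - alpha_val * b_casted above
}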
Tensor& sub_out( + return out; + } + +-} // namespace impl +-} // namespace HiFi + } // namespace native ++} // namespace HiFi ++} // namespace impl +diff --git a/backends/cadence/hifi/operators/op_tanh.cpp b/backends/cadence/hifi/operators/op_tanh.cpp +index a80450b8..7989ac3b 100644 +--- a/backends/cadence/hifi/operators/op_tanh.cpp ++++ b/backends/cadence/hifi/operators/op_tanh.cpp +@@ -6,10 +6,10 @@ + * LICENSE file in the root directory of this source tree. + */ + ++#include + #include + #include + #include +-#include + + using exec_aten::ScalarType; + using exec_aten::Tensor; +@@ -17,28 +17,29 @@ using executorch::aten::RuntimeContext; + using torch::executor::Error; + + namespace impl { +-namespace HiFi { ++namespace HiFi { + namespace native { + + + Tensor& tanh_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { + + bool optimized = 1; +- if((in.scalar_type() != ScalarType::Float) || (out.scalar_type() != ScalarType::Float)) +- optimized = 0; ++ if ((in.scalar_type() != ScalarType::Float) || ++ (out.scalar_type() != ScalarType::Float)) ++ optimized = 0; + +- if(optimized) +- { ++ if (optimized) { + float* data_in = in.mutable_data_ptr(); + float* data_out = out.mutable_data_ptr(); + xa_nn_vec_tanh_f32_f32(data_out, data_in, (int)in.numel()); + return out; + } + +- return torch::executor::native::internal::unary_ufunc_realhb_to_floath(std::tanh, ctx, in, out); ++ return torch::executor::native::internal::unary_ufunc_realhb_to_floath( ++ std::tanh, ctx, in, out); + + } + +-} // namespace impl +-} // namespace HiFi + } // namespace native ++} // namespace HiFi ++} // namespace impl diff --git a/backends/cadence/cadence.cmake b/backends/cadence/cadence.cmake index cb6a25315ea..0fa55c6a65b 100644 --- a/backends/cadence/cadence.cmake +++ b/backends/cadence/cadence.cmake @@ -44,7 +44,7 @@ set(CMAKE_CXX_COMPILER ${TOOLCHAIN_HOME}/bin/${CROSS_COMPILE_TARGET}-clang++) set(CMAKE_C_FLAGS_INIT "-stdlib=libc++ -mtext-section-literals -mlongcalls") set(CMAKE_CXX_FLAGS_INIT "-stdlib=libc++ -mtext-section-literals -mlongcalls") #workaround for larger compilation time -SET(CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS_INIT} -fno-strict-aliasing") +set(CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS_INIT} -fno-strict-aliasing") set(CMAKE_SYSROOT ${TOOLCHAIN_HOME}/${SYSROOT_TARGET}) set(CMAKE_LINKER ${TOOLCHAIN_HOME}/bin/xt-ld) diff --git a/backends/cadence/hifi/kernels/kernels.h b/backends/cadence/hifi/kernels/kernels.h index 8faf06713b7..a08144d9834 100644 --- a/backends/cadence/hifi/kernels/kernels.h +++ b/backends/cadence/hifi/kernels/kernels.h @@ -16,21 +16,24 @@ #include "xa_nnlib_kernels_api.h" /* Potential NNLIB function/APIs */ -extern "C" WORD32 xa_nn_elm_add_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out, +extern "C" WORD32 xa_nn_elm_add_broadcast_4D_f32xf32_f32( + FLOAT32 * __restrict__ p_out, const WORD32 *const p_out_shape, const FLOAT32 * __restrict__ p_inp1, const WORD32 *const p_inp1_shape, const FLOAT32 * __restrict__ p_inp2, const WORD32 *const p_inp2_shape); -extern "C" WORD32 xa_nn_elm_div_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out, +extern "C" WORD32 xa_nn_elm_div_broadcast_4D_f32xf32_f32( + FLOAT32 * __restrict__ p_out, const WORD32 *const p_out_shape, const FLOAT32 * __restrict__ p_inp1, const WORD32 *const p_inp1_shape, const FLOAT32 * __restrict__ p_inp2, const WORD32 *const p_inp2_shape); -extern "C" WORD32 xa_nn_elm_div_mode_f32xf32_f32(FLOAT32 * __restrict__ p_out, +extern "C" WORD32 xa_nn_elm_div_mode_f32xf32_f32( + FLOAT32 * __restrict__ p_out, const FLOAT32 * 
__restrict__ p_inp1, const FLOAT32 * __restrict__ p_inp2, WORD32 num_elm, @@ -45,7 +48,8 @@ extern "C" WORD32 xa_nn_elm_div_mode_broadcast_4D_f32xf32_f32( const WORD32 *const p_inp2_shape, WORD32 mode); -extern "C" WORD32 xa_nn_elm_mul_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__ p_out, +extern "C" WORD32 xa_nn_elm_mul_broadcast_4D_f32xf32_f32( + FLOAT32 * __restrict__ p_out, const WORD32 *const p_out_shape, const FLOAT32 * __restrict__ p_inp1, const WORD32 *const p_inp1_shape, diff --git a/backends/cadence/hifi/operators/op_add.cpp b/backends/cadence/hifi/operators/op_add.cpp index 883cc74d6ca..56adab71a02 100644 --- a/backends/cadence/hifi/operators/op_add.cpp +++ b/backends/cadence/hifi/operators/op_add.cpp @@ -6,13 +6,13 @@ * LICENSE file in the root directory of this source tree. */ +#include #include #include #include #include #include #include -#include using exec_aten::Scalar; using exec_aten::ScalarType; @@ -23,7 +23,7 @@ using executorch::runtime::KernelRuntimeContext; using torch::executor::Error; namespace impl { -namespace HiFi { +namespace HiFi { namespace native { namespace { @@ -97,14 +97,15 @@ Tensor& add_out( ScalarType a_type = a.scalar_type(); ScalarType b_type = b.scalar_type(); - ScalarType alpha_type = torch::executor::native::utils::get_scalar_dtype(alpha); + ScalarType alpha_type = + torch::executor::native::utils::get_scalar_dtype(alpha); ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); ScalarType out_type = out.scalar_type(); ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); ET_KERNEL_CHECK( ctx, check_alpha_type(alpha_type, common_type), InvalidArgument, out); - + float alpha_val; torch::executor::native::utils::extract_scalar(alpha, &alpha_val); @@ -119,30 +120,28 @@ Tensor& add_out( const bool broadcast = (a_is_broadcasted || b_is_broadcasted); int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; - - if((out_type != ScalarType::Float) || (alpha_val != 1.0)) + + if ((out_type != ScalarType::Float) || (alpha_val != 1.0)) optimized = 0; - - if((a_dim == 0) || (b_dim == 0) ) + + if ((a_dim == 0) || (b_dim == 0) ) optimized = 0; - if((broadcast == 1) && (max_dim > kNnlibMaxDim)) + if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) optimized = 0; - if(optimized) - { + if (optimized) { const float* const a_data = a.const_data_ptr(); const float* const b_data = b.const_data_ptr(); float* const out_data = out.mutable_data_ptr(); - if(broadcast == 1) - { + + if(broadcast == 1) { int out_shape[kNnlibMaxDim]; int inp1_shape[kNnlibMaxDim]; int inp2_shape[kNnlibMaxDim]; - for(int i = 0; i < kNnlibMaxDim; i++) - { + for (int i = 0; i < kNnlibMaxDim; i++) { out_shape[i] = 1; inp1_shape[i] = 1; inp2_shape[i] = 1; @@ -152,15 +151,15 @@ Tensor& add_out( int off_a = kNnlibMaxDim - a.dim(); int off_b = kNnlibMaxDim - b.dim(); - for(int i = 0; i < out.dim(); i++) + for (int i = 0; i < out.dim(); i++) out_shape[i+off_o] = out.size(i); - for(int i = 0; i < a.dim(); i++) + for (int i = 0; i < a.dim(); i++) inp1_shape[i+off_a] = a.size(i); - for(int i = 0; i < b.dim(); i++) + for (int i = 0; i < b.dim(); i++) inp2_shape[i+off_b] = b.size(i); - xa_nn_elm_add_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape, - b_data, inp2_shape); + xa_nn_elm_add_broadcast_4D_f32xf32_f32( + out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); } else { @@ -193,6 +192,6 @@ Tensor& add_out( } -} // namespace impl -} // namespace HiFi } // namespace native +} // namespace HiFi +} // namespace impl diff --git a/backends/cadence/hifi/operators/op_div.cpp b/backends/cadence/hifi/operators/op_div.cpp index 41220e5d0b7..e887e8b51b2 100644 --- a/backends/cadence/hifi/operators/op_div.cpp +++ b/backends/cadence/hifi/operators/op_div.cpp @@ -6,6 +6,7 @@ * LICENSE file in the root directory of this source tree. */ +#include #include #include #include @@ -13,7 +14,6 @@ #include #include #include -#include using exec_aten::Scalar; using exec_aten::ScalarType; @@ -22,7 +22,7 @@ using executorch::aten::RuntimeContext; using torch::executor::Error; namespace impl { -namespace HiFi { +namespace HiFi { namespace native { namespace { @@ -74,29 +74,27 @@ div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; - if((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) + if ((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) optimized = 0; - if((a_dim == 0) || (b_dim == 0) ) + if ((a_dim == 0) || (b_dim == 0) ) optimized = 0; - if((broadcast == 1) && (max_dim > kNnlibMaxDim)) + if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) optimized = 0; - if(optimized) - { + if (optimized) { float* a_data = a.mutable_data_ptr(); float* b_data = b.mutable_data_ptr(); float* out_data = out.mutable_data_ptr(); - if(broadcast == 1) - { + if (broadcast == 1) { int out_shape[kNnlibMaxDim]; int inp1_shape[kNnlibMaxDim]; int inp2_shape[kNnlibMaxDim]; - for(int i = 0; i < kNnlibMaxDim; i++) + for (int i = 0; i < kNnlibMaxDim; i++) { out_shape[i] = 1; inp1_shape[i] = 1; @@ -106,34 +104,35 @@ div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { int off_o = kNnlibMaxDim - out.dim(); int off_a = kNnlibMaxDim - a.dim(); int off_b = kNnlibMaxDim - b.dim(); - for(int i = 0; i < out.dim(); i++) + for (int i = 0; i < out.dim(); i++) out_shape[i+off_o] = out.size(i); - for(int i = 0; i < a.dim(); i++) + for (int i = 0; i < a.dim(); i++) inp1_shape[i+off_a] = a.size(i); - for(int i = 0; i < b.dim(); i++) + for (int i = 0; i < b.dim(); i++) inp2_shape[i+off_b] = b.size(i); - xa_nn_elm_div_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); + xa_nn_elm_div_broadcast_4D_f32xf32_f32( + out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); } else { - xa_nn_elm_div_f32xf32_f32(out_data, a_data, b_data, out.numel()); } - + return out; } - + ScalarType common_type = get_compute_type(a_type, b_type); ScalarType out_type = out.scalar_type(); - + ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); - + ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.out", CTYPE_A, [&]() { ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out", CTYPE_B, [&]() { ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out", CTYPE_IN, [&]() { ET_SWITCH_FLOAT_TYPES(out_type, ctx, "div.out", CTYPE_OUT, [&]() { - torch::executor::apply_binary_elementwise_fn( + torch::executor:: + apply_binary_elementwise_fn( [](const CTYPE_A val_a, const CTYPE_B val_b) { CTYPE_IN a_casted = static_cast(val_a); CTYPE_IN b_casted = static_cast(val_b); @@ -188,13 +187,13 @@ Tensor& div_out_mode( int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; - if((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) + if ((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) optimized = 0; - if((a_dim == 0) || (b_dim == 0)) + if ((a_dim == 0) || (b_dim == 0)) optimized = 0; - if((broadcast == 1) && (max_dim > kNnlibMaxDim)) + if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) optimized = 0; int mode_val = -1; if (mode.has_value() && mode.value() == "trunc") @@ -204,20 +203,17 @@ Tensor& div_out_mode( else optimized = 0; - if(optimized) - { + if (optimized) { float* a_data = a.mutable_data_ptr(); float* b_data = b.mutable_data_ptr(); float* out_data = out.mutable_data_ptr(); - if(broadcast) - { + if (broadcast) { int out_shape[kNnlibMaxDim]; int inp1_shape[kNnlibMaxDim]; int inp2_shape[kNnlibMaxDim]; - for(int i = 0; i < kNnlibMaxDim; i++) - { + for (int i = 0; i < kNnlibMaxDim; i++) { inp1_shape[i] = 1; inp2_shape[i] = 1; out_shape[i] = 1; @@ -227,18 +223,20 @@ Tensor& div_out_mode( int off_a = kNnlibMaxDim - a.dim(); int off_b = kNnlibMaxDim - b.dim(); - for(int i = 0; i < out.dim(); i++) + for (int i = 0; i < out.dim(); i++) out_shape[i+off_o] = out.size(i); - for(int i = 0; i < a.dim(); i++) + for (int i = 0; i < a.dim(); i++) inp1_shape[i+off_a] = a.size(i); - for(int i = 0; i < b.dim(); i++) + for (int i = 0; i < b.dim(); i++) inp2_shape[i+off_b] = b.size(i); - xa_nn_elm_div_mode_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape, mode_val); + xa_nn_elm_div_mode_broadcast_4D_f32xf32_f32( + out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape, mode_val); } else { - xa_nn_elm_div_mode_f32xf32_f32(out_data, a_data, b_data, out.numel(), mode_val); + xa_nn_elm_div_mode_f32xf32_f32( + out_data, a_data, b_data, out.numel(), mode_val); } return out; @@ -248,7 +246,8 @@ Tensor& div_out_mode( ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out_mode", CTYPE_B, [&]() { ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out_mode", CTYPE_IN, [&]() { ET_SWITCH_REAL_TYPES(out_type, ctx, "div.out_mode", CTYPE_OUT, [&]() { - torch::executor::apply_binary_elementwise_fn( + torch::executor:: + apply_binary_elementwise_fn( [mode](const CTYPE_A val_a, const CTYPE_B val_b) { CTYPE_IN a_casted = static_cast(val_a); CTYPE_IN b_casted = static_cast(val_b); @@ -272,6 +271,6 @@ Tensor& div_out_mode( } -} // namespace impl -} // namespace HiFi } // namespace native +} // namespace HiFi +} // namespace impl diff --git a/backends/cadence/hifi/operators/op_mul.cpp b/backends/cadence/hifi/operators/op_mul.cpp index 9200d9802e1..1b2e62cdb0e 100644 --- a/backends/cadence/hifi/operators/op_mul.cpp +++ b/backends/cadence/hifi/operators/op_mul.cpp @@ -6,12 +6,12 @@ * LICENSE file in the root directory of this source tree. */ +#include #include #include #include #include #include -#include using exec_aten::Scalar; using exec_aten::ScalarType; @@ -86,7 +86,7 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); ScalarType out_type = out.scalar_type(); constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ - + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); bool optimized = 1; /*find broadcast*/ @@ -97,28 +97,25 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; - if((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) + if ((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) optimized = 0; - if( (a_dim == 0) || (b_dim == 0) ) + if ((a_dim == 0) || (b_dim == 0) ) optimized = 0; - if((broadcast == 1) && (max_dim > kNnlibMaxDim)) + if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) optimized = 0; - if(optimized) - { + if (optimized) { float* a_data = a.mutable_data_ptr(); float* b_data = b.mutable_data_ptr(); float* out_data = out.mutable_data_ptr(); - if(broadcast == 1) - { + if (broadcast == 1) { int out_shape[kNnlibMaxDim]; int inp1_shape[kNnlibMaxDim]; int inp2_shape[kNnlibMaxDim]; - for(int i = 0; i < kNnlibMaxDim; i++) - { + for (int i = 0; i < kNnlibMaxDim; i++) { out_shape[i] = 1; inp1_shape[i] = 1; inp2_shape[i] = 1; @@ -126,14 +123,15 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { int off_o = kNnlibMaxDim - out.dim(); int off_a = kNnlibMaxDim - a.dim(); int off_b = kNnlibMaxDim - b.dim(); - for(int i = 0; i < out.dim(); i++){ - out_shape[i+off_o] = out.size(i);} - for(int i = 0; i < a.dim(); i++) + for (int i = 0; i < out.dim(); i++) + out_shape[i+off_o] = out.size(i); + for (int i = 0; i < a.dim(); i++) inp1_shape[i+off_a] = a.size(i); - for(int i = 0; i < b.dim(); i++) + for (int i = 0; i < b.dim(); i++) inp2_shape[i+off_b] = b.size(i); - xa_nn_elm_mul_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); + xa_nn_elm_mul_broadcast_4D_f32xf32_f32( + out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); } else { @@ -154,7 +152,7 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { CTYPE_A, CTYPE_B, CTYPE_IN, - CTYPE_OUT>::run(a, b, out); + CTYPE_OUT>::run(a, b, out); }); }); }); @@ -162,6 +160,6 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { return out; } -} // namespace impl -} // namespace HiFi } // namespace native +} // namespace HiFi +} // namespace impl diff --git a/backends/cadence/hifi/operators/op_sigmoid.cpp b/backends/cadence/hifi/operators/op_sigmoid.cpp index fa408d4b0a0..1ed89880d5c 100644 --- a/backends/cadence/hifi/operators/op_sigmoid.cpp +++ b/backends/cadence/hifi/operators/op_sigmoid.cpp @@ -8,9 +8,9 @@ #include +#include #include #include -#include using exec_aten::ScalarType; using exec_aten::Tensor; @@ -18,7 +18,7 @@ using executorch::aten::RuntimeContext; using torch::executor::Error; namespace impl { -namespace HiFi { +namespace HiFi { namespace native { using Tensor = exec_aten::Tensor; @@ -40,13 +40,12 @@ Tensor& sigmoid_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { ScalarType in_type = in.scalar_type(); ScalarType out_type = out.scalar_type(); - + bool optimized = 1; - if((in_type != ScalarType::Float) || (out_type != ScalarType::Float)) + if ((in_type != ScalarType::Float) || (out_type != ScalarType::Float)) optimized = 0; - if(optimized) - { + if (optimized) { float* data_in = in.mutable_data_ptr(); float* data_out = out.mutable_data_ptr(); xa_nn_vec_sigmoid_f32_f32(data_out, data_in, in.numel()); diff --git a/backends/cadence/hifi/operators/op_sub.cpp b/backends/cadence/hifi/operators/op_sub.cpp index b9f35cafddb..d9958bf8e6d 100644 --- a/backends/cadence/hifi/operators/op_sub.cpp +++ b/backends/cadence/hifi/operators/op_sub.cpp @@ -6,25 +6,25 @@ * LICENSE file in the root directory of this source tree. 
*/ +#include #include #include #include #include #include #include -#include using exec_aten::Scalar; using exec_aten::ScalarType; using exec_aten::Tensor; +using executorch::aten::RuntimeContext; using executorch::runtime::can_cast; using executorch::runtime::CppTypeToScalarType; -using executorch::aten::RuntimeContext; using torch::executor::Error; namespace impl { -namespace HiFi { +namespace HiFi { namespace native { namespace { @@ -92,7 +92,8 @@ Tensor& sub_out( ScalarType a_type = a.scalar_type(); ScalarType b_type = b.scalar_type(); - ScalarType alpha_type = torch::executor::native::utils::get_scalar_dtype(alpha); + ScalarType alpha_type = + torch::executor::native::utils::get_scalar_dtype(alpha); ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); ScalarType out_type = out.scalar_type(); @@ -115,18 +116,17 @@ Tensor& sub_out( int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); max_dim = out.dim() > max_dim ? out.dim() : max_dim; - if((out_type != ScalarType::Float) || (alpha_val != 1.0)) + if ((out_type != ScalarType::Float) || (alpha_val != 1.0)) optimized = 0; - if((a_dim == 0) || (b_dim == 0)) + if ((a_dim == 0) || (b_dim == 0)) optimized = 0; - if((broadcast == 1) && (max_dim > kNnlibMaxDim)) + if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) optimized = 0; - if(optimized) - { + if (optimized) { /*logic to find broadcast*/ const int a_is_broadcasted = !out.sizes().equals(a.sizes()); const int b_is_broadcasted = !out.sizes().equals(b.sizes()); @@ -135,14 +135,12 @@ Tensor& sub_out( const float* const a_data = a.const_data_ptr(); const float* const b_data = b.const_data_ptr(); float* const out_data = out.mutable_data_ptr(); - if(broadcast == 1) - { + if (broadcast == 1) { int out_shape[kNnlibMaxDim]; int inp1_shape[kNnlibMaxDim]; int inp2_shape[kNnlibMaxDim]; - for(int i = 0; i < kNnlibMaxDim; i++) - { + for (int i = 0; i < kNnlibMaxDim; i++) { out_shape[i] = 1; inp1_shape[i] = 1; inp2_shape[i] = 1; @@ -151,14 +149,15 @@ Tensor& sub_out( int off_o = kNnlibMaxDim - out_dim; int off_a = kNnlibMaxDim - a_dim; int off_b = kNnlibMaxDim - b_dim; - for(int i = 0; i < out_dim; i++) + for (int i = 0; i < out_dim; i++) out_shape[i+off_o] = out.size(i); - for(int i = 0; i < a_dim; i++) + for (int i = 0; i < a_dim; i++) inp1_shape[i+off_a] = a.size(i); - for(int i = 0; i < b_dim; i++) + for (int i = 0; i < b_dim; i++) inp2_shape[i+off_b] = b.size(i); - xa_nn_elm_sub_broadcast_4D_f32xf32_f32(out_data, out_shape, a_data, inp1_shape,b_data, inp2_shape); + xa_nn_elm_sub_broadcast_4D_f32xf32_f32( + out_data, out_shape, a_data, inp1_shape,b_data, inp2_shape); } else { @@ -190,6 +189,6 @@ Tensor& sub_out( return out; } -} // namespace impl -} // namespace HiFi } // namespace native +} // namespace HiFi +} // namespace impl diff --git a/backends/cadence/hifi/operators/op_tanh.cpp b/backends/cadence/hifi/operators/op_tanh.cpp index a80450b8d8d..7989ac3bfa9 100644 --- a/backends/cadence/hifi/operators/op_tanh.cpp +++ b/backends/cadence/hifi/operators/op_tanh.cpp @@ -6,10 +6,10 @@ * LICENSE file in the root directory of this source tree. 
*/ +#include #include #include #include -#include using exec_aten::ScalarType; using exec_aten::Tensor; @@ -17,28 +17,29 @@ using executorch::aten::RuntimeContext; using torch::executor::Error; namespace impl { -namespace HiFi { +namespace HiFi { namespace native { Tensor& tanh_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { bool optimized = 1; - if((in.scalar_type() != ScalarType::Float) || (out.scalar_type() != ScalarType::Float)) - optimized = 0; + if ((in.scalar_type() != ScalarType::Float) || + (out.scalar_type() != ScalarType::Float)) + optimized = 0; - if(optimized) - { + if (optimized) { float* data_in = in.mutable_data_ptr(); float* data_out = out.mutable_data_ptr(); xa_nn_vec_tanh_f32_f32(data_out, data_in, (int)in.numel()); return out; } - return torch::executor::native::internal::unary_ufunc_realhb_to_floath(std::tanh, ctx, in, out); + return torch::executor::native::internal::unary_ufunc_realhb_to_floath( + std::tanh, ctx, in, out); } -} // namespace impl -} // namespace HiFi } // namespace native +} // namespace HiFi +} // namespace impl From a1340f7d6d1779b55a9194bd0e2cac48ffabbfa5 Mon Sep 17 00:00:00 2001 From: dijopaul Date: Mon, 21 Oct 2024 12:01:05 -0700 Subject: [PATCH 9/9] Namespace updates for cadence ops, adding 6 optimized ops --- backends/cadence/aot/functions_hifi.yaml | 12 +- backends/cadence/hifi/kernels/kernels.h | 44 +++++ .../cadence/hifi/operators/CMakeLists.txt | 3 +- backends/cadence/hifi/operators/op_add.cpp | 99 ++++++----- backends/cadence/hifi/operators/op_div.cpp | 158 ++++++++++-------- backends/cadence/hifi/operators/op_mul.cpp | 74 ++++---- .../cadence/hifi/operators/op_sigmoid.cpp | 20 ++- backends/cadence/hifi/operators/op_sub.cpp | 135 ++++++++------- backends/cadence/hifi/operators/op_tanh.cpp | 13 +- 9 files changed, 320 insertions(+), 238 deletions(-) diff --git a/backends/cadence/aot/functions_hifi.yaml b/backends/cadence/aot/functions_hifi.yaml index 590e7551eb7..70b2dd02076 100644 --- a/backends/cadence/aot/functions_hifi.yaml +++ b/backends/cadence/aot/functions_hifi.yaml @@ -45,12 +45,12 @@ - op: div.out kernels: - arg_meta: null - kernel_name: impl::HiFi::div_out + kernel_name: cadence::impl::HiFi::div_out - op: div.out_mode kernels: - arg_meta: null - kernel_name: impl::HiFi::div_out_mode + kernel_name: cadence::impl::HiFi::div_out_mode - op: embedding.out kernels: @@ -65,7 +65,7 @@ - op: mul.out kernels: - arg_meta: null - kernel_name: impl::HiFi::mul_out + kernel_name: cadence::impl::HiFi::mul_out - op: permute_copy.out kernels: @@ -75,7 +75,7 @@ - op: sigmoid.out kernels: - arg_meta: null - kernel_name: impl::HiFi::sigmoid_out + kernel_name: cadence::impl::HiFi::sigmoid_out - op: slice_copy.Tensor_out kernels: @@ -90,12 +90,12 @@ - op: sub.out kernels: - arg_meta: null - kernel_name: impl::HiFi::sub_out + kernel_name: cadence::impl::HiFi::sub_out - op: tanh.out kernels: - arg_meta: null - kernel_name: impl::HiFi::tanh_out + kernel_name: cadence::impl::HiFi::tanh_out - op: view_copy.out kernels: diff --git a/backends/cadence/hifi/kernels/kernels.h b/backends/cadence/hifi/kernels/kernels.h index 41e8ea1ef91..a206635a285 100644 --- a/backends/cadence/hifi/kernels/kernels.h +++ b/backends/cadence/hifi/kernels/kernels.h @@ -11,7 +11,51 @@ #include #include #include +/* For NNLIB APIs */ +#include "xa_nnlib_kernels_api.h" +/* Potential NNLIB function/APIs */ +extern "C" WORD32 xa_nn_elm_add_broadcast_4D_f32xf32_f32( + FLOAT32* __restrict__ p_out, + const WORD32* const p_out_shape, + const FLOAT32* __restrict__ p_inp1, + const 
WORD32* const p_inp1_shape, + const FLOAT32* __restrict__ p_inp2, + const WORD32* const p_inp2_shape); + +extern "C" WORD32 xa_nn_elm_div_broadcast_4D_f32xf32_f32( + FLOAT32* __restrict__ p_out, + const WORD32* const p_out_shape, + const FLOAT32* __restrict__ p_inp1, + const WORD32* const p_inp1_shape, + const FLOAT32* __restrict__ p_inp2, + const WORD32* const p_inp2_shape); + +extern "C" WORD32 xa_nn_elm_div_mode_f32xf32_f32( + FLOAT32* __restrict__ p_out, + const FLOAT32* __restrict__ p_inp1, + const FLOAT32* __restrict__ p_inp2, + WORD32 num_elm, + WORD32 mode); + +extern "C" WORD32 xa_nn_elm_div_mode_broadcast_4D_f32xf32_f32( + FLOAT32* __restrict__ p_out, + const WORD32* const p_out_shape, + const FLOAT32* __restrict__ p_inp1, + const WORD32* const p_inp1_shape, + const FLOAT32* __restrict__ p_inp2, + const WORD32* const p_inp2_shape, + WORD32 mode); + +extern "C" WORD32 xa_nn_elm_mul_broadcast_4D_f32xf32_f32( + FLOAT32* __restrict__ p_out, + const WORD32* const p_out_shape, + const FLOAT32* __restrict__ p_inp1, + const WORD32* const p_inp1_shape, + const FLOAT32* __restrict__ p_inp2, + const WORD32* const p_inp2_shape); + +namespace cadence { namespace impl { namespace HiFi { namespace kernels { diff --git a/backends/cadence/hifi/operators/CMakeLists.txt b/backends/cadence/hifi/operators/CMakeLists.txt index 11b8097cab6..cbbb279e5d6 100644 --- a/backends/cadence/hifi/operators/CMakeLists.txt +++ b/backends/cadence/hifi/operators/CMakeLists.txt @@ -47,10 +47,11 @@ set(_aten_ops__srcs "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_to_copy.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_view_copy.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_where.cpp" - "${EXECUTORCH_ROOT}/kernels/portable/cpu/pattern/unary_ufunc_realhb_to_floath.cpp" + "${EXECUTORCH_ROOT}/kernels/portable/cpu/pattern/unary_ufunc_realhbbf16_to_floathbf16.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/activation_ops_util.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/broadcast_util.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/copy_ops_util.cpp" + "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/dtype_util.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/index_util.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/kernel_ops_util.cpp" "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/matmul_ops_util.cpp" diff --git a/backends/cadence/hifi/operators/op_add.cpp b/backends/cadence/hifi/operators/op_add.cpp index 56adab71a02..10e06938f2e 100644 --- a/backends/cadence/hifi/operators/op_add.cpp +++ b/backends/cadence/hifi/operators/op_add.cpp @@ -83,7 +83,7 @@ Tensor& add_out( Tensor& out) { ET_KERNEL_CHECK( ctx, - resize_to_broadcast_target_size(a, b, out) == Error::Ok, + torch::executor::resize_to_broadcast_target_size(a, b, out) == Error::Ok, InvalidArgument, out); @@ -93,25 +93,36 @@ Tensor& add_out( InvalidArgument, out); ET_KERNEL_CHECK( - ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out); + ctx, + executorch::runtime::tensors_have_same_dim_order(a, b, out), + InvalidArgument, + out); ScalarType a_type = a.scalar_type(); ScalarType b_type = b.scalar_type(); - ScalarType alpha_type = - torch::executor::native::utils::get_scalar_dtype(alpha); - ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); + ScalarType alpha_type = + torch::executor::native::utils::get_scalar_dtype(alpha); + ScalarType common_type = + executorch::runtime::promoteTypes(a_type, b_type, /*half_to_float*/ true); ScalarType out_type = out.scalar_type(); - ET_KERNEL_CHECK(ctx, canCast(common_type, 
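// kernels.h declares the NNLIB entry points with extern "C" so the C++
// translation units link against the C symbols without name mangling. A
// minimal standalone example of the same pattern (my_c_kernel is a made-up
// symbol, not an NNLIB API):
extern "C" int my_c_kernel(float* p_out, const float* p_inp, int num_elm);

inline int call_from_cpp(float* out, const float* in, int num_elm) {
  // The call site reads like ordinary C++; only the linkage of the
  // declaration differs.
  return my_c_kernel(out, in, num_elm);
}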
out_type), InvalidArgument, out); ET_KERNEL_CHECK( - ctx, check_alpha_type(alpha_type, common_type), InvalidArgument, out); - + ctx, + executorch::runtime::canCast(common_type, out_type), + InvalidArgument, + out); + ET_KERNEL_CHECK( + ctx, + torch::executor::check_alpha_type(alpha_type, common_type), + InvalidArgument, + out); + float alpha_val; torch::executor::native::utils::extract_scalar(alpha, &alpha_val); constexpr auto name = "add.out"; constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ - + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); bool optimized = 1; /*find broadcast*/ @@ -124,51 +135,48 @@ Tensor& add_out( if ((out_type != ScalarType::Float) || (alpha_val != 1.0)) optimized = 0; - if ((a_dim == 0) || (b_dim == 0) ) + if ((a_dim == 0) || (b_dim == 0)) optimized = 0; if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) optimized = 0; - if (optimized) { - const float* const a_data = a.const_data_ptr(); - const float* const b_data = b.const_data_ptr(); - float* const out_data = out.mutable_data_ptr(); - - if(broadcast == 1) { - int out_shape[kNnlibMaxDim]; - int inp1_shape[kNnlibMaxDim]; - int inp2_shape[kNnlibMaxDim]; - - for (int i = 0; i < kNnlibMaxDim; i++) { - out_shape[i] = 1; - inp1_shape[i] = 1; - inp2_shape[i] = 1; - } - - int off_o = kNnlibMaxDim - out.dim(); - int off_a = kNnlibMaxDim - a.dim(); - int off_b = kNnlibMaxDim - b.dim(); - - for (int i = 0; i < out.dim(); i++) - out_shape[i+off_o] = out.size(i); - for (int i = 0; i < a.dim(); i++) - inp1_shape[i+off_a] = a.size(i); - for (int i = 0; i < b.dim(); i++) - inp2_shape[i+off_b] = b.size(i); - - xa_nn_elm_add_broadcast_4D_f32xf32_f32( - out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); - } - else - { - xa_nn_elm_add_f32xf32_f32(out_data, a_data, b_data, out.numel()); + const float* const a_data = a.const_data_ptr(); + const float* const b_data = b.const_data_ptr(); + float* const out_data = out.mutable_data_ptr(); + + if (broadcast == 1) { + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; + + for (int i = 0; i < kNnlibMaxDim; i++) { + out_shape[i] = 1; + inp1_shape[i] = 1; + inp2_shape[i] = 1; } - return out; + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); + + for (int i = 0; i < out.dim(); i++) + out_shape[i + off_o] = out.size(i); + for (int i = 0; i < a.dim(); i++) + inp1_shape[i + off_a] = a.size(i); + for (int i = 0; i < b.dim(); i++) + inp2_shape[i + off_b] = b.size(i); + + xa_nn_elm_add_broadcast_4D_f32xf32_f32( + out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); + } else { + xa_nn_elm_add_f32xf32_f32(out_data, a_data, b_data, out.numel()); + } + + return out; } - + ET_SWITCH_REALHBBF16_TYPES(a_type, ctx, name, CTYPE_A, [&]() { ET_SWITCH_REALHBBF16_TYPES(b_type, ctx, name, CTYPE_B, [&]() { using CTYPE_IN = typename torch::executor:: @@ -191,7 +199,6 @@ Tensor& add_out( return out; } - } // namespace native } // namespace HiFi } // namespace impl diff --git a/backends/cadence/hifi/operators/op_div.cpp b/backends/cadence/hifi/operators/op_div.cpp index e887e8b51b2..88e670b432f 100644 --- a/backends/cadence/hifi/operators/op_div.cpp +++ b/backends/cadence/hifi/operators/op_div.cpp @@ -13,7 +13,7 @@ #include #include #include -#include +#include using exec_aten::Scalar; using exec_aten::ScalarType; @@ -21,6 +21,7 @@ using exec_aten::Tensor; using executorch::aten::RuntimeContext; using torch::executor::Error; +namespace cadence { namespace impl { 
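// The namespace update rewraps the operators so the registered kernel names
// match the cadence::impl::HiFi::* entries in functions_hifi.yaml, and the
// closing-brace comments now unwind in reverse order of the opening braces.
// A sketch of the skeleton these op_*.cpp files land on (the cadence level is
// the one added in this commit):
namespace cadence {
namespace impl {
namespace HiFi {
namespace native {

// add_out, div_out, mul_out, sigmoid_out, sub_out, tanh_out, ...

} // namespace native
} // namespace HiFi
} // namespace impl
} // namespace cadence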
namespace HiFi {
 namespace native {
@@ -28,11 +29,12 @@ namespace native {
 namespace {
 ScalarType get_compute_type(ScalarType a_type, ScalarType b_type) {
-  if (isFloatingType(a_type) && isFloatingType(b_type)) {
-    return promoteTypes(a_type, b_type);
-  } else if (isFloatingType(a_type)) {
+  if (executorch::runtime::isFloatingType(a_type) &&
+      executorch::runtime::isFloatingType(b_type)) {
+    return executorch::runtime::promoteTypes(a_type, b_type);
+  } else if (executorch::runtime::isFloatingType(a_type)) {
     return a_type;
-  } else if (isFloatingType(b_type)) {
+  } else if (executorch::runtime::isFloatingType(b_type)) {
     return b_type;
   }
   return ScalarType::Float;
@@ -44,7 +46,7 @@ Tensor&
 div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) {
   ET_KERNEL_CHECK(
       ctx,
-      resize_to_broadcast_target_size(a, b, out) == Error::Ok,
+      torch::executor::resize_to_broadcast_target_size(a, b, out) == Error::Ok,
       InvalidArgument,
       out);
@@ -53,17 +55,22 @@ div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) {
   ET_KERNEL_CHECK(
       ctx,
-      !isComplexType(a_type) && !isQIntType(a_type) && !isBitsType(a_type),
+      !executorch::runtime::isComplexType(a_type) &&
+          !executorch::runtime::isQIntType(a_type) &&
+          !executorch::runtime::isBitsType(a_type),
       InvalidArgument,
       out);
   ET_KERNEL_CHECK(
       ctx,
-      !isComplexType(b_type) && !isQIntType(b_type) && !isBitsType(b_type),
+      !executorch::runtime::isComplexType(b_type) &&
+          !executorch::runtime::isQIntType(b_type) &&
+          !executorch::runtime::isBitsType(b_type),
       InvalidArgument,
       out);
-  ET_KERNEL_CHECK(ctx, tensor_is_real_type(out), InvalidArgument, out);
-
+  ET_KERNEL_CHECK(
+      ctx, executorch::runtime::tensor_is_real_type(out), InvalidArgument, out);
+
   constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */
   int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim();
   bool optimized = 1;
@@ -73,49 +80,45 @@ div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) {
   const bool broadcast = (a_is_broadcasted || b_is_broadcasted);
   int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
   max_dim = out.dim() > max_dim ?
out.dim() : max_dim; - + if ((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) optimized = 0; - - if ((a_dim == 0) || (b_dim == 0) ) + + if ((a_dim == 0) || (b_dim == 0)) optimized = 0; if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) optimized = 0; - + if (optimized) { float* a_data = a.mutable_data_ptr(); float* b_data = b.mutable_data_ptr(); float* out_data = out.mutable_data_ptr(); - + if (broadcast == 1) { - int out_shape[kNnlibMaxDim]; int inp1_shape[kNnlibMaxDim]; int inp2_shape[kNnlibMaxDim]; - - for (int i = 0; i < kNnlibMaxDim; i++) - { + + for (int i = 0; i < kNnlibMaxDim; i++) { out_shape[i] = 1; inp1_shape[i] = 1; inp2_shape[i] = 1; } - + int off_o = kNnlibMaxDim - out.dim(); int off_a = kNnlibMaxDim - a.dim(); int off_b = kNnlibMaxDim - b.dim(); for (int i = 0; i < out.dim(); i++) - out_shape[i+off_o] = out.size(i); + out_shape[i + off_o] = out.size(i); for (int i = 0; i < a.dim(); i++) - inp1_shape[i+off_a] = a.size(i); + inp1_shape[i + off_a] = a.size(i); for (int i = 0; i < b.dim(); i++) - inp2_shape[i+off_b] = b.size(i); - + inp2_shape[i + off_b] = b.size(i); + xa_nn_elm_div_broadcast_4D_f32xf32_f32( - out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); - } - else - { + out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); + } else { xa_nn_elm_div_f32xf32_f32(out_data, a_data, b_data, out.numel()); } @@ -125,24 +128,28 @@ div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { ScalarType common_type = get_compute_type(a_type, b_type); ScalarType out_type = out.scalar_type(); - ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); + ET_KERNEL_CHECK( + ctx, + executorch::runtime::canCast(common_type, out_type), + InvalidArgument, + out); ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.out", CTYPE_A, [&]() { ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "div.out", CTYPE_B, [&]() { ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out", CTYPE_IN, [&]() { ET_SWITCH_FLOAT_TYPES(out_type, ctx, "div.out", CTYPE_OUT, [&]() { torch::executor:: - apply_binary_elementwise_fn( - [](const CTYPE_A val_a, const CTYPE_B val_b) { - CTYPE_IN a_casted = static_cast(val_a); - CTYPE_IN b_casted = static_cast(val_b); - CTYPE_IN value = a_casted / b_casted; - - return static_cast(value); - }, - a, - b, - out); + apply_binary_elementwise_fn( + [](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = a_casted / b_casted; + + return static_cast(value); + }, + a, + b, + out); }); }); }); @@ -159,7 +166,7 @@ Tensor& div_out_mode( Tensor& out) { ET_KERNEL_CHECK( ctx, - resize_to_broadcast_target_size(a, b, out) == Error::Ok, + torch::executor::resize_to_broadcast_target_size(a, b, out) == Error::Ok, InvalidArgument, out); @@ -168,7 +175,8 @@ Tensor& div_out_mode( ScalarType common_type = get_compute_type(a_type, b_type); ScalarType out_type = out.scalar_type(); - ET_KERNEL_CHECK(ctx, tensor_is_real_type(out), InvalidArgument, out); + ET_KERNEL_CHECK( + ctx, executorch::runtime::tensor_is_real_type(out), InvalidArgument, out); // Allow casting float -> integral here // non-bool -> bool is still disallowed @@ -186,23 +194,23 @@ Tensor& div_out_mode( const bool broadcast = (a_is_broadcasted || b_is_broadcasted); int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; - + if ((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) optimized = 0; - + if ((a_dim == 0) || (b_dim == 0)) optimized = 0; if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) optimized = 0; int mode_val = -1; - if (mode.has_value() && mode.value() == "trunc") + if (mode.has_value() && mode.value() == "trunc") mode_val = 0; else if (mode.has_value() && mode.value() == "floor") mode_val = 1; else optimized = 0; - + if (optimized) { float* a_data = a.mutable_data_ptr(); float* b_data = b.mutable_data_ptr(); @@ -212,7 +220,7 @@ Tensor& div_out_mode( int out_shape[kNnlibMaxDim]; int inp1_shape[kNnlibMaxDim]; int inp2_shape[kNnlibMaxDim]; - + for (int i = 0; i < kNnlibMaxDim; i++) { inp1_shape[i] = 1; inp2_shape[i] = 1; @@ -224,21 +232,25 @@ Tensor& div_out_mode( int off_b = kNnlibMaxDim - b.dim(); for (int i = 0; i < out.dim(); i++) - out_shape[i+off_o] = out.size(i); + out_shape[i + off_o] = out.size(i); for (int i = 0; i < a.dim(); i++) - inp1_shape[i+off_a] = a.size(i); + inp1_shape[i + off_a] = a.size(i); for (int i = 0; i < b.dim(); i++) - inp2_shape[i+off_b] = b.size(i); - + inp2_shape[i + off_b] = b.size(i); + xa_nn_elm_div_mode_broadcast_4D_f32xf32_f32( - out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape, mode_val); - } - else - { + out_data, + out_shape, + a_data, + inp1_shape, + b_data, + inp2_shape, + mode_val); + } else { xa_nn_elm_div_mode_f32xf32_f32( - out_data, a_data, b_data, out.numel(), mode_val); + out_data, a_data, b_data, out.numel(), mode_val); } - + return out; } @@ -247,21 +259,21 @@ Tensor& div_out_mode( ET_SWITCH_FLOAT_TYPES(common_type, ctx, "div.out_mode", CTYPE_IN, [&]() { ET_SWITCH_REAL_TYPES(out_type, ctx, "div.out_mode", CTYPE_OUT, [&]() { torch::executor:: - apply_binary_elementwise_fn( - [mode](const CTYPE_A val_a, const CTYPE_B val_b) { - CTYPE_IN a_casted = static_cast(val_a); - CTYPE_IN b_casted = static_cast(val_b); - CTYPE_IN value = a_casted / b_casted; - if (mode.has_value() && mode.value() == "trunc") { - value = std::trunc(value); - } else if (mode.has_value() && mode.value() == "floor") { - value = std::floor(value); - } - return static_cast(value); - }, - a, - b, - out); + apply_binary_elementwise_fn( + [mode](const CTYPE_A val_a, const CTYPE_B val_b) { + CTYPE_IN a_casted = static_cast(val_a); + CTYPE_IN b_casted = static_cast(val_b); + CTYPE_IN value = a_casted / b_casted; + if (mode.has_value() && mode.value() == "trunc") { + value = std::trunc(value); + } else if (mode.has_value() && mode.value() == "floor") { + value = std::floor(value); + } + return static_cast(value); + }, + a, + b, + out); }); }); }); @@ -270,7 +282,7 @@ Tensor& div_out_mode( return out; } - } // namespace native } // namespace HiFi } // namespace impl +} // namespace cadence \ No newline at end of file diff --git a/backends/cadence/hifi/operators/op_mul.cpp b/backends/cadence/hifi/operators/op_mul.cpp index 1b2e62cdb0e..ad12606bdf6 100644 --- a/backends/cadence/hifi/operators/op_mul.cpp +++ b/backends/cadence/hifi/operators/op_mul.cpp @@ -21,8 +21,9 @@ using executorch::runtime::can_cast; using executorch::runtime::CppTypeToScalarType; using torch::executor::Error; +namespace cadence { namespace impl { -namespace HiFi { +namespace HiFi { namespace native { namespace { @@ -75,17 +76,22 @@ Tensor& mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { ET_KERNEL_CHECK( ctx, - resize_to_broadcast_target_size(a, b, out) == Error::Ok, + torch::executor::resize_to_broadcast_target_size(a, b, out) == Error::Ok, 
InvalidArgument, out); - ET_KERNEL_CHECK(ctx, tensor_is_realhb_type(out), InvalidArgument, out); + ET_KERNEL_CHECK( + ctx, + executorch::runtime::tensor_is_realhb_type(out), + InvalidArgument, + out); ScalarType a_type = a.scalar_type(); ScalarType b_type = b.scalar_type(); - ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); + ScalarType common_type = + executorch::runtime::promoteTypes(a_type, b_type, /*half_to_float*/ true); ScalarType out_type = out.scalar_type(); - constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ + constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); bool optimized = 1; @@ -96,13 +102,12 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); max_dim = out.dim() > max_dim ? out.dim() : max_dim; - if ((a_type != ScalarType::Float) || (b_type != ScalarType::Float)) optimized = 0; - - if ((a_dim == 0) || (b_dim == 0) ) + + if ((a_dim == 0) || (b_dim == 0)) optimized = 0; - + if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) optimized = 0; @@ -112,32 +117,30 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { float* out_data = out.mutable_data_ptr(); if (broadcast == 1) { - int out_shape[kNnlibMaxDim]; - int inp1_shape[kNnlibMaxDim]; - int inp2_shape[kNnlibMaxDim]; - for (int i = 0; i < kNnlibMaxDim; i++) { - out_shape[i] = 1; - inp1_shape[i] = 1; - inp2_shape[i] = 1; - } - int off_o = kNnlibMaxDim - out.dim(); - int off_a = kNnlibMaxDim - a.dim(); - int off_b = kNnlibMaxDim - b.dim(); - for (int i = 0; i < out.dim(); i++) - out_shape[i+off_o] = out.size(i); - for (int i = 0; i < a.dim(); i++) - inp1_shape[i+off_a] = a.size(i); - for (int i = 0; i < b.dim(); i++) - inp2_shape[i+off_b] = b.size(i); - - xa_nn_elm_mul_broadcast_4D_f32xf32_f32( - out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); - } - else - { - xa_nn_elm_mul_f32xf32_f32(out_data, a_data, b_data, out.numel()); + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; + for (int i = 0; i < kNnlibMaxDim; i++) { + out_shape[i] = 1; + inp1_shape[i] = 1; + inp2_shape[i] = 1; + } + int off_o = kNnlibMaxDim - out.dim(); + int off_a = kNnlibMaxDim - a.dim(); + int off_b = kNnlibMaxDim - b.dim(); + for (int i = 0; i < out.dim(); i++) + out_shape[i + off_o] = out.size(i); + for (int i = 0; i < a.dim(); i++) + inp1_shape[i + off_a] = a.size(i); + for (int i = 0; i < b.dim(); i++) + inp2_shape[i + off_b] = b.size(i); + + xa_nn_elm_mul_broadcast_4D_f32xf32_f32( + out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); + } else { + xa_nn_elm_mul_f32xf32_f32(out_data, a_data, b_data, out.numel()); } - + return out; } @@ -156,10 +159,11 @@ mul_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) { }); }); }); - + return out; } } // namespace native } // namespace HiFi } // namespace impl +} // namespace cadence \ No newline at end of file diff --git a/backends/cadence/hifi/operators/op_sigmoid.cpp b/backends/cadence/hifi/operators/op_sigmoid.cpp index 1ed89880d5c..b9fa73b879f 100644 --- a/backends/cadence/hifi/operators/op_sigmoid.cpp +++ b/backends/cadence/hifi/operators/op_sigmoid.cpp @@ -17,6 +17,7 @@ using exec_aten::Tensor; using executorch::aten::RuntimeContext; using torch::executor::Error; +namespace cadence { namespace impl { namespace HiFi { namespace native { @@ -28,7 +29,11 @@ Tensor& 
sigmoid_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { ET_KERNEL_CHECK( ctx, in.scalar_type() != ScalarType::Bool, InvalidArgument, out); - ET_KERNEL_CHECK(ctx, tensor_is_floating_type(out), InvalidArgument, out); + ET_KERNEL_CHECK( + ctx, + executorch::runtime::tensor_is_floating_type(out), + InvalidArgument, + out); // Resize for dynamic shape ET_KERNEL_CHECK_MSG( @@ -43,13 +48,13 @@ Tensor& sigmoid_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { bool optimized = 1; if ((in_type != ScalarType::Float) || (out_type != ScalarType::Float)) - optimized = 0; - + optimized = 0; + if (optimized) { float* data_in = in.mutable_data_ptr(); float* data_out = out.mutable_data_ptr(); xa_nn_vec_sigmoid_f32_f32(data_out, data_in, in.numel()); - + return out; } @@ -66,11 +71,12 @@ Tensor& sigmoid_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) { out.mutable_data_ptr(), in.numel()); }); - }); + }); return out; } -} // namespace impl -} // namespace HiFi } // namespace native +} // namespace HiFi +} // namespace impl +} // namespace cadence \ No newline at end of file diff --git a/backends/cadence/hifi/operators/op_sub.cpp b/backends/cadence/hifi/operators/op_sub.cpp index d9958bf8e6d..0a362dbf959 100644 --- a/backends/cadence/hifi/operators/op_sub.cpp +++ b/backends/cadence/hifi/operators/op_sub.cpp @@ -22,7 +22,7 @@ using executorch::runtime::can_cast; using executorch::runtime::CppTypeToScalarType; using torch::executor::Error; - +namespace cadence { namespace impl { namespace HiFi { namespace native { @@ -84,29 +84,41 @@ Tensor& sub_out( Tensor& out) { ET_KERNEL_CHECK( ctx, - resize_to_broadcast_target_size(a, b, out) == Error::Ok, + torch::executor::resize_to_broadcast_target_size(a, b, out) == Error::Ok, InvalidArgument, out); - ET_KERNEL_CHECK(ctx, tensor_is_realh_type(out), InvalidArgument, out); + ET_KERNEL_CHECK( + ctx, + executorch::runtime::tensor_is_realh_type(out), + InvalidArgument, + out); ScalarType a_type = a.scalar_type(); ScalarType b_type = b.scalar_type(); ScalarType alpha_type = - torch::executor::native::utils::get_scalar_dtype(alpha); - ScalarType common_type = promoteTypes(a_type, b_type, /*half_to_float*/ true); + torch::executor::native::utils::get_scalar_dtype(alpha); + ScalarType common_type = + executorch::runtime::promoteTypes(a_type, b_type, /*half_to_float*/ true); ScalarType out_type = out.scalar_type(); - ET_KERNEL_CHECK(ctx, canCast(common_type, out_type), InvalidArgument, out); ET_KERNEL_CHECK( - ctx, check_alpha_type(alpha_type, common_type), InvalidArgument, out); - + ctx, + executorch::runtime::canCast(common_type, out_type), + InvalidArgument, + out); + ET_KERNEL_CHECK( + ctx, + torch::executor::check_alpha_type(alpha_type, common_type), + InvalidArgument, + out); + float alpha_val; torch::executor::native::utils::extract_scalar(alpha, &alpha_val); constexpr auto name = "sub.out"; constexpr int kNnlibMaxDim = 4; /*fallback if broadcast and dim > 4 */ - + int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim(); bool optimized = 1; /*find broadcast*/ @@ -115,73 +127,69 @@ Tensor& sub_out( const bool broadcast = (a_is_broadcasted || b_is_broadcasted); int max_dim = a.dim() > b.dim() ? a.dim() : b.dim(); max_dim = out.dim() > max_dim ? 
out.dim() : max_dim; - + if ((out_type != ScalarType::Float) || (alpha_val != 1.0)) optimized = 0; - + if ((a_dim == 0) || (b_dim == 0)) optimized = 0; if ((broadcast == 1) && (max_dim > kNnlibMaxDim)) optimized = 0; - if (optimized) { - /*logic to find broadcast*/ - const int a_is_broadcasted = !out.sizes().equals(a.sizes()); - const int b_is_broadcasted = !out.sizes().equals(b.sizes()); - const int broadcast = (a_is_broadcasted || b_is_broadcasted); - - const float* const a_data = a.const_data_ptr(); - const float* const b_data = b.const_data_ptr(); - float* const out_data = out.mutable_data_ptr(); - if (broadcast == 1) { - int out_shape[kNnlibMaxDim]; - int inp1_shape[kNnlibMaxDim]; - int inp2_shape[kNnlibMaxDim]; - - for (int i = 0; i < kNnlibMaxDim; i++) { - out_shape[i] = 1; - inp1_shape[i] = 1; - inp2_shape[i] = 1; - } - - int off_o = kNnlibMaxDim - out_dim; - int off_a = kNnlibMaxDim - a_dim; - int off_b = kNnlibMaxDim - b_dim; - for (int i = 0; i < out_dim; i++) - out_shape[i+off_o] = out.size(i); - for (int i = 0; i < a_dim; i++) - inp1_shape[i+off_a] = a.size(i); - for (int i = 0; i < b_dim; i++) - inp2_shape[i+off_b] = b.size(i); - - xa_nn_elm_sub_broadcast_4D_f32xf32_f32( - out_data, out_shape, a_data, inp1_shape,b_data, inp2_shape); - } - else - { - xa_nn_elm_sub_f32xf32_f32(out_data, a_data, b_data, out.numel()); + /*logic to find broadcast*/ + const int a_is_broadcasted = !out.sizes().equals(a.sizes()); + const int b_is_broadcasted = !out.sizes().equals(b.sizes()); + const int broadcast = (a_is_broadcasted || b_is_broadcasted); + + const float* const a_data = a.const_data_ptr(); + const float* const b_data = b.const_data_ptr(); + float* const out_data = out.mutable_data_ptr(); + if (broadcast == 1) { + int out_shape[kNnlibMaxDim]; + int inp1_shape[kNnlibMaxDim]; + int inp2_shape[kNnlibMaxDim]; + + for (int i = 0; i < kNnlibMaxDim; i++) { + out_shape[i] = 1; + inp1_shape[i] = 1; + inp2_shape[i] = 1; } - - return out; + + int off_o = kNnlibMaxDim - out_dim; + int off_a = kNnlibMaxDim - a_dim; + int off_b = kNnlibMaxDim - b_dim; + for (int i = 0; i < out_dim; i++) + out_shape[i + off_o] = out.size(i); + for (int i = 0; i < a_dim; i++) + inp1_shape[i + off_a] = a.size(i); + for (int i = 0; i < b_dim; i++) + inp2_shape[i + off_b] = b.size(i); + + xa_nn_elm_sub_broadcast_4D_f32xf32_f32( + out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape); + } else { + xa_nn_elm_sub_f32xf32_f32(out_data, a_data, b_data, out.numel()); + } + + return out; } - ET_SWITCH_REALH_TYPES(a_type, ctx, name, CTYPE_A, [&]() { - ET_SWITCH_REALH_TYPES(b_type, ctx, name, CTYPE_B, [&]() { - using CTYPE_IN = typename torch::executor:: - promote_types::type; - ET_DCHECK(CppTypeToScalarType::value == common_type); - CTYPE_IN alpha_val; - torch::executor::native::utils::extract_scalar(alpha, &alpha_val); - ET_SWITCH_REALH_TYPES(out_type, ctx, name, CTYPE_OUT, [&]() { - SubInner< - can_cast::value, - CTYPE_A, - CTYPE_B, - CTYPE_IN, - CTYPE_OUT>::run(a, b, alpha_val, out); + ET_SWITCH_REALH_TYPES(b_type, ctx, name, CTYPE_B, [&]() { + using CTYPE_IN = typename torch::executor:: + promote_types::type; + ET_DCHECK(CppTypeToScalarType::value == common_type); + CTYPE_IN alpha_val; + torch::executor::native::utils::extract_scalar(alpha, &alpha_val); + ET_SWITCH_REALH_TYPES(out_type, ctx, name, CTYPE_OUT, [&]() { + SubInner< + can_cast::value, + CTYPE_A, + CTYPE_B, + CTYPE_IN, + CTYPE_OUT>::run(a, b, alpha_val, out); }); }); }); @@ -192,3 +200,4 @@ Tensor& sub_out( } // namespace native } // namespace HiFi } // 
namespace impl
+} // namespace cadence
\ No newline at end of file
diff --git a/backends/cadence/hifi/operators/op_tanh.cpp b/backends/cadence/hifi/operators/op_tanh.cpp
index 7989ac3bfa9..13578beb887 100644
--- a/backends/cadence/hifi/operators/op_tanh.cpp
+++ b/backends/cadence/hifi/operators/op_tanh.cpp
@@ -16,18 +16,17 @@ using exec_aten::Tensor;
 using executorch::aten::RuntimeContext;
 using torch::executor::Error;
 
+namespace cadence {
 namespace impl {
 namespace HiFi {
 namespace native {
-
 Tensor& tanh_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
-
   bool optimized = 1;
-  if ((in.scalar_type() != ScalarType::Float) ||
+  if ((in.scalar_type() != ScalarType::Float) ||
       (out.scalar_type() != ScalarType::Float))
     optimized = 0;
-
+
   if (optimized) {
     float* data_in = in.mutable_data_ptr<float>();
     float* data_out = out.mutable_data_ptr<float>();
@@ -35,11 +34,11 @@ Tensor& tanh_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
     return out;
   }
 
-  return torch::executor::native::internal::unary_ufunc_realhb_to_floath(
-      std::tanh, ctx, in, out);
-
+  return torch::executor::native::internal::
+      unary_ufunc_realhbbf16_to_floathbf16(std::tanh, ctx, in, out);
 }
 
 } // namespace native
 } // namespace HiFi
 } // namespace impl
+} // namespace cadence
\ No newline at end of file
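
Illustrative sketch (not part of the patch): every optimized operator in this series (op_add, op_div, op_mul, op_sub) prepares its arguments for the 4D NNLIB broadcast kernels the same way, left-padding each tensor shape with 1s up to kNnlibMaxDim before passing the int arrays to xa_nn_elm_*_broadcast_4D_f32xf32_f32. The standalone C++ sketch below shows only that padding step; the helper name pad_shape_to_nnlib_4d and the main() driver are hypothetical and exist purely for illustration. As in the operators, callers are assumed to fall back to the portable path when a rank exceeds kNnlibMaxDim.

// Standalone sketch of the shape-padding step used before the NNLIB
// 4D broadcast kernels. Hypothetical helper; not part of the patch.
#include <array>
#include <cstdio>
#include <vector>

constexpr int kNnlibMaxDim = 4; // NNLIB broadcast kernels operate on 4D shapes.

// Left-pad a tensor shape with 1s so it is exactly kNnlibMaxDim entries long,
// mirroring the off_o / off_a / off_b offset logic in the operators above.
// Precondition: sizes.size() <= kNnlibMaxDim (larger ranks take the fallback).
std::array<int, kNnlibMaxDim> pad_shape_to_nnlib_4d(
    const std::vector<int>& sizes) {
  std::array<int, kNnlibMaxDim> padded;
  padded.fill(1);
  const int off = kNnlibMaxDim - static_cast<int>(sizes.size());
  for (int i = 0; i < static_cast<int>(sizes.size()); i++) {
    padded[i + off] = sizes[i];
  }
  return padded;
}

int main() {
  // A rank-2 shape {3, 5} becomes {1, 1, 3, 5}, so a 4D kernel can broadcast
  // it against, e.g., {2, 4, 3, 5} without falling back to the portable op.
  const std::array<int, kNnlibMaxDim> padded = pad_shape_to_nnlib_4d({3, 5});
  std::printf("%d %d %d %d\n", padded[0], padded[1], padded[2], padded[3]);
  return 0;
}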