Updating cadence ops with new name space, rebasing 6 optimized ops #6407
Merged
Changes from all commits (17 commits):

- 4ed8bd6 (dijopaul): Main backup (#12)
- 71d78a1 (cad-audio): Merge pull request #7 from dijopaul/main
- 6ad490a (dijopaul): Adding sigmoid optimizations
- c0b1005 (dijopaul): Adding tanh optimizations
- 41d7533 (cad-audio): Merge pull request #8 from dijopaul/main
- a8c4f66 (dijopaul): Fixing review comments in 5483
- 99a772c (dijopaul): Adding cflags to prevent compilation halts
- 8064895 (cad-audio): Merge pull request #9 from dijopaul/main
- 9543622 (dijopaul): Adding cflags to prevent compilation halts
- d45e25d (dijopaul): Changing name space of optimized ops; Remove unused ops from file
- a3581f1 (dijopaul): Merge branch 'main' of https://github.com/dijopaul/executorch
- fd955cf (cad-audio): Merge pull request #11 from dijopaul/main
- 5851fca (cad-audio): Merge pull request #10 from dijopaul/main_cflags
- 605f374 (cad-audio): Merge branch 'main' into main
- 4e89e2a (cad-audio): fixed lint issues.
- e9d9f6c (cad-audio): Merge branch 'main' into main
- a1340f7 (dijopaul): Namespace updates for cadence ops, adding 6 optimized ops
Large diffs are not rendered by default; the contents of one new file from this PR follow.
@@ -0,0 +1,204 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/backends/cadence/hifi/kernels/kernels.h>
#include <executorch/kernels/portable/cpu/scalar_utils.h>
#include <executorch/kernels/portable/cpu/util/broadcast_util.h>
#include <executorch/kernels/portable/cpu/util/functional_util.h>
#include <executorch/kernels/portable/cpu/util/kernel_ops_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/platform/assert.h>

using exec_aten::Scalar;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using executorch::runtime::can_cast;
using executorch::runtime::CppTypeToScalarType;
using executorch::runtime::KernelRuntimeContext;
using torch::executor::Error;

namespace impl {
namespace HiFi {
namespace native {

namespace {
template <
    bool can_cast,
    typename CTYPE_A,
    typename CTYPE_B,
    typename CTYPE_IN,
    typename CTYPE_OUT>
struct AddInner;

template <
    typename CTYPE_A,
    typename CTYPE_B,
    typename CTYPE_IN,
    typename CTYPE_OUT>
struct AddInner<true, CTYPE_A, CTYPE_B, CTYPE_IN, CTYPE_OUT> {
  static void
  run(const Tensor& a, const Tensor& b, CTYPE_IN alpha_val, Tensor& out) {
    torch::executor::apply_binary_elementwise_fn<CTYPE_A, CTYPE_B, CTYPE_OUT>(
        // NOLINTNEXTLINE(facebook-hte-ConstantArgumentPassByValue)
        [alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) {
          CTYPE_IN a_casted = static_cast<CTYPE_IN>(val_a);
          CTYPE_IN b_casted = static_cast<CTYPE_IN>(val_b);
          CTYPE_IN value = a_casted + alpha_val * b_casted;

          return static_cast<CTYPE_OUT>(value);
        },
        a,
        b,
        out);
  }
};

template <typename CTYPE_IN>
struct ReportCanCastBug {
  static void run(const Tensor&, const Tensor&, CTYPE_IN, Tensor&) {
    ET_DCHECK_MSG(false, "BUG: canCast should have been checked above");
  }
};

template <
    typename CTYPE_A,
    typename CTYPE_B,
    typename CTYPE_IN,
    typename CTYPE_OUT>
struct AddInner<false, CTYPE_A, CTYPE_B, CTYPE_IN, CTYPE_OUT>
    : public ReportCanCastBug<CTYPE_IN> {};

} // namespace

Tensor& add_out(
    KernelRuntimeContext& ctx,
    const Tensor& a,
    const Tensor& b,
    const Scalar& alpha,
    Tensor& out) {
  ET_KERNEL_CHECK(
      ctx,
      torch::executor::resize_to_broadcast_target_size(a, b, out) == Error::Ok,
      InvalidArgument,
      out);

  ET_KERNEL_CHECK(
      ctx,
      executorch::runtime::tensor_is_realhbbf16_type(out),
      InvalidArgument,
      out);
  ET_KERNEL_CHECK(
      ctx,
      executorch::runtime::tensors_have_same_dim_order(a, b, out),
      InvalidArgument,
      out);

  ScalarType a_type = a.scalar_type();
  ScalarType b_type = b.scalar_type();
  ScalarType alpha_type =
      torch::executor::native::utils::get_scalar_dtype(alpha);
  ScalarType common_type =
      executorch::runtime::promoteTypes(a_type, b_type, /*half_to_float*/ true);
  ScalarType out_type = out.scalar_type();

  ET_KERNEL_CHECK(
      ctx,
      executorch::runtime::canCast(common_type, out_type),
      InvalidArgument,
      out);
  ET_KERNEL_CHECK(
      ctx,
      torch::executor::check_alpha_type(alpha_type, common_type),
      InvalidArgument,
      out);

  float alpha_val;
  torch::executor::native::utils::extract_scalar(alpha, &alpha_val);

  constexpr auto name = "add.out";
  constexpr int kNnlibMaxDim = 4; /* fallback if broadcast and dim > 4 */

  int a_dim = a.dim(), b_dim = b.dim(), out_dim = out.dim();
  bool optimized = 1;
  /* find broadcast */
  const bool a_is_broadcasted = !out.sizes().equals(a.sizes());
  const bool b_is_broadcasted = !out.sizes().equals(b.sizes());
  const bool broadcast = (a_is_broadcasted || b_is_broadcasted);
  int max_dim = a.dim() > b.dim() ? a.dim() : b.dim();
  max_dim = out.dim() > max_dim ? out.dim() : max_dim;

  if ((out_type != ScalarType::Float) || (alpha_val != 1.0))
    optimized = 0;

  if ((a_dim == 0) || (b_dim == 0))
    optimized = 0;

  if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
    optimized = 0;

  if (optimized) {
    const float* const a_data = a.const_data_ptr<float>();
    const float* const b_data = b.const_data_ptr<float>();
    float* const out_data = out.mutable_data_ptr<float>();

    if (broadcast == 1) {
      int out_shape[kNnlibMaxDim];
      int inp1_shape[kNnlibMaxDim];
      int inp2_shape[kNnlibMaxDim];

      for (int i = 0; i < kNnlibMaxDim; i++) {
        out_shape[i] = 1;
        inp1_shape[i] = 1;
        inp2_shape[i] = 1;
      }

      int off_o = kNnlibMaxDim - out.dim();
      int off_a = kNnlibMaxDim - a.dim();
      int off_b = kNnlibMaxDim - b.dim();

      for (int i = 0; i < out.dim(); i++)
        out_shape[i + off_o] = out.size(i);
      for (int i = 0; i < a.dim(); i++)
        inp1_shape[i + off_a] = a.size(i);
      for (int i = 0; i < b.dim(); i++)
        inp2_shape[i + off_b] = b.size(i);

      xa_nn_elm_add_broadcast_4D_f32xf32_f32(
          out_data, out_shape, a_data, inp1_shape, b_data, inp2_shape);
    } else {
      xa_nn_elm_add_f32xf32_f32(out_data, a_data, b_data, out.numel());
    }

    return out;
  }

  ET_SWITCH_REALHBBF16_TYPES(a_type, ctx, name, CTYPE_A, [&]() {
    ET_SWITCH_REALHBBF16_TYPES(b_type, ctx, name, CTYPE_B, [&]() {
      using CTYPE_IN = typename torch::executor::
          promote_types<CTYPE_A, CTYPE_B, /*half_to_float*/ true>::type;
      ET_DCHECK(CppTypeToScalarType<CTYPE_IN>::value == common_type);
      CTYPE_IN alpha_val;
      torch::executor::native::utils::extract_scalar(alpha, &alpha_val);

      ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&]() {
        AddInner<
            can_cast<CTYPE_IN, CTYPE_OUT>::value,
            CTYPE_A,
            CTYPE_B,
            CTYPE_IN,
            CTYPE_OUT>::run(a, b, alpha_val, out);
      });
    });
  });

  return out;
}

} // namespace native
} // namespace HiFi
} // namespace impl
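
The fast path above applies only when the output dtype is Float, alpha equals 1.0, neither input is zero-dimensional, and no broadcast involves more than kNnlibMaxDim (4) dimensions; everything else falls through to the portable ET_SWITCH_REALHBBF16_TYPES loop. In the broadcast branch, every shape is left-padded with 1s to exactly four entries before the 4-D nnlib kernel is called. Below is a minimal standalone sketch of just that padding step; the pad_to_4d helper and the example shapes are illustrative, not part of the PR:

#include <array>
#include <cstdio>
#include <vector>

constexpr int kNnlibMaxDim = 4;

// Right-align a tensor's sizes into a fixed 4-entry shape, filling the
// leading entries with 1 (assumes sizes.size() <= kNnlibMaxDim), mirroring
// the off_o/off_a/off_b padding loops in add_out above.
std::array<int, kNnlibMaxDim> pad_to_4d(const std::vector<int>& sizes) {
  std::array<int, kNnlibMaxDim> shape;
  shape.fill(1);
  const int off = kNnlibMaxDim - static_cast<int>(sizes.size());
  for (size_t i = 0; i < sizes.size(); i++) {
    shape[i + off] = sizes[i];
  }
  return shape;
}

int main() {
  // A (2, 3) tensor broadcast against a (4, 2, 3) tensor: both are presented
  // to the fixed-rank nnlib routine as 4-D shapes.
  for (const auto& sizes : {std::vector<int>{2, 3}, std::vector<int>{4, 2, 3}}) {
    const auto shape = pad_to_4d(sizes);
    std::printf("(%d, %d, %d, %d)\n", shape[0], shape[1], shape[2], shape[3]);
  }
  // Prints (1, 1, 2, 3) and (1, 4, 2, 3).
  return 0;
}

Filling the leading entries with 1 is what lets tensors of different ranks broadcast inside a kernel that only understands rank-4 inputs.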
Review comments:
probably should be cadence::impl::HiFi too, but we can do that in a follow up if this can be merged
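
Concretely, the change this comment suggests would wrap the file's existing nesting in one more enclosing namespace. A minimal sketch, assuming the cadence::impl::HiFi spelling from the comment (this is not the merged code):

// Sketch of the suggested follow-up: add an enclosing cadence namespace so
// the operator resolves as cadence::impl::HiFi::native::add_out.
namespace cadence {
namespace impl {
namespace HiFi {
namespace native {

// add_out and the other optimized operators would live here.

} // namespace native
} // namespace HiFi
} // namespace impl
} // namespace cadence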
I will create a new PR for this
Follow-up PR: #6433