Skip to content

Commit c275c64

Browse files
committed
Revert "Add vectorization in elementwise_util (#9432)"
This reverts commit 4c35fe0.
1 parent a2e898e commit c275c64

24 files changed (+41 −370 lines)

.lintrunner.toml

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -272,8 +272,6 @@ exclude_patterns = [
272272
'exir/verification/bindings.cpp',
273273
'extension/**',
274274
# Uses properly-gated (ET_USE_PYTORCH_HEADERS) ATen include.
275-
'kernels/portable/cpu/util/elementwise_util.h',
276-
'kernels/portable/cpu/util/math_util.h',
277275
'kernels/portable/cpu/util/vectorized_math.h',
278276
'kernels/optimized/**',
279277
'runtime/core/exec_aten/**',

kernels/portable/cpu/op_add.cpp

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -102,18 +102,14 @@ Tensor& add_scalar_out(
102102
static constexpr const char op_name[] = "add.Scalar_out";
103103

104104
ET_SWITCH_REALB_TYPES(compute_type, ctx, op_name, CTYPE_COMPUTE, [&]() {
105-
CTYPE_COMPUTE val_b = utils::scalar_to<CTYPE_COMPUTE>(b);
106-
CTYPE_COMPUTE val_alpha = utils::scalar_to<CTYPE_COMPUTE>(alpha);
107-
auto val_alpha_times_b = val_alpha * val_b;
108105
utils::apply_unitensor_elementwise_fn<
109106
CTYPE_COMPUTE,
110107
op_name,
111108
utils::SupportedTensorDtypes::SAME_AS_COMMON>(
112-
[val_alpha_times_b](const auto val_a) {
113-
// Cast here supports vectorization; either it does nothing
114-
// or it casts from CTYPE_COMPUTE to
115-
// Vectorized<CTYPE_COMPUTE>.
116-
return val_a + decltype(val_a)(val_alpha_times_b);
109+
[b, alpha](const auto val_a) {
110+
CTYPE_COMPUTE val_b = utils::scalar_to<CTYPE_COMPUTE>(b);
111+
CTYPE_COMPUTE val_alpha = utils::scalar_to<CTYPE_COMPUTE>(alpha);
112+
return val_a + val_alpha * val_b;
117113
},
118114
ctx,
119115
a,

kernels/portable/cpu/op_atan2.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ Tensor& atan2_out(
6060
op_name,
6161
utils::SupportedTensorDtypes::FLOATHBF16>(
6262
[](const auto val_a, const auto val_b) {
63-
return executorch::math::atan2(val_a, val_b);
63+
return std::atan2(val_a, val_b);
6464
},
6565
ctx,
6666
a,

kernels/portable/cpu/op_clamp.cpp

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -138,8 +138,9 @@ Tensor& clamp_out(
138138
CTYPE_COMPUTE,
139139
op_name,
140140
utils::SupportedTensorDtypes::SAME_AS_COMMON>(
141-
[has_min, min_opt, has_max, max_opt](const auto val_in) {
142-
auto val_out = val_in;
141+
[has_min, min_opt, has_max, max_opt](const CTYPE_COMPUTE val_in) {
142+
// TODO: rewrite this to be vectorization-capable.
143+
CTYPE_COMPUTE val_out = val_in;
143144
if (has_min) {
144145
val_out = utils::max_override(
145146
val_out, utils::scalar_to<CTYPE_COMPUTE>(min_opt.value()));

kernels/portable/cpu/op_elu.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,8 @@ Tensor& elu_out(
4848
CTYPE,
4949
op_name,
5050
utils::SupportedTensorDtypes::SAME_AS_COMMON>(
51-
[negcoef, math_scale, math_input_scale](const CTYPE x) {
51+
[negcoef, math_scale, math_input_scale](const auto x) {
52+
// TODO: rewrite this to be vectorization-capable.
5253
return MathT(x) <= MathT(0)
5354
? std::expm1(MathT(x) * math_input_scale) * negcoef
5455
: MathT(x) * math_scale;

kernels/portable/cpu/op_fmod.cpp

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ Tensor& fmod_Tensor_out(
6161
utils::SupportedTensorDtypes::REALHBF16>(
6262
[&div_by_zero_error](
6363
const CTYPE_COMPUTE val_a, const CTYPE_COMPUTE val_b) {
64-
// TODO: rewrite this to be vectorization-capable?
64+
// TODO: rewrite this to be vectorization-capable.
6565
CTYPE_COMPUTE value = 0;
6666
if (is_integral_type<CTYPE_COMPUTE, /*includeBool=*/true>::value) {
6767
if (val_b == 0) {
@@ -138,8 +138,10 @@ Tensor& fmod_Scalar_out(
138138
CTYPE_COMPUTE,
139139
op_name,
140140
utils::SupportedTensorDtypes::REALHBF16>(
141-
[val_b](const auto val_a) {
142-
return executorch::math::fmod(val_a, (decltype(val_a))val_b);
141+
[val_b](const CTYPE_COMPUTE val_a) {
142+
// TODO: rewrite this to be vectorization-capable.
143+
CTYPE_COMPUTE value = std::fmod(val_a, val_b);
144+
return value;
143145
},
144146
ctx,
145147
a,

kernels/portable/cpu/op_maximum.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ Tensor& maximum_out(
4949
CTYPE_COMPUTE,
5050
op_name,
5151
utils::SupportedTensorDtypes::REALHBBF16>(
52-
[](const auto val_a, const auto val_b) {
52+
[](const CTYPE_COMPUTE val_a, const CTYPE_COMPUTE val_b) {
5353
return utils::max_override(val_a, val_b);
5454
},
5555
ctx,

kernels/portable/cpu/op_minimum.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,8 @@ Tensor& minimum_out(
4949
CTYPE_COMPUTE,
5050
op_name,
5151
utils::SupportedTensorDtypes::REALHBBF16>(
52-
[](const auto val_a, const auto val_b) {
52+
[](const CTYPE_COMPUTE val_a, const CTYPE_COMPUTE val_b) {
53+
// TODO: rewrite this to be vectorization-capable.
5354
return utils::min_override(val_a, val_b);
5455
},
5556
ctx,

kernels/portable/cpu/op_mul.cpp

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,9 @@ Tensor& mul_out(
7272
CTYPE_COMPUTE,
7373
op_name,
7474
utils::SupportedTensorDtypes::REALHBBF16>(
75-
[](const auto val_a, const auto val_b) { return val_a * val_b; },
75+
[](const CTYPE_COMPUTE val_a, const CTYPE_COMPUTE val_b) {
76+
return val_a * val_b;
77+
},
7678
ctx,
7779
a,
7880
utils::SupportedTensorDtypes::REALHBBF16,

kernels/portable/cpu/op_native_dropout.cpp

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -57,11 +57,8 @@ std::tuple<Tensor&, Tensor&> native_dropout_out(
5757
}
5858
ET_SWITCH_FLOATHBF16_TYPES(
5959
input.scalar_type(), ctx, op_name, CTYPE_COMPUTE, [&]() {
60-
utils::apply_bitensor_elementwise_fn<
61-
CTYPE_COMPUTE,
62-
op_name,
63-
utils::SupportedTensorDtypes::SAME_AS_COMMON>(
64-
[](const CTYPE_COMPUTE val, const CTYPE_COMPUTE mask_val) {
60+
utils::apply_bitensor_elementwise_fn<CTYPE_COMPUTE, op_name>(
61+
[](const auto val, const auto mask_val) {
6562
if (!mask_val) {
6663
return static_cast<decltype(val)>(0);
6764
}
@@ -73,7 +70,8 @@ std::tuple<Tensor&, Tensor&> native_dropout_out(
7370
mask,
7471
// TODO: should really be just BOOL
7572
utils::SupportedTensorDtypes::BOOL_OR_BYTE,
76-
out);
73+
out,
74+
utils::SupportedTensorDtypes::SAME_AS_COMMON);
7775
});
7876
} else if (input.numel() > 0) {
7977
std::memcpy(out.mutable_data_ptr(), input.data_ptr(), input.nbytes());

0 commit comments

Comments (0)