diff --git a/coresimd/ppsv/api/boolean_reductions.rs b/coresimd/ppsv/api/boolean_reductions.rs index 3c45fee48d..4b157ee4aa 100644 --- a/coresimd/ppsv/api/boolean_reductions.rs +++ b/coresimd/ppsv/api/boolean_reductions.rs @@ -6,17 +6,23 @@ macro_rules! impl_bool_reductions { /// Are `all` vector lanes `true`? #[inline] pub fn all(self) -> bool { - self.and() + unsafe { + use coresimd::simd_llvm::simd_reduce_all; + simd_reduce_all(self) + } } /// Is `any` vector lanes `true`? #[inline] pub fn any(self) -> bool { - self.or() + unsafe { + use coresimd::simd_llvm::simd_reduce_any; + simd_reduce_any(self) + } } /// Are `all` vector lanes `false`? #[inline] pub fn none(self) -> bool { - !self.or() + !self.any() } } } diff --git a/coresimd/ppsv/codegen/and.rs b/coresimd/ppsv/codegen/and.rs index aaba2b3c85..f65173194a 100644 --- a/coresimd/ppsv/codegen/and.rs +++ b/coresimd/ppsv/codegen/and.rs @@ -1,83 +1,6 @@ //! Code generation for the and reduction. use coresimd::simd::*; -/// LLVM intrinsics used in the and reduction -#[allow(improper_ctypes)] -extern "C" { - #[link_name = "llvm.experimental.vector.reduce.and.i8.v2i8"] - fn reduce_and_i8x2(x: i8x2) -> i8; - #[link_name = "llvm.experimental.vector.reduce.and.u8.v2u8"] - fn reduce_and_u8x2(x: u8x2) -> u8; - #[link_name = "llvm.experimental.vector.reduce.and.i16.v2i16"] - fn reduce_and_i16x2(x: i16x2) -> i16; - #[link_name = "llvm.experimental.vector.reduce.and.u16.v2u16"] - fn reduce_and_u16x2(x: u16x2) -> u16; - #[link_name = "llvm.experimental.vector.reduce.and.i32.v2i32"] - fn reduce_and_i32x2(x: i32x2) -> i32; - #[link_name = "llvm.experimental.vector.reduce.and.u32.v2u32"] - fn reduce_and_u32x2(x: u32x2) -> u32; - #[link_name = "llvm.experimental.vector.reduce.and.i64.v2i64"] - fn reduce_and_i64x2(x: i64x2) -> i64; - #[link_name = "llvm.experimental.vector.reduce.and.u64.v2u64"] - fn reduce_and_u64x2(x: u64x2) -> u64; - #[link_name = "llvm.experimental.vector.reduce.and.i8.v4i8"] - fn reduce_and_i8x4(x: i8x4) -> i8; - #[link_name = "llvm.experimental.vector.reduce.and.u8.v4u8"] - fn reduce_and_u8x4(x: u8x4) -> u8; - #[link_name = "llvm.experimental.vector.reduce.and.i16.v4i16"] - fn reduce_and_i16x4(x: i16x4) -> i16; - #[link_name = "llvm.experimental.vector.reduce.and.u16.v4u16"] - fn reduce_and_u16x4(x: u16x4) -> u16; - #[link_name = "llvm.experimental.vector.reduce.and.i32.v4i32"] - fn reduce_and_i32x4(x: i32x4) -> i32; - #[link_name = "llvm.experimental.vector.reduce.and.u32.v4u32"] - fn reduce_and_u32x4(x: u32x4) -> u32; - #[link_name = "llvm.experimental.vector.reduce.and.i64.v4i64"] - fn reduce_and_i64x4(x: i64x4) -> i64; - #[link_name = "llvm.experimental.vector.reduce.and.u64.v4u64"] - fn reduce_and_u64x4(x: u64x4) -> u64; - #[link_name = "llvm.experimental.vector.reduce.and.i8.v8i8"] - fn reduce_and_i8x8(x: i8x8) -> i8; - #[link_name = "llvm.experimental.vector.reduce.and.u8.v8u8"] - fn reduce_and_u8x8(x: u8x8) -> u8; - #[link_name = "llvm.experimental.vector.reduce.and.i16.v8i16"] - fn reduce_and_i16x8(x: i16x8) -> i16; - #[link_name = "llvm.experimental.vector.reduce.and.u16.v8u16"] - fn reduce_and_u16x8(x: u16x8) -> u16; - #[link_name = "llvm.experimental.vector.reduce.and.i32.v8i32"] - fn reduce_and_i32x8(x: i32x8) -> i32; - #[link_name = "llvm.experimental.vector.reduce.and.u32.v8u32"] - fn reduce_and_u32x8(x: u32x8) -> u32; - #[link_name = "llvm.experimental.vector.reduce.and.i64.v8i64"] - fn reduce_and_i64x8(x: i64x8) -> i64; - #[link_name = "llvm.experimental.vector.reduce.and.u64.v8u64"] - fn reduce_and_u64x8(x: u64x8) 
-> u64; - #[link_name = "llvm.experimental.vector.reduce.and.i8.v16i8"] - fn reduce_and_i8x16(x: i8x16) -> i8; - #[link_name = "llvm.experimental.vector.reduce.and.u8.v16u8"] - fn reduce_and_u8x16(x: u8x16) -> u8; - #[link_name = "llvm.experimental.vector.reduce.and.i16.v16i16"] - fn reduce_and_i16x16(x: i16x16) -> i16; - #[link_name = "llvm.experimental.vector.reduce.and.u16.v16u16"] - fn reduce_and_u16x16(x: u16x16) -> u16; - #[link_name = "llvm.experimental.vector.reduce.and.i32.v16i32"] - fn reduce_and_i32x16(x: i32x16) -> i32; - #[link_name = "llvm.experimental.vector.reduce.and.u32.v16u32"] - fn reduce_and_u32x16(x: u32x16) -> u32; - #[link_name = "llvm.experimental.vector.reduce.and.i8.v32i8"] - fn reduce_and_i8x32(x: i8x32) -> i8; - #[link_name = "llvm.experimental.vector.reduce.and.u8.v32u8"] - fn reduce_and_u8x32(x: u8x32) -> u8; - #[link_name = "llvm.experimental.vector.reduce.and.i16.v32i16"] - fn reduce_and_i16x32(x: i16x32) -> i16; - #[link_name = "llvm.experimental.vector.reduce.and.u16.v32u16"] - fn reduce_and_u16x32(x: u16x32) -> u16; - #[link_name = "llvm.experimental.vector.reduce.and.i8.v64i8"] - fn reduce_and_i8x64(x: i8x64) -> i8; - #[link_name = "llvm.experimental.vector.reduce.and.u8.v64u8"] - fn reduce_and_u8x64(x: u8x64) -> u8; -} - /// Reduction: horizontal bitwise and of the vector elements. #[cfg_attr(feature = "cargo-clippy", allow(stutter))] pub trait ReduceAnd { @@ -88,13 +11,16 @@ pub trait ReduceAnd { } macro_rules! red_and { - ($id:ident, $elem_ty:ident, $llvm_intr:ident) => { + ($id:ident, $elem_ty:ident) => { impl ReduceAnd for $id { type Acc = $elem_ty; #[cfg(not(target_arch = "aarch64"))] #[inline] fn reduce_and(self) -> Self::Acc { - unsafe { $llvm_intr(self.into_bits()) } + unsafe { + use coresimd::simd_llvm::simd_reduce_and; + simd_reduce_and(self) + } } // FIXME: broken in AArch64 #[cfg(target_arch = "aarch64")] @@ -109,49 +35,49 @@ macro_rules! 
red_and { } }; } -red_and!(i8x2, i8, reduce_and_i8x2); -red_and!(u8x2, u8, reduce_and_u8x2); -red_and!(i16x2, i16, reduce_and_i16x2); -red_and!(u16x2, u16, reduce_and_u16x2); -red_and!(i32x2, i32, reduce_and_i32x2); -red_and!(u32x2, u32, reduce_and_u32x2); -red_and!(i64x2, i64, reduce_and_i64x2); -red_and!(u64x2, u64, reduce_and_u64x2); -red_and!(i8x4, i8, reduce_and_i8x4); -red_and!(u8x4, u8, reduce_and_u8x4); -red_and!(i16x4, i16, reduce_and_i16x4); -red_and!(u16x4, u16, reduce_and_u16x4); -red_and!(i32x4, i32, reduce_and_i32x4); -red_and!(u32x4, u32, reduce_and_u32x4); -red_and!(i64x4, i64, reduce_and_i64x4); -red_and!(u64x4, u64, reduce_and_u64x4); -red_and!(i8x8, i8, reduce_and_i8x8); -red_and!(u8x8, u8, reduce_and_u8x8); -red_and!(i16x8, i16, reduce_and_i16x8); -red_and!(u16x8, u16, reduce_and_u16x8); -red_and!(i32x8, i32, reduce_and_i32x8); -red_and!(u32x8, u32, reduce_and_u32x8); -red_and!(i64x8, i64, reduce_and_i64x8); -red_and!(u64x8, u64, reduce_and_u64x8); -red_and!(i8x16, i8, reduce_and_i8x16); -red_and!(u8x16, u8, reduce_and_u8x16); -red_and!(i16x16, i16, reduce_and_i16x16); -red_and!(u16x16, u16, reduce_and_u16x16); -red_and!(i32x16, i32, reduce_and_i32x16); -red_and!(u32x16, u32, reduce_and_u32x16); -red_and!(i8x32, i8, reduce_and_i8x32); -red_and!(u8x32, u8, reduce_and_u8x32); -red_and!(i16x32, i16, reduce_and_i16x32); -red_and!(u16x32, u16, reduce_and_u16x32); -red_and!(i8x64, i8, reduce_and_i8x64); -red_and!(u8x64, u8, reduce_and_u8x64); +red_and!(i8x2, i8); +red_and!(u8x2, u8); +red_and!(i16x2, i16); +red_and!(u16x2, u16); +red_and!(i32x2, i32); +red_and!(u32x2, u32); +red_and!(i64x2, i64); +red_and!(u64x2, u64); +red_and!(i8x4, i8); +red_and!(u8x4, u8); +red_and!(i16x4, i16); +red_and!(u16x4, u16); +red_and!(i32x4, i32); +red_and!(u32x4, u32); +red_and!(i64x4, i64); +red_and!(u64x4, u64); +red_and!(i8x8, i8); +red_and!(u8x8, u8); +red_and!(i16x8, i16); +red_and!(u16x8, u16); +red_and!(i32x8, i32); +red_and!(u32x8, u32); +red_and!(i64x8, i64); +red_and!(u64x8, u64); +red_and!(i8x16, i8); +red_and!(u8x16, u8); +red_and!(i16x16, i16); +red_and!(u16x16, u16); +red_and!(i32x16, i32); +red_and!(u32x16, u32); +red_and!(i8x32, i8); +red_and!(u8x32, u8); +red_and!(i16x32, i16); +red_and!(u16x32, u16); +red_and!(i8x64, i8); +red_and!(u8x64, u8); -red_and!(b8x2, i8, reduce_and_i8x2); -red_and!(b8x4, i8, reduce_and_i8x4); -red_and!(b8x8, i8, reduce_and_i8x8); -red_and!(b8x16, i8, reduce_and_i8x16); -red_and!(b8x32, i8, reduce_and_i8x32); -red_and!(b8x64, i8, reduce_and_i8x64); +red_and!(b8x2, i8); +red_and!(b8x4, i8); +red_and!(b8x8, i8); +red_and!(b8x16, i8); +red_and!(b8x32, i8); +red_and!(b8x64, i8); #[cfg(test)] mod tests { diff --git a/coresimd/ppsv/codegen/max.rs b/coresimd/ppsv/codegen/max.rs index 420aca447c..a14ce071d3 100644 --- a/coresimd/ppsv/codegen/max.rs +++ b/coresimd/ppsv/codegen/max.rs @@ -1,97 +1,6 @@ //! Code generation for the max reduction. 
use coresimd::simd::*; -/// LLVM intrinsics used in the max reduction -#[allow(improper_ctypes)] -extern "C" { - #[link_name = "llvm.experimental.vector.reduce.smax.i8.v2i8"] - fn reduce_max_i8x2(x: i8x2) -> i8; - #[link_name = "llvm.experimental.vector.reduce.umax.u8.v2u8"] - fn reduce_max_u8x2(x: u8x2) -> u8; - #[link_name = "llvm.experimental.vector.reduce.smax.i16.v2i16"] - fn reduce_max_i16x2(x: i16x2) -> i16; - #[link_name = "llvm.experimental.vector.reduce.umax.u16.v2u16"] - fn reduce_max_u16x2(x: u16x2) -> u16; - #[link_name = "llvm.experimental.vector.reduce.smax.i32.v2i32"] - fn reduce_max_i32x2(x: i32x2) -> i32; - #[link_name = "llvm.experimental.vector.reduce.umax.u32.v2u32"] - fn reduce_max_u32x2(x: u32x2) -> u32; - #[link_name = "llvm.experimental.vector.reduce.smax.i64.v2i64"] - fn reduce_max_i64x2(x: i64x2) -> i64; - #[link_name = "llvm.experimental.vector.reduce.umax.u64.v2u64"] - fn reduce_max_u64x2(x: u64x2) -> u64; - #[link_name = "llvm.experimental.vector.reduce.smax.i8.v4i8"] - fn reduce_max_i8x4(x: i8x4) -> i8; - #[link_name = "llvm.experimental.vector.reduce.umax.u8.v4u8"] - fn reduce_max_u8x4(x: u8x4) -> u8; - #[link_name = "llvm.experimental.vector.reduce.smax.i16.v4i16"] - fn reduce_max_i16x4(x: i16x4) -> i16; - #[link_name = "llvm.experimental.vector.reduce.umax.u16.v4u16"] - fn reduce_max_u16x4(x: u16x4) -> u16; - #[link_name = "llvm.experimental.vector.reduce.smax.i32.v4i32"] - fn reduce_max_i32x4(x: i32x4) -> i32; - #[link_name = "llvm.experimental.vector.reduce.umax.u32.v4u32"] - fn reduce_max_u32x4(x: u32x4) -> u32; - #[link_name = "llvm.experimental.vector.reduce.smax.i64.v4i64"] - fn reduce_max_i64x4(x: i64x4) -> i64; - #[link_name = "llvm.experimental.vector.reduce.umax.u64.v4u64"] - fn reduce_max_u64x4(x: u64x4) -> u64; - #[link_name = "llvm.experimental.vector.reduce.smax.i8.v8i8"] - fn reduce_max_i8x8(x: i8x8) -> i8; - #[link_name = "llvm.experimental.vector.reduce.umax.u8.v8u8"] - fn reduce_max_u8x8(x: u8x8) -> u8; - #[link_name = "llvm.experimental.vector.reduce.smax.i16.v8i16"] - fn reduce_max_i16x8(x: i16x8) -> i16; - #[link_name = "llvm.experimental.vector.reduce.umax.u16.v8u16"] - fn reduce_max_u16x8(x: u16x8) -> u16; - #[link_name = "llvm.experimental.vector.reduce.smax.i32.v8i32"] - fn reduce_max_i32x8(x: i32x8) -> i32; - #[link_name = "llvm.experimental.vector.reduce.umax.u32.v8u32"] - fn reduce_max_u32x8(x: u32x8) -> u32; - #[link_name = "llvm.experimental.vector.reduce.smax.i64.v8i64"] - fn reduce_max_i64x8(x: i64x8) -> i64; - #[link_name = "llvm.experimental.vector.reduce.umax.u64.v8u64"] - fn reduce_max_u64x8(x: u64x8) -> u64; - #[link_name = "llvm.experimental.vector.reduce.smax.i8.v16i8"] - fn reduce_max_i8x16(x: i8x16) -> i8; - #[link_name = "llvm.experimental.vector.reduce.umax.u8.v16u8"] - fn reduce_max_u8x16(x: u8x16) -> u8; - #[link_name = "llvm.experimental.vector.reduce.smax.i16.v16i16"] - fn reduce_max_i16x16(x: i16x16) -> i16; - #[link_name = "llvm.experimental.vector.reduce.umax.u16.v16u16"] - fn reduce_max_u16x16(x: u16x16) -> u16; - #[link_name = "llvm.experimental.vector.reduce.smax.i32.v16i32"] - fn reduce_max_i32x16(x: i32x16) -> i32; - #[link_name = "llvm.experimental.vector.reduce.umax.u32.v16u32"] - fn reduce_max_u32x16(x: u32x16) -> u32; - #[link_name = "llvm.experimental.vector.reduce.smax.i8.v32i8"] - fn reduce_max_i8x32(x: i8x32) -> i8; - #[link_name = "llvm.experimental.vector.reduce.umax.u8.v32u8"] - fn reduce_max_u8x32(x: u8x32) -> u8; - #[link_name = "llvm.experimental.vector.reduce.smax.i16.v32i16"] - fn 
reduce_max_i16x32(x: i16x32) -> i16; - #[link_name = "llvm.experimental.vector.reduce.umax.u16.v32u16"] - fn reduce_max_u16x32(x: u16x32) -> u16; - #[link_name = "llvm.experimental.vector.reduce.smax.i8.v64i8"] - fn reduce_max_i8x64(x: i8x64) -> i8; - #[link_name = "llvm.experimental.vector.reduce.umax.u8.v64u8"] - fn reduce_max_u8x64(x: u8x64) -> u8; - #[link_name = "llvm.experimental.vector.reduce.fmax.f32.v2f32"] - fn reduce_fmax_f32x2(x: f32x2) -> f32; - #[link_name = "llvm.experimental.vector.reduce.fmax.f64.v2f64"] - fn reduce_fmax_f64x2(x: f64x2) -> f64; - #[link_name = "llvm.experimental.vector.reduce.fmax.f32.v4f32"] - fn reduce_fmax_f32x4(x: f32x4) -> f32; - #[link_name = "llvm.experimental.vector.reduce.fmax.f64.v4f64"] - fn reduce_fmax_f64x4(x: f64x4) -> f64; - #[link_name = "llvm.experimental.vector.reduce.fmax.f32.v8f32"] - fn reduce_fmax_f32x8(x: f32x8) -> f32; - #[link_name = "llvm.experimental.vector.reduce.fmax.f64.v8f64"] - fn reduce_fmax_f64x8(x: f64x8) -> f64; - #[link_name = "llvm.experimental.vector.reduce.fmax.f32.v16f32"] - fn reduce_fmax_f32x16(x: f32x16) -> f32; -} - /// Reduction: horizontal max of the vector elements. #[cfg_attr(feature = "cargo-clippy", allow(stutter))] pub trait ReduceMax { @@ -102,13 +11,16 @@ pub trait ReduceMax { } macro_rules! red_max { - ($id:ident, $elem_ty:ident, $llvm_intr:ident) => { + ($id:ident, $elem_ty:ident) => { impl ReduceMax for $id { type Acc = $elem_ty; #[cfg(not(target_arch = "aarch64"))] #[inline] fn reduce_max(self) -> Self::Acc { - unsafe { $llvm_intr(self) } + unsafe { + use coresimd::simd_llvm::simd_reduce_max; + simd_reduce_max(self) + } } // FIXME: broken on AArch64 #[cfg(target_arch = "aarch64")] @@ -126,50 +38,50 @@ macro_rules! red_max { } }; } -red_max!(i8x2, i8, reduce_max_i8x2); -red_max!(u8x2, u8, reduce_max_u8x2); -red_max!(i16x2, i16, reduce_max_i16x2); -red_max!(u16x2, u16, reduce_max_u16x2); -red_max!(i32x2, i32, reduce_max_i32x2); -red_max!(u32x2, u32, reduce_max_u32x2); -red_max!(i64x2, i64, reduce_max_i64x2); -red_max!(u64x2, u64, reduce_max_u64x2); -red_max!(i8x4, i8, reduce_max_i8x4); -red_max!(u8x4, u8, reduce_max_u8x4); -red_max!(i16x4, i16, reduce_max_i16x4); -red_max!(u16x4, u16, reduce_max_u16x4); -red_max!(i32x4, i32, reduce_max_i32x4); -red_max!(u32x4, u32, reduce_max_u32x4); -red_max!(i64x4, i64, reduce_max_i64x4); -red_max!(u64x4, u64, reduce_max_u64x4); -red_max!(i8x8, i8, reduce_max_i8x8); -red_max!(u8x8, u8, reduce_max_u8x8); -red_max!(i16x8, i16, reduce_max_i16x8); -red_max!(u16x8, u16, reduce_max_u16x8); -red_max!(i32x8, i32, reduce_max_i32x8); -red_max!(u32x8, u32, reduce_max_u32x8); -red_max!(i64x8, i64, reduce_max_i64x8); -red_max!(u64x8, u64, reduce_max_u64x8); -red_max!(i8x16, i8, reduce_max_i8x16); -red_max!(u8x16, u8, reduce_max_u8x16); -red_max!(i16x16, i16, reduce_max_i16x16); -red_max!(u16x16, u16, reduce_max_u16x16); -red_max!(i32x16, i32, reduce_max_i32x16); -red_max!(u32x16, u32, reduce_max_u32x16); -red_max!(i8x32, i8, reduce_max_i8x32); -red_max!(u8x32, u8, reduce_max_u8x32); -red_max!(i16x32, i16, reduce_max_i16x32); -red_max!(u16x32, u16, reduce_max_u16x32); -red_max!(i8x64, i8, reduce_max_i8x64); -red_max!(u8x64, u8, reduce_max_u8x64); +red_max!(i8x2, i8); +red_max!(u8x2, u8); +red_max!(i16x2, i16); +red_max!(u16x2, u16); +red_max!(i32x2, i32); +red_max!(u32x2, u32); +red_max!(i64x2, i64); +red_max!(u64x2, u64); +red_max!(i8x4, i8); +red_max!(u8x4, u8); +red_max!(i16x4, i16); +red_max!(u16x4, u16); +red_max!(i32x4, i32); +red_max!(u32x4, u32); +red_max!(i64x4, i64); 
+red_max!(u64x4, u64); +red_max!(i8x8, i8); +red_max!(u8x8, u8); +red_max!(i16x8, i16); +red_max!(u16x8, u16); +red_max!(i32x8, i32); +red_max!(u32x8, u32); +red_max!(i64x8, i64); +red_max!(u64x8, u64); +red_max!(i8x16, i8); +red_max!(u8x16, u8); +red_max!(i16x16, i16); +red_max!(u16x16, u16); +red_max!(i32x16, i32); +red_max!(u32x16, u32); +red_max!(i8x32, i8); +red_max!(u8x32, u8); +red_max!(i16x32, i16); +red_max!(u16x32, u16); +red_max!(i8x64, i8); +red_max!(u8x64, u8); -red_max!(f32x2, f32, reduce_fmax_f32x2); -red_max!(f64x2, f64, reduce_fmax_f64x2); -red_max!(f32x4, f32, reduce_fmax_f32x4); -red_max!(f64x4, f64, reduce_fmax_f64x4); -red_max!(f32x8, f32, reduce_fmax_f32x8); -red_max!(f64x8, f64, reduce_fmax_f64x8); -red_max!(f32x16, f32, reduce_fmax_f32x16); +red_max!(f32x2, f32); +red_max!(f64x2, f64); +red_max!(f32x4, f32); +red_max!(f64x4, f64); +red_max!(f32x8, f32); +red_max!(f64x8, f64); +red_max!(f32x16, f32); #[cfg(test)] mod tests { diff --git a/coresimd/ppsv/codegen/min.rs b/coresimd/ppsv/codegen/min.rs index 064b591e76..3ff110df39 100644 --- a/coresimd/ppsv/codegen/min.rs +++ b/coresimd/ppsv/codegen/min.rs @@ -1,97 +1,6 @@ //! Code generation for the min reduction. use coresimd::simd::*; -/// LLVM intrinsics used in the min reduction -#[allow(improper_ctypes)] -extern "C" { - #[link_name = "llvm.experimental.vector.reduce.smin.i8.v2i8"] - fn reduce_min_i8x2(x: i8x2) -> i8; - #[link_name = "llvm.experimental.vector.reduce.umin.u8.v2u8"] - fn reduce_min_u8x2(x: u8x2) -> u8; - #[link_name = "llvm.experimental.vector.reduce.smin.i16.v2i16"] - fn reduce_min_i16x2(x: i16x2) -> i16; - #[link_name = "llvm.experimental.vector.reduce.umin.u16.v2u16"] - fn reduce_min_u16x2(x: u16x2) -> u16; - #[link_name = "llvm.experimental.vector.reduce.smin.i32.v2i32"] - fn reduce_min_i32x2(x: i32x2) -> i32; - #[link_name = "llvm.experimental.vector.reduce.umin.u32.v2u32"] - fn reduce_min_u32x2(x: u32x2) -> u32; - #[link_name = "llvm.experimental.vector.reduce.smin.i64.v2i64"] - fn reduce_min_i64x2(x: i64x2) -> i64; - #[link_name = "llvm.experimental.vector.reduce.umin.u64.v2u64"] - fn reduce_min_u64x2(x: u64x2) -> u64; - #[link_name = "llvm.experimental.vector.reduce.smin.i8.v4i8"] - fn reduce_min_i8x4(x: i8x4) -> i8; - #[link_name = "llvm.experimental.vector.reduce.umin.u8.v4u8"] - fn reduce_min_u8x4(x: u8x4) -> u8; - #[link_name = "llvm.experimental.vector.reduce.smin.i16.v4i16"] - fn reduce_min_i16x4(x: i16x4) -> i16; - #[link_name = "llvm.experimental.vector.reduce.umin.u16.v4u16"] - fn reduce_min_u16x4(x: u16x4) -> u16; - #[link_name = "llvm.experimental.vector.reduce.smin.i32.v4i32"] - fn reduce_min_i32x4(x: i32x4) -> i32; - #[link_name = "llvm.experimental.vector.reduce.umin.u32.v4u32"] - fn reduce_min_u32x4(x: u32x4) -> u32; - #[link_name = "llvm.experimental.vector.reduce.smin.i64.v4i64"] - fn reduce_min_i64x4(x: i64x4) -> i64; - #[link_name = "llvm.experimental.vector.reduce.umin.u64.v4u64"] - fn reduce_min_u64x4(x: u64x4) -> u64; - #[link_name = "llvm.experimental.vector.reduce.smin.i8.v8i8"] - fn reduce_min_i8x8(x: i8x8) -> i8; - #[link_name = "llvm.experimental.vector.reduce.umin.u8.v8u8"] - fn reduce_min_u8x8(x: u8x8) -> u8; - #[link_name = "llvm.experimental.vector.reduce.smin.i16.v8i16"] - fn reduce_min_i16x8(x: i16x8) -> i16; - #[link_name = "llvm.experimental.vector.reduce.umin.u16.v8u16"] - fn reduce_min_u16x8(x: u16x8) -> u16; - #[link_name = "llvm.experimental.vector.reduce.smin.i32.v8i32"] - fn reduce_min_i32x8(x: i32x8) -> i32; - #[link_name = 
"llvm.experimental.vector.reduce.umin.u32.v8u32"] - fn reduce_min_u32x8(x: u32x8) -> u32; - #[link_name = "llvm.experimental.vector.reduce.smin.i64.v8i64"] - fn reduce_min_i64x8(x: i64x8) -> i64; - #[link_name = "llvm.experimental.vector.reduce.umin.u64.v8u64"] - fn reduce_min_u64x8(x: u64x8) -> u64; - #[link_name = "llvm.experimental.vector.reduce.smin.i8.v16i8"] - fn reduce_min_i8x16(x: i8x16) -> i8; - #[link_name = "llvm.experimental.vector.reduce.umin.u8.v16u8"] - fn reduce_min_u8x16(x: u8x16) -> u8; - #[link_name = "llvm.experimental.vector.reduce.smin.i16.v16i16"] - fn reduce_min_i16x16(x: i16x16) -> i16; - #[link_name = "llvm.experimental.vector.reduce.umin.u16.v16u16"] - fn reduce_min_u16x16(x: u16x16) -> u16; - #[link_name = "llvm.experimental.vector.reduce.smin.i32.v16i32"] - fn reduce_min_i32x16(x: i32x16) -> i32; - #[link_name = "llvm.experimental.vector.reduce.umin.u32.v16u32"] - fn reduce_min_u32x16(x: u32x16) -> u32; - #[link_name = "llvm.experimental.vector.reduce.smin.i8.v32i8"] - fn reduce_min_i8x32(x: i8x32) -> i8; - #[link_name = "llvm.experimental.vector.reduce.umin.u8.v32u8"] - fn reduce_min_u8x32(x: u8x32) -> u8; - #[link_name = "llvm.experimental.vector.reduce.smin.i16.v32i16"] - fn reduce_min_i16x32(x: i16x32) -> i16; - #[link_name = "llvm.experimental.vector.reduce.umin.u16.v32u16"] - fn reduce_min_u16x32(x: u16x32) -> u16; - #[link_name = "llvm.experimental.vector.reduce.smin.i8.v64i8"] - fn reduce_min_i8x64(x: i8x64) -> i8; - #[link_name = "llvm.experimental.vector.reduce.umin.u8.v64u8"] - fn reduce_min_u8x64(x: u8x64) -> u8; - #[link_name = "llvm.experimental.vector.reduce.fmin.f32.v2f32"] - fn reduce_fmin_f32x2(x: f32x2) -> f32; - #[link_name = "llvm.experimental.vector.reduce.fmin.f64.v2f64"] - fn reduce_fmin_f64x2(x: f64x2) -> f64; - #[link_name = "llvm.experimental.vector.reduce.fmin.f32.v4f32"] - fn reduce_fmin_f32x4(x: f32x4) -> f32; - #[link_name = "llvm.experimental.vector.reduce.fmin.f64.v4f64"] - fn reduce_fmin_f64x4(x: f64x4) -> f64; - #[link_name = "llvm.experimental.vector.reduce.fmin.f32.v8f32"] - fn reduce_fmin_f32x8(x: f32x8) -> f32; - #[link_name = "llvm.experimental.vector.reduce.fmin.f64.v8f64"] - fn reduce_fmin_f64x8(x: f64x8) -> f64; - #[link_name = "llvm.experimental.vector.reduce.fmin.f32.v16f32"] - fn reduce_fmin_f32x16(x: f32x16) -> f32; -} - /// Reduction: horizontal max of the vector elements. #[cfg_attr(feature = "cargo-clippy", allow(stutter))] pub trait ReduceMin { @@ -102,13 +11,16 @@ pub trait ReduceMin { } macro_rules! red_min { - ($id:ident, $elem_ty:ident, $llvm_intr:ident) => { + ($id:ident, $elem_ty:ident) => { impl ReduceMin for $id { type Acc = $elem_ty; #[cfg(not(target_arch = "aarch64"))] #[inline] fn reduce_min(self) -> Self::Acc { - unsafe { $llvm_intr(self) } + unsafe { + use coresimd::simd_llvm::simd_reduce_min; + simd_reduce_min(self) + } } // FIXME: broken on AArch64 #[cfg(target_arch = "aarch64")] @@ -126,50 +38,50 @@ macro_rules! 
red_min { } }; } -red_min!(i8x2, i8, reduce_min_i8x2); -red_min!(u8x2, u8, reduce_min_u8x2); -red_min!(i16x2, i16, reduce_min_i16x2); -red_min!(u16x2, u16, reduce_min_u16x2); -red_min!(i32x2, i32, reduce_min_i32x2); -red_min!(u32x2, u32, reduce_min_u32x2); -red_min!(i64x2, i64, reduce_min_i64x2); -red_min!(u64x2, u64, reduce_min_u64x2); -red_min!(i8x4, i8, reduce_min_i8x4); -red_min!(u8x4, u8, reduce_min_u8x4); -red_min!(i16x4, i16, reduce_min_i16x4); -red_min!(u16x4, u16, reduce_min_u16x4); -red_min!(i32x4, i32, reduce_min_i32x4); -red_min!(u32x4, u32, reduce_min_u32x4); -red_min!(i64x4, i64, reduce_min_i64x4); -red_min!(u64x4, u64, reduce_min_u64x4); -red_min!(i8x8, i8, reduce_min_i8x8); -red_min!(u8x8, u8, reduce_min_u8x8); -red_min!(i16x8, i16, reduce_min_i16x8); -red_min!(u16x8, u16, reduce_min_u16x8); -red_min!(i32x8, i32, reduce_min_i32x8); -red_min!(u32x8, u32, reduce_min_u32x8); -red_min!(i64x8, i64, reduce_min_i64x8); -red_min!(u64x8, u64, reduce_min_u64x8); -red_min!(i8x16, i8, reduce_min_i8x16); -red_min!(u8x16, u8, reduce_min_u8x16); -red_min!(i16x16, i16, reduce_min_i16x16); -red_min!(u16x16, u16, reduce_min_u16x16); -red_min!(i32x16, i32, reduce_min_i32x16); -red_min!(u32x16, u32, reduce_min_u32x16); -red_min!(i8x32, i8, reduce_min_i8x32); -red_min!(u8x32, u8, reduce_min_u8x32); -red_min!(i16x32, i16, reduce_min_i16x32); -red_min!(u16x32, u16, reduce_min_u16x32); -red_min!(i8x64, i8, reduce_min_i8x64); -red_min!(u8x64, u8, reduce_min_u8x64); +red_min!(i8x2, i8); +red_min!(u8x2, u8); +red_min!(i16x2, i16); +red_min!(u16x2, u16); +red_min!(i32x2, i32); +red_min!(u32x2, u32); +red_min!(i64x2, i64); +red_min!(u64x2, u64); +red_min!(i8x4, i8); +red_min!(u8x4, u8); +red_min!(i16x4, i16); +red_min!(u16x4, u16); +red_min!(i32x4, i32); +red_min!(u32x4, u32); +red_min!(i64x4, i64); +red_min!(u64x4, u64); +red_min!(i8x8, i8); +red_min!(u8x8, u8); +red_min!(i16x8, i16); +red_min!(u16x8, u16); +red_min!(i32x8, i32); +red_min!(u32x8, u32); +red_min!(i64x8, i64); +red_min!(u64x8, u64); +red_min!(i8x16, i8); +red_min!(u8x16, u8); +red_min!(i16x16, i16); +red_min!(u16x16, u16); +red_min!(i32x16, i32); +red_min!(u32x16, u32); +red_min!(i8x32, i8); +red_min!(u8x32, u8); +red_min!(i16x32, i16); +red_min!(u16x32, u16); +red_min!(i8x64, i8); +red_min!(u8x64, u8); -red_min!(f32x2, f32, reduce_fmin_f32x2); -red_min!(f64x2, f64, reduce_fmin_f64x2); -red_min!(f32x4, f32, reduce_fmin_f32x4); -red_min!(f64x4, f64, reduce_fmin_f64x4); -red_min!(f32x8, f32, reduce_fmin_f32x8); -red_min!(f64x8, f64, reduce_fmin_f64x8); -red_min!(f32x16, f32, reduce_fmin_f32x16); +red_min!(f32x2, f32); +red_min!(f64x2, f64); +red_min!(f32x4, f32); +red_min!(f64x4, f64); +red_min!(f32x8, f32); +red_min!(f64x8, f64); +red_min!(f32x16, f32); #[cfg(test)] mod tests { diff --git a/coresimd/ppsv/codegen/or.rs b/coresimd/ppsv/codegen/or.rs index c5b8711f6a..e870dad971 100644 --- a/coresimd/ppsv/codegen/or.rs +++ b/coresimd/ppsv/codegen/or.rs @@ -1,83 +1,6 @@ //! Code generation for the or reduction. 
use coresimd::simd::*; -/// LLVM intrinsics used in the or reduction -#[allow(improper_ctypes)] -extern "C" { - #[link_name = "llvm.experimental.vector.reduce.or.i8.v2i8"] - fn reduce_or_i8x2(x: i8x2) -> i8; - #[link_name = "llvm.experimental.vector.reduce.or.u8.v2u8"] - fn reduce_or_u8x2(x: u8x2) -> u8; - #[link_name = "llvm.experimental.vector.reduce.or.i16.v2i16"] - fn reduce_or_i16x2(x: i16x2) -> i16; - #[link_name = "llvm.experimental.vector.reduce.or.u16.v2u16"] - fn reduce_or_u16x2(x: u16x2) -> u16; - #[link_name = "llvm.experimental.vector.reduce.or.i32.v2i32"] - fn reduce_or_i32x2(x: i32x2) -> i32; - #[link_name = "llvm.experimental.vector.reduce.or.u32.v2u32"] - fn reduce_or_u32x2(x: u32x2) -> u32; - #[link_name = "llvm.experimental.vector.reduce.or.i64.v2i64"] - fn reduce_or_i64x2(x: i64x2) -> i64; - #[link_name = "llvm.experimental.vector.reduce.or.u64.v2u64"] - fn reduce_or_u64x2(x: u64x2) -> u64; - #[link_name = "llvm.experimental.vector.reduce.or.i8.v4i8"] - fn reduce_or_i8x4(x: i8x4) -> i8; - #[link_name = "llvm.experimental.vector.reduce.or.u8.v4u8"] - fn reduce_or_u8x4(x: u8x4) -> u8; - #[link_name = "llvm.experimental.vector.reduce.or.i16.v4i16"] - fn reduce_or_i16x4(x: i16x4) -> i16; - #[link_name = "llvm.experimental.vector.reduce.or.u16.v4u16"] - fn reduce_or_u16x4(x: u16x4) -> u16; - #[link_name = "llvm.experimental.vector.reduce.or.i32.v4i32"] - fn reduce_or_i32x4(x: i32x4) -> i32; - #[link_name = "llvm.experimental.vector.reduce.or.u32.v4u32"] - fn reduce_or_u32x4(x: u32x4) -> u32; - #[link_name = "llvm.experimental.vector.reduce.or.i64.v4i64"] - fn reduce_or_i64x4(x: i64x4) -> i64; - #[link_name = "llvm.experimental.vector.reduce.or.u64.v4u64"] - fn reduce_or_u64x4(x: u64x4) -> u64; - #[link_name = "llvm.experimental.vector.reduce.or.i8.v8i8"] - fn reduce_or_i8x8(x: i8x8) -> i8; - #[link_name = "llvm.experimental.vector.reduce.or.u8.v8u8"] - fn reduce_or_u8x8(x: u8x8) -> u8; - #[link_name = "llvm.experimental.vector.reduce.or.i16.v8i16"] - fn reduce_or_i16x8(x: i16x8) -> i16; - #[link_name = "llvm.experimental.vector.reduce.or.u16.v8u16"] - fn reduce_or_u16x8(x: u16x8) -> u16; - #[link_name = "llvm.experimental.vector.reduce.or.i32.v8i32"] - fn reduce_or_i32x8(x: i32x8) -> i32; - #[link_name = "llvm.experimental.vector.reduce.or.u32.v8u32"] - fn reduce_or_u32x8(x: u32x8) -> u32; - #[link_name = "llvm.experimental.vector.reduce.or.i64.v8i64"] - fn reduce_or_i64x8(x: i64x8) -> i64; - #[link_name = "llvm.experimental.vector.reduce.or.u64.v8u64"] - fn reduce_or_u64x8(x: u64x8) -> u64; - #[link_name = "llvm.experimental.vector.reduce.or.i8.v16i8"] - fn reduce_or_i8x16(x: i8x16) -> i8; - #[link_name = "llvm.experimental.vector.reduce.or.u8.v16u8"] - fn reduce_or_u8x16(x: u8x16) -> u8; - #[link_name = "llvm.experimental.vector.reduce.or.i16.v16i16"] - fn reduce_or_i16x16(x: i16x16) -> i16; - #[link_name = "llvm.experimental.vector.reduce.or.u16.v16u16"] - fn reduce_or_u16x16(x: u16x16) -> u16; - #[link_name = "llvm.experimental.vector.reduce.or.i32.v16i32"] - fn reduce_or_i32x16(x: i32x16) -> i32; - #[link_name = "llvm.experimental.vector.reduce.or.u32.v16u32"] - fn reduce_or_u32x16(x: u32x16) -> u32; - #[link_name = "llvm.experimental.vector.reduce.or.i8.v32i8"] - fn reduce_or_i8x32(x: i8x32) -> i8; - #[link_name = "llvm.experimental.vector.reduce.or.u8.v32u8"] - fn reduce_or_u8x32(x: u8x32) -> u8; - #[link_name = "llvm.experimental.vector.reduce.or.i16.v32i16"] - fn reduce_or_i16x32(x: i16x32) -> i16; - #[link_name = "llvm.experimental.vector.reduce.or.u16.v32u16"] - 
fn reduce_or_u16x32(x: u16x32) -> u16; - #[link_name = "llvm.experimental.vector.reduce.or.i8.v64i8"] - fn reduce_or_i8x64(x: i8x64) -> i8; - #[link_name = "llvm.experimental.vector.reduce.or.u8.v64u8"] - fn reduce_or_u8x64(x: u8x64) -> u8; -} - /// Reduction: horizontal bitwise or of the vector elements. #[cfg_attr(feature = "cargo-clippy", allow(stutter))] pub trait ReduceOr { @@ -88,13 +11,16 @@ pub trait ReduceOr { } macro_rules! red_or { - ($id:ident, $elem_ty:ident, $llvm_intr:ident) => { + ($id:ident, $elem_ty:ident) => { impl ReduceOr for $id { type Acc = $elem_ty; #[cfg(not(target_arch = "aarch64"))] #[inline] fn reduce_or(self) -> Self::Acc { - unsafe { $llvm_intr(self.into_bits()) } + unsafe { + use coresimd::simd_llvm::simd_reduce_or; + simd_reduce_or(self) + } } // FIXME: broken in AArch64 #[cfg(target_arch = "aarch64")] @@ -109,49 +35,49 @@ macro_rules! red_or { } }; } -red_or!(i8x2, i8, reduce_or_i8x2); -red_or!(u8x2, u8, reduce_or_u8x2); -red_or!(i16x2, i16, reduce_or_i16x2); -red_or!(u16x2, u16, reduce_or_u16x2); -red_or!(i32x2, i32, reduce_or_i32x2); -red_or!(u32x2, u32, reduce_or_u32x2); -red_or!(i64x2, i64, reduce_or_i64x2); -red_or!(u64x2, u64, reduce_or_u64x2); -red_or!(i8x4, i8, reduce_or_i8x4); -red_or!(u8x4, u8, reduce_or_u8x4); -red_or!(i16x4, i16, reduce_or_i16x4); -red_or!(u16x4, u16, reduce_or_u16x4); -red_or!(i32x4, i32, reduce_or_i32x4); -red_or!(u32x4, u32, reduce_or_u32x4); -red_or!(i64x4, i64, reduce_or_i64x4); -red_or!(u64x4, u64, reduce_or_u64x4); -red_or!(i8x8, i8, reduce_or_i8x8); -red_or!(u8x8, u8, reduce_or_u8x8); -red_or!(i16x8, i16, reduce_or_i16x8); -red_or!(u16x8, u16, reduce_or_u16x8); -red_or!(i32x8, i32, reduce_or_i32x8); -red_or!(u32x8, u32, reduce_or_u32x8); -red_or!(i64x8, i64, reduce_or_i64x8); -red_or!(u64x8, u64, reduce_or_u64x8); -red_or!(i8x16, i8, reduce_or_i8x16); -red_or!(u8x16, u8, reduce_or_u8x16); -red_or!(i16x16, i16, reduce_or_i16x16); -red_or!(u16x16, u16, reduce_or_u16x16); -red_or!(i32x16, i32, reduce_or_i32x16); -red_or!(u32x16, u32, reduce_or_u32x16); -red_or!(i8x32, i8, reduce_or_i8x32); -red_or!(u8x32, u8, reduce_or_u8x32); -red_or!(i16x32, i16, reduce_or_i16x32); -red_or!(u16x32, u16, reduce_or_u16x32); -red_or!(i8x64, i8, reduce_or_i8x64); -red_or!(u8x64, u8, reduce_or_u8x64); +red_or!(i8x2, i8); +red_or!(u8x2, u8); +red_or!(i16x2, i16); +red_or!(u16x2, u16); +red_or!(i32x2, i32); +red_or!(u32x2, u32); +red_or!(i64x2, i64); +red_or!(u64x2, u64); +red_or!(i8x4, i8); +red_or!(u8x4, u8); +red_or!(i16x4, i16); +red_or!(u16x4, u16); +red_or!(i32x4, i32); +red_or!(u32x4, u32); +red_or!(i64x4, i64); +red_or!(u64x4, u64); +red_or!(i8x8, i8); +red_or!(u8x8, u8); +red_or!(i16x8, i16); +red_or!(u16x8, u16); +red_or!(i32x8, i32); +red_or!(u32x8, u32); +red_or!(i64x8, i64); +red_or!(u64x8, u64); +red_or!(i8x16, i8); +red_or!(u8x16, u8); +red_or!(i16x16, i16); +red_or!(u16x16, u16); +red_or!(i32x16, i32); +red_or!(u32x16, u32); +red_or!(i8x32, i8); +red_or!(u8x32, u8); +red_or!(i16x32, i16); +red_or!(u16x32, u16); +red_or!(i8x64, i8); +red_or!(u8x64, u8); -red_or!(b8x2, i8, reduce_or_i8x2); -red_or!(b8x4, i8, reduce_or_i8x4); -red_or!(b8x8, i8, reduce_or_i8x8); -red_or!(b8x16, i8, reduce_or_i8x16); -red_or!(b8x32, i8, reduce_or_i8x32); -red_or!(b8x64, i8, reduce_or_i8x64); +red_or!(b8x2, i8); +red_or!(b8x4, i8); +red_or!(b8x8, i8); +red_or!(b8x16, i8); +red_or!(b8x32, i8); +red_or!(b8x64, i8); #[cfg(test)] mod tests { diff --git a/coresimd/ppsv/codegen/product.rs b/coresimd/ppsv/codegen/product.rs index 00bce740f1..b557d848ed 100644 
--- a/coresimd/ppsv/codegen/product.rs +++ b/coresimd/ppsv/codegen/product.rs @@ -1,97 +1,6 @@ //! Code generation for the product reduction. use coresimd::simd::*; -/// LLVM intrinsics used in the product reduction -#[allow(improper_ctypes)] -extern "C" { - #[link_name = "llvm.experimental.vector.reduce.mul.i8.v2i8"] - fn reduce_mul_i8x2(x: i8x2) -> i8; - #[link_name = "llvm.experimental.vector.reduce.mul.u8.v2u8"] - fn reduce_mul_u8x2(x: u8x2) -> u8; - #[link_name = "llvm.experimental.vector.reduce.mul.i16.v2i16"] - fn reduce_mul_i16x2(x: i16x2) -> i16; - #[link_name = "llvm.experimental.vector.reduce.mul.u16.v2u16"] - fn reduce_mul_u16x2(x: u16x2) -> u16; - #[link_name = "llvm.experimental.vector.reduce.mul.i32.v2i32"] - fn reduce_mul_i32x2(x: i32x2) -> i32; - #[link_name = "llvm.experimental.vector.reduce.mul.u32.v2u32"] - fn reduce_mul_u32x2(x: u32x2) -> u32; - #[link_name = "llvm.experimental.vector.reduce.mul.i64.v2i64"] - fn reduce_mul_i64x2(x: i64x2) -> i64; - #[link_name = "llvm.experimental.vector.reduce.mul.u64.v2u64"] - fn reduce_mul_u64x2(x: u64x2) -> u64; - #[link_name = "llvm.experimental.vector.reduce.mul.i8.v4i8"] - fn reduce_mul_i8x4(x: i8x4) -> i8; - #[link_name = "llvm.experimental.vector.reduce.mul.u8.v4u8"] - fn reduce_mul_u8x4(x: u8x4) -> u8; - #[link_name = "llvm.experimental.vector.reduce.mul.i16.v4i16"] - fn reduce_mul_i16x4(x: i16x4) -> i16; - #[link_name = "llvm.experimental.vector.reduce.mul.u16.v4u16"] - fn reduce_mul_u16x4(x: u16x4) -> u16; - #[link_name = "llvm.experimental.vector.reduce.mul.i32.v4i32"] - fn reduce_mul_i32x4(x: i32x4) -> i32; - #[link_name = "llvm.experimental.vector.reduce.mul.u32.v4u32"] - fn reduce_mul_u32x4(x: u32x4) -> u32; - #[link_name = "llvm.experimental.vector.reduce.mul.i64.v4i64"] - fn reduce_mul_i64x4(x: i64x4) -> i64; - #[link_name = "llvm.experimental.vector.reduce.mul.u64.v4u64"] - fn reduce_mul_u64x4(x: u64x4) -> u64; - #[link_name = "llvm.experimental.vector.reduce.mul.i8.v8i8"] - fn reduce_mul_i8x8(x: i8x8) -> i8; - #[link_name = "llvm.experimental.vector.reduce.mul.u8.v8u8"] - fn reduce_mul_u8x8(x: u8x8) -> u8; - #[link_name = "llvm.experimental.vector.reduce.mul.i16.v8i16"] - fn reduce_mul_i16x8(x: i16x8) -> i16; - #[link_name = "llvm.experimental.vector.reduce.mul.u16.v8u16"] - fn reduce_mul_u16x8(x: u16x8) -> u16; - #[link_name = "llvm.experimental.vector.reduce.mul.i32.v8i32"] - fn reduce_mul_i32x8(x: i32x8) -> i32; - #[link_name = "llvm.experimental.vector.reduce.mul.u32.v8u32"] - fn reduce_mul_u32x8(x: u32x8) -> u32; - #[link_name = "llvm.experimental.vector.reduce.mul.i64.v8i64"] - fn reduce_mul_i64x8(x: i64x8) -> i64; - #[link_name = "llvm.experimental.vector.reduce.mul.u64.v8u64"] - fn reduce_mul_u64x8(x: u64x8) -> u64; - #[link_name = "llvm.experimental.vector.reduce.mul.i8.v16i8"] - fn reduce_mul_i8x16(x: i8x16) -> i8; - #[link_name = "llvm.experimental.vector.reduce.mul.u8.v16u8"] - fn reduce_mul_u8x16(x: u8x16) -> u8; - #[link_name = "llvm.experimental.vector.reduce.mul.i16.v16i16"] - fn reduce_mul_i16x16(x: i16x16) -> i16; - #[link_name = "llvm.experimental.vector.reduce.mul.u16.v16u16"] - fn reduce_mul_u16x16(x: u16x16) -> u16; - #[link_name = "llvm.experimental.vector.reduce.mul.i32.v16i32"] - fn reduce_mul_i32x16(x: i32x16) -> i32; - #[link_name = "llvm.experimental.vector.reduce.mul.u32.v16u32"] - fn reduce_mul_u32x16(x: u32x16) -> u32; - #[link_name = "llvm.experimental.vector.reduce.mul.i8.v32i8"] - fn reduce_mul_i8x32(x: i8x32) -> i8; - #[link_name = "llvm.experimental.vector.reduce.mul.u8.v32u8"] - 
fn reduce_mul_u8x32(x: u8x32) -> u8; - #[link_name = "llvm.experimental.vector.reduce.mul.i16.v32i16"] - fn reduce_mul_i16x32(x: i16x32) -> i16; - #[link_name = "llvm.experimental.vector.reduce.mul.u16.v32u16"] - fn reduce_mul_u16x32(x: u16x32) -> u16; - #[link_name = "llvm.experimental.vector.reduce.mul.i8.v64i8"] - fn reduce_mul_i8x64(x: i8x64) -> i8; - #[link_name = "llvm.experimental.vector.reduce.mul.u8.v64u8"] - fn reduce_mul_u8x64(x: u8x64) -> u8; - #[link_name = "llvm.experimental.vector.reduce.fmul.f32.v2f32"] - fn reduce_fmul_f32x2(acc: f32, x: f32x2) -> f32; - #[link_name = "llvm.experimental.vector.reduce.fmul.f64.v2f64"] - fn reduce_fmul_f64x2(acc: f64, x: f64x2) -> f64; - #[link_name = "llvm.experimental.vector.reduce.fmul.f32.v4f32"] - fn reduce_fmul_f32x4(acc: f32, x: f32x4) -> f32; - #[link_name = "llvm.experimental.vector.reduce.fmul.f64.v4f64"] - fn reduce_fmul_f64x4(acc: f64, x: f64x4) -> f64; - #[link_name = "llvm.experimental.vector.reduce.fmul.f32.v8f32"] - fn reduce_fmul_f32x8(acc: f32, x: f32x8) -> f32; - #[link_name = "llvm.experimental.vector.reduce.fmul.f64.v8f64"] - fn reduce_fmul_f64x8(acc: f64, x: f64x8) -> f64; - #[link_name = "llvm.experimental.vector.reduce.fmul.f32.v16f32"] - fn reduce_fmul_f32x16(acc: f32, x: f32x16) -> f32; -} - /// Reduction: horizontal product of the vector elements. pub trait ReduceMul { /// Result type of the reduction. @@ -101,13 +10,16 @@ pub trait ReduceMul { } macro_rules! red_mul { - ($id:ident, $elem_ty:ident, $llvm_intr:ident) => { + ($id:ident, $elem_ty:ident) => { impl ReduceMul for $id { type Acc = $elem_ty; #[cfg(not(target_arch = "aarch64"))] #[inline] fn reduce_mul(self) -> Self::Acc { - unsafe { $llvm_intr(self) } + unsafe { + use coresimd::simd_llvm::simd_reduce_mul; + simd_reduce_mul(self) + } } // FIXME: broken in AArch64 #[cfg(target_arch = "aarch64")] @@ -122,68 +34,49 @@ macro_rules! red_mul { } }; } -red_mul!(i8x2, i8, reduce_mul_i8x2); -red_mul!(u8x2, u8, reduce_mul_u8x2); -red_mul!(i16x2, i16, reduce_mul_i16x2); -red_mul!(u16x2, u16, reduce_mul_u16x2); -red_mul!(i32x2, i32, reduce_mul_i32x2); -red_mul!(u32x2, u32, reduce_mul_u32x2); -red_mul!(i64x2, i64, reduce_mul_i64x2); -red_mul!(u64x2, u64, reduce_mul_u64x2); -red_mul!(i8x4, i8, reduce_mul_i8x4); -red_mul!(u8x4, u8, reduce_mul_u8x4); -red_mul!(i16x4, i16, reduce_mul_i16x4); -red_mul!(u16x4, u16, reduce_mul_u16x4); -red_mul!(i32x4, i32, reduce_mul_i32x4); -red_mul!(u32x4, u32, reduce_mul_u32x4); -red_mul!(i64x4, i64, reduce_mul_i64x4); -red_mul!(u64x4, u64, reduce_mul_u64x4); -red_mul!(i8x8, i8, reduce_mul_i8x8); -red_mul!(u8x8, u8, reduce_mul_u8x8); -red_mul!(i16x8, i16, reduce_mul_i16x8); -red_mul!(u16x8, u16, reduce_mul_u16x8); -red_mul!(i32x8, i32, reduce_mul_i32x8); -red_mul!(u32x8, u32, reduce_mul_u32x8); -red_mul!(i64x8, i64, reduce_mul_i64x8); -red_mul!(u64x8, u64, reduce_mul_u64x8); -red_mul!(i8x16, i8, reduce_mul_i8x16); -red_mul!(u8x16, u8, reduce_mul_u8x16); -red_mul!(i16x16, i16, reduce_mul_i16x16); -red_mul!(u16x16, u16, reduce_mul_u16x16); -red_mul!(i32x16, i32, reduce_mul_i32x16); -red_mul!(u32x16, u32, reduce_mul_u32x16); -red_mul!(i8x32, i8, reduce_mul_i8x32); -red_mul!(u8x32, u8, reduce_mul_u8x32); -red_mul!(i16x32, i16, reduce_mul_i16x32); -red_mul!(u16x32, u16, reduce_mul_u16x32); -red_mul!(i8x64, i8, reduce_mul_i8x64); -red_mul!(u8x64, u8, reduce_mul_u8x64); - -macro_rules! 
red_fmul { - ($id:ident, $elem_ty:ident, $llvm_intr:ident) => { - impl ReduceMul for $id { - type Acc = $elem_ty; - #[inline] - fn reduce_mul(self) -> Self::Acc { - // FIXME: - // unsafe { $llvm_intr(1. as $elem_ty, self) } - let mut x = self.extract(0); - for i in 1..$id::lanes() { - x *= self.extract(i); - } - x - } - } - }; -} - -red_fmul!(f32x2, f32, reduce_fmul_f32x2); -red_fmul!(f64x2, f64, reduce_fmul_f64x2); -red_fmul!(f32x4, f32, reduce_fmul_f32x4); -red_fmul!(f64x4, f64, reduce_fmul_f64x4); -red_fmul!(f32x8, f32, reduce_fmul_f32x8); -red_fmul!(f64x8, f64, reduce_fmul_f64x8); -red_fmul!(f32x16, f32, reduce_fmul_f32x16); +red_mul!(i8x2, i8); +red_mul!(u8x2, u8); +red_mul!(i16x2, i16); +red_mul!(u16x2, u16); +red_mul!(i32x2, i32); +red_mul!(u32x2, u32); +red_mul!(i64x2, i64); +red_mul!(u64x2, u64); +red_mul!(i8x4, i8); +red_mul!(u8x4, u8); +red_mul!(i16x4, i16); +red_mul!(u16x4, u16); +red_mul!(i32x4, i32); +red_mul!(u32x4, u32); +red_mul!(i64x4, i64); +red_mul!(u64x4, u64); +red_mul!(i8x8, i8); +red_mul!(u8x8, u8); +red_mul!(i16x8, i16); +red_mul!(u16x8, u16); +red_mul!(i32x8, i32); +red_mul!(u32x8, u32); +red_mul!(i64x8, i64); +red_mul!(u64x8, u64); +red_mul!(i8x16, i8); +red_mul!(u8x16, u8); +red_mul!(i16x16, i16); +red_mul!(u16x16, u16); +red_mul!(i32x16, i32); +red_mul!(u32x16, u32); +red_mul!(i8x32, i8); +red_mul!(u8x32, u8); +red_mul!(i16x32, i16); +red_mul!(u16x32, u16); +red_mul!(i8x64, i8); +red_mul!(u8x64, u8); +red_mul!(f32x2, f32); +red_mul!(f64x2, f64); +red_mul!(f32x4, f32); +red_mul!(f64x4, f64); +red_mul!(f32x8, f32); +red_mul!(f64x8, f64); +red_mul!(f32x16, f32); #[cfg(test)] mod tests { diff --git a/coresimd/ppsv/codegen/sum.rs b/coresimd/ppsv/codegen/sum.rs index b67a598f9f..f6152bb8c5 100644 --- a/coresimd/ppsv/codegen/sum.rs +++ b/coresimd/ppsv/codegen/sum.rs @@ -1,97 +1,6 @@ //! Code generation for the sum reduction. 
use coresimd::simd::*; -/// LLVM intrinsics used in the sum reduction -#[allow(improper_ctypes)] -extern "C" { - #[link_name = "llvm.experimental.vector.reduce.add.i8.v2i8"] - fn reduce_add_i8x2(x: i8x2) -> i8; - #[link_name = "llvm.experimental.vector.reduce.add.u8.v2u8"] - fn reduce_add_u8x2(x: u8x2) -> u8; - #[link_name = "llvm.experimental.vector.reduce.add.i16.v2i16"] - fn reduce_add_i16x2(x: i16x2) -> i16; - #[link_name = "llvm.experimental.vector.reduce.add.u16.v2u16"] - fn reduce_add_u16x2(x: u16x2) -> u16; - #[link_name = "llvm.experimental.vector.reduce.add.i32.v2i32"] - fn reduce_add_i32x2(x: i32x2) -> i32; - #[link_name = "llvm.experimental.vector.reduce.add.u32.v2u32"] - fn reduce_add_u32x2(x: u32x2) -> u32; - #[link_name = "llvm.experimental.vector.reduce.add.i64.v2i64"] - fn reduce_add_i64x2(x: i64x2) -> i64; - #[link_name = "llvm.experimental.vector.reduce.add.u64.v2u64"] - fn reduce_add_u64x2(x: u64x2) -> u64; - #[link_name = "llvm.experimental.vector.reduce.add.i8.v4i8"] - fn reduce_add_i8x4(x: i8x4) -> i8; - #[link_name = "llvm.experimental.vector.reduce.add.u8.v4u8"] - fn reduce_add_u8x4(x: u8x4) -> u8; - #[link_name = "llvm.experimental.vector.reduce.add.i16.v4i16"] - fn reduce_add_i16x4(x: i16x4) -> i16; - #[link_name = "llvm.experimental.vector.reduce.add.u16.v4u16"] - fn reduce_add_u16x4(x: u16x4) -> u16; - #[link_name = "llvm.experimental.vector.reduce.add.i32.v4i32"] - fn reduce_add_i32x4(x: i32x4) -> i32; - #[link_name = "llvm.experimental.vector.reduce.add.u32.v4u32"] - fn reduce_add_u32x4(x: u32x4) -> u32; - #[link_name = "llvm.experimental.vector.reduce.add.i64.v4i64"] - fn reduce_add_i64x4(x: i64x4) -> i64; - #[link_name = "llvm.experimental.vector.reduce.add.u64.v4u64"] - fn reduce_add_u64x4(x: u64x4) -> u64; - #[link_name = "llvm.experimental.vector.reduce.add.i8.v8i8"] - fn reduce_add_i8x8(x: i8x8) -> i8; - #[link_name = "llvm.experimental.vector.reduce.add.u8.v8u8"] - fn reduce_add_u8x8(x: u8x8) -> u8; - #[link_name = "llvm.experimental.vector.reduce.add.i16.v8i16"] - fn reduce_add_i16x8(x: i16x8) -> i16; - #[link_name = "llvm.experimental.vector.reduce.add.u16.v8u16"] - fn reduce_add_u16x8(x: u16x8) -> u16; - #[link_name = "llvm.experimental.vector.reduce.add.i32.v8i32"] - fn reduce_add_i32x8(x: i32x8) -> i32; - #[link_name = "llvm.experimental.vector.reduce.add.u32.v8u32"] - fn reduce_add_u32x8(x: u32x8) -> u32; - #[link_name = "llvm.experimental.vector.reduce.add.i64.v8i64"] - fn reduce_add_i64x8(x: i64x8) -> i64; - #[link_name = "llvm.experimental.vector.reduce.add.u64.v8u64"] - fn reduce_add_u64x8(x: u64x8) -> u64; - #[link_name = "llvm.experimental.vector.reduce.add.i8.v16i8"] - fn reduce_add_i8x16(x: i8x16) -> i8; - #[link_name = "llvm.experimental.vector.reduce.add.u8.v16u8"] - fn reduce_add_u8x16(x: u8x16) -> u8; - #[link_name = "llvm.experimental.vector.reduce.add.i16.v16i16"] - fn reduce_add_i16x16(x: i16x16) -> i16; - #[link_name = "llvm.experimental.vector.reduce.add.u16.v16u16"] - fn reduce_add_u16x16(x: u16x16) -> u16; - #[link_name = "llvm.experimental.vector.reduce.add.i32.v16i32"] - fn reduce_add_i32x16(x: i32x16) -> i32; - #[link_name = "llvm.experimental.vector.reduce.add.u32.v16u32"] - fn reduce_add_u32x16(x: u32x16) -> u32; - #[link_name = "llvm.experimental.vector.reduce.add.i8.v32i8"] - fn reduce_add_i8x32(x: i8x32) -> i8; - #[link_name = "llvm.experimental.vector.reduce.add.u8.v32u8"] - fn reduce_add_u8x32(x: u8x32) -> u8; - #[link_name = "llvm.experimental.vector.reduce.add.i16.v32i16"] - fn reduce_add_i16x32(x: i16x32) -> i16; - 
#[link_name = "llvm.experimental.vector.reduce.add.u16.v32u16"] - fn reduce_add_u16x32(x: u16x32) -> u16; - #[link_name = "llvm.experimental.vector.reduce.add.i8.v64i8"] - fn reduce_add_i8x64(x: i8x64) -> i8; - #[link_name = "llvm.experimental.vector.reduce.add.u8.v64u8"] - fn reduce_add_u8x64(x: u8x64) -> u8; - #[link_name = "llvm.experimental.vector.reduce.fadd.f32.v2f32"] - fn reduce_fadd_f32x2(acc: f32, x: f32x2) -> f32; - #[link_name = "llvm.experimental.vector.reduce.fadd.f64.v2f64"] - fn reduce_fadd_f64x2(acc: f64, x: f64x2) -> f64; - #[link_name = "llvm.experimental.vector.reduce.fadd.f32.v4f32"] - fn reduce_fadd_f32x4(acc: f32, x: f32x4) -> f32; - #[link_name = "llvm.experimental.vector.reduce.fadd.f64.v4f64"] - fn reduce_fadd_f64x4(acc: f64, x: f64x4) -> f64; - #[link_name = "llvm.experimental.vector.reduce.fadd.f32.v8f32"] - fn reduce_fadd_f32x8(acc: f32, x: f32x8) -> f32; - #[link_name = "llvm.experimental.vector.reduce.fadd.f64.v8f64"] - fn reduce_fadd_f64x8(acc: f64, x: f64x8) -> f64; - #[link_name = "llvm.experimental.vector.reduce.fadd.f32.v16f32"] - fn reduce_fadd_f32x16(acc: f32, x: f32x16) -> f32; -} - /// Reduction: horizontal sum of the vector elements. pub trait ReduceAdd { /// Result type of the reduction. @@ -101,13 +10,16 @@ pub trait ReduceAdd { } macro_rules! red_add { - ($id:ident, $elem_ty:ident, $llvm_intr:ident) => { + ($id:ident, $elem_ty:ident) => { impl ReduceAdd for $id { type Acc = $elem_ty; #[cfg(not(target_arch = "aarch64"))] #[inline] fn reduce_add(self) -> Self::Acc { - unsafe { $llvm_intr(self) } + unsafe { + use coresimd::simd_llvm::simd_reduce_add; + simd_reduce_add(self) + } } // FIXME: broken in AArch64 #[cfg(target_arch = "aarch64")] @@ -122,68 +34,49 @@ macro_rules! red_add { } }; } -red_add!(i8x2, i8, reduce_add_i8x2); -red_add!(u8x2, u8, reduce_add_u8x2); -red_add!(i16x2, i16, reduce_add_i16x2); -red_add!(u16x2, u16, reduce_add_u16x2); -red_add!(i32x2, i32, reduce_add_i32x2); -red_add!(u32x2, u32, reduce_add_u32x2); -red_add!(i64x2, i64, reduce_add_i64x2); -red_add!(u64x2, u64, reduce_add_u64x2); -red_add!(i8x4, i8, reduce_add_i8x4); -red_add!(u8x4, u8, reduce_add_u8x4); -red_add!(i16x4, i16, reduce_add_i16x4); -red_add!(u16x4, u16, reduce_add_u16x4); -red_add!(i32x4, i32, reduce_add_i32x4); -red_add!(u32x4, u32, reduce_add_u32x4); -red_add!(i64x4, i64, reduce_add_i64x4); -red_add!(u64x4, u64, reduce_add_u64x4); -red_add!(i8x8, i8, reduce_add_i8x8); -red_add!(u8x8, u8, reduce_add_u8x8); -red_add!(i16x8, i16, reduce_add_i16x8); -red_add!(u16x8, u16, reduce_add_u16x8); -red_add!(i32x8, i32, reduce_add_i32x8); -red_add!(u32x8, u32, reduce_add_u32x8); -red_add!(i64x8, i64, reduce_add_i64x8); -red_add!(u64x8, u64, reduce_add_u64x8); -red_add!(i8x16, i8, reduce_add_i8x16); -red_add!(u8x16, u8, reduce_add_u8x16); -red_add!(i16x16, i16, reduce_add_i16x16); -red_add!(u16x16, u16, reduce_add_u16x16); -red_add!(i32x16, i32, reduce_add_i32x16); -red_add!(u32x16, u32, reduce_add_u32x16); -red_add!(i8x32, i8, reduce_add_i8x32); -red_add!(u8x32, u8, reduce_add_u8x32); -red_add!(i16x32, i16, reduce_add_i16x32); -red_add!(u16x32, u16, reduce_add_u16x32); -red_add!(i8x64, i8, reduce_add_i8x64); -red_add!(u8x64, u8, reduce_add_u8x64); - -macro_rules! red_fadd { - ($id:ident, $elem_ty:ident, $llvm_intr:ident) => { - impl ReduceAdd for $id { - type Acc = $elem_ty; - #[inline] - fn reduce_add(self) -> Self::Acc { - // FIXME: - //unsafe { $llvm_intr(0. 
as $elem_ty, self) } - let mut x = self.extract(0); - for i in 1..$id::lanes() { - x += self.extract(i); - } - x - } - } - }; -} - -red_fadd!(f32x2, f32, reduce_fadd_f32x2); -red_fadd!(f64x2, f64, reduce_fadd_f64x2); -red_fadd!(f32x4, f32, reduce_fadd_f32x4); -red_fadd!(f64x4, f64, reduce_fadd_f64x4); -red_fadd!(f32x8, f32, reduce_fadd_f32x8); -red_fadd!(f64x8, f64, reduce_fadd_f64x8); -red_fadd!(f32x16, f32, reduce_fadd_f32x16); +red_add!(i8x2, i8); +red_add!(u8x2, u8); +red_add!(i16x2, i16); +red_add!(u16x2, u16); +red_add!(i32x2, i32); +red_add!(u32x2, u32); +red_add!(i64x2, i64); +red_add!(u64x2, u64); +red_add!(i8x4, i8); +red_add!(u8x4, u8); +red_add!(i16x4, i16); +red_add!(u16x4, u16); +red_add!(i32x4, i32); +red_add!(u32x4, u32); +red_add!(i64x4, i64); +red_add!(u64x4, u64); +red_add!(i8x8, i8); +red_add!(u8x8, u8); +red_add!(i16x8, i16); +red_add!(u16x8, u16); +red_add!(i32x8, i32); +red_add!(u32x8, u32); +red_add!(i64x8, i64); +red_add!(u64x8, u64); +red_add!(i8x16, i8); +red_add!(u8x16, u8); +red_add!(i16x16, i16); +red_add!(u16x16, u16); +red_add!(i32x16, i32); +red_add!(u32x16, u32); +red_add!(i8x32, i8); +red_add!(u8x32, u8); +red_add!(i16x32, i16); +red_add!(u16x32, u16); +red_add!(i8x64, i8); +red_add!(u8x64, u8); +red_add!(f32x2, f32); +red_add!(f64x2, f64); +red_add!(f32x4, f32); +red_add!(f64x4, f64); +red_add!(f32x8, f32); +red_add!(f64x8, f64); +red_add!(f32x16, f32); #[cfg(test)] mod tests { diff --git a/coresimd/ppsv/codegen/xor.rs b/coresimd/ppsv/codegen/xor.rs index 5d4695fa6f..025d995f74 100644 --- a/coresimd/ppsv/codegen/xor.rs +++ b/coresimd/ppsv/codegen/xor.rs @@ -1,83 +1,6 @@ //! Code generation for the xor reduction. use coresimd::simd::*; -/// LLVM intrinsics used in the xor reduction -#[allow(improper_ctypes)] -extern "C" { - #[link_name = "llvm.experimental.vector.reduce.xor.i8.v2i8"] - fn reduce_xor_i8x2(x: i8x2) -> i8; - #[link_name = "llvm.experimental.vector.reduce.xor.u8.v2u8"] - fn reduce_xor_u8x2(x: u8x2) -> u8; - #[link_name = "llvm.experimental.vector.reduce.xor.i16.v2i16"] - fn reduce_xor_i16x2(x: i16x2) -> i16; - #[link_name = "llvm.experimental.vector.reduce.xor.u16.v2u16"] - fn reduce_xor_u16x2(x: u16x2) -> u16; - #[link_name = "llvm.experimental.vector.reduce.xor.i32.v2i32"] - fn reduce_xor_i32x2(x: i32x2) -> i32; - #[link_name = "llvm.experimental.vector.reduce.xor.u32.v2u32"] - fn reduce_xor_u32x2(x: u32x2) -> u32; - #[link_name = "llvm.experimental.vector.reduce.xor.i64.v2i64"] - fn reduce_xor_i64x2(x: i64x2) -> i64; - #[link_name = "llvm.experimental.vector.reduce.xor.u64.v2u64"] - fn reduce_xor_u64x2(x: u64x2) -> u64; - #[link_name = "llvm.experimental.vector.reduce.xor.i8.v4i8"] - fn reduce_xor_i8x4(x: i8x4) -> i8; - #[link_name = "llvm.experimental.vector.reduce.xor.u8.v4u8"] - fn reduce_xor_u8x4(x: u8x4) -> u8; - #[link_name = "llvm.experimental.vector.reduce.xor.i16.v4i16"] - fn reduce_xor_i16x4(x: i16x4) -> i16; - #[link_name = "llvm.experimental.vector.reduce.xor.u16.v4u16"] - fn reduce_xor_u16x4(x: u16x4) -> u16; - #[link_name = "llvm.experimental.vector.reduce.xor.i32.v4i32"] - fn reduce_xor_i32x4(x: i32x4) -> i32; - #[link_name = "llvm.experimental.vector.reduce.xor.u32.v4u32"] - fn reduce_xor_u32x4(x: u32x4) -> u32; - #[link_name = "llvm.experimental.vector.reduce.xor.i64.v4i64"] - fn reduce_xor_i64x4(x: i64x4) -> i64; - #[link_name = "llvm.experimental.vector.reduce.xor.u64.v4u64"] - fn reduce_xor_u64x4(x: u64x4) -> u64; - #[link_name = "llvm.experimental.vector.reduce.xor.i8.v8i8"] - fn reduce_xor_i8x8(x: i8x8) -> i8; - 
#[link_name = "llvm.experimental.vector.reduce.xor.u8.v8u8"] - fn reduce_xor_u8x8(x: u8x8) -> u8; - #[link_name = "llvm.experimental.vector.reduce.xor.i16.v8i16"] - fn reduce_xor_i16x8(x: i16x8) -> i16; - #[link_name = "llvm.experimental.vector.reduce.xor.u16.v8u16"] - fn reduce_xor_u16x8(x: u16x8) -> u16; - #[link_name = "llvm.experimental.vector.reduce.xor.i32.v8i32"] - fn reduce_xor_i32x8(x: i32x8) -> i32; - #[link_name = "llvm.experimental.vector.reduce.xor.u32.v8u32"] - fn reduce_xor_u32x8(x: u32x8) -> u32; - #[link_name = "llvm.experimental.vector.reduce.xor.i64.v8i64"] - fn reduce_xor_i64x8(x: i64x8) -> i64; - #[link_name = "llvm.experimental.vector.reduce.xor.u64.v8u64"] - fn reduce_xor_u64x8(x: u64x8) -> u64; - #[link_name = "llvm.experimental.vector.reduce.xor.i8.v16i8"] - fn reduce_xor_i8x16(x: i8x16) -> i8; - #[link_name = "llvm.experimental.vector.reduce.xor.u8.v16u8"] - fn reduce_xor_u8x16(x: u8x16) -> u8; - #[link_name = "llvm.experimental.vector.reduce.xor.i16.v16i16"] - fn reduce_xor_i16x16(x: i16x16) -> i16; - #[link_name = "llvm.experimental.vector.reduce.xor.u16.v16u16"] - fn reduce_xor_u16x16(x: u16x16) -> u16; - #[link_name = "llvm.experimental.vector.reduce.xor.i32.v16i32"] - fn reduce_xor_i32x16(x: i32x16) -> i32; - #[link_name = "llvm.experimental.vector.reduce.xor.u32.v16u32"] - fn reduce_xor_u32x16(x: u32x16) -> u32; - #[link_name = "llvm.experimental.vector.reduce.xor.i8.v32i8"] - fn reduce_xor_i8x32(x: i8x32) -> i8; - #[link_name = "llvm.experimental.vector.reduce.xor.u8.v32u8"] - fn reduce_xor_u8x32(x: u8x32) -> u8; - #[link_name = "llvm.experimental.vector.reduce.xor.i16.v32i16"] - fn reduce_xor_i16x32(x: i16x32) -> i16; - #[link_name = "llvm.experimental.vector.reduce.xor.u16.v32u16"] - fn reduce_xor_u16x32(x: u16x32) -> u16; - #[link_name = "llvm.experimental.vector.reduce.xor.i8.v64i8"] - fn reduce_xor_i8x64(x: i8x64) -> i8; - #[link_name = "llvm.experimental.vector.reduce.xor.u8.v64u8"] - fn reduce_xor_u8x64(x: u8x64) -> u8; -} - /// Reduction: horizontal bitwise xor of the vector elements. #[cfg_attr(feature = "cargo-clippy", allow(stutter))] pub trait ReduceXor { @@ -88,13 +11,16 @@ pub trait ReduceXor { } macro_rules! red_xor { - ($id:ident, $elem_ty:ident, $llvm_intr:ident) => { + ($id:ident, $elem_ty:ident) => { impl ReduceXor for $id { type Acc = $elem_ty; #[cfg(not(target_arch = "aarch64"))] #[inline] fn reduce_xor(self) -> Self::Acc { - unsafe { $llvm_intr(self.into_bits()) } + unsafe { + use coresimd::simd_llvm::simd_reduce_xor; + simd_reduce_xor(self) + } } // FIXME: broken in AArch64 #[cfg(target_arch = "aarch64")] @@ -109,49 +35,49 @@ macro_rules! 
red_xor {
         }
     };
 }
-red_xor!(i8x2, i8, reduce_xor_i8x2);
-red_xor!(u8x2, u8, reduce_xor_u8x2);
-red_xor!(i16x2, i16, reduce_xor_i16x2);
-red_xor!(u16x2, u16, reduce_xor_u16x2);
-red_xor!(i32x2, i32, reduce_xor_i32x2);
-red_xor!(u32x2, u32, reduce_xor_u32x2);
-red_xor!(i64x2, i64, reduce_xor_i64x2);
-red_xor!(u64x2, u64, reduce_xor_u64x2);
-red_xor!(i8x4, i8, reduce_xor_i8x4);
-red_xor!(u8x4, u8, reduce_xor_u8x4);
-red_xor!(i16x4, i16, reduce_xor_i16x4);
-red_xor!(u16x4, u16, reduce_xor_u16x4);
-red_xor!(i32x4, i32, reduce_xor_i32x4);
-red_xor!(u32x4, u32, reduce_xor_u32x4);
-red_xor!(i64x4, i64, reduce_xor_i64x4);
-red_xor!(u64x4, u64, reduce_xor_u64x4);
-red_xor!(i8x8, i8, reduce_xor_i8x8);
-red_xor!(u8x8, u8, reduce_xor_u8x8);
-red_xor!(i16x8, i16, reduce_xor_i16x8);
-red_xor!(u16x8, u16, reduce_xor_u16x8);
-red_xor!(i32x8, i32, reduce_xor_i32x8);
-red_xor!(u32x8, u32, reduce_xor_u32x8);
-red_xor!(i64x8, i64, reduce_xor_i64x8);
-red_xor!(u64x8, u64, reduce_xor_u64x8);
-red_xor!(i8x16, i8, reduce_xor_i8x16);
-red_xor!(u8x16, u8, reduce_xor_u8x16);
-red_xor!(i16x16, i16, reduce_xor_i16x16);
-red_xor!(u16x16, u16, reduce_xor_u16x16);
-red_xor!(i32x16, i32, reduce_xor_i32x16);
-red_xor!(u32x16, u32, reduce_xor_u32x16);
-red_xor!(i8x32, i8, reduce_xor_i8x32);
-red_xor!(u8x32, u8, reduce_xor_u8x32);
-red_xor!(i16x32, i16, reduce_xor_i16x32);
-red_xor!(u16x32, u16, reduce_xor_u16x32);
-red_xor!(i8x64, i8, reduce_xor_i8x64);
-red_xor!(u8x64, u8, reduce_xor_u8x64);
+red_xor!(i8x2, i8);
+red_xor!(u8x2, u8);
+red_xor!(i16x2, i16);
+red_xor!(u16x2, u16);
+red_xor!(i32x2, i32);
+red_xor!(u32x2, u32);
+red_xor!(i64x2, i64);
+red_xor!(u64x2, u64);
+red_xor!(i8x4, i8);
+red_xor!(u8x4, u8);
+red_xor!(i16x4, i16);
+red_xor!(u16x4, u16);
+red_xor!(i32x4, i32);
+red_xor!(u32x4, u32);
+red_xor!(i64x4, i64);
+red_xor!(u64x4, u64);
+red_xor!(i8x8, i8);
+red_xor!(u8x8, u8);
+red_xor!(i16x8, i16);
+red_xor!(u16x8, u16);
+red_xor!(i32x8, i32);
+red_xor!(u32x8, u32);
+red_xor!(i64x8, i64);
+red_xor!(u64x8, u64);
+red_xor!(i8x16, i8);
+red_xor!(u8x16, u8);
+red_xor!(i16x16, i16);
+red_xor!(u16x16, u16);
+red_xor!(i32x16, i32);
+red_xor!(u32x16, u32);
+red_xor!(i8x32, i8);
+red_xor!(u8x32, u8);
+red_xor!(i16x32, i16);
+red_xor!(u16x32, u16);
+red_xor!(i8x64, i8);
+red_xor!(u8x64, u8);
 
-red_xor!(b8x2, i8, reduce_xor_i8x2);
-red_xor!(b8x4, i8, reduce_xor_i8x4);
-red_xor!(b8x8, i8, reduce_xor_i8x8);
-red_xor!(b8x16, i8, reduce_xor_i8x16);
-red_xor!(b8x32, i8, reduce_xor_i8x32);
-red_xor!(b8x64, i8, reduce_xor_i8x64);
+red_xor!(b8x2, i8);
+red_xor!(b8x4, i8);
+red_xor!(b8x8, i8);
+red_xor!(b8x16, i8);
+red_xor!(b8x32, i8);
+red_xor!(b8x64, i8);
 
 #[cfg(test)]
 mod tests {
diff --git a/coresimd/simd_llvm.rs b/coresimd/simd_llvm.rs
index c4ae8a2a90..35b303358c 100644
--- a/coresimd/simd_llvm.rs
+++ b/coresimd/simd_llvm.rs
@@ -31,4 +31,15 @@ extern "platform-intrinsic" {
     pub fn simd_and<T>(x: T, y: T) -> T;
     pub fn simd_or<T>(x: T, y: T) -> T;
     pub fn simd_xor<T>(x: T, y: T) -> T;
+
+    pub fn simd_reduce_add<T, U>(x: T) -> U;
+    pub fn simd_reduce_mul<T, U>(x: T) -> U;
+    pub fn simd_reduce_min<T, U>(x: T) -> U;
+    pub fn simd_reduce_max<T, U>(x: T) -> U;
+    pub fn simd_reduce_and<T, U>(x: T) -> U;
+    pub fn simd_reduce_or<T, U>(x: T) -> U;
+    pub fn simd_reduce_xor<T, U>(x: T) -> U;
+
+    pub fn simd_reduce_all<T>(x: T) -> bool;
+    pub fn simd_reduce_any<T>(x: T) -> bool;
 }
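
For reference, the shape of the change: instead of binding one `llvm.experimental.vector.reduce.*` symbol per element type and lane count, each reduction is declared once as a generic `extern "platform-intrinsic"` function, and rustc picks the matching LLVM intrinsic when the call is monomorphized. Below is a minimal standalone sketch of that mechanism, not code from this patch; it assumes a nightly toolchain of this era with the `repr_simd` and `platform_intrinsics` feature gates, and the `i32x4` defined here is a local stand-in for the crate's portable vector types.

```rust
#![feature(repr_simd, platform_intrinsics)]
#![allow(non_camel_case_types)]

// Local stand-in for the crate's portable vector types (illustration only).
#[repr(simd)]
#[derive(Copy, Clone)]
struct i32x4(i32, i32, i32, i32);

extern "platform-intrinsic" {
    // One generic declaration covers every lane count and element type;
    // rustc lowers each monomorphization to the appropriate
    // `llvm.experimental.vector.reduce.add.*` call.
    fn simd_reduce_add<T, U>(x: T) -> U;
}

fn sum(v: i32x4) -> i32 {
    // The element type is inferred from the return type.
    unsafe { simd_reduce_add(v) }
}

fn main() {
    assert_eq!(sum(i32x4(1, 2, 3, 4)), 10);
}
```

With `simd_reduce_all`/`simd_reduce_any` available the same way, the boolean reductions in boolean_reductions.rs no longer need to go through the bitwise `and()`/`or()` helpers, and `none()` reduces to the negation of `any()`, as the first hunk above shows.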