diff --git a/src/lib.rs b/src/lib.rs
index 6e41a7026f..685dbef092 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -169,14 +169,20 @@ mod v32 {
     define_ty! { i16x2, i16, i16 }
     define_impl! { i16x2, i16, 2, i16x2, x0, x1 }
+    define_ty! { u16x2, u16, u16 }
+    define_impl! { u16x2, u16, 2, i16x2, x0, x1 }
     define_ty! { i8x4, i8, i8, i8, i8 }
     define_impl! { i8x4, i8, 4, i8x4, x0, x1, x2, x3 }
-
     define_ty! { u8x4, u8, u8, u8, u8 }
     define_impl! { u8x4, u8, 4, i8x4, x0, x1, x2, x3 }

-    define_casts!((i8x4, i32x4, as_i32x4), (i16x2, i64x2, as_i64x2));
+    define_casts!(
+        (i16x2, i64x2, as_i64x2),
+        (u16x2, i64x2, as_i64x2),
+        (i8x4, i32x4, as_i32x4),
+        (u8x4, i32x4, as_i32x4)
+    );
 }

 /// 16-bit wide vector types
@@ -185,8 +191,10 @@ mod v16 {
     define_ty! { i8x2, i8, i8 }
     define_impl! { i8x2, i8, 2, i8x2, x0, x1 }
+    define_ty! { u8x2, u8, u8 }
+    define_impl! { u8x2, u8, 2, i8x2, x0, x1 }

-    define_casts!((i8x2, i64x2, as_i64x2));
+    define_casts!((i8x2, i64x2, as_i64x2), (u8x2, i64x2, as_i64x2));
 }

 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
diff --git a/src/macros.rs b/src/macros.rs
index c2018acc40..c2009fa939 100644
--- a/src/macros.rs
+++ b/src/macros.rs
@@ -485,7 +485,7 @@ macro_rules! test_arithmetic_ {

 #[cfg(test)]
 #[macro_export]
- macro_rules! test_neg_ {
+macro_rules! test_neg_ {
     ($tn:ident, $zero:expr, $one:expr, $two:expr, $four:expr) => {
         {
             let z = $tn::splat($zero);
@@ -573,7 +573,7 @@ macro_rules! test_bit_arithmetic_ {

 #[cfg(test)]
 #[macro_export]
- macro_rules! test_ops_si {
+macro_rules! test_ops_si {
     ($($tn:ident),+) => {
         $(
             test_arithmetic_!($tn, 0, 1, 2, 4);
@@ -585,7 +585,7 @@ macro_rules! test_bit_arithmetic_ {

 #[cfg(test)]
 #[macro_export]
- macro_rules! test_ops_ui {
+macro_rules! test_ops_ui {
     ($($tn:ident),+) => {
         $(
             test_arithmetic_!($tn, 0, 1, 2, 4);
@@ -596,7 +596,7 @@ macro_rules! test_bit_arithmetic_ {

 #[cfg(test)]
 #[macro_export]
- macro_rules! test_ops_f {
+macro_rules! test_ops_f {
     ($($tn:ident),+) => {
         $(
             test_arithmetic_!($tn, 0., 1., 2., 4.);
diff --git a/src/v64.rs b/src/v64.rs
index 0df2e878d6..c1e346d1b2 100644
--- a/src/v64.rs
+++ b/src/v64.rs
@@ -60,11 +60,14 @@ define_casts!(
     (u8x8, i8x8, as_i8x8),
     (i8x8, u8x8, as_u8x8),
     (i8x8, i16x8, as_i16x8),
+    (u8x8, i16x8, as_i16x8),
     (i16x4, i32x4, as_i32x4),
     (i32x2, i64x2, as_i64x2),
     (u8x8, u16x8, as_u16x8),
     (u16x4, u32x4, as_u32x4),
-    (u32x2, u64x2, as_u64x2)
+    (u16x4, i32x4, as_i32x4),
+    (u32x2, u64x2, as_u64x2),
+    (u32x2, i64x2, as_i64x2)
 );

 #[cfg(test)]
diff --git a/src/x86/macros.rs b/src/x86/macros.rs
index f268a3499b..79109fd67c 100644
--- a/src/x86/macros.rs
+++ b/src/x86/macros.rs
@@ -328,6 +328,22 @@ macro_rules! constify_imm4 {
     }
 }

+macro_rules! constify_imm3 {
+    ($imm8:expr, $expand:ident) => {
+        #[allow(overflowing_literals)]
+        match $imm8 & 0b111 {
+            0 => $expand!(0),
+            1 => $expand!(1),
+            2 => $expand!(2),
+            3 => $expand!(3),
+            4 => $expand!(4),
+            5 => $expand!(5),
+            6 => $expand!(6),
+            _ => $expand!(7),
+        }
+    }
+}
+
 macro_rules! constify_imm2 {
     ($imm8:expr, $expand:ident) => {
         #[allow(overflowing_literals)]
diff --git a/src/x86/sse41.rs b/src/x86/sse41.rs
index aabb8fdb79..86538ca562 100644
--- a/src/x86/sse41.rs
+++ b/src/x86/sse41.rs
@@ -6,6 +6,7 @@ use std::mem;
 use stdsimd_test::assert_instr;
 use simd_llvm::{simd_shuffle2, simd_shuffle4, simd_shuffle8};

+use x86::__m128i;
 use v128::*;

 // SSE4 rounding constants
@@ -138,7 +139,7 @@ pub unsafe fn _mm_extract_epi32(a: i32x4, imm8: u8) -> i32 {
 }

 /// Extract a 64-bit integer from `a` selected with `imm8`
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", not(target_feature = "sse2")))] // i586
 #[inline(always)]
 #[target_feature = "+sse4.1"]
 // TODO: Add test for Windows
@@ -151,8 +152,7 @@ pub unsafe fn _mm_extract_epi64(a: i64x2, imm8: u8) -> i64 {
 /// Then zero elements according to `imm8`.
 ///
 /// `imm8` specifies which bits from operand `a` will be copied, which bits in
-/// the
-/// result they will be copied to, and which bits in the result will be
+/// the result they will be copied to, and which bits in the result will be
 /// cleared. The following assignments are made:
 ///
 /// * Bits `[7:6]` specify the bits to copy from operand `a`:
@@ -200,7 +200,7 @@ pub unsafe fn _mm_insert_epi32(a: i32x4, i: i32, imm8: u8) -> i32x4 {

 /// Return a copy of `a` with the 64-bit integer from `i` inserted at a
 /// location specified by `imm8`.
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", not(target_feature = "sse2")))] // i586
 #[inline(always)]
 #[target_feature = "+sse4.1"]
 #[cfg_attr(test, assert_instr(pinsrq, imm8 = 0))]
@@ -330,7 +330,7 @@ pub unsafe fn _mm_cvtepi16_epi32(a: i16x8) -> i32x4 {
     simd_shuffle4::<_, ::v64::i16x4>(a, a, [0, 1, 2, 3]).as_i32x4()
 }

-/// Sign extend packed 16-bit integers in a to packed 64-bit integers
+/// Sign extend packed 16-bit integers in `a` to packed 64-bit integers
 #[inline(always)]
 #[target_feature = "+sse4.1"]
 #[cfg_attr(test, assert_instr(pmovsxwq))]
@@ -338,6 +338,185 @@ pub unsafe fn _mm_cvtepi16_epi64(a: i16x8) -> i64x2 {
     simd_shuffle2::<_, ::v32::i16x2>(a, a, [0, 1]).as_i64x2()
 }

+/// Sign extend packed 32-bit integers in `a` to packed 64-bit integers
+#[inline(always)]
+#[target_feature = "+sse4.1"]
+#[cfg_attr(test, assert_instr(pmovsxdq))]
+pub unsafe fn _mm_cvtepi32_epi64(a: i32x4) -> i64x2 {
+    simd_shuffle2::<_, ::v64::i32x2>(a, a, [0, 1]).as_i64x2()
+}
+
+/// Zero extend packed unsigned 8-bit integers in `a` to packed 16-bit integers
+#[inline(always)]
+#[target_feature = "+sse4.1"]
+#[cfg_attr(test, assert_instr(pmovzxbw))]
+pub unsafe fn _mm_cvtepu8_epi16(a: u8x16) -> i16x8 {
+    simd_shuffle8::<_, ::v64::u8x8>(a, a, [0, 1, 2, 3, 4, 5, 6, 7]).as_i16x8()
+}
+
+/// Zero extend packed unsigned 8-bit integers in `a` to packed 32-bit integers
+#[inline(always)]
+#[target_feature = "+sse4.1"]
+#[cfg_attr(test, assert_instr(pmovzxbd))]
+pub unsafe fn _mm_cvtepu8_epi32(a: u8x16) -> i32x4 {
+    simd_shuffle4::<_, ::v32::u8x4>(a, a, [0, 1, 2, 3]).as_i32x4()
+}
+
+/// Zero extend packed unsigned 8-bit integers in `a` to packed 64-bit integers
+#[inline(always)]
+#[target_feature = "+sse4.1"]
+#[cfg_attr(test, assert_instr(pmovzxbq))]
+pub unsafe fn _mm_cvtepu8_epi64(a: u8x16) -> i64x2 {
+    simd_shuffle2::<_, ::v16::u8x2>(a, a, [0, 1]).as_i64x2()
+}
+
+/// Zero extend packed unsigned 16-bit integers in `a`
+/// to packed 32-bit integers
+#[inline(always)]
+#[target_feature = "+sse4.1"]
+#[cfg_attr(test, assert_instr(pmovzxwd))]
+pub unsafe fn _mm_cvtepu16_epi32(a: u16x8) -> i32x4 {
+    simd_shuffle4::<_, ::v64::u16x4>(a, a, [0, 1, 2, 3]).as_i32x4()
+}
+
+/// Zero extend packed unsigned 16-bit integers in `a`
+/// to packed 64-bit integers
+#[inline(always)]
+#[target_feature = "+sse4.1"]
+#[cfg_attr(test, assert_instr(pmovzxwq))]
+pub unsafe fn _mm_cvtepu16_epi64(a: u16x8) -> i64x2 {
+    simd_shuffle2::<_, ::v32::u16x2>(a, a, [0, 1]).as_i64x2()
+}
+
+/// Zero extend packed unsigned 32-bit integers in `a`
+/// to packed 64-bit integers
+#[inline(always)]
+#[target_feature = "+sse4.1"]
+#[cfg_attr(test, assert_instr(pmovzxdq))]
+pub unsafe fn _mm_cvtepu32_epi64(a: u32x4) -> i64x2 {
+    simd_shuffle2::<_, ::v64::u32x2>(a, a, [0, 1]).as_i64x2()
+}
+
+/// Tests whether the specified bits in a 128-bit integer vector are all
+/// zeros.
+///
+/// Arguments:
+///
+/// * `a` - A 128-bit integer vector containing the bits to be tested.
+/// * `mask` - A 128-bit integer vector selecting which bits to test in
+///   operand `a`.
+///
+/// Returns:
+///
+/// * `1` - if the specified bits are all zeros,
+/// * `0` - otherwise.
+#[inline(always)]
+#[target_feature = "+sse4.1"]
+#[cfg_attr(test, assert_instr(ptest))]
+pub unsafe fn _mm_testz_si128(a: __m128i, mask: __m128i) -> i32 {
+    ptestz(a.into(), mask.into())
+}
+
+/// Tests whether the specified bits in a 128-bit integer vector are all
+/// ones.
+///
+/// Arguments:
+///
+/// * `a` - A 128-bit integer vector containing the bits to be tested.
+/// * `mask` - A 128-bit integer vector selecting which bits to test in
+///   operand `a`.
+///
+/// Returns:
+///
+/// * `1` - if the specified bits are all ones,
+/// * `0` - otherwise.
+#[inline(always)]
+#[target_feature = "+sse4.1"]
+#[cfg_attr(test, assert_instr(ptest))]
+pub unsafe fn _mm_testc_si128(a: __m128i, mask: __m128i) -> i32 {
+    ptestc(a.into(), mask.into())
+}
+
+/// Tests whether the specified bits in a 128-bit integer vector are
+/// neither all zeros nor all ones.
+///
+/// Arguments:
+///
+/// * `a` - A 128-bit integer vector containing the bits to be tested.
+/// * `mask` - A 128-bit integer vector selecting which bits to test in
+///   operand `a`.
+///
+/// Returns:
+///
+/// * `1` - if the specified bits are neither all zeros nor all ones,
+/// * `0` - otherwise.
+#[inline(always)]
+#[target_feature = "+sse4.1"]
+#[cfg_attr(test, assert_instr(ptest))]
+pub unsafe fn _mm_testnzc_si128(a: __m128i, mask: __m128i) -> i32 {
+    ptestnzc(a.into(), mask.into())
+}
+
+/// Tests whether the specified bits in a 128-bit integer vector are all
+/// zeros.
+///
+/// Arguments:
+///
+/// * `a` - A 128-bit integer vector containing the bits to be tested.
+/// * `mask` - A 128-bit integer vector selecting which bits to test in
+///   operand `a`.
+///
+/// Returns:
+///
+/// * `1` - if the specified bits are all zeros,
+/// * `0` - otherwise.
+#[inline(always)]
+#[target_feature = "+sse4.1"]
+#[cfg_attr(test, assert_instr(ptest))]
+pub unsafe fn _mm_test_all_zeros(a: __m128i, mask: __m128i) -> i32 {
+    _mm_testz_si128(a, mask)
+}
+
+/// Tests whether the specified bits in a 128-bit integer vector are all
+/// ones.
+///
+/// Argument:
+///
+/// * `a` - A 128-bit integer vector containing the bits to be tested.
+///
+/// Returns:
+///
+/// * `1` - if the bits specified in the operand are all set to 1,
+/// * `0` - otherwise.
+#[inline(always)]
+#[target_feature = "+sse4.1"]
+#[cfg_attr(test, assert_instr(pcmpeqd))]
+#[cfg_attr(test, assert_instr(ptest))]
+pub unsafe fn _mm_test_all_ones(a: __m128i) -> i32 {
+    _mm_testc_si128(a, ::x86::sse2::_mm_cmpeq_epi32(a.into(), a.into()).into())
+}
+
+/// Tests whether the specified bits in a 128-bit integer vector are
+/// neither all zeros nor all ones.
+///
+/// Arguments:
+///
+/// * `a` - A 128-bit integer vector containing the bits to be tested.
+/// * `mask` - A 128-bit integer vector selecting which bits to test in
+///   operand `a`.
+///
+/// Returns:
+///
+/// * `1` - if the specified bits are neither all zeros nor all ones,
+/// * `0` - otherwise.
+#[inline(always)]
+#[target_feature = "+sse4.1"]
+#[cfg_attr(test, assert_instr(ptest))]
+pub unsafe fn _mm_test_mix_ones_zeros(a: __m128i, mask: __m128i) -> i32 {
+    _mm_testnzc_si128(a, mask)
+}
+
 /// Returns the dot product of two f64x2 vectors.
 ///
 /// `imm8[1:0]` is the broadcast mask, and `imm8[5:4]` is the condition mask.
@@ -580,10 +759,25 @@ pub unsafe fn _mm_round_ss(a: f32x4, b: f32x4, rounding: i32) -> f32x4 {
     constify_imm4!(rounding, call)
 }

-/// Find minimal u16 element in vector.
-/// Place it in the first element of resulting vector and it's index
-/// in second element (formally bits [16..18] inclusive).
-/// All other elements are set to zero.
+/// Finds the minimum unsigned 16-bit element in the 128-bit u16x8 vector,
+/// returning a vector containing its value in its first position, and its
+/// index in its second position; all other elements are set to zero.
+///
+/// This intrinsic corresponds to the VPHMINPOSUW / PHMINPOSUW
+/// instruction.
+///
+/// Arguments:
+///
+/// * `a` - A 128-bit vector of type `u16x8`.
+///
+/// Returns:
+///
+/// A 128-bit value where:
+///
+/// * bits `[15:0]` - contain the minimum value found in parameter `a`,
+/// * bits `[18:16]` - contain the index of the minimum value,
+/// * remaining bits are set to `0`.
 #[inline(always)]
 #[target_feature = "+sse4.1"]
 #[cfg_attr(test, assert_instr(phminposuw))]
@@ -591,8 +785,8 @@ pub unsafe fn _mm_minpos_epu16(a: u16x8) -> u16x8 {
     phminposuw(a)
 }

-/// Multiply the low 32-bit integers from each packed 64-bit element
-/// in a and b, and store the signed 64-bit results in dst.
+/// Multiply the low 32-bit integers from each packed 64-bit
+/// element in `a` and `b`, and return the signed 64-bit result.
 #[inline(always)]
 #[target_feature = "+sse4.1"]
 #[cfg_attr(test, assert_instr(pmuldq))]
@@ -600,12 +794,12 @@ pub unsafe fn _mm_mul_epi32(a: i32x4, b: i32x4) -> i64x2 {
     pmuldq(a, b)
 }

-/// Multiply the packed 32-bit integers in a and b, producing intermediate
-/// 64-bit integers, and returns the lowest 32-bit, whatever they might be,
-/// reinterpreted as a signed integer.
-/// While pmulld i32x4::splat(2), i32x4::splat(2) returns the obvious
-/// i32x4::splat(4), pmulld i32x4::splat(i32::MAX), i32x4::splat(2)
-/// would return a negative number.
+/// Multiply the packed 32-bit integers in `a` and `b`, producing intermediate
+/// 64-bit integers, and return the lowest 32 bits of each intermediate
+/// result, reinterpreted as a signed integer. While `pmulld i32x4::splat(2),
+/// i32x4::splat(2)` returns the obvious `i32x4::splat(4)`, due to wrapping
+/// arithmetic `pmulld i32x4::splat(i32::MAX), i32x4::splat(2)` would return a
+/// negative number.
 #[inline(always)]
 #[target_feature = "+sse4.1"]
 #[cfg_attr(test, assert_instr(pmulld))]
@@ -613,6 +807,46 @@ pub unsafe fn _mm_mullo_epi32(a: i32x4, b: i32x4) -> i32x4 {
     a * b
 }

+/// Subtracts 8-bit unsigned integer values and computes the absolute
+/// values of the differences; the sums of those absolute differences are
+/// then returned according to the bit fields in the immediate operand.
+///
+/// The following algorithm is performed:
+///
+/// ```ignore
+/// i = imm8[2] * 4
+/// j = imm8[1:0] * 4
+/// for k := 0 to 7
+///     d0 = abs(a[i + k + 0] - b[j + 0])
+///     d1 = abs(a[i + k + 1] - b[j + 1])
+///     d2 = abs(a[i + k + 2] - b[j + 2])
+///     d3 = abs(a[i + k + 3] - b[j + 3])
+///     r[k] = d0 + d1 + d2 + d3
+/// ```
+///
+/// Arguments:
+///
+/// * `a` - A 128-bit vector of type `u8x16`.
+/// * `b` - A 128-bit vector of type `u8x16`.
+/// * `imm8` - An 8-bit immediate operand specifying how the absolute
+///   differences are to be calculated
+///     * Bit `[2]` specifies the offset for operand `a`
+///     * Bits `[1:0]` specify the offset for operand `b`
+///
+/// Returns:
+///
+/// * A `u16x8` vector containing the sums of the sets of
+///   absolute differences between both operands.
+#[inline(always)]
+#[target_feature = "+sse4.1"]
+#[cfg_attr(test, assert_instr(mpsadbw, imm8 = 0))]
+pub unsafe fn _mm_mpsadbw_epu8(a: u8x16, b: u8x16, imm8: u8) -> u16x8 {
+    macro_rules! call {
+        ($imm8:expr) => { mpsadbw(a, b, $imm8) }
+    }
+    constify_imm3!(imm8, call)
+}
+
 #[allow(improper_ctypes)]
 extern "C" {
@@ -648,6 +882,12 @@ extern "C" {
     fn pminud(a: u32x4, b: u32x4) -> u32x4;
     #[link_name = "llvm.x86.sse41.packusdw"]
     fn packusdw(a: i32x4, b: i32x4) -> u16x8;
+    #[link_name = "llvm.x86.sse41.ptestz"]
+    fn ptestz(a: i64x2, mask: i64x2) -> i32;
+    #[link_name = "llvm.x86.sse41.ptestc"]
+    fn ptestc(a: i64x2, mask: i64x2) -> i32;
+    #[link_name = "llvm.x86.sse41.ptestnzc"]
+    fn ptestnzc(a: i64x2, mask: i64x2) -> i32;
     #[link_name = "llvm.x86.sse41.dppd"]
     fn dppd(a: f64x2, b: f64x2, imm8: u8) -> f64x2;
     #[link_name = "llvm.x86.sse41.dpps"]
@@ -664,16 +904,18 @@ extern "C" {
     fn phminposuw(a: u16x8) -> u16x8;
     #[link_name = "llvm.x86.sse41.pmuldq"]
     fn pmuldq(a: i32x4, b: i32x4) -> i64x2;
+    #[link_name = "llvm.x86.sse41.mpsadbw"]
+    fn mpsadbw(a: u8x16, b: u8x16, imm8: u8) -> u16x8;
 }

 #[cfg(test)]
 mod tests {
     use std::mem;
-    use stdsimd_test::simd_test;
-
-    use v128::*;
     use x86::sse41;
+    use v128::*;
+    #[cfg(all(target_arch = "x86_64", not(target_feature = "sse2")))] // i586
+    use x86::__m128i;

     #[simd_test = "sse4.1"]
     unsafe fn _mm_blendv_epi8() {
@@ -767,7 +1009,7 @@ mod tests {
         assert_eq!(r, 1);
     }

-    #[cfg(target_arch = "x86_64")]
+    #[cfg(all(target_arch = "x86_64", not(target_feature = "sse2")))] // i586
     #[simd_test = "sse4.1"]
     unsafe fn _mm_extract_epi64() {
         let a = i64x2::new(0, 1);
@@ -806,7 +1048,7 @@ mod tests {
         assert_eq!(r, e);
     }

-    #[cfg(target_arch = "x86_64")]
+    #[cfg(all(target_arch = "x86_64", not(target_feature = "sse2")))] // i586
     #[simd_test = "sse4.1"]
     unsafe fn _mm_insert_epi64() {
         let a = i64x2::splat(0);
@@ -1021,6 +1263,170 @@ mod tests {
         assert_eq!(r, e);
     }

+    #[simd_test = "sse4.1"]
+    unsafe fn _mm_cvtepi32_epi64() {
+        let a = i32x4::splat(10);
+        let r = sse41::_mm_cvtepi32_epi64(a);
+        let e = i64x2::splat(10);
+        assert_eq!(r, e);
+        let a = i32x4::splat(-10);
+        let r = sse41::_mm_cvtepi32_epi64(a);
+        let e = i64x2::splat(-10);
+        assert_eq!(r, e);
+    }
+
+    #[simd_test = "sse4.1"]
+    unsafe fn _mm_cvtepu8_epi16() {
+        let a = u8x16::splat(10);
+        let r = sse41::_mm_cvtepu8_epi16(a);
+        let e = i16x8::splat(10);
+        assert_eq!(r, e);
+    }
+
+    #[simd_test = "sse4.1"]
+    unsafe fn _mm_cvtepu8_epi32() {
+        let a = u8x16::splat(10);
+        let r = sse41::_mm_cvtepu8_epi32(a);
+        let e = i32x4::splat(10);
+        assert_eq!(r, e);
+    }
+
+    #[simd_test = "sse4.1"]
+    unsafe fn _mm_cvtepu8_epi64() {
+        let a = u8x16::splat(10);
+        let r = sse41::_mm_cvtepu8_epi64(a);
+        let e = i64x2::splat(10);
+        assert_eq!(r, e);
+    }
+
+    #[simd_test = "sse4.1"]
+    unsafe fn _mm_cvtepu16_epi32() {
+        let a = u16x8::splat(10);
+        let r = sse41::_mm_cvtepu16_epi32(a);
+        let e = i32x4::splat(10);
+        assert_eq!(r, e);
+    }
+
+    #[simd_test = "sse4.1"]
+    unsafe fn _mm_cvtepu16_epi64() {
+        let a = u16x8::splat(10);
+        let r = sse41::_mm_cvtepu16_epi64(a);
+        let e = i64x2::splat(10);
+        assert_eq!(r, e);
+    }
+
+    #[simd_test = "sse4.1"]
+    unsafe fn _mm_cvtepu32_epi64() {
+        let a = u32x4::splat(10);
+        let r = sse41::_mm_cvtepu32_epi64(a);
+        let e = i64x2::splat(10);
+        assert_eq!(r, e);
+    }
+
+    #[cfg(all(target_arch = "x86_64", not(target_feature = "sse2")))] // i586
+    #[simd_test = "sse4.1"]
+    unsafe fn _mm_testz_si128() {
+        let a = __m128i::splat(1);
+        let mask = __m128i::splat(0);
+        let r = sse41::_mm_testz_si128(a, mask);
+        assert_eq!(r, 1);
+        let a = __m128i::splat(0b101);
+        let mask = __m128i::splat(0b110);
+        let r = sse41::_mm_testz_si128(a, mask);
+        assert_eq!(r, 0);
+        let a = __m128i::splat(0b011);
+        let mask = __m128i::splat(0b100);
+        let r = sse41::_mm_testz_si128(a, mask);
+        assert_eq!(r, 1);
+    }
+
+    #[cfg(all(target_arch = "x86_64", not(target_feature = "sse2")))] // i586
+    #[simd_test = "sse4.1"]
+    unsafe fn _mm_testc_si128() {
+        let a = __m128i::splat(-1);
+        let mask = __m128i::splat(0);
+        let r = sse41::_mm_testc_si128(a, mask);
+        assert_eq!(r, 1);
+        let a = __m128i::splat(0b101);
+        let mask = __m128i::splat(0b110);
+        let r = sse41::_mm_testc_si128(a, mask);
+        assert_eq!(r, 0);
+        let a = __m128i::splat(0b101);
+        let mask = __m128i::splat(0b100);
+        let r = sse41::_mm_testc_si128(a, mask);
+        assert_eq!(r, 1);
+    }
+
+    #[cfg(all(target_arch = "x86_64", not(target_feature = "sse2")))] // i586
+    #[simd_test = "sse4.1"]
+    unsafe fn _mm_testnzc_si128() {
+        let a = __m128i::splat(0);
+        let mask = __m128i::splat(1);
+        let r = sse41::_mm_testnzc_si128(a, mask);
+        assert_eq!(r, 0);
+        let a = __m128i::splat(-1);
+        let mask = __m128i::splat(0);
+        let r = sse41::_mm_testnzc_si128(a, mask);
+        assert_eq!(r, 0);
+        let a = __m128i::splat(0b101);
+        let mask = __m128i::splat(0b110);
+        let r = sse41::_mm_testnzc_si128(a, mask);
+        assert_eq!(r, 1);
+        let a = __m128i::splat(0b101);
+        let mask = __m128i::splat(0b101);
+        let r = sse41::_mm_testnzc_si128(a, mask);
+        assert_eq!(r, 0);
+    }
+
+    #[cfg(all(target_arch = "x86_64", not(target_feature = "sse2")))] // i586
+    #[simd_test = "sse4.1"]
+    unsafe fn _mm_test_all_zeros() {
+        let a = __m128i::splat(1);
+        let mask = __m128i::splat(0);
+        let r = sse41::_mm_test_all_zeros(a, mask);
+        assert_eq!(r, 1);
+        let a = __m128i::splat(0b101);
+        let mask = __m128i::splat(0b110);
+        let r = sse41::_mm_test_all_zeros(a, mask);
+        assert_eq!(r, 0);
+        let a = __m128i::splat(0b011);
+        let mask = __m128i::splat(0b100);
+        let r = sse41::_mm_test_all_zeros(a, mask);
+        assert_eq!(r, 1);
+    }
+
+    #[cfg(all(target_arch = "x86_64", not(target_feature = "sse2")))] // i586
+    #[simd_test = "sse4.1"]
+    unsafe fn _mm_test_all_ones() {
+        let a = __m128i::splat(-1);
+        let r = sse41::_mm_test_all_ones(a);
+        assert_eq!(r, 1);
+        let a = __m128i::splat(0b101);
+        let r = sse41::_mm_test_all_ones(a);
+        assert_eq!(r, 0);
+    }
+
+    #[cfg(all(target_arch = "x86_64", not(target_feature = "sse2")))] // i586
+    #[simd_test = "sse4.1"]
+    unsafe fn _mm_test_mix_ones_zeros() {
+        let a = __m128i::splat(0);
+        let mask = __m128i::splat(1);
+        let r = sse41::_mm_test_mix_ones_zeros(a, mask);
+        assert_eq!(r, 0);
+        let a = __m128i::splat(-1);
+        let mask = __m128i::splat(0);
+        let r = sse41::_mm_test_mix_ones_zeros(a, mask);
+        assert_eq!(r, 0);
+        let a = __m128i::splat(0b101);
+        let mask = __m128i::splat(0b110);
+        let r = sse41::_mm_test_mix_ones_zeros(a, mask);
+        assert_eq!(r, 1);
+        let a = __m128i::splat(0b101);
+        let mask = __m128i::splat(0b101);
+        let r = sse41::_mm_test_mix_ones_zeros(a, mask);
+        assert_eq!(r, 0);
+    }
+
     #[simd_test = "sse4.1"]
     unsafe fn _mm_dp_pd() {
         let a = f64x2::new(2.0, 3.0);
@@ -1165,27 +1571,84 @@ mod tests {

     #[simd_test = "sse4.1"]
     unsafe fn _mm_mul_epi32() {
-        let a =
-            i32x4::new(15, 2 /* ignored */, 1234567, 4 /* ignored */);
-        let b = i32x4::new(
-            -20,
-            -256, /* ignored */
-            666666,
-            666666, /* ignored */
-        );
-        let r = sse41::_mm_mul_epi32(a, b);
-        let e = i64x2::new(-300, 823043843622);
-        assert_eq!(r, e);
+        {
+            let a = i32x4::new(1, 1, 1, 1);
+            let b = i32x4::new(1, 2, 3, 4);
+            let r = sse41::_mm_mul_epi32(a, b);
+            let e = i64x2::new(1, 3);
+            assert_eq!(r, e);
+        }
+        {
+            let a = i32x4::new(
+                15,
+                2, /* ignored */
+                1234567,
+                4, /* ignored */
+            );
+            let b = i32x4::new(
+                -20,
+                -256, /* ignored */
+                666666,
+                666666, /* ignored */
+            );
+            let r = sse41::_mm_mul_epi32(a, b);
+            let e = i64x2::new(-300, 823043843622);
+            assert_eq!(r, e);
+        }
     }

     #[simd_test = "sse4.1"]
     unsafe fn _mm_mullo_epi32() {
-        let a = i32x4::new(15, -2, 1234567, 99999);
-        let b = i32x4::new(-20, -256, 666666, -99999);
-        let r = sse41::_mm_mullo_epi32(a, b);
-        // Attention, most significant bit in r[2] is treated as a sign bit!
-        // 1234567 * 666666 = -1589877210
-        let e = i32x4::new(-300, 512, -1589877210, -1409865409);
+        {
+            let a = i32x4::new(1, 1, 1, 1);
+            let b = i32x4::new(1, 2, 3, 4);
+            let r = sse41::_mm_mullo_epi32(a, b);
+            let e = i32x4::new(1, 2, 3, 4);
+            assert_eq!(r, e);
+        }
+        {
+            let a = i32x4::new(15, -2, 1234567, 99999);
+            let b = i32x4::new(-20, -256, 666666, -99999);
+            let r = sse41::_mm_mullo_epi32(a, b);
+            // Attention, most significant bit in r[2] is treated
+            // as a sign bit:
+            // 1234567 * 666666 = -1589877210
+            let e = i32x4::new(-300, 512, -1589877210, -1409865409);
+            assert_eq!(r, e);
+        }
+    }
+
+    #[simd_test = "sse4.1"]
+    unsafe fn _mm_minpos_epu16() {
+        let a = u16x8::new(8, 7, 6, 5, 4, 1, 2, 3);
+        let r = sse41::_mm_minpos_epu16(a);
+        let e = u16x8::splat(0).replace(0, 1).replace(1, 5);
+        assert_eq!(r, e);
+    }
+
+    #[simd_test = "sse4.1"]
+    unsafe fn _mm_mpsadbw_epu8() {
+        let a =
+            u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+
+        let r = sse41::_mm_mpsadbw_epu8(a, a, 0b000);
+        let e = u16x8::new(0, 4, 8, 12, 16, 20, 24, 28);
+        assert_eq!(r, e);
+
+        let r = sse41::_mm_mpsadbw_epu8(a, a, 0b001);
+        let e = u16x8::new(16, 12, 8, 4, 0, 4, 8, 12);
+        assert_eq!(r, e);
+
+        let r = sse41::_mm_mpsadbw_epu8(a, a, 0b100);
+        let e = u16x8::new(16, 20, 24, 28, 32, 36, 40, 44);
+        assert_eq!(r, e);
+
+        let r = sse41::_mm_mpsadbw_epu8(a, a, 0b101);
+        let e = u16x8::new(0, 4, 8, 12, 16, 20, 24, 28);
+        assert_eq!(r, e);
+
+        let r = sse41::_mm_mpsadbw_epu8(a, a, 0b111);
+        let e = u16x8::new(32, 28, 24, 20, 16, 12, 8, 4);
         assert_eq!(r, e);
     }
 }
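
The `simd_shuffle*` plus cast pattern used by the new `_mm_cvtep*` intrinsics takes the low lanes of the source vector and widens them with a sign- or zero-extending cast. A minimal scalar sketch of the zero-extending case, under the assumption that plain arrays stand in for the SIMD types (`cvtepu8_epi16_model` is a hypothetical name, not part of the patch):

```rust
// Scalar sketch of the zero extension performed by _mm_cvtepu8_epi16:
// keep the low eight lanes, widen each u8 to i16.
fn cvtepu8_epi16_model(a: [u8; 16]) -> [i16; 8] {
    let mut r = [0i16; 8];
    for n in 0..8 {
        // u8 -> i16 is a zero extension, so the result is never negative.
        r[n] = i16::from(a[n]);
    }
    r
}

fn main() {
    // Mirrors the u8x16::splat(10) -> i16x8::splat(10) test in the patch.
    assert_eq!(cvtepu8_epi16_model([10; 16]), [10i16; 8]);
}
```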
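The three `ptest*` bindings expose the two condition flags the PTEST instruction computes; the `_mm_test*` wrappers are thin aliases over them. A scalar sketch of those semantics, with `u128` standing in for the `__m128i` operands (`ptest_flags` is an illustrative helper, not part of the crate):

```rust
// Scalar model of the PTEST condition codes behind _mm_testz_si128,
// _mm_testc_si128 and _mm_testnzc_si128.
fn ptest_flags(a: u128, mask: u128) -> (i32, i32) {
    let zf = ((a & mask) == 0) as i32; // ZF: every masked bit of `a` is 0
    let cf = ((!a & mask) == 0) as i32; // CF: every masked bit of `a` is 1
    (zf, cf)
}

fn main() {
    // _mm_testz_si128 returns ZF, _mm_testc_si128 returns CF, and
    // _mm_testnzc_si128 returns 1 exactly when both flags are 0.
    let (zf, cf) = ptest_flags(0b101, 0b110);
    assert_eq!((zf, cf), (0, 0)); // mixed ones and zeros: the "nzc" case
    let (zf, _) = ptest_flags(0b011, 0b100);
    assert_eq!(zf, 1); // all masked bits zero, as in the testz test above
}
```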
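The wrapping claim in the `_mm_mullo_epi32` doc comment can be checked with plain integer arithmetic; this small sketch (not part of the patch) reproduces the `splat(i32::MAX) * splat(2)` case per lane:

```rust
// Only the low 32 bits of each 64-bit product survive pmulld.
fn main() {
    let product = i32::MAX as i64 * 2; // 4294967294, needs 33 bits
    let low = product as i32; // truncate to the low 32 bits
    assert_eq!(low, -2); // reinterpreted as signed, the result is negative
}
```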
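Finally, a scalar model of the MPSADBW algorithm quoted in the `_mm_mpsadbw_epu8` doc comment, cross-checked against the test vectors added by the patch (`mpsadbw_model` is an illustrative name, not part of the crate):

```rust
// Scalar model of MPSADBW: eight sums of absolute differences between a
// sliding 4-byte window of `a` and a fixed 4-byte block of `b`.
fn mpsadbw_model(a: [u8; 16], b: [u8; 16], imm8: u8) -> [u16; 8] {
    let i = ((imm8 >> 2) & 1) as usize * 4; // imm8[2]: 4-byte offset into `a`
    let j = (imm8 & 0b11) as usize * 4; // imm8[1:0]: 4-byte offset into `b`
    let mut r = [0u16; 8];
    for k in 0..8 {
        r[k] = (0..4)
            .map(|n| {
                (i16::from(a[i + k + n]) - i16::from(b[j + n])).unsigned_abs()
            })
            .sum();
    }
    r
}

fn main() {
    let a: [u8; 16] = core::array::from_fn(|x| x as u8);
    // Mirrors the first two _mm_mpsadbw_epu8 test vectors in the patch.
    assert_eq!(mpsadbw_model(a, a, 0b000), [0, 4, 8, 12, 16, 20, 24, 28]);
    assert_eq!(mpsadbw_model(a, a, 0b001), [16, 12, 8, 4, 0, 4, 8, 12]);
}
```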