#ifndef included_vector_neon_h
#define included_vector_neon_h
#include <arm_neon.h>

/* Arithmetic: vqsubq_* are the saturating NEON subtracts
   (plain vsubq_* would wrap around). */
#define u16x8_sub_saturate(a,b) vqsubq_u16(a,b)
#define i16x8_sub_saturate(a,b) vqsubq_s16(a,b)

/* Identity stub so the uniform macros below can "reinterpret" u8 as u8. */
#define vreinterpretq_u8_u8(a) a

static_always_inline u32
u8x16_compare_byte_mask (u8x16 v)
{
  uint8x16_t mask = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
  };
  /* Keep one distinct bit per byte of the 0x00/0xFF comparison result. */
  uint8x16_t x = vandq_u8 (v, mask);
  /* Three pairwise long adds collect the bits: u64 lane 0 holds the bits
     for bytes 0-7, lane 1 the bits for bytes 8-15. */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  return (u32) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
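/* Illustrative sketch, not part of the original header: finding the first
   byte equal to a given value via u8x16_compare_byte_mask.  The helper
   name and the use of GCC's __builtin_ctz are assumptions. */
static_always_inline int
u8x16_example_first_match (u8x16 v, u8 needle)
{
  u8x16 eq = (u8x16) vceqq_u8 ((uint8x16_t) v, vdupq_n_u8 (needle));
  u32 mask = u8x16_compare_byte_mask (eq);
  /* Bit i of mask is set iff byte i compared equal, so the lowest set
     bit gives the first matching byte index. */
  return mask ? (int) __builtin_ctz (mask) : -1;
}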
#define foreach_neon_vec128i \
  _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32) _(i,64,2,s64)
#define foreach_neon_vec128u \
  _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32) _(u,64,2,u64)
#define foreach_neon_vec128f \
  _(f,32,4,f32) _(f,64,2,f64)

/* For each 128-bit type t<s>x<c> backed by the NEON suffix <i>, generate
   splat, unaligned load/store and comparison helpers.  Caveat: _is_all_zero
   sums the lanes with vaddvq at lane width, so a nonzero vector whose lane
   sum wraps to zero is misclassified; full 0x00/0xFF comparison masks are
   always classified correctly. */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) vdupq_n_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) vld1q_##i (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ vst1q_##i (p, v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return !(vaddvq_##i (x)); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline u32 \
t##s##x##c##_zero_byte_mask (t##s##x##c x) \
{ uint8x16_t v = vreinterpretq_u8_u##s (vceqq_##i (vdupq_n_##i (0), x)); \
  return u8x16_compare_byte_mask (v); }

foreach_neon_vec128i foreach_neon_vec128u
#undef _

static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
  return (u16x8) vrev16q_u8 ((u8x16) v);
}
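/* Illustrative sketch, not part of the original header: the generator
   above expands to functions such as u32x4_splat and
   u32x4_store_unaligned.  A hypothetical fill helper using them: */
static_always_inline void
u32x4_example_fill (u32 *dst, u32 value, int n_vectors)
{
  u32x4 v = u32x4_splat (value);
  for (int i = 0; i < n_vectors; i++)
    u32x4_store_unaligned (v, dst + 4 * i);
}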
static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
  /* vqtbl1q_u8 selects v[m[i]] for every output byte i
     (0 when m[i] is out of range). */
  return (u8x16) vqtbl1q_u8 (v, m);
}
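/* Illustrative sketch, not part of the original header: reversing the
   byte order of a vector with u8x16_shuffle (helper name hypothetical). */
static_always_inline u8x16
u8x16_example_reverse (u8x16 v)
{
  u8x16 m = { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
  return u8x16_shuffle (v, m);
}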
static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
  return (u32x4) vpaddq_u32 (v1, v2);
}
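/* Illustrative sketch, not part of the original header: vpaddq_u32 adds
   adjacent pairs, so u32x4_hadd (a, b) yields
   { a[0]+a[1], a[2]+a[3], b[0]+b[1], b[2]+b[3] }.  Applying it twice
   reduces a vector to a single sum (helper name hypothetical). */
static_always_inline u32
u32x4_example_sum_lanes (u32x4 v)
{
  u32x4 s = u32x4_hadd (v, v); /* { v0+v1, v2+v3, v0+v1, v2+v3 } */
  s = u32x4_hadd (s, s);       /* lane 0 now holds v0+v1+v2+v3 */
  return s[0];
}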
static_always_inline u64x2
u32x4_extend_to_u64x2 (u32x4 v)
{
  /* Zero-extend the two low u32 lanes to u64. */
  return vmovl_u32 (vget_low_u32 (v));
}
static_always_inline u64x2
u32x4_extend_to_u64x2_high (u32x4 v)
{
  /* Zero-extend the two high u32 lanes to u64. */
  return vmovl_high_u32 (v);
}
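/* Illustrative sketch, not part of the original header: widening all four
   u32 lanes into two u64x2 vectors using the pair above (helper name and
   output parameters hypothetical). */
static_always_inline void
u32x4_example_widen (u32x4 v, u64x2 *lo, u64x2 *hi)
{
  *lo = u32x4_extend_to_u64x2 (v);      /* lanes 0-1 */
  *hi = u32x4_extend_to_u64x2_high (v); /* lanes 2-3 */
}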
/* Return a 16-bit mask made of the most significant bit of each byte. */
static_always_inline u16
u8x16_msb_mask (u8x16 v)
{
  int8x16_t shift =
    { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
  /* Isolate each MSB, then move it down so byte n carries bit (n & 7);
     a negative vshlq count shifts right. */
  uint8x16_t x = vshlq_u8 (vandq_u8 (v, vdupq_n_u8 (0x80)), shift);
  /* Three pairwise long adds merge the bits as in u8x16_compare_byte_mask:
     u64 lane 0 covers bytes 0-7, lane 1 covers bytes 8-15. */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  return (u16) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
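/* Illustrative sketch, not part of the original header: because NEON
   compare intrinsics set whole bytes to 0xFF, u8x16_msb_mask yields one
   bit per byte, e.g. for counting matches (helper name hypothetical). */
static_always_inline int
u8x16_example_count_equal (u8x16 a, u8x16 b)
{
  u16 mask = u8x16_msb_mask ((u8x16) vceqq_u8 ((uint8x16_t) a,
					       (uint8x16_t) b));
  return __builtin_popcount (mask);
}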
#define CLIB_HAVE_VEC128_MSB_MASK

#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
#define CLIB_VEC128_SPLAT_DEFINED

#endif /* included_vector_neon_h */