#ifndef included_vector_neon_h
#define included_vector_neon_h
#include <arm_neon.h>

/* Arithmetic */
#define u16x8_sub_saturate(a,b) vsubq_u16(a,b)
#define i16x8_sub_saturate(a,b) vsubq_s16(a,b)

/* Converts an all-ones/all-zeros per-byte compare result to a bitmap. */
static_always_inline u32
u8x16_compare_byte_mask (u8x16 x)
{
  uint8x16_t mask_shift =
    { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
  uint8x16_t mask_and = vdupq_n_u8 (0x80);
  /* keep only the most significant bit of each byte */
  x = vandq_u8 (x, mask_and);
  /* a negative shift count shifts right, so byte i ends up holding bit
     (i mod 8) of its half of the mask */
  x = vshlq_u8 (x, vreinterpretq_s8_u8 (mask_shift));
  /* three pairwise folds merge bytes 0-7 into lane 0 and bytes 8-15
     into lane 1 */
  x = vpaddq_u8 (x, x);
  x = vpaddq_u8 (x, x);
  x = vpaddq_u8 (x, x);
  return vgetq_lane_u8 (x, 0) | (vgetq_lane_u8 (x, 1) << 8);
}
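
/* Usage sketch (illustrative, not part of the original header): pair a
   per-byte compare with u8x16_compare_byte_mask to find the first byte
   equal to a needle.  The function name is hypothetical. */
static_always_inline int
u8x16_example_first_match (u8x16 v, u8 needle)
{
  /* vceqq_u8 sets matching bytes to 0xff, all others to 0x00 */
  u8x16 is_eq = (u8x16) vceqq_u8 ((uint8x16_t) v, vdupq_n_u8 (needle));
  u32 mask = u8x16_compare_byte_mask (is_eq);
  /* bit i of mask is set iff byte i matched */
  return mask ? __builtin_ctz (mask) : -1;
}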

static_always_inline u32
u16x8_zero_byte_mask (u16x8 input)
{
  /* despite the name, this vector is all zeros */
  u8x16 vall_one = vdupq_n_u8 (0x0);
  u8x16 res_values = { 0x01, 0x02, 0x04, 0x08,
                       0x10, 0x20, 0x40, 0x80,
                       0x01, 0x02, 0x04, 0x08,
                       0x10, 0x20, 0x40, 0x80
  };

  /* input --> [0x80, 0x40, 0x01, 0xf0, ... ] */
  u8x16 test_result =
    vreinterpretq_u8_u16 (vceqq_u16 (input, vreinterpretq_u16_u8 (vall_one)));
  u8x16 before_merge = vminq_u8 (test_result, res_values);
  /* before_merge --> [0x80, 0x00, 0x00, 0x10, ... ] */
  /* pairwise-add the u8x16 [a,b,c,d, ... m,n,o,p] until two u64 lanes
     remain */
  u16x8 merge1 = vpaddlq_u8 (before_merge);
  /* u16x8 --> [a+b, c+d, e+f, g+h, i+j, k+l, m+n, o+p] */
  u32x4 merge2 = vpaddlq_u16 (merge1);
  /* u32x4 --> [a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p] */
  u64x2 merge3 = vpaddlq_u32 (merge2);
  /* u64x2 --> [a+..+h, i+..+p] */
  return (u32) (vgetq_lane_u64 (merge3, 1) << 8) + vgetq_lane_u64 (merge3, 0);
}
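
/* Usage sketch (illustrative, not part of the original header): each zero
   u16 lane contributes two adjacent bits to the returned mask, so a
   non-zero result means at least one lane is zero.  The function name is
   hypothetical. */
static_always_inline int
u16x8_example_has_zero_lane (u16x8 v)
{
  return u16x8_zero_byte_mask (v) != 0;
}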

#define foreach_neon_vec128i \
  _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32) _(i,64,2,s64)
#define foreach_neon_vec128u \
  _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32) _(u,64,2,u64)
#define foreach_neon_vec128f \
  _(f,32,4,f32) _(f,64,2,f64)

/* t = type prefix, s = element size in bits, c = lane count,
   i = intrinsic type suffix */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) vdupq_n_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) vld1q_##i (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ vst1q_##i (p, v); } \
\
/* reduce as u32 lanes: a plain vaddvq sum can wrap to zero for \
   non-zero input on the narrow types */ \
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return !vmaxvq_u32 ((u32x4) x); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); }

foreach_neon_vec128i foreach_neon_vec128u

#undef _

static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
  return (u16x8) vrev16q_u8 ((u8x16) v);
}
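
/* Usage sketch (illustrative, not part of the original header): for u32x4
   the macro expansion above provides u32x4_splat, u32x4_load_unaligned,
   u32x4_store_unaligned, u32x4_is_all_zero, u32x4_is_equal and
   u32x4_is_all_equal.  The function name below is hypothetical. */
static_always_inline int
u32x4_example_block_is_zeroed (void *p)
{
  u32x4 v = u32x4_load_unaligned (p);
  return u32x4_is_all_equal (v, 0);
}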

static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
  return (u8x16) vqtbl1q_u8 (v, m);
}
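
/* Usage sketch (illustrative, not part of the original header): vqtbl1q_u8
   selects v[m[i]] for each byte (indices >= 16 produce 0), so a descending
   index vector reverses the register.  The function name is hypothetical. */
static_always_inline u8x16
u8x16_example_reverse (u8x16 v)
{
  u8x16 m = { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
  return u8x16_shuffle (v, m);
}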

static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
  return (u32x4) vpaddq_u32 (v1, v2);
}
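
/* Usage sketch (illustrative, not part of the original header): vpaddq_u32
   adds adjacent lane pairs, so two rounds of u32x4_hadd leave the total of
   all four lanes in lane 0.  The function name is hypothetical. */
static_always_inline u32
u32x4_example_sum_lanes (u32x4 v)
{
  v = u32x4_hadd (v, v);	/* [a+b, c+d, a+b, c+d] */
  v = u32x4_hadd (v, v);	/* [a+b+c+d, ...] */
  return v[0];
}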

static_always_inline u64x2
u32x4_extend_to_u64x2 (u32x4 v)
{
  return vmovl_u32 (vget_low_u32 (v));
}

static_always_inline u64x2
u32x4_extend_to_u64x2_high (u32x4 v)
{
  return vmovl_high_u32 (v);
}
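
/* Usage sketch (illustrative, not part of the original header): the two
   helpers above zero-extend the low and high u32 pairs, e.g. before
   accumulating counters in 64 bits.  The function name is hypothetical. */
static_always_inline u64x2
u64x2_example_accumulate (u64x2 acc, u32x4 v)
{
  acc += u32x4_extend_to_u64x2 (v);
  acc += u32x4_extend_to_u64x2_high (v);
  return acc;
}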

#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
#define CLIB_VEC128_SPLAT_DEFINED

#endif /* included_vector_neon_h */