Go to the source code of this file.
Macros

#define u16x8_sub_saturate(a, b)   vsubq_u16(a,b)
#define i16x8_sub_saturate(a, b)   vsubq_s16(a,b)
#define vreinterpretq_u8_u8(a)   a
#define vminvq_u64(x)
#define foreach_neon_vec128i   _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32) _(i,64,2,s64)
#define foreach_neon_vec128u   _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32) _(u,64,2,u64)
#define foreach_neon_vec128f   _(f,32,4,f32) _(f,64,2,f64)
#define u8x16_word_shift_left(x, n)   vextq_u8(u8x16_splat (0), x, 16 - n)
#define u8x16_word_shift_right(x, n)   vextq_u8(x, u8x16_splat (0), n)
#define CLIB_HAVE_VEC128_MSB_MASK
#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
#define CLIB_VEC128_SPLAT_DEFINED
Functions

static u32 u8x16_compare_byte_mask (u8x16 v)
static_always_inline u16x8 u16x8_byte_swap (u16x8 v)
static_always_inline u32x4 u32x4_byte_swap (u32x4 v)
static_always_inline u8x16 u8x16_shuffle (u8x16 v, u8x16 m)
static_always_inline u32x4 u32x4_hadd (u32x4 v1, u32x4 v2)
static_always_inline u64x2 u32x4_extend_to_u64x2 (u32x4 v)
static_always_inline u64x2 u32x4_extend_to_u64x2_high (u32x4 v)
static_always_inline u16 u8x16_msb_mask (u8x16 v)
static_always_inline u64x2 u64x2_gather (void *p0, void *p1)
static_always_inline u32x4 u32x4_gather (void *p0, void *p1, void *p2, void *p3)
static_always_inline void u64x2_scatter (u64x2 r, void *p0, void *p1)
static_always_inline void u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3)
static_always_inline u32 u32x4_min_scalar (u32x4 v)
static_always_inline u8x16 u8x16_reflect (u8x16 v)
static_always_inline u8x16 u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
|
◆ CLIB_HAVE_VEC128_MSB_MASK
#define CLIB_HAVE_VEC128_MSB_MASK
◆ CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
◆ CLIB_VEC128_SPLAT_DEFINED
#define CLIB_VEC128_SPLAT_DEFINED
◆ foreach_neon_vec128f
#define foreach_neon_vec128f   _(f,32,4,f32) _(f,64,2,f64)
◆ foreach_neon_vec128i
#define foreach_neon_vec128i   _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32) _(i,64,2,s64)
◆ foreach_neon_vec128u
#define foreach_neon_vec128u   _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32) _(u,64,2,u64)
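Each entry in these lists carries (type prefix, element bits, lane count, NEON type suffix) for one 128-bit vector type, and the header stamps out per-type functions by defining _() before expanding a list. The sketch below shows that pattern; the typedefs, the _() body and the *_splat_demo helper name are illustrative assumptions, not what vector_neon.h actually generates.

#include <arm_neon.h>

typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
typedef unsigned long long u64;
typedef uint8x16_t u8x16;   /* VPP uses GCC vector_size types; NEON types stand in here */
typedef uint16x8_t u16x8;
typedef uint32x4_t u32x4;
typedef uint64x2_t u64x2;

/* List copied from the macro table above: (prefix, element bits, lanes, NEON suffix). */
#define foreach_neon_vec128u _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32) _(u,64,2,u64)

/* Illustrative _() body: one splat helper per unsigned 128-bit vector type. */
#define _(t, s, c, i)                                            \
  static inline t##s##x##c t##s##x##c##_splat_demo (t##s x)      \
  {                                                              \
    return vdupq_n_##i (x);                                      \
  }

foreach_neon_vec128u /* u8x16_splat_demo, u16x8_splat_demo, u32x4_splat_demo, u64x2_splat_demo */
#undef _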
◆ i16x8_sub_saturate
#define i16x8_sub_saturate(a, b)   vsubq_s16(a,b)
◆ u16x8_sub_saturate
#define u16x8_sub_saturate(a, b)   vsubq_u16(a,b)
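As the table shows, both *_sub_saturate macros expand to the plain NEON subtract intrinsics. A small usage sketch follows; demo_sub is an illustrative name.

#include <arm_neon.h>

/* Reproduced from the table above so the sketch is self-contained. */
#define u16x8_sub_saturate(a, b) vsubq_u16 (a, b)

/* Usage sketch; demo_sub is an illustrative name.  Note that vsubq_u16
 * is a modular (wrapping) subtract; the unsigned saturating NEON
 * intrinsic would be vqsubq_u16. */
static inline uint16x8_t
demo_sub (uint16x8_t a, uint16x8_t b)
{
  return u16x8_sub_saturate (a, b);
}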
◆ u8x16_word_shift_left
#define u8x16_word_shift_left(x, n)   vextq_u8(u8x16_splat (0), x, 16 - n)
◆ u8x16_word_shift_right
#define u8x16_word_shift_right(x, n)   vextq_u8(x, u8x16_splat (0), n)
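Both shift macros are built on vextq_u8, which concatenates its two operands and extracts 16 consecutive bytes starting at a compile-time-constant offset; pairing the input with a zero vector turns that extraction into a whole-vector byte shift. A sketch of the expansions for n = 3, written with raw NEON types (the *_demo helper names are illustrative):

#include <arm_neon.h>

/* u8x16_word_shift_right (x, 3) expands to vextq_u8 (x, zero, 3),
 * i.e. { x[3..15], 0, 0, 0 }: bytes move toward lower indices. */
static inline uint8x16_t
shift_right_3_demo (uint8x16_t x)
{
  return vextq_u8 (x, vdupq_n_u8 (0), 3);
}

/* u8x16_word_shift_left (x, 3) expands to vextq_u8 (zero, x, 13),
 * i.e. { 0, 0, 0, x[0..12] }: bytes move toward higher indices. */
static inline uint8x16_t
shift_left_3_demo (uint8x16_t x)
{
  return vextq_u8 (vdupq_n_u8 (0), x, 13);
}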
◆ vminvq_u64
#define vminvq_u64(x)
Value:
({ \
  u64 x0 = vgetq_lane_u64(x, 0); \
  u64 x1 = vgetq_lane_u64(x, 1); \
  x0 < x1 ? x0 : x1; \
})
Definition at line 26 of file vector_neon.h.
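AArch64 has across-vector minimum intrinsics for 8-, 16- and 32-bit lanes but none for 64-bit lanes, which is presumably why the header defines vminvq_u64 itself as the smaller of the two u64 lanes. A usage sketch (min_of_two_lanes_demo is an illustrative name):

#include <arm_neon.h>

typedef unsigned long long u64;

/* Statement-expression reproduced from the definition above
 * (GCC/Clang extension). */
#define vminvq_u64(x)               \
  ({                                \
    u64 x0 = vgetq_lane_u64 (x, 0); \
    u64 x1 = vgetq_lane_u64 (x, 1); \
    x0 < x1 ? x0 : x1;              \
  })

static inline u64
min_of_two_lanes_demo (uint64x2_t v)
{
  return vminvq_u64 (v); /* smaller of the two 64-bit lanes */
}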
◆ vreinterpretq_u8_u8
#define vreinterpretq_u8_u8(a)   a
◆ u16x8_byte_swap()
◆ u32x4_byte_swap()
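The documentation above lists the two byte-swap helpers without their bodies. A hedged sketch of how per-lane byte swaps are commonly written with the NEON reverse intrinsics; the *_demo names are illustrative and this is not necessarily the code in vector_neon.h:

#include <arm_neon.h>

/* Sketch only: swap the bytes inside every 16-bit lane. */
static inline uint16x8_t
byte_swap_u16x8_demo (uint16x8_t v)
{
  return vreinterpretq_u16_u8 (vrev16q_u8 (vreinterpretq_u8_u16 (v)));
}

/* Sketch only: swap the bytes inside every 32-bit lane. */
static inline uint32x4_t
byte_swap_u32x4_demo (uint32x4_t v)
{
  return vreinterpretq_u32_u8 (vrev32q_u8 (vreinterpretq_u8_u32 (v)));
}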
◆ u32x4_extend_to_u64x2()
◆ u32x4_extend_to_u64x2_high()
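A hedged sketch of zero-extending the low and high u32 pairs of a u32x4 into u64x2 vectors, which is what the two names above suggest; the *_demo helpers are illustrative, not necessarily the file's implementation:

#include <arm_neon.h>

/* Sketch: zero-extend lanes 0 and 1 of v to 64 bits. */
static inline uint64x2_t
extend_low_demo (uint32x4_t v)
{
  return vmovl_u32 (vget_low_u32 (v));
}

/* Sketch: zero-extend lanes 2 and 3 of v to 64 bits. */
static inline uint64x2_t
extend_high_demo (uint32x4_t v)
{
  return vmovl_u32 (vget_high_u32 (v));
}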
◆ u32x4_gather()
◆ u32x4_hadd()
◆ u32x4_min_scalar()
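Hedged sketches for the two horizontal helpers above: a pairwise add of adjacent lanes across the two inputs (u32x4_hadd) and an across-vector minimum returned as a scalar (u32x4_min_scalar). Both map naturally onto AArch64 intrinsics; the *_demo names are illustrative and this is not necessarily the file's code:

#include <arm_neon.h>

/* Sketch: horizontal add of adjacent lane pairs across the two inputs,
 * i.e. { v1[0]+v1[1], v1[2]+v1[3], v2[0]+v2[1], v2[2]+v2[3] }. */
static inline uint32x4_t
hadd_u32x4_demo (uint32x4_t v1, uint32x4_t v2)
{
  return vpaddq_u32 (v1, v2);   /* AArch64-only intrinsic */
}

/* Sketch: horizontal minimum of all four lanes as a scalar. */
static inline uint32_t
min_scalar_u32x4_demo (uint32x4_t v)
{
  return vminvq_u32 (v);        /* AArch64-only intrinsic */
}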
◆ u32x4_scatter()
◆ u64x2_gather()
◆ u64x2_scatter()
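NEON has no hardware gather/scatter, so the four helpers above presumably move data lane by lane through the given pointers. A hedged sketch of the u64x2 pair; the u32x4 variants follow the same pattern with four pointers (the *_demo names are illustrative, not necessarily the file's code):

#include <arm_neon.h>
#include <stdint.h>

/* Sketch: load one u64 from each pointer into the two lanes. */
static inline uint64x2_t
gather_u64x2_demo (void *p0, void *p1)
{
  uint64x2_t r = vdupq_n_u64 (0);
  r = vsetq_lane_u64 (*(uint64_t *) p0, r, 0);
  r = vsetq_lane_u64 (*(uint64_t *) p1, r, 1);
  return r;
}

/* Sketch: store each lane back through its pointer. */
static inline void
scatter_u64x2_demo (uint64x2_t r, void *p0, void *p1)
{
  *(uint64_t *) p0 = vgetq_lane_u64 (r, 0);
  *(uint64_t *) p1 = vgetq_lane_u64 (r, 1);
}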
◆ u8x16_compare_byte_mask()
static u32 u8x16_compare_byte_mask (u8x16 v)   [inline, static]
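NEON has no direct counterpart of x86's pmovmskb, so collapsing a per-byte 0x00/0xFF comparison result into a bitmask takes a few steps. A hedged sketch of one common approach, producing one result bit per byte; the same idea applies to u8x16_msb_mask below. This is not necessarily the code in vector_neon.h, and compare_byte_mask_demo is an illustrative name:

#include <arm_neon.h>
#include <stdint.h>

/* Sketch: v holds 0x00 or 0xFF per byte (a NEON compare result).
 * Keep one weight bit per 0xFF byte, then pairwise-add down to two
 * 8-bit partial masks and combine them. */
static inline uint32_t
compare_byte_mask_demo (uint8x16_t v)
{
  static const uint8_t w[16] = { 1, 2, 4, 8, 16, 32, 64, 128,
                                 1, 2, 4, 8, 16, 32, 64, 128 };
  uint8x16_t bits = vandq_u8 (v, vld1q_u8 (w));
  uint16x8_t s16 = vpaddlq_u8 (bits);   /* add adjacent byte pairs */
  uint32x4_t s32 = vpaddlq_u16 (s16);   /* ... and again */
  uint64x2_t s64 = vpaddlq_u32 (s32);   /* lane 0: bytes 0-7, lane 1: bytes 8-15 */
  return (uint32_t) (vgetq_lane_u64 (s64, 0)
                     | (vgetq_lane_u64 (s64, 1) << 8));
}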
◆ u8x16_msb_mask()
◆ u8x16_reflect()
◆ u8x16_shuffle()
◆ u8x16_xor3()
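Hedged sketches for the last three helpers: a full 16-byte reversal (u8x16_reflect), a per-byte table-lookup shuffle (u8x16_shuffle) and a three-way XOR (u8x16_xor3). None of these is necessarily the file's exact code, and the *_demo names are illustrative; on cores with the SHA3 extension the three-way XOR can also be a single veor3q_u8.

#include <arm_neon.h>

/* Sketch: reverse all 16 bytes.  vrev64q_u8 reverses each 64-bit half,
 * then vextq_u8 with offset 8 swaps the two halves. */
static inline uint8x16_t
reflect_demo (uint8x16_t v)
{
  uint8x16_t r = vrev64q_u8 (v);
  return vextq_u8 (r, r, 8);
}

/* Sketch: per-byte table lookup, result[i] = v[m[i]] (0 if m[i] >= 16). */
static inline uint8x16_t
shuffle_demo (uint8x16_t v, uint8x16_t m)
{
  return vqtbl1q_u8 (v, m);   /* AArch64-only intrinsic */
}

/* Sketch: a ^ b ^ c with two EORs. */
static inline uint8x16_t
xor3_demo (uint8x16_t a, uint8x16_t b, uint8x16_t c)
{
  return veorq_u8 (veorq_u8 (a, b), c);
}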