|
#define | foreach_sse42_vec128i _(i,8,16,epi8) _(i,16,8,epi16) _(i,32,4,epi32) _(i,64,2,epi64x) |
|
#define | foreach_sse42_vec128u _(u,8,16,epi8) _(u,16,8,epi16) _(u,32,4,epi32) _(u,64,2,epi64x) |
|
#define | foreach_sse42_vec128f _(f,32,4,ps) _(f,64,2,pd) |
|
#define | CLIB_VEC128_SPLAT_DEFINED |
|
#define | CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE |
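
The foreach_* macros are X-macros: each expands a caller-supplied _ macro once per lane type, passing (signedness, element bits, lane count, intrinsic suffix); the CLIB_* defines advertise that vector splat and unaligned load/store are available on this target. A minimal sketch of consuming one of them (the _splat_example helpers are hypothetical, named here only for illustration; the real splats are what CLIB_VEC128_SPLAT_DEFINED refers to):

    /* Expands to one splat helper per signed integer lane width,
       e.g. i8x16_splat_example, i16x8_splat_example, ... */
    #define _(t, s, c, suffix) \
      static_always_inline t##s##x##c t##s##x##c##_splat_example (t##s x) \
      { return (t##s##x##c) _mm_set1_##suffix (x); }

    foreach_sse42_vec128i
    #undef _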
|
#define | u8x16_word_shift_left(a, n) (u8x16) _mm_slli_si128((__m128i) a, n) |
|
#define | u8x16_word_shift_right(a, n) (u8x16) _mm_srli_si128((__m128i) a, n) |
|
#define | i8x16_word_shift_left(a, n) ((i8x16) u8x16_word_shift_left((u8x16) (a), (n))) |
|
#define | i8x16_word_shift_right(a, n) ((i8x16) u8x16_word_shift_right((u8x16) (a), (n))) |
|
#define | u16x8_word_shift_left(a, n) ((u16x8) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u16))) |
|
#define | i16x8_word_shift_left(a, n) ((i16x8) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u16))) |
|
#define | u16x8_word_shift_right(a, n) ((u16x8) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u16))) |
|
#define | i16x8_word_shift_right(a, n) ((i16x8) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u16))) |
|
#define | u32x4_word_shift_left(a, n) ((u32x4) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u32))) |
|
#define | i32x4_word_shift_left(a, n) ((i32x4) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u32))) |
|
#define | u32x4_word_shift_right(a, n) ((u32x4) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u32))) |
|
#define | i32x4_word_shift_right(a, n) ((i32x4) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u32))) |
|
#define | u64x2_word_shift_left(a, n) ((u64x2) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u64))) |
|
#define | i64x2_word_shift_left(a, n) ((i64x2) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u64))) |
|
#define | u64x2_word_shift_right(a, n) ((u64x2) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u64))) |
|
#define | i64x2_word_shift_right(a, n) ((i64x2) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u64))) |
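
The word-shift macros move whole lanes, not bits, across the 128-bit register; n must be a compile-time constant because _mm_slli_si128/_mm_srli_si128 take an immediate byte count, and the vacated lanes fill with zeros. A minimal sketch, assuming the vppinfra types and macros above are in scope:

    /* Drop the first two 16-bit lanes of v, shifting the rest toward
       lane 0; the two vacated high lanes become zero. */
    static_always_inline u16x8
    drop_first_two_lanes (u16x8 v)
    {
      return u16x8_word_shift_right (v, 2); /* 2 * sizeof (u16) = 4 bytes */
    }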
|
#define | u32x4_select(A, MASK) |

#define | u32x4_splat_word(x, i) |
|
#define | u8x16_align_right(a, b, imm) (u8x16) _mm_alignr_epi8 ((__m128i) a, (__m128i) b, imm) |
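
u8x16_align_right (a, b, imm) has the _mm_alignr_epi8 semantics: it concatenates a:b into a 32-byte value and returns the 16 bytes starting at byte imm of b; imm must be an immediate. For example, extracting a window that spans two consecutive loads:

    /* Bytes 13..15 of lo followed by bytes 0..12 of hi. */
    static_always_inline u8x16
    span_two_loads (u8x16 lo, u8x16 hi)
    {
      return u8x16_align_right (hi, lo, 13);
    }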
|
#define | CLIB_HAVE_VEC128_MSB_MASK |
|
|
static u8x16 | u8x16_interleave_hi (u8x16 a, u8x16 b) |
|
static u8x16 | u8x16_interleave_lo (u8x16 a, u8x16 b) |
|
static u16x8 | u16x8_interleave_hi (u16x8 a, u16x8 b) |
|
static u16x8 | u16x8_interleave_lo (u16x8 a, u16x8 b) |
|
static u32x4 | u32x4_interleave_hi (u32x4 a, u32x4 b) |
|
static u32x4 | u32x4_interleave_lo (u32x4 a, u32x4 b) |
|
static u64x2 | u64x2_interleave_hi (u64x2 a, u64x2 b) |
|
static u64x2 | u64x2_interleave_lo (u64x2 a, u64x2 b) |
|
static u8x8 | u8x8_interleave_hi (u8x8 a, u8x8 b) |
|
static u8x8 | u8x8_interleave_lo (u8x8 a, u8x8 b) |
|
static u16x4 | u16x4_interleave_hi (u16x4 a, u16x4 b) |
|
static u16x4 | u16x4_interleave_lo (u16x4 a, u16x4 b) |
|
static u32x2 | u32x2_interleave_hi (u32x2 a, u32x2 b) |
|
static u32x2 | u32x2_interleave_lo (u32x2 a, u32x2 b) |
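
The interleave functions zip two vectors lane by lane (the 128-bit forms map to _mm_unpacklo_*/_mm_unpackhi_*): _lo interleaves the low halves of the inputs, _hi the high halves. Sketch:

    /* lo receives lanes a0,b0,a1,b1,...,a7,b7;
       hi receives lanes a8,b8,...,a15,b15. */
    static_always_inline void
    zip_bytes (u8x16 a, u8x16 b, u8x16 * lo, u8x16 * hi)
    {
      *lo = u8x16_interleave_lo (a, b);
      *hi = u8x16_interleave_hi (a, b);
    }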
|
static u8x16 | u16x8_pack (u16x8 lo, u16x8 hi) |
|
static i8x16 | i16x8_pack (i16x8 lo, i16x8 hi) |
|
static u16x8 | u32x4_pack (u32x4 lo, u32x4 hi) |
|
static u8x8 | u16x4_pack (u16x4 lo, u16x4 hi) |
|
static i8x8 | i16x4_pack (i16x4 lo, i16x4 hi) |
|
static u16x4 | u32x2_pack (u32x2 lo, u32x2 hi) |
|
static i16x4 | i32x2_pack (i32x2 lo, i32x2 hi) |
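
The pack functions narrow two input vectors into one result of half the lane width, saturating each lane; the narrowed lo lanes fill the low half of the result and the hi lanes the high half. Sketch:

    /* Narrow 16 u16 values held in two registers to 16 u8 values;
       lanes saturate to 0..255 (note that _mm_packus_epi16, the usual
       implementation, treats its inputs as signed i16). */
    static_always_inline u8x16
    narrow_u16_to_u8 (u16x8 lo, u16x8 hi)
    {
      return u16x8_pack (lo, hi);
    }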
|
static u64x2 | u64x2_read_lo (u64x2 x, u64 *a) |
|
static u64x2 | u64x2_read_hi (u64x2 x, u64 *a) |
|
static void | u64x2_write_lo (u64x2 x, u64 *a) |
|
static void | u64x2_write_hi (u64x2 x, u64 *a) |
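
u64x2_read_lo/_hi load one 64-bit half of the vector from memory, leaving the other half of x intact; _write_lo/_write_hi store the corresponding half. Sketch:

    /* Gather two u64 values into one register, then store them back
       swapped: *b receives the lane loaded from a, and vice versa. */
    static_always_inline void
    swap_u64_pair (u64x2 x, u64 * a, u64 * b)
    {
      x = u64x2_read_lo (x, a);
      x = u64x2_read_hi (x, b);
      u64x2_write_lo (x, b);
      u64x2_write_hi (x, a);
    }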
|
static i16x8 | i16x8_mul_lo (i16x8 x, i16x8 y)
|
static u16x8 | u16x8_mul_lo (u16x8 x, u16x8 y) |
|
static i16x8 | i16x8_mul_hi (i16x8 x, i16x8 y) |
|
static u16x8 | u16x8_mul_hi (u16x8 x, u16x8 y) |
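
mul_lo returns the low 16 bits of each 16x16 -> 32-bit product (_mm_mullo_epi16) and mul_hi the high 16 bits; interleaving the two halves rebuilds the full product. Sketch:

    /* Full 32-bit products of the low four lane pairs: interleaving
       (lo0,hi0,lo1,hi1,...) yields lo | hi << 16 per u32 lane on a
       little-endian machine. */
    static_always_inline u32x4
    widening_mul_lo4 (u16x8 x, u16x8 y)
    {
      return (u32x4) u16x8_interleave_lo (u16x8_mul_lo (x, y),
                                          u16x8_mul_hi (x, y));
    }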
|
static u32x2 | u32x2_shift_left (u32x2 x, u32x2 i) |
|
static u16x4 | u16x4_shift_right (u16x4 x, u16x4 i) |
|
static u32x2 | u32x2_shift_right (u32x2 x, u32x2 i) |
|
static i16x4 | i16x4_shift_left (i16x4 x, i16x4 i) |
|
static i32x2 | i32x2_shift_left (i32x2 x, i32x2 i) |
|
static i16x4 | i16x4_shift_right (i16x4 x, i16x4 i) |
|
static i32x2 | i32x2_shift_right (i32x2 x, i32x2 i) |
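
These shifts operate on the 64-bit (MMX-width) vector types; the hardware applies a single count, taken from the second operand, to every lane. A sketch, assuming the count is carried in the low lane:

    /* Shift both 32-bit lanes of x left by n bits. */
    static_always_inline u32x2
    shift_both_lanes (u32x2 x, u32 n)
    {
      u32x2 i = { n, 0 };
      return u32x2_shift_left (x, i);
    }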
|
static u32 | u32x4_get0 (u32x4 x) |
|
static u32x4 | u32x4_set0 (u32 x) |
|
static i32x4 | i32x4_set0 (i32 x) |
|
static i32 | i32x4_get0 (i32x4 x) |
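
get0/set0 move a scalar between lane 0 and a general-purpose register (_mm_cvtsi128_si32/_mm_cvtsi32_si128); set0 zeroes lanes 1..3. Sketch:

    /* Round-trip a scalar through lane 0; returns x unchanged. */
    static_always_inline u32
    lane0_round_trip (u32 x)
    {
      return u32x4_get0 (u32x4_set0 (x));
    }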
|
static u32 | u8x16_compare_byte_mask (u8x16 x) |
|
static u32 | u32x4_compare_word_mask (u32x4 x) |
|
static u32 | u8x16_zero_byte_mask (u8x16 x) |
|
static u32 | u16x8_zero_byte_mask (u16x8 x) |
|
static u32 | u32x4_zero_byte_mask (u32x4 x) |
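
compare_byte_mask collapses a per-byte compare result into a 16-bit scalar, one bit per byte lane (_mm_movemask_epi8); the zero_byte_mask variants report which lanes are zero. A typical use is locating the first zero byte:

    /* Index of the first zero byte in v, or 16 if there is none. */
    static_always_inline int
    first_zero_byte (u8x16 v)
    {
      u32 m = u8x16_zero_byte_mask (v);
      return m ? __builtin_ctz (m) : 16;
    }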
|
static u32 | u8x16_max_scalar (u8x16 x) |
|
static u8 | u8x16_min_scalar (u8x16 x) |
|
static i16 | i16x8_max_scalar (i16x8 x) |
|
static i16 | i16x8_min_scalar (i16x8 x) |
|
static_always_inline u32 | u32x4_min_scalar (u32x4 v) |
|
static_always_inline u32 | u32x4_max_scalar (u32x4 v) |
|
static_always_inline i32 | i32x4_min_scalar (i32x4 v)

static_always_inline i32 | i32x4_max_scalar (i32x4 v)
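
The *_scalar reductions fold a whole vector down to the extreme lane value by repeated shift-and-compare steps. For example, a bounds check over four counters at once:

    /* Nonzero if any lane of v exceeds limit. */
    static_always_inline int
    any_lane_above (u32x4 v, u32 limit)
    {
      return u32x4_max_scalar (v) > limit;
    }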
|
static_always_inline u16 | u8x16_msb_mask (u8x16 v) |
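
u8x16_msb_mask gathers the most significant bit of every byte lane into a 16-bit scalar (_mm_movemask_epi8); this is the capability the CLIB_HAVE_VEC128_MSB_MASK define above advertises. Combined with a compare it yields a per-lane bitmask:

    /* One bit per byte lane where v1 > v2 (see u8x16_is_greater below). */
    static_always_inline u16
    greater_mask (u8x16 v1, u8x16 v2)
    {
      return u8x16_msb_mask (u8x16_is_greater (v1, v2));
    }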
|
static_always_inline u32x4 | u32x4_byte_swap (u32x4 v) |
|
static_always_inline u16x8 | u16x8_byte_swap (u16x8 v) |
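
The byte_swap functions reverse the byte order inside each lane, converting lane values between big- and little-endian representation. Sketch:

    /* Four network-order u32 values to host order on a little-endian CPU. */
    static_always_inline u32x4
    ntohl_x4 (u32x4 v)
    {
      return u32x4_byte_swap (v);
    }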
|
static_always_inline u32x4 | u32x4_hadd (u32x4 v1, u32x4 v2) |
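
u32x4_hadd adds horizontally adjacent lane pairs (_mm_hadd_epi32), returning { v1[0]+v1[1], v1[2]+v1[3], v2[0]+v2[1], v2[2]+v2[3] }; applying it twice sums all four lanes:

    /* Total of all four lanes; the second hadd replicates it everywhere. */
    static_always_inline u32
    sum_lanes (u32x4 v)
    {
      v = u32x4_hadd (v, v);  /* { v0+v1, v2+v3, v0+v1, v2+v3 } */
      v = u32x4_hadd (v, v);
      return v[0];
    }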
|
static_always_inline u8x16 | u8x16_shuffle (u8x16 v, u8x16 m) |
|
static_always_inline u32x4 | u32x4_shuffle (u32x4 v, const int a, const int b, const int c, const int d) |
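
u8x16_shuffle permutes bytes through an index vector (_mm_shuffle_epi8; an index byte with its high bit set zeroes that lane), while u32x4_shuffle takes four immediate lane selectors. Sketch:

    /* Reverse the order of the four 32-bit lanes. */
    static_always_inline u32x4
    reverse_lanes (u32x4 v)
    {
      return u32x4_shuffle (v, 3, 2, 1, 0);
    }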
|
static_always_inline u64x2 | u64x2_gather (void *p0, void *p1)
|
static_always_inline u32x4 | u32x4_gather (void *p0, void *p1, void *p2, void *p3) |
|
static_always_inline void | u64x2_scatter (u64x2 r, void *p0, void *p1) |
|
static_always_inline void | u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3) |
|
static_always_inline void | u64x2_scatter_one (u64x2 r, int index, void *p) |
|
static_always_inline void | u32x4_scatter_one (u32x4 r, int index, void *p) |
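
gather assembles a vector from scalar loads through the given pointers (lane 0 from p0, lane 1 from p1, and so on), scatter performs the inverse stores, and scatter_one stores only the selected lane. Sketch:

    /* Swap two u64 values in memory through one vector register. */
    static_always_inline void
    swap_in_memory (u64 * a, u64 * b)
    {
      u64x2 v = u64x2_gather (a, b);
      u64x2_scatter (v, b, a);
    }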
|
static_always_inline u8x16 | u8x16_is_greater (u8x16 v1, u8x16 v2) |
|
static_always_inline u8x16 | u8x16_blend (u8x16 v1, u8x16 v2, u8x16 mask) |
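
u8x16_is_greater yields an all-ones byte lane wherever v1 > v2 (the underlying _mm_cmpgt_epi8 compares bytes as signed), and u8x16_blend picks each byte from v2 where the mask byte's top bit is set, else from v1 (_mm_blendv_epi8). Together they make a branch-free per-lane select:

    /* The larger byte of each lane pair, per the signed compare
       noted above; purely illustrative. */
    static_always_inline u8x16
    lane_max (u8x16 v1, u8x16 v2)
    {
      return u8x16_blend (v2, v1, u8x16_is_greater (v1, v2));
    }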
|