#ifndef included_clib_memcpy_sse3_h
#define included_clib_memcpy_sse3_h

#include <x86intrin.h>

/* Copy 16 bytes from one location to another;
 * the locations must not overlap. */
static inline void
clib_mov16 (u8 * dst, const u8 * src)
{
  __m128i xmm0;

  xmm0 = _mm_loadu_si128 ((const __m128i *) src);
  _mm_storeu_si128 ((__m128i *) dst, xmm0);
}
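The excerpt omits the wider movers documented in the function list at the end of this page. Assuming the usual doubling pattern for this family of helpers (each one invokes the next-smaller mover twice), a minimal sketch:

/* Sketch, assuming the doubling pattern; the exact bodies
 * are elided in this excerpt. */
static inline void
clib_mov32 (u8 * dst, const u8 * src)
{
  clib_mov16 (dst + 0 * 16, src + 0 * 16);
  clib_mov16 (dst + 1 * 16, src + 1 * 16);
}

static inline void
clib_mov64 (u8 * dst, const u8 * src)
{
  clib_mov32 (dst + 0 * 32, src + 0 * 32);
  clib_mov32 (dst + 1 * 32, src + 1 * 32);
}

static inline void
clib_mov128 (u8 * dst, const u8 * src)
{
  clib_mov64 (dst + 0 * 64, src + 0 * 64);
  clib_mov64 (dst + 1 * 64, src + 1 * 64);
}

static inline void
clib_mov256 (u8 * dst, const u8 * src)
{
  clib_mov128 (dst + 0 * 128, src + 0 * 128);
  clib_mov128 (dst + 1 * 128, src + 1 * 128);
}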
#define CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, offset)                \
({                                                                      \
  int tmp;                                                              \
  while (len >= 128 + 16 - offset) {                                    \
      xmm0 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 0 * 16)); \
      len -= 128;                                                       \
      xmm1 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 1 * 16)); \
      xmm2 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 2 * 16)); \
      xmm3 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 3 * 16)); \
      xmm4 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 4 * 16)); \
      xmm5 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 5 * 16)); \
      xmm6 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 6 * 16)); \
      xmm7 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 7 * 16)); \
      xmm8 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 8 * 16)); \
      src = (const u8 *)src + 128;                                      \
      _mm_storeu_si128((__m128i *)((u8 *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
      _mm_storeu_si128((__m128i *)((u8 *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
      _mm_storeu_si128((__m128i *)((u8 *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
      _mm_storeu_si128((__m128i *)((u8 *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
      _mm_storeu_si128((__m128i *)((u8 *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
      _mm_storeu_si128((__m128i *)((u8 *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
      _mm_storeu_si128((__m128i *)((u8 *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
      _mm_storeu_si128((__m128i *)((u8 *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
      dst = (u8 *)dst + 128;                                            \
  }                                                                     \
  tmp = len;                                                            \
  len = ((len - 16 + offset) & 127) + 16 - offset;                      \
  tmp -= len;                                                           \
  src = (const u8 *)src + tmp;                                          \
  dst = (u8 *)dst + tmp;                                                \
  if (len >= 32 + 16 - offset) {                                        \
      while (len >= 32 + 16 - offset) {                                 \
          xmm0 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 0 * 16)); \
          len -= 32;                                                    \
          xmm1 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 1 * 16)); \
          xmm2 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 2 * 16)); \
          src = (const u8 *)src + 32;                                   \
          _mm_storeu_si128((__m128i *)((u8 *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
          _mm_storeu_si128((__m128i *)((u8 *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
          dst = (u8 *)dst + 32;                                         \
      }                                                                 \
      tmp = len;                                                        \
      len = ((len - 16 + offset) & 31) + 16 - offset;                   \
      tmp -= len;                                                       \
      src = (const u8 *)src + tmp;                                      \
      dst = (u8 *)dst + tmp;                                            \
  }                                                                     \
})

#define CLIB_MVUNALIGN_LEFT47(dst, src, len, offset)                    \
({                                                                      \
  switch (offset) {                                                     \
  case 0x01: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, n, 0x01); break;       \
  case 0x02: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, n, 0x02); break;       \
  case 0x03: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, n, 0x03); break;       \
  case 0x04: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, n, 0x04); break;       \
  case 0x05: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, n, 0x05); break;       \
  case 0x06: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, n, 0x06); break;       \
  case 0x07: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, n, 0x07); break;       \
  case 0x08: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, n, 0x08); break;       \
  case 0x09: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, n, 0x09); break;       \
  case 0x0A: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, n, 0x0A); break;       \
  case 0x0B: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, n, 0x0B); break;       \
  case 0x0C: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, n, 0x0C); break;       \
  case 0x0D: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, n, 0x0D); break;       \
  case 0x0E: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, n, 0x0E); break;       \
  case 0x0F: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, n, 0x0F); break;       \
  default:;                                                             \
  }                                                                     \
})
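The 15-way switch exists because _mm_alignr_epi8 (PALIGNR, an SSSE3 instruction) takes its byte-shift count as a compile-time immediate, so srcofs cannot be passed through as a variable; note also that the case bodies reference the caller's n rather than the macro's len parameter, as in the listing. A minimal self-contained sketch of one PALIGNR step with the offset fixed at 5 (buffer contents and names are illustrative only, not part of the header):

#include <stdio.h>
#include <x86intrin.h>

/* Hypothetical demo: recover 16 bytes starting 5 bytes past a
 * rounded-down load address with one PALIGNR. Build with -mssse3. */
int
main (void)
{
  /* 5 junk bytes, then the 16 payload bytes we actually want. */
  unsigned char src[32] = "XXXXX0123456789abcdefYYYYYYYYYY";
  unsigned char dst[17] = { 0 };

  /* Two 16-byte loads from the rounded-down address (src + 5 - 5). */
  __m128i lo = _mm_loadu_si128 ((const __m128i *) (src + 0));
  __m128i hi = _mm_loadu_si128 ((const __m128i *) (src + 16));

  /* Concatenate hi:lo and shift right by 5 bytes, recovering the
   * 16 bytes that start at src + 5 in a single 16-byte store. */
  _mm_storeu_si128 ((__m128i *) dst, _mm_alignr_epi8 (hi, lo, 5));

  printf ("%.16s\n", (char *) dst);	/* prints 0123456789abcdef */
  return 0;
}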
static inline void *
clib_memcpy (void *dst, const void *src, size_t n)
{
  __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
  uword dstu = (uword) dst;
  uword srcu = (uword) src;
  void *ret = dst;
  uword dstofss;
  uword srcofs;

  /* Copy less than 16 bytes */
  if (n < 16) {
      if (n & 0x01) {
          *(u8 *) dstu = *(const u8 *) srcu;
          srcu = (uword) ((const u8 *) srcu + 1);
          dstu = (uword) ((u8 *) dstu + 1);
      }
      if (n & 0x02) {
          *(u16 *) dstu = *(const u16 *) srcu;
          srcu = (uword) ((const u16 *) srcu + 1);
          dstu = (uword) ((u16 *) dstu + 1);
      }
      if (n & 0x04) {
          *(u32 *) dstu = *(const u32 *) srcu;
          srcu = (uword) ((const u32 *) srcu + 1);
          dstu = (uword) ((u32 *) dstu + 1);
      }
      if (n & 0x08) {
          *(u64 *) dstu = *(const u64 *) srcu;
      }
      return ret;
  }
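  /* The low four bits of n select which power-of-two copies run above,
   * covering every length 0..15 without a loop: e.g. n = 13 = 0b1101
   * copies 1 + 4 + 8 bytes, advancing dstu/srcu after each store. */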
  /* Fast path when the copy size does not exceed 512 bytes */
  if (n <= 32) {
      clib_mov16 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
  }
  if (n <= 48) {
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
  }
  if (n <= 64) {
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst + 32, (const u8 *) src + 32);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
  }
  if (n <= 128) {
      goto COPY_BLOCK_128_BACK15;
  }
  if (n <= 512) {
      if (n >= 256) {
          n -= 256;
          clib_mov128 ((u8 *) dst, (const u8 *) src);
          clib_mov128 ((u8 *) dst + 128, (const u8 *) src + 128);
          src = (const u8 *) src + 256;
          dst = (u8 *) dst + 256;
      }
COPY_BLOCK_255_BACK15:
      if (n >= 128) {
          n -= 128;
          clib_mov128 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 128;
          dst = (u8 *) dst + 128;
      }
COPY_BLOCK_128_BACK15:
      if (n >= 64) {
          n -= 64;
          clib_mov64 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 64;
          dst = (u8 *) dst + 64;
      }
COPY_BLOCK_64_BACK15:
      if (n >= 32) {
          n -= 32;
          clib_mov32 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 32;
          dst = (u8 *) dst + 32;
      }
      if (n > 16) {
          clib_mov16 ((u8 *) dst, (const u8 *) src);
          clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
          return ret;
      }
      if (n > 0) {
          clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      }
      return ret;
  }
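  /* The "BACK15" tails above finish with a clib_mov16 at dst - 16 + n:
   * the last 16 bytes are copied from the end backwards, deliberately
   * overlapping bytes already written, so no scalar tail loop is
   * needed. Up to 15 bytes get re-copied, hence the label suffix. */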
  /* Make the store aligned when the copy size exceeds 512 bytes, and
   * make sure the first 15 bytes are copied, because the unaligned
   * copy functions need up to 15 bytes of backwards access. */
  dstofss = 16 - ((uword) dst & 0x0F) + 16;
  n -= dstofss;
  clib_mov32 ((u8 *) dst, (const u8 *) src);
  src = (const u8 *) src + dstofss;
  dst = (u8 *) dst + dstofss;
  srcofs = ((uword) src & 0x0F);
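  /* Worked example of the prologue arithmetic: if dst ends in 0x07,
   * dstofss = 16 - 7 + 16 = 25; the 32-byte head copy covers those
   * first 25 bytes and both cursors advance by 25, leaving dst
   * 16-byte aligned (0x07 + 25 ends in 0x20). srcofs then records how
   * far src now sits past its own 16-byte boundary, which selects the
   * PALIGNR case below. */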
  if (srcofs == 0) {
      /* Copy 256-byte blocks */
      for (; n >= 256; n -= 256) {
          clib_mov256 ((u8 *) dst, (const u8 *) src);
          dst = (u8 *) dst + 256;
          src = (const u8 *) src + 256;
      }
      /* Copy whatever is left */
      goto COPY_BLOCK_255_BACK15;
  }

  /* For copy with unaligned load */
  CLIB_MVUNALIGN_LEFT47 (dst, src, n, srcofs);

  /* Copy whatever is left */
  goto COPY_BLOCK_64_BACK15;
}
#undef CLIB_MVUNALIGN_LEFT47_IMM
#undef CLIB_MVUNALIGN_LEFT47

#endif /* included_clib_memcpy_sse3_h */

#define CLIB_MVUNALIGN_LEFT47(dst, src, len, offset)
Macro for copying an unaligned block from one location to another with a constant load offset, leaving at most 47 bytes for the caller's tail code...
static void clib_mov16(u8 *dst, const u8 *src)
static void clib_mov32(u8 *dst, const u8 *src)
static void clib_mov64(u8 *dst, const u8 *src)
static void clib_mov128(u8 *dst, const u8 *src)
static void clib_mov256(u8 *dst, const u8 *src)
static void * clib_memcpy(void *dst, const void *src, size_t n)
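A minimal usage sketch of clib_memcpy. The u8/u16/u32/u64/uword typedefs normally come from vppinfra; they are stubbed with <stdint.h> equivalents here so the snippet stands alone, and the include path is hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in typedefs for the vppinfra types the header expects. */
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef uintptr_t uword;

#include "memcpy_sse3.h"	/* hypothetical local copy of this header */

int
main (void)
{
  u8 src[1000], dst[1000];
  for (int i = 0; i < 1000; i++)
    src[i] = (u8) i;

  /* Non-overlapping copy; n = 1000 exercises the > 512-byte path. */
  clib_memcpy (dst, src, sizeof (src));
  printf ("%s\n", memcmp (dst, src, sizeof (src)) == 0 ? "ok" : "mismatch");
  return 0;
}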