#ifndef included_clib_memcpy_sse3_h
#define included_clib_memcpy_sse3_h

#include <x86intrin.h>

static inline void
clib_mov16 (u8 * dst, const u8 * src)
{
  __m128i xmm0;

  xmm0 = _mm_loadu_si128 ((const __m128i *) src);
  _mm_storeu_si128 ((__m128i *) dst, xmm0);
}
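/* The wider movers called below are missing from this fragment; a minimal
 * sketch, assuming each is the usual composition of two half-size copies
 * (the signatures clib_mov32 .. clib_mov256 are the ones this header
 * declares). */
static inline void
clib_mov32 (u8 * dst, const u8 * src)
{
  clib_mov16 ((u8 *) dst + 0 * 16, (const u8 *) src + 0 * 16);
  clib_mov16 ((u8 *) dst + 1 * 16, (const u8 *) src + 1 * 16);
}

static inline void
clib_mov64 (u8 * dst, const u8 * src)
{
  clib_mov32 ((u8 *) dst + 0 * 32, (const u8 *) src + 0 * 32);
  clib_mov32 ((u8 *) dst + 1 * 32, (const u8 *) src + 1 * 32);
}

static inline void
clib_mov128 (u8 * dst, const u8 * src)
{
  clib_mov64 ((u8 *) dst + 0 * 64, (const u8 *) src + 0 * 64);
  clib_mov64 ((u8 *) dst + 1 * 64, (const u8 *) src + 1 * 64);
}

static inline void
clib_mov256 (u8 * dst, const u8 * src)
{
  clib_mov128 ((u8 *) dst + 0 * 128, (const u8 *) src + 0 * 128);
  clib_mov128 ((u8 *) dst + 1 * 128, (const u8 *) src + 1 * 128);
}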
/**
 * Macro for copying an unaligned block from one location to another
 * with a constant load offset; 47 bytes leftover maximum; locations
 * must not overlap.
 * Requirements:
 * - store is aligned
 * - load offset is <offset>, which must be an immediate value in [1, 15]
 * - for <src>, <offset> bytes backwards and <16 - offset> bytes forwards
 *   must be valid to load
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> must be pre-defined
 */
#define CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, offset) \
({ \
    int tmp; \
    while (len >= 128 + 16 - offset) { \
        xmm0 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 0 * 16)); \
        len -= 128; \
        xmm1 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 1 * 16)); \
        xmm2 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 2 * 16)); \
        xmm3 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 3 * 16)); \
        xmm4 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 4 * 16)); \
        xmm5 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 5 * 16)); \
        xmm6 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 6 * 16)); \
        xmm7 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 7 * 16)); \
        xmm8 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 8 * 16)); \
        src = (const u8 *)src + 128; \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
        dst = (u8 *)dst + 128; \
    } \
    tmp = len; \
    len = ((len - 16 + offset) & 127) + 16 - offset; \
    tmp -= len; \
    src = (const u8 *)src + tmp; \
    dst = (u8 *)dst + tmp; \
    if (len >= 32 + 16 - offset) { \
        while (len >= 32 + 16 - offset) { \
            xmm0 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 0 * 16)); \
            len -= 32; \
            xmm1 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 1 * 16)); \
            xmm2 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 2 * 16)); \
            src = (const u8 *)src + 32; \
            _mm_storeu_si128((__m128i *)((u8 *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
            _mm_storeu_si128((__m128i *)((u8 *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
            dst = (u8 *)dst + 32; \
        } \
        tmp = len; \
        len = ((len - 16 + offset) & 31) + 16 - offset; \
        tmp -= len; \
        src = (const u8 *)src + tmp; \
        dst = (u8 *)dst + tmp; \
    } \
})

/**
 * Macro for copying an unaligned block from one location to another;
 * 47 bytes leftover maximum; locations must not overlap.
 * A switch is used because _mm_alignr_epi8 requires an immediate shift
 * count; see CLIB_MVUNALIGN_LEFT47_IMM for the remaining requirements.
 */
#define CLIB_MVUNALIGN_LEFT47(dst, src, len, offset) \
({ \
    switch (offset) { \
    case 0x01: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x01); break; \
    case 0x02: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x02); break; \
    case 0x03: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x03); break; \
    case 0x04: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x04); break; \
    case 0x05: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x05); break; \
    case 0x06: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x06); break; \
    case 0x07: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x07); break; \
    case 0x08: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x08); break; \
    case 0x09: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x09); break; \
    case 0x0A: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0A); break; \
    case 0x0B: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0B); break; \
    case 0x0C: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0C); break; \
    case 0x0D: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0D); break; \
    case 0x0E: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0E); break; \
    case 0x0F: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0F); break; \
    default:; \
    } \
})

static inline void *
clib_memcpy_fast (void *dst, const void *src, size_t n)
{
  __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
  uword dstu = (uword) dst;
  uword srcu = (uword) src;
  void *ret = dst;
  uword dstofss;
  uword srcofs;
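  /* Why nine xmm temporaries: the 128-byte unaligned loop issues 8 + 1
   * loads per pass, since every _mm_alignr_epi8 store merges two
   * consecutive loads (xmmK and xmmK+1), so the eighth store needs a
   * ninth load as its lookahead. */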
  /**
   * Copy less than 16 bytes
   */
  if (n < 16)
    {
      if (n & 0x01)
	{
	  *(u8 *) dstu = *(const u8 *) srcu;
	  srcu = (uword) ((const u8 *) srcu + 1);
	  dstu = (uword) ((u8 *) dstu + 1);
	}
      if (n & 0x02)
	{
	  *(u16 *) dstu = *(const u16 *) srcu;
	  srcu = (uword) ((const u16 *) srcu + 1);
	  dstu = (uword) ((u16 *) dstu + 1);
	}
      if (n & 0x04)
	{
	  *(u32 *) dstu = *(const u32 *) srcu;
	  srcu = (uword) ((const u32 *) srcu + 1);
	  dstu = (uword) ((u32 *) dstu + 1);
	}
      if (n & 0x08)
	{
	  *(u64 *) dstu = *(const u64 *) srcu;
	}
      return ret;
    }
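  /* Worked example for the branch-by-bit head copy above (illustrative):
   * n = 13 = 0b1101 takes the 1-byte move (bit 0), skips the 2-byte move
   * (bit 1 clear), then takes the 4- and 8-byte moves, so every n < 16 is
   * handled without a loop. */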
  /**
   * Fast way when copy size doesn't exceed 512 bytes
   */
  if (n <= 32)
    {
      clib_mov16 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
  if (n <= 48)
    {
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
  if (n <= 64)
    {
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst + 32, (const u8 *) src + 32);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
  if (n <= 128)
    {
      goto COPY_BLOCK_128_BACK15;
    }
  if (n <= 512)
    {
      if (n >= 256)
	{
	  n -= 256;
	  clib_mov128 ((u8 *) dst, (const u8 *) src);
	  clib_mov128 ((u8 *) dst + 128, (const u8 *) src + 128);
	  src = (const u8 *) src + 256;
	  dst = (u8 *) dst + 256;
	}
    COPY_BLOCK_255_BACK15:
      if (n >= 128)
	{
	  n -= 128;
	  clib_mov128 ((u8 *) dst, (const u8 *) src);
	  src = (const u8 *) src + 128;
	  dst = (u8 *) dst + 128;
	}
    COPY_BLOCK_128_BACK15:
      if (n >= 64)
	{
	  n -= 64;
	  clib_mov64 ((u8 *) dst, (const u8 *) src);
	  src = (const u8 *) src + 64;
	  dst = (u8 *) dst + 64;
	}
    COPY_BLOCK_64_BACK15:
      if (n >= 32)
	{
	  n -= 32;
	  clib_mov32 ((u8 *) dst, (const u8 *) src);
	  src = (const u8 *) src + 32;
	  dst = (u8 *) dst + 32;
	}
      if (n > 16)
	{
	  clib_mov16 ((u8 *) dst, (const u8 *) src);
	  clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
	  return ret;
	}
      if (n > 0)
	{
	  clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
	}
      return ret;
    }
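  /* Illustrative note on the "BACK15" tails above: leftovers never need a
   * byte loop because the closing clib_mov16 ends exactly at dst + n.
   * E.g. a leftover of 40 bytes takes the 32-byte block, leaving n = 8;
   * the final clib_mov16 (dst - 16 + 8, ...) re-copies 8 already-written
   * bytes and writes the 8 new ones in a single store. */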
  /**
   * Make store aligned when copy size exceeds 512 bytes,
   * and make sure the first 15 bytes are copied, because
   * the unaligned copy functions need up to 15 bytes of
   * backwards access on src.
   */
  dstofss = (uword) dst & 0x0F;
  if (dstofss > 0)
    {
      dstofss = 16 - dstofss + 16;
      n -= dstofss;
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      src = (const u8 *) src + dstofss;
      dst = (u8 *) dst + dstofss;
    }
  srcofs = ((uword) src & 0x0F);
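  /* Worked example (illustrative): if dst ends in ...1001 (dstofss = 9),
   * the adjusted dstofss is 16 - 9 + 16 = 23; the 32-byte head copy covers
   * those 23 bytes, dst + 23 is 16-byte aligned, and src has advanced by
   * at least 16, so the "src - offset" loads in the LEFT47 loop stay
   * inside the source buffer. */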
  /**
   * For aligned copy
   */
  if (srcofs == 0)
    {
      /**
       * Copy 256-byte blocks
       */
      for (; n >= 256; n -= 256)
	{
	  clib_mov256 ((u8 *) dst, (const u8 *) src);
	  dst = (u8 *) dst + 256;
	  src = (const u8 *) src + 256;
	}

      /**
       * Copy whatever is left
       */
      goto COPY_BLOCK_255_BACK15;
    }

  /**
   * For copy with unaligned load
   */
  CLIB_MVUNALIGN_LEFT47 (dst, src, n, srcofs);

  /**
   * Copy whatever is left
   */
  goto COPY_BLOCK_64_BACK15;
}
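/* A minimal sketch (illustrative, not part of the original header) of the
 * trick the LEFT47 macros are built on, assuming ((uword) src & 0x0F) == 3:
 * two loads from the 16-byte-aligned base straddling src are merged by
 * _mm_alignr_epi8 into exactly src[0..15].  The shift count must be an
 * immediate, which is why CLIB_MVUNALIGN_LEFT47 dispatches through a
 * switch instead of passing srcofs directly. */
static inline void
clib_memcpy_sse3_alignr_sketch (u8 * dst, const u8 * src)
{
  const u8 *base = (const u8 *) ((uword) src & ~(uword) 0x0F);
  __m128i lo = _mm_loadu_si128 ((const __m128i *) base);	/* src[-3..12] */
  __m128i hi = _mm_loadu_si128 ((const __m128i *) (base + 16));	/* src[13..28] */
  /* (hi:lo) shifted right by 3 bytes == src[0..15] */
  _mm_storeu_si128 ((__m128i *) dst, _mm_alignr_epi8 (hi, lo, 3));
}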
#undef CLIB_MVUNALIGN_LEFT47_IMM
#undef CLIB_MVUNALIGN_LEFT47

#endif /* included_clib_memcpy_sse3_h */