FD.io VPP v16.06
Vector Packet Processing
memcpy_sse3.h
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*-
 * BSD LICENSE
 *
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef included_clib_memcpy_sse3_h
#define included_clib_memcpy_sse3_h

#include <stdint.h>
#include <x86intrin.h>

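/* The fixed-width types used below (u8, u16, u32, u64, uword) come from
 * vppinfra (<vppinfra/types.h>); this header assumes they are already in
 * scope when it is included.  The clib_movNN helpers copy fixed-size blocks
 * with unaligned 16-byte SSE loads and stores. */
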
static inline void
clib_mov16(u8 *dst, const u8 *src)
{
  __m128i xmm0;

  xmm0 = _mm_loadu_si128((const __m128i *)src);
  _mm_storeu_si128((__m128i *)dst, xmm0);
}

static inline void
clib_mov32(u8 *dst, const u8 *src)
{
  clib_mov16((u8 *)dst + 0 * 16, (const u8 *)src + 0 * 16);
  clib_mov16((u8 *)dst + 1 * 16, (const u8 *)src + 1 * 16);
}

static inline void
clib_mov64(u8 *dst, const u8 *src)
{
  clib_mov32((u8 *)dst + 0 * 32, (const u8 *)src + 0 * 32);
  clib_mov32((u8 *)dst + 1 * 32, (const u8 *)src + 1 * 32);
}

static inline void
clib_mov128(u8 *dst, const u8 *src)
{
  clib_mov64((u8 *)dst + 0 * 64, (const u8 *)src + 0 * 64);
  clib_mov64((u8 *)dst + 1 * 64, (const u8 *)src + 1 * 64);
}

static inline void
clib_mov256(u8 *dst, const u8 *src)
{
  clib_mov128((u8 *)dst + 0 * 128, (const u8 *)src + 0 * 128);
  clib_mov128((u8 *)dst + 1 * 128, (const u8 *)src + 1 * 128);
}

/**
 * Macro for copying an unaligned block from one location to another with a
 * constant load offset, leaving a tail of at most 47 bytes for the caller;
 * the locations must not overlap.
 * Requirements:
 * - The store address is 16-byte aligned
 * - The load offset is <offset>, which must be an immediate value in [1, 15]
 * - For <src>, make sure <offset> bytes backwards and <16 - offset> bytes
 *   forwards are readable
 * - <dst>, <src> and <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> must be pre-defined
 */
#define CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, offset) \
({ \
  int tmp; \
  while (len >= 128 + 16 - offset) { \
    xmm0 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 0 * 16)); \
    len -= 128; \
    xmm1 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 1 * 16)); \
    xmm2 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 2 * 16)); \
    xmm3 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 3 * 16)); \
    xmm4 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 4 * 16)); \
    xmm5 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 5 * 16)); \
    xmm6 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 6 * 16)); \
    xmm7 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 7 * 16)); \
    xmm8 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 8 * 16)); \
    src = (const u8 *)src + 128; \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
    dst = (u8 *)dst + 128; \
  } \
  tmp = len; \
  len = ((len - 16 + offset) & 127) + 16 - offset; \
  tmp -= len; \
  src = (const u8 *)src + tmp; \
  dst = (u8 *)dst + tmp; \
  if (len >= 32 + 16 - offset) { \
    while (len >= 32 + 16 - offset) { \
      xmm0 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 0 * 16)); \
      len -= 32; \
      xmm1 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 1 * 16)); \
      xmm2 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 2 * 16)); \
      src = (const u8 *)src + 32; \
      _mm_storeu_si128((__m128i *)((u8 *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
      _mm_storeu_si128((__m128i *)((u8 *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
      dst = (u8 *)dst + 32; \
    } \
    tmp = len; \
    len = ((len - 16 + offset) & 31) + 16 - offset; \
    tmp -= len; \
    src = (const u8 *)src + tmp; \
    dst = (u8 *)dst + tmp; \
  } \
})
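
/*
 * Example for concreteness (offset = 5, values hypothetical): the loads above
 * fetch 16-byte blocks starting at src - 5, src + 11, src + 27, ...
 * _mm_alignr_epi8(xmm1, xmm0, 5) concatenates xmm1:xmm0 and shifts the
 * 32-byte value right by 5 bytes, so the first store writes src[0..15], the
 * next writes src[16..31], and so on: the loads share one fixed misalignment
 * while the stores walk dst in aligned 16-byte steps.
 */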

/**
 * Macro for copying an unaligned block from one location to another,
 * leaving a tail of at most 47 bytes for the caller; the locations must not
 * overlap.
 * A switch is used here because the aligning instruction requires an
 * immediate value for the shift count.
 * Requirements:
 * - The store address is 16-byte aligned
 * - The load offset is <offset>, which must be within [1, 15]
 * - For <src>, make sure <offset> bytes backwards and <16 - offset> bytes
 *   forwards are readable
 * - <dst>, <src> and <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> used in CLIB_MVUNALIGN_LEFT47_IMM must be
 *   pre-defined
 */
#define CLIB_MVUNALIGN_LEFT47(dst, src, len, offset) \
({ \
  switch (offset) { \
  case 0x01: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x01); break; \
  case 0x02: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x02); break; \
  case 0x03: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x03); break; \
  case 0x04: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x04); break; \
  case 0x05: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x05); break; \
  case 0x06: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x06); break; \
  case 0x07: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x07); break; \
  case 0x08: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x08); break; \
  case 0x09: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x09); break; \
  case 0x0A: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0A); break; \
  case 0x0B: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0B); break; \
  case 0x0C: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0C); break; \
  case 0x0D: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0D); break; \
  case 0x0E: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0E); break; \
  case 0x0F: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0F); break; \
  default:; \
  } \
})

static inline void *
clib_memcpy(void *dst, const void *src, size_t n)
{
  __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
  uword dstu = (uword)dst;
  uword srcu = (uword)src;
  void *ret = dst;
  size_t dstofss;
  size_t srcofs;

  /**
   * Copy less than 16 bytes
   */
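  /* The set bits of n (n < 16) select which of the 1-, 2-, 4- and 8-byte
   * copies below run; together they move exactly n bytes. */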
  if (n < 16) {
    if (n & 0x01) {
      *(u8 *)dstu = *(const u8 *)srcu;
      srcu = (uword)((const u8 *)srcu + 1);
      dstu = (uword)((u8 *)dstu + 1);
    }
    if (n & 0x02) {
      *(u16 *)dstu = *(const u16 *)srcu;
      srcu = (uword)((const u16 *)srcu + 1);
      dstu = (uword)((u16 *)dstu + 1);
    }
    if (n & 0x04) {
      *(u32 *)dstu = *(const u32 *)srcu;
      srcu = (uword)((const u32 *)srcu + 1);
      dstu = (uword)((u32 *)dstu + 1);
    }
    if (n & 0x08) {
      *(u64 *)dstu = *(const u64 *)srcu;
    }
    return ret;
  }

  /**
   * Fast way when copy size doesn't exceed 512 bytes
   */
  if (n <= 32) {
    clib_mov16((u8 *)dst, (const u8 *)src);
    clib_mov16((u8 *)dst - 16 + n, (const u8 *)src - 16 + n);
    return ret;
  }
  if (n <= 48) {
    clib_mov32((u8 *)dst, (const u8 *)src);
    clib_mov16((u8 *)dst - 16 + n, (const u8 *)src - 16 + n);
    return ret;
  }
  if (n <= 64) {
    clib_mov32((u8 *)dst, (const u8 *)src);
    clib_mov16((u8 *)dst + 32, (const u8 *)src + 32);
    clib_mov16((u8 *)dst - 16 + n, (const u8 *)src - 16 + n);
    return ret;
  }
  if (n <= 128) {
    goto COPY_BLOCK_128_BACK15;
  }
  if (n <= 512) {
    if (n >= 256) {
      n -= 256;
      clib_mov128((u8 *)dst, (const u8 *)src);
      clib_mov128((u8 *)dst + 128, (const u8 *)src + 128);
      src = (const u8 *)src + 256;
      dst = (u8 *)dst + 256;
    }
COPY_BLOCK_255_BACK15:
    if (n >= 128) {
      n -= 128;
      clib_mov128((u8 *)dst, (const u8 *)src);
      src = (const u8 *)src + 128;
      dst = (u8 *)dst + 128;
    }
COPY_BLOCK_128_BACK15:
    if (n >= 64) {
      n -= 64;
      clib_mov64((u8 *)dst, (const u8 *)src);
      src = (const u8 *)src + 64;
      dst = (u8 *)dst + 64;
    }
COPY_BLOCK_64_BACK15:
    if (n >= 32) {
      n -= 32;
      clib_mov32((u8 *)dst, (const u8 *)src);
      src = (const u8 *)src + 32;
      dst = (u8 *)dst + 32;
    }
    if (n > 16) {
      clib_mov16((u8 *)dst, (const u8 *)src);
      clib_mov16((u8 *)dst - 16 + n, (const u8 *)src - 16 + n);
      return ret;
    }
    if (n > 0) {
      clib_mov16((u8 *)dst - 16 + n, (const u8 *)src - 16 + n);
    }
    return ret;
  }

  /**
   * Make store aligned when copy size exceeds 512 bytes,
   * and make sure the first 15 bytes are copied, because
   * unaligned copy functions require up to 15 bytes
   * backwards access.
   */
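  /* dstofss ends up in [17, 32]: the 32-byte copy below covers those bytes,
   * and advancing dst by dstofss leaves it 16-byte aligned. */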
  dstofss = 16 - ((uword)dst & 0x0F) + 16;
  n -= dstofss;
  clib_mov32((u8 *)dst, (const u8 *)src);
  src = (const u8 *)src + dstofss;
  dst = (u8 *)dst + dstofss;
  srcofs = ((uword)src & 0x0F);

  /**
   * For aligned copy
   */
  if (srcofs == 0) {
    /**
     * Copy 256-byte blocks
     */
    for (; n >= 256; n -= 256) {
      clib_mov256((u8 *)dst, (const u8 *)src);
      dst = (u8 *)dst + 256;
      src = (const u8 *)src + 256;
    }

    /**
     * Copy whatever is left
     */
    goto COPY_BLOCK_255_BACK15;
  }

  /**
   * For copy with unaligned load
   */
  CLIB_MVUNALIGN_LEFT47(dst, src, n, srcofs);

  /**
   * Copy whatever is left
   */
  goto COPY_BLOCK_64_BACK15;
}

#undef CLIB_MVUNALIGN_LEFT47_IMM
#undef CLIB_MVUNALIGN_LEFT47

#endif /* included_clib_memcpy_sse3_h */
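A minimal usage sketch, not part of the file: it assumes clib_memcpy is
reached through <vppinfra/string.h>, which is the usual entry point for this
header, and the buffer sizes and names are illustrative only.

#include <vppinfra/string.h>

int
main(void)
{
  u8 src[600], dst[600];
  int i;

  for (i = 0; i < 600; i++)
    src[i] = (u8) i;

  /* n > 512 exercises the store-aligning path, and the unaligned-load macro
   * whenever src and dst end up with different 16-byte offsets. */
  clib_memcpy(dst, src, 600);

  /* Small and medium sizes take the short fast paths. */
  clib_memcpy(dst, src + 3, 47);

  return 0;
}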