memcpy_sse3.h
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*-
 * BSD LICENSE
 *
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef included_clib_memcpy_sse3_h
#define included_clib_memcpy_sse3_h

#include <stdint.h>
#include <x86intrin.h>
static inline void
clib_mov16 (u8 * dst, const u8 * src)
{
  __m128i xmm0;

  xmm0 = _mm_loadu_si128 ((const __m128i *) src);
  _mm_storeu_si128 ((__m128i *) dst, xmm0);
}

static inline void
clib_mov32 (u8 * dst, const u8 * src)
{
  clib_mov16 ((u8 *) dst + 0 * 16, (const u8 *) src + 0 * 16);
  clib_mov16 ((u8 *) dst + 1 * 16, (const u8 *) src + 1 * 16);
}

static inline void
clib_mov64 (u8 * dst, const u8 * src)
{
  clib_mov32 ((u8 *) dst + 0 * 32, (const u8 *) src + 0 * 32);
  clib_mov32 ((u8 *) dst + 1 * 32, (const u8 *) src + 1 * 32);
}

static inline void
clib_mov128 (u8 * dst, const u8 * src)
{
  clib_mov64 ((u8 *) dst + 0 * 64, (const u8 *) src + 0 * 64);
  clib_mov64 ((u8 *) dst + 1 * 64, (const u8 *) src + 1 * 64);
}

static inline void
clib_mov256 (u8 * dst, const u8 * src)
{
  clib_mov128 ((u8 *) dst + 0 * 128, (const u8 *) src + 0 * 128);
  clib_mov128 ((u8 *) dst + 1 * 128, (const u8 *) src + 1 * 128);
}
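
/*
 * Example (illustrative sketch, not part of the original header): the
 * clib_movNN helpers compose by doubling, so any fixed-size copy becomes a
 * branch-free, straight-line sequence of 16-byte unaligned loads and
 * stores. A 48-byte mover, for instance, could be built the same way; the
 * name mov48_sketch is hypothetical:
 *
 *   static inline void
 *   mov48_sketch (u8 * dst, const u8 * src)
 *   {
 *     clib_mov32 (dst, src);          // bytes [0, 32)
 *     clib_mov16 (dst + 32, src + 32); // bytes [32, 48)
 *   }
 *
 * The header itself only defines the power-of-two sizes 16 through 256.
 */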

/**
 * Macro for copying an unaligned block from one location to another with a
 * constant load offset; it leaves at most 47 bytes of the copy unfinished,
 * and the locations must not overlap.
 * Requirements:
 * - Store is aligned
 * - Load offset is <offset>, which must be an immediate value in [1, 15]
 * - For <src>, make sure <offset> bytes backwards and <16 - offset> bytes
 *   forwards are readable
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> must be pre-defined
 */
#define CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, offset)                \
({                                                                      \
  int tmp;                                                              \
  while (len >= 128 + 16 - offset) {                                    \
    xmm0 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 0 * 16)); \
    len -= 128;                                                         \
    xmm1 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 1 * 16)); \
    xmm2 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 2 * 16)); \
    xmm3 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 3 * 16)); \
    xmm4 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 4 * 16)); \
    xmm5 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 5 * 16)); \
    xmm6 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 6 * 16)); \
    xmm7 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 7 * 16)); \
    xmm8 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 8 * 16)); \
    src = (const u8 *)src + 128;                                        \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
    dst = (u8 *)dst + 128;                                              \
  }                                                                     \
  tmp = len;                                                            \
  len = ((len - 16 + offset) & 127) + 16 - offset;                      \
  tmp -= len;                                                           \
  src = (const u8 *)src + tmp;                                          \
  dst = (u8 *)dst + tmp;                                                \
  if (len >= 32 + 16 - offset) {                                        \
    while (len >= 32 + 16 - offset) {                                   \
      xmm0 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 0 * 16)); \
      len -= 32;                                                        \
      xmm1 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 1 * 16)); \
      xmm2 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 2 * 16)); \
      src = (const u8 *)src + 32;                                       \
      _mm_storeu_si128((__m128i *)((u8 *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
      _mm_storeu_si128((__m128i *)((u8 *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
      dst = (u8 *)dst + 32;                                             \
    }                                                                   \
    tmp = len;                                                          \
    len = ((len - 16 + offset) & 31) + 16 - offset;                     \
    tmp -= len;                                                         \
    src = (const u8 *)src + tmp;                                        \
    dst = (u8 *)dst + tmp;                                              \
  }                                                                     \
})
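
/*
 * Example (illustrative, with assumed values): _mm_alignr_epi8 (b, a, off)
 * concatenates the two 16-byte registers as b:a and shifts the 32-byte
 * value right by <off> bytes, keeping the low 16 bytes. With off = 3,
 * source bytes s[0..15] land in one register even though each load was
 * taken from a 16-byte-aligned address:
 *
 *   xmm0 = _mm_loadu_si128 ((const __m128i *) (s - 3));      // s[-3..12]
 *   xmm1 = _mm_loadu_si128 ((const __m128i *) (s - 3 + 16)); // s[13..28]
 *   _mm_alignr_epi8 (xmm1, xmm0, 3)                       == // s[0..15]
 *
 * This is how the loops above realign an unaligned source stream while
 * every load address stays 16-byte aligned and every store hits an
 * aligned destination.
 */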

/**
 * Macro for copying an unaligned block from one location to another; it
 * leaves at most 47 bytes of the copy unfinished, and the locations must
 * not overlap.
 * A switch is used because the aligning instruction (PALIGNR) requires an
 * immediate value for the shift count.
 * Requirements:
 * - Store is aligned
 * - Load offset is <offset>, which must be within [1, 15]
 * - For <src>, make sure <offset> bytes backwards and <16 - offset> bytes
 *   forwards are readable
 * - __m128i <xmm0> ~ <xmm8> used in CLIB_MVUNALIGN_LEFT47_IMM must be
 *   pre-defined
 * - <dst>, <src>, <len> must be variables
 */
#define CLIB_MVUNALIGN_LEFT47(dst, src, len, offset)                    \
({                                                                      \
  switch (offset) {                                                     \
  case 0x01: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x01); break;     \
  case 0x02: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x02); break;     \
  case 0x03: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x03); break;     \
  case 0x04: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x04); break;     \
  case 0x05: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x05); break;     \
  case 0x06: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x06); break;     \
  case 0x07: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x07); break;     \
  case 0x08: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x08); break;     \
  case 0x09: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x09); break;     \
  case 0x0A: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0A); break;     \
  case 0x0B: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0B); break;     \
  case 0x0C: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0C); break;     \
  case 0x0D: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0D); break;     \
  case 0x0E: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0E); break;     \
  case 0x0F: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0F); break;     \
  default:;                                                             \
  }                                                                     \
})
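
/*
 * Example (hypothetical call site, mirroring how clib_memcpy_fast invokes
 * the macro below): the caller declares the nine xmm temporaries,
 * guarantees a 16-byte-aligned store pointer, and passes the source
 * misalignment as <offset>:
 *
 *   __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
 *   size_t n = ...;                      // bytes remaining, dst aligned
 *   size_t srcofs = (uword) src & 0x0F;  // in [1, 15] on this path
 *   CLIB_MVUNALIGN_LEFT47 (dst, src, n, srcofs);
 *   // at most 47 bytes of n remain for the short-copy tail paths
 */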

static inline void *
clib_memcpy_fast (void *dst, const void *src, size_t n)
{
  __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
  uword dstu = (uword) dst;
  uword srcu = (uword) src;
  void *ret = dst;
  size_t dstofss;
  size_t srcofs;

  /**
   * Copy less than 16 bytes
   */
  if (n < 16)
    {
      if (n & 0x01)
        {
          *(u8 *) dstu = *(const u8 *) srcu;
          srcu = (uword) ((const u8 *) srcu + 1);
          dstu = (uword) ((u8 *) dstu + 1);
        }
      if (n & 0x02)
        {
          *(u16 *) dstu = *(const u16 *) srcu;
          srcu = (uword) ((const u16 *) srcu + 1);
          dstu = (uword) ((u16 *) dstu + 1);
        }
      if (n & 0x04)
        {
          *(u32 *) dstu = *(const u32 *) srcu;
          srcu = (uword) ((const u32 *) srcu + 1);
          dstu = (uword) ((u32 *) dstu + 1);
        }
      if (n & 0x08)
        {
          *(u64 *) dstu = *(const u64 *) srcu;
        }
      return ret;
    }
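  /*
   * Worked example (assumed n = 13 = 0b1101): the cascade above copies
   * 1 byte (bit 0), then 4 bytes (bit 2), then 8 bytes (bit 3), i.e.
   * 1 + 4 + 8 = 13 bytes in three fixed-width stores and no loop.
   */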

  /**
   * Fast way when copy size doesn't exceed 512 bytes
   */
  if (n <= 32)
    {
      clib_mov16 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
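  /*
   * Worked example (assumed n = 20): the pair of clib_mov16 calls above
   * writes bytes [0, 16) and then bytes [4, 20); the 12-byte overlap is
   * harmless because src and dst do not overlap, and it avoids any
   * per-byte tail loop. This back-aligned tail store is reused throughout
   * the function.
   */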
  if (n <= 48)
    {
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
  if (n <= 64)
    {
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst + 32, (const u8 *) src + 32);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
  if (n <= 128)
    {
      goto COPY_BLOCK_128_BACK15;
    }
  if (n <= 512)
    {
      if (n >= 256)
        {
          n -= 256;
          clib_mov128 ((u8 *) dst, (const u8 *) src);
          clib_mov128 ((u8 *) dst + 128, (const u8 *) src + 128);
          src = (const u8 *) src + 256;
          dst = (u8 *) dst + 256;
        }
    COPY_BLOCK_255_BACK15:
      if (n >= 128)
        {
          n -= 128;
          clib_mov128 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 128;
          dst = (u8 *) dst + 128;
        }
    COPY_BLOCK_128_BACK15:
      if (n >= 64)
        {
          n -= 64;
          clib_mov64 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 64;
          dst = (u8 *) dst + 64;
        }
    COPY_BLOCK_64_BACK15:
      if (n >= 32)
        {
          n -= 32;
          clib_mov32 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 32;
          dst = (u8 *) dst + 32;
        }
      if (n > 16)
        {
          clib_mov16 ((u8 *) dst, (const u8 *) src);
          clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
          return ret;
        }
      if (n > 0)
        {
          clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
        }
      return ret;
    }

  /**
   * Make the store aligned when the copy size exceeds 512 bytes, and make
   * sure the first 15 bytes are copied, because the unaligned copy macros
   * may read up to 15 bytes before the current source pointer.
   */
  dstofss = (uword) dst & 0x0F;
  if (dstofss > 0)
    {
      dstofss = 16 - dstofss + 16;
      n -= dstofss;
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      src = (const u8 *) src + dstofss;
      dst = (u8 *) dst + dstofss;
    }
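  /*
   * Worked example (assumed dst ends in 0x7, so dst & 0x0F == 7): the
   * block above sets dstofss = 16 - 7 + 16 = 25, copies 32 bytes, and
   * advances both pointers by 25, leaving dst 16-byte aligned. Since
   * dstofss is always in [17, 31], at least 17 bytes behind the new src
   * have already been copied, satisfying the backwards-read requirement
   * of the macros above.
   */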
  srcofs = ((uword) src & 0x0F);

  /**
   * For aligned copy
   */
  if (srcofs == 0)
    {
      /**
       * Copy 256-byte blocks
       */
      for (; n >= 256; n -= 256)
        {
          clib_mov256 ((u8 *) dst, (const u8 *) src);
          dst = (u8 *) dst + 256;
          src = (const u8 *) src + 256;
        }

      /**
       * Copy whatever is left
       */
      goto COPY_BLOCK_255_BACK15;
    }

  /**
   * For copy with unaligned load
   */
  CLIB_MVUNALIGN_LEFT47 (dst, src, n, srcofs);

  /**
   * Copy whatever is left
   */
  goto COPY_BLOCK_64_BACK15;
}
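
/*
 * Usage sketch (not part of the original header; assumes the VPP clib
 * types u8/uword are already in scope, as they are elsewhere in vppinfra):
 *
 *   u8 src_buf[1000], dst_buf[1000];
 *   clib_memcpy_fast (dst_buf, src_buf, sizeof (src_buf));
 *
 * Semantics match memcpy for non-overlapping buffers: the return value is
 * dst_buf, and n == 1000 exercises the > 512-byte path, including the
 * store-alignment prologue and the PALIGNR-based realignment loop whenever
 * src and dst sit at different 16-byte offsets.
 */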

#undef CLIB_MVUNALIGN_LEFT47_IMM
#undef CLIB_MVUNALIGN_LEFT47

#endif /* included_clib_memcpy_sse3_h */


/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */