FD.io VPP v16.06
Vector Packet Processing
memcpy_avx.h
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*-
 * BSD LICENSE
 *
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef included_clib_memcpy_avx_h
#define included_clib_memcpy_avx_h

#include <stdint.h>
#include <x86intrin.h>

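/* The u8 and uword types used below come from vppinfra (types.h, where
 * u8 is unsigned char and uword is a 64-bit unsigned integer on 64-bit
 * targets); this file assumes they are already defined when it is
 * included.  The clib_mov* helpers that follow copy fixed-size chunks
 * (16/32/64/128/256 bytes) using unaligned SSE/AVX loads and stores. */
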
static inline void
clib_mov16(u8 *dst, const u8 *src)
{
  __m128i xmm0;

  xmm0 = _mm_loadu_si128((const __m128i *)src);
  _mm_storeu_si128((__m128i *)dst, xmm0);
}

static inline void
clib_mov32(u8 *dst, const u8 *src)
{
  __m256i ymm0;

  ymm0 = _mm256_loadu_si256((const __m256i *)src);
  _mm256_storeu_si256((__m256i *)dst, ymm0);
}

static inline void
clib_mov64(u8 *dst, const u8 *src)
{
  clib_mov32((u8 *)dst + 0 * 32, (const u8 *)src + 0 * 32);
  clib_mov32((u8 *)dst + 1 * 32, (const u8 *)src + 1 * 32);
}

static inline void
clib_mov128(u8 *dst, const u8 *src)
{
  clib_mov64((u8 *)dst + 0 * 64, (const u8 *)src + 0 * 64);
  clib_mov64((u8 *)dst + 1 * 64, (const u8 *)src + 1 * 64);
}

static inline void
clib_mov256(u8 *dst, const u8 *src)
{
  clib_mov128((u8 *)dst + 0 * 128, (const u8 *)src + 0 * 128);
  clib_mov128((u8 *)dst + 1 * 128, (const u8 *)src + 1 * 128);
}

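/* Copy as many whole 64-byte blocks as fit in n; any remainder smaller
 * than 64 bytes is left for the caller to handle. */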
static inline void
clib_mov64blocks(u8 *dst, const u8 *src, size_t n)
{
  __m256i ymm0, ymm1;

  while (n >= 64) {
    ymm0 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 0 * 32));
    n -= 64;
    ymm1 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 1 * 32));
    src = (const u8 *)src + 64;
    _mm256_storeu_si256((__m256i *)((u8 *)dst + 0 * 32), ymm0);
    _mm256_storeu_si256((__m256i *)((u8 *)dst + 1 * 32), ymm1);
    dst = (u8 *)dst + 64;
  }
}

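/* Copy as many whole 256-byte blocks as fit in n, using eight 32-byte
 * loads followed by eight 32-byte stores per iteration; any remainder
 * smaller than 256 bytes is left for the caller. */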
static inline void
clib_mov256blocks(u8 *dst, const u8 *src, size_t n)
{
  __m256i ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7;

  while (n >= 256) {
    ymm0 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 0 * 32));
    n -= 256;
    ymm1 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 1 * 32));
    ymm2 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 2 * 32));
    ymm3 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 3 * 32));
    ymm4 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 4 * 32));
    ymm5 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 5 * 32));
    ymm6 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 6 * 32));
    ymm7 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 7 * 32));
    src = (const u8 *)src + 256;
    _mm256_storeu_si256((__m256i *)((u8 *)dst + 0 * 32), ymm0);
    _mm256_storeu_si256((__m256i *)((u8 *)dst + 1 * 32), ymm1);
    _mm256_storeu_si256((__m256i *)((u8 *)dst + 2 * 32), ymm2);
    _mm256_storeu_si256((__m256i *)((u8 *)dst + 3 * 32), ymm3);
    _mm256_storeu_si256((__m256i *)((u8 *)dst + 4 * 32), ymm4);
    _mm256_storeu_si256((__m256i *)((u8 *)dst + 5 * 32), ymm5);
    _mm256_storeu_si256((__m256i *)((u8 *)dst + 6 * 32), ymm6);
    _mm256_storeu_si256((__m256i *)((u8 *)dst + 7 * 32), ymm7);
    dst = (u8 *)dst + 256;
  }
}

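/* AVX variant of clib_memcpy: copies smaller than 16 bytes are done with
 * scalar stores, copies of up to 512 bytes with a few fixed-size vector
 * moves (overlapping the tail where needed), and larger copies first align
 * the destination to 32 bytes and then run the block copiers above. */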
static inline void *
clib_memcpy(void *dst, const void *src, size_t n)
{
  uword dstu = (uword)dst;
  uword srcu = (uword)src;
  void *ret = dst;
  size_t dstofss;
  size_t bits;

  /**
   * Copy less than 16 bytes
   */
  if (n < 16) {
    if (n & 0x01) {
      *(u8 *)dstu = *(const u8 *)srcu;
      srcu = (uword)((const u8 *)srcu + 1);
      dstu = (uword)((u8 *)dstu + 1);
    }
    if (n & 0x02) {
      *(uint16_t *)dstu = *(const uint16_t *)srcu;
      srcu = (uword)((const uint16_t *)srcu + 1);
      dstu = (uword)((uint16_t *)dstu + 1);
    }
    if (n & 0x04) {
      *(uint32_t *)dstu = *(const uint32_t *)srcu;
      srcu = (uword)((const uint32_t *)srcu + 1);
      dstu = (uword)((uint32_t *)dstu + 1);
    }
    if (n & 0x08) {
      *(uint64_t *)dstu = *(const uint64_t *)srcu;
    }
    return ret;
  }

  /**
   * Fast way when copy size doesn't exceed 512 bytes
   */
  if (n <= 32) {
    clib_mov16((u8 *)dst, (const u8 *)src);
    clib_mov16((u8 *)dst - 16 + n, (const u8 *)src - 16 + n);
    return ret;
  }
  if (n <= 64) {
    clib_mov32((u8 *)dst, (const u8 *)src);
    clib_mov32((u8 *)dst - 32 + n, (const u8 *)src - 32 + n);
    return ret;
  }
  if (n <= 512) {
    if (n >= 256) {
      n -= 256;
      clib_mov256((u8 *)dst, (const u8 *)src);
      src = (const u8 *)src + 256;
      dst = (u8 *)dst + 256;
    }
    if (n >= 128) {
      n -= 128;
      clib_mov128((u8 *)dst, (const u8 *)src);
      src = (const u8 *)src + 128;
      dst = (u8 *)dst + 128;
    }
    if (n >= 64) {
      n -= 64;
      clib_mov64((u8 *)dst, (const u8 *)src);
      src = (const u8 *)src + 64;
      dst = (u8 *)dst + 64;
    }
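    /* Tail: fewer than 64 bytes remain here.  The backward clib_mov32
     * (from dst - 32 + n) writes the last 32 bytes so that they end exactly
     * at dst + n; it may rewrite bytes already copied above with the same
     * data, which is harmless and avoids a scalar loop for the remainder. */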
COPY_BLOCK_64_BACK31:
    if (n > 32) {
      clib_mov32((u8 *)dst, (const u8 *)src);
      clib_mov32((u8 *)dst - 32 + n, (const u8 *)src - 32 + n);
      return ret;
    }
    if (n > 0) {
      clib_mov32((u8 *)dst - 32 + n, (const u8 *)src - 32 + n);
    }
    return ret;
  }

  /**
   * Make store aligned when copy size exceeds 512 bytes
   */
  dstofss = (uword)dst & 0x1F;
  if (dstofss > 0) {
    dstofss = 32 - dstofss;
    n -= dstofss;
    clib_mov32((u8 *)dst, (const u8 *)src);
    src = (const u8 *)src + dstofss;
    dst = (u8 *)dst + dstofss;
  }

  /**
   * Copy 256-byte blocks.
   * Use copy block function for better instruction order control,
   * which is important when load is unaligned.
   */
  clib_mov256blocks((u8 *)dst, (const u8 *)src, n);
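  /* clib_mov256blocks copies only whole 256-byte blocks; work out how many
   * bytes that was (bits), advance past them, and keep n = n % 256 for the
   * 64-byte-block and tail stages below. */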
  bits = n;
  n = n & 255;
  bits -= n;
  src = (const u8 *)src + bits;
  dst = (u8 *)dst + bits;

  /**
   * Copy 64-byte blocks.
   * Use copy block function for better instruction order control,
   * which is important when load is unaligned.
   */
  if (n >= 64) {
    clib_mov64blocks((u8 *)dst, (const u8 *)src, n);
    bits = n;
    n = n & 63;
    bits -= n;
    src = (const u8 *)src + bits;
    dst = (u8 *)dst + bits;
  }

  /**
   * Copy whatever is left
   */
  goto COPY_BLOCK_64_BACK31;
}

#endif /* included_clib_memcpy_avx_h */
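
Within VPP this header is normally pulled in indirectly through the vppinfra
headers rather than included on its own. The sketch below is a minimal,
hypothetical way to exercise clib_memcpy in isolation; the stand-in typedefs,
the relative include path, and the build flags are assumptions made for
illustration and are not part of the original file.

/* Hypothetical standalone check, not part of the source tree.  Rather than
 * pulling in all of vppinfra, it supplies the two typedefs the header
 * expects (normally provided by vppinfra's types.h) and includes the file
 * directly.  Build with AVX2 enabled, e.g. gcc -mavx2 -O2 test.c. */
#include <string.h>
#include <assert.h>

typedef unsigned char u8;
typedef unsigned long long uword;   /* stands in for vppinfra's 64-bit uword */

#include "memcpy_avx.h"

int
main(void)
{
  u8 src[1500], dst[1500];
  int i;

  for (i = 0; i < 1500; i++)
    src[i] = (u8) i;

  /* Exercise the three code paths: scalar stores (< 16 bytes), fixed-size
   * vector moves (16..512 bytes) and the aligned block copier (> 512). */
  clib_memcpy(dst, src, 8);
  clib_memcpy(dst, src, 300);
  clib_memcpy(dst, src, 1500);

  assert(memcmp(dst, src, 1500) == 0);
  return 0;
}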