FD.io VPP v16.06 — Vector Packet Processing
pipeline.h (Doxygen-generated source listing of this file's documentation)
1 /*
2  * vnet/pipeline.h: software pipeline
3  *
4  * Copyright (c) 2012 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 /*
19  * Usage example.
20  *
21  * #define NSTAGES 3 or whatever
22  *
23  * <Define pipeline stages>
24  *
25  * #include <vnet/pipeline.h>
26  *
27  * static uword my_node_fn (vlib_main_t * vm,
28  * vlib_node_runtime_t * node,
29  * vlib_frame_t * frame)
30  * {
31  * return dispatch_pipeline (vm, node, frame);
32  * }
33  *
34  */
35 
/* The including file must define the pipeline depth before
   #include <vnet/pipeline.h>; there is no sensible default. */
#ifndef NSTAGES
#error files which #include <vnet/pipeline.h> must define NSTAGES
#endif

/* Inlining qualifier applied to the generated functions; the
   including file may override it before including this header. */
#ifndef STAGE_INLINE
#define STAGE_INLINE inline
#endif
43 
44 /*
45  * A prefetch stride of 2 is quasi-equivalent to doubling the number
46  * of stages with every other pipeline stage empty.
47  */
48 
49 /*
50  * This is a typical first pipeline stage, which prefetches
51  * buffer metadata and the first line of pkt data.
52  * To use it:
53  * #define stage0 generic_stage0
54  */
56  vlib_node_runtime_t * node,
57  u32 buffer_index)
58 {
59  /* generic default stage 0 here */
60  vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
61  vlib_prefetch_buffer_header (b, STORE);
63 }
64 
#if NSTAGES == 2

/*
 * Two-stage software pipeline dispatcher: stage0 runs one packet
 * ahead of last_stage.  A node function using this header simply
 * returns dispatch_pipeline (vm, node, frame).
 */
static STAGE_INLINE uword
dispatch_pipeline (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
  u32 * from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, * to_next, next_index, next0;
  int pi, pi_limit;   /* pipeline index, and how far it may advance */

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Process no more packets than both input and output allow. */
      pi_limit = clib_min (n_left_from, n_left_to_next);

      /* Prologue: fill the pipeline -- run stage0 on the first
         NSTAGES-1 packets without retiring anything yet. */
      for (pi = 0; pi < NSTAGES-1; pi++)
        {
          if(pi == pi_limit)
            break;
          stage0 (vm, node, from[pi]);
        }

      /* Steady state: start packet pi in stage0 while retiring
         packet pi-1 through last_stage and enqueueing it. */
      for (; pi < pi_limit; pi++)
        {
          stage0 (vm, node, from[pi]);
          to_next[0] = from [pi - 1];
          to_next++;
          n_left_to_next--;
          next0 = last_stage (vm, node, from [pi - 1]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           from[pi - 1], next0);
          n_left_from--;
          /* NOTE(review): vlib_validate_buffer_enqueue_x1 mutates
             to_next/n_left_to_next; presumably it can exhaust the
             output frame, hence the refill below -- confirm against
             buffer_node.h. */
          if ((int) n_left_to_next < 0 && n_left_from > 0)
            vlib_get_next_frame (vm, node, next_index, to_next,
                                 n_left_to_next);
        }

      /* Epilogue: drain the pipeline -- retire the packets started
         in the prologue but not yet enqueued. */
      for (; pi < (pi_limit + (NSTAGES-1)); pi++)
        {
          if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
            {
              to_next[0] = from [pi - 1];
              to_next++;
              n_left_to_next--;
              next0 = last_stage (vm, node, from [pi - 1]);
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               from[pi - 1], next0);
              n_left_from--;
              if ((int) n_left_to_next < 0 && n_left_from > 0)
                vlib_get_next_frame (vm, node, next_index, to_next,
                                     n_left_to_next);
            }
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      from += pi_limit;   /* advance past the batch just processed */
    }
  return frame->n_vectors;
}
#endif
131 
#if NSTAGES == 3
/*
 * Three-stage software pipeline dispatcher: stage0 and stage1 run
 * two and one packets (respectively) ahead of last_stage.
 */
static STAGE_INLINE uword
dispatch_pipeline (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
  u32 * from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, * to_next, next_index, next0;
  int pi, pi_limit;   /* pipeline index, and how far it may advance */

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Process no more packets than both input and output allow. */
      pi_limit = clib_min (n_left_from, n_left_to_next);

      /* Prologue: fill the pipeline; nothing is retired yet. */
      for (pi = 0; pi < NSTAGES-1; pi++)
        {
          if(pi == pi_limit)
            break;
          stage0 (vm, node, from[pi]);
          if (pi-1 >= 0)
            stage1 (vm, node, from[pi-1]);
        }

      /* Steady state: all stages busy; packet pi-2 is retired
         through last_stage and enqueued. */
      for (; pi < pi_limit; pi++)
        {
          stage0 (vm, node, from[pi]);
          stage1 (vm, node, from[pi-1]);
          to_next[0] = from [pi - 2];
          to_next++;
          n_left_to_next--;
          next0 = last_stage (vm, node, from [pi - 2]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           from[pi - 2], next0);
          n_left_from--;
          /* NOTE(review): the enqueue macro mutates to_next /
             n_left_to_next; refill the output frame if exhausted --
             confirm semantics against buffer_node.h. */
          if ((int) n_left_to_next < 0 && n_left_from > 0)
            vlib_get_next_frame (vm, node, next_index, to_next,
                                 n_left_to_next);
        }


      /* Epilogue: drain packets still in flight in stage1 and
         last_stage; bounds checks skip slots never started. */
      for (; pi < (pi_limit + (NSTAGES-1)); pi++)
        {
          if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
            stage1 (vm, node, from[pi-1]);
          if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
            {
              to_next[0] = from[pi - 2];
              to_next++;
              n_left_to_next--;
              next0 = last_stage (vm, node, from [pi - 2]);
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               from[pi - 2], next0);
              n_left_from--;
              if ((int) n_left_to_next < 0 && n_left_from > 0)
                vlib_get_next_frame (vm, node, next_index, to_next,
                                     n_left_to_next);
            }
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      from += pi_limit;   /* advance past the batch just processed */
    }
  return frame->n_vectors;
}
#endif
204 
#if NSTAGES == 4
/*
 * Four-stage software pipeline dispatcher: stage0..stage2 run three,
 * two, and one packets (respectively) ahead of last_stage.
 */
static STAGE_INLINE uword
dispatch_pipeline (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
  u32 * from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, * to_next, next_index, next0;
  int pi, pi_limit;   /* pipeline index, and how far it may advance */

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Process no more packets than both input and output allow. */
      pi_limit = clib_min (n_left_from, n_left_to_next);

      /* Prologue: fill the pipeline; nothing is retired yet. */
      for (pi = 0; pi < NSTAGES-1; pi++)
        {
          if(pi == pi_limit)
            break;
          stage0 (vm, node, from[pi]);
          if (pi-1 >= 0)
            stage1 (vm, node, from[pi-1]);
          if (pi-2 >= 0)
            stage2 (vm, node, from[pi-2]);
        }

      /* Steady state: all stages busy; packet pi-3 is retired
         through last_stage and enqueued. */
      for (; pi < pi_limit; pi++)
        {
          stage0 (vm, node, from[pi]);
          stage1 (vm, node, from[pi-1]);
          stage2 (vm, node, from[pi-2]);
          to_next[0] = from [pi - 3];
          to_next++;
          n_left_to_next--;
          next0 = last_stage (vm, node, from [pi - 3]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           from[pi - 3], next0);
          n_left_from--;
          /* NOTE(review): the enqueue macro mutates to_next /
             n_left_to_next; refill the output frame if exhausted --
             confirm semantics against buffer_node.h. */
          if ((int) n_left_to_next < 0 && n_left_from > 0)
            vlib_get_next_frame (vm, node, next_index, to_next,
                                 n_left_to_next);
        }


      /* Epilogue: drain packets still in flight; bounds checks
         skip slots never started. */
      for (; pi < (pi_limit + (NSTAGES-1)); pi++)
        {
          if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
            stage1 (vm, node, from[pi-1]);
          if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
            stage2 (vm, node, from[pi-2]);
          if (((pi - 3) >= 0) && ((pi - 3) < pi_limit))
            {
              to_next[0] = from[pi - 3];
              to_next++;
              n_left_to_next--;
              next0 = last_stage (vm, node, from [pi - 3]);
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               from[pi - 3], next0);
              n_left_from--;
              if ((int) n_left_to_next < 0 && n_left_from > 0)
                vlib_get_next_frame (vm, node, next_index, to_next,
                                     n_left_to_next);
            }
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      from += pi_limit;   /* advance past the batch just processed */
    }
  return frame->n_vectors;
}
#endif
282 
283 
#if NSTAGES == 5
/*
 * Five-stage software pipeline dispatcher: stage0..stage3 run four,
 * three, two, and one packets (respectively) ahead of last_stage.
 */
static STAGE_INLINE uword
dispatch_pipeline (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
  u32 * from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, * to_next, next_index, next0;
  int pi, pi_limit;   /* pipeline index, and how far it may advance */

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Process no more packets than both input and output allow. */
      pi_limit = clib_min (n_left_from, n_left_to_next);

      /* Prologue: fill the pipeline; nothing is retired yet. */
      for (pi = 0; pi < NSTAGES-1; pi++)
        {
          if(pi == pi_limit)
            break;
          stage0 (vm, node, from[pi]);
          if (pi-1 >= 0)
            stage1 (vm, node, from[pi-1]);
          if (pi-2 >= 0)
            stage2 (vm, node, from[pi-2]);
          if (pi-3 >= 0)
            stage3 (vm, node, from[pi-3]);
        }

      /* Steady state: all stages busy; packet pi-4 is retired
         through last_stage and enqueued. */
      for (; pi < pi_limit; pi++)
        {
          stage0 (vm, node, from[pi]);
          stage1 (vm, node, from[pi-1]);
          stage2 (vm, node, from[pi-2]);
          stage3 (vm, node, from[pi-3]);
          to_next[0] = from [pi - 4];
          to_next++;
          n_left_to_next--;
          next0 = last_stage (vm, node, from [pi - 4]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           from[pi - 4], next0);
          n_left_from--;
          /* NOTE(review): the enqueue macro mutates to_next /
             n_left_to_next; refill the output frame if exhausted --
             confirm semantics against buffer_node.h. */
          if ((int) n_left_to_next < 0 && n_left_from > 0)
            vlib_get_next_frame (vm, node, next_index, to_next,
                                 n_left_to_next);
        }


      /* Epilogue: drain packets still in flight; bounds checks
         skip slots never started. */
      for (; pi < (pi_limit + (NSTAGES-1)); pi++)
        {
          if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
            stage1 (vm, node, from[pi-1]);
          if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
            stage2 (vm, node, from[pi - 2]);
          if (((pi - 3) >= 0) && ((pi - 3) < pi_limit))
            stage3 (vm, node, from[pi - 3]);
          if (((pi - 4) >= 0) && ((pi - 4) < pi_limit))
            {
              to_next[0] = from[pi - 4];
              to_next++;
              n_left_to_next--;
              next0 = last_stage (vm, node, from [pi - 4]);
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               from[pi - 4], next0);
              n_left_from--;
              if ((int) n_left_to_next < 0 && n_left_from > 0)
                vlib_get_next_frame (vm, node, next_index, to_next,
                                     n_left_to_next);
            }
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      from += pi_limit;   /* advance past the batch just processed */
    }
  return frame->n_vectors;
}
#endif
366 
#if NSTAGES == 6
/*
 * Six-stage software pipeline dispatcher: stage0..stage4 run five
 * through one packets (respectively) ahead of last_stage.
 */
static STAGE_INLINE uword
dispatch_pipeline (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
  u32 * from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, * to_next, next_index, next0;
  int pi, pi_limit;   /* pipeline index, and how far it may advance */

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Process no more packets than both input and output allow. */
      pi_limit = clib_min (n_left_from, n_left_to_next);

      /* Prologue: fill the pipeline; nothing is retired yet. */
      for (pi = 0; pi < NSTAGES-1; pi++)
        {
          if(pi == pi_limit)
            break;
          stage0 (vm, node, from[pi]);
          if (pi-1 >= 0)
            stage1 (vm, node, from[pi-1]);
          if (pi-2 >= 0)
            stage2 (vm, node, from[pi-2]);
          if (pi-3 >= 0)
            stage3 (vm, node, from[pi-3]);
          if (pi-4 >= 0)
            stage4 (vm, node, from[pi-4]);
        }

      /* Steady state: all stages busy; packet pi-5 is retired
         through last_stage and enqueued. */
      for (; pi < pi_limit; pi++)
        {
          stage0 (vm, node, from[pi]);
          stage1 (vm, node, from[pi-1]);
          stage2 (vm, node, from[pi-2]);
          stage3 (vm, node, from[pi-3]);
          stage4 (vm, node, from[pi-4]);
          to_next[0] = from [pi - 5];
          to_next++;
          n_left_to_next--;
          next0 = last_stage (vm, node, from [pi - 5]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           from[pi - 5], next0);
          n_left_from--;
          /* NOTE(review): the enqueue macro mutates to_next /
             n_left_to_next; refill the output frame if exhausted --
             confirm semantics against buffer_node.h. */
          if ((int) n_left_to_next < 0 && n_left_from > 0)
            vlib_get_next_frame (vm, node, next_index, to_next,
                                 n_left_to_next);
        }


      /* Epilogue: drain packets still in flight; bounds checks
         skip slots never started. */
      for (; pi < (pi_limit + (NSTAGES-1)); pi++)
        {
          if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
            stage1 (vm, node, from[pi-1]);
          if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
            stage2 (vm, node, from[pi - 2]);
          if (((pi - 3) >= 0) && ((pi - 3) < pi_limit))
            stage3 (vm, node, from[pi - 3]);
          if (((pi - 4) >= 0) && ((pi - 4) < pi_limit))
            stage4 (vm, node, from[pi - 4]);
          if (((pi - 5) >= 0) && ((pi - 5) < pi_limit))
            {
              to_next[0] = from[pi - 5];
              to_next++;
              n_left_to_next--;
              next0 = last_stage (vm, node, from [pi - 5]);
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               from[pi - 5], next0);
              n_left_from--;
              if ((int) n_left_to_next < 0 && n_left_from > 0)
                vlib_get_next_frame (vm, node, next_index, to_next,
                                     n_left_to_next);
            }
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      from += pi_limit;   /* advance past the batch just processed */
    }
  return frame->n_vectors;
}
#endif
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Definition: main.c:459
#define clib_min(x, y)
Definition: clib.h:295
static u32 last_stage(vlib_main_t *vm, vlib_node_runtime_t *node, u32 bi)
Definition: decap.c:111
static void stage2(vlib_main_t *vm, vlib_node_runtime_t *node, u32 buffer_index)
always_inline void * vlib_frame_vector_args(vlib_frame_t *f)
Definition: node_funcs.h:202
static void stage1(vlib_main_t *vm, vlib_node_runtime_t *node, u32 bi)
Definition: decap.c:64
#define NSTAGES
Definition: decap.c:52
static void stage3(vlib_main_t *vm, vlib_node_runtime_t *node, u32 buffer_index)
static void stage4(vlib_main_t *vm, vlib_node_runtime_t *node, u32 buffer_index)
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Definition: buffer_node.h:83
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Definition: node_funcs.h:265
u16 n_vectors
Definition: node.h:307
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:82
u16 cached_next_index
Definition: node.h:422
unsigned int u32
Definition: types.h:88
u64 uword
Definition: types.h:112
#define STAGE_INLINE
Definition: pipeline.h:41
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:162
u8 data[0]
Packet data.
Definition: buffer.h:150
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:67
always_inline vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:69
static STAGE_INLINE void generic_stage0(vlib_main_t *vm, vlib_node_runtime_t *node, u32 buffer_index)
Definition: pipeline.h:55