FD.io VPP  v17.01.1-3-gc6833f8
Vector Packet Processing
pipeline.h
/*
 * vnet/pipeline.h: software pipeline
 *
 * Copyright (c) 2012 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Usage example.
 *
 * #define NSTAGES 3 or whatever
 *
 * <Define pipeline stages>
 *
 * #include <vnet/pipeline.h>
 *
 * static uword my_node_fn (vlib_main_t * vm,
 *                          vlib_node_runtime_t * node,
 *                          vlib_frame_t * frame)
 * {
 *     return dispatch_pipeline (vm, node, frame);
 * }
 *
 */
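
/*
 * Illustrative sketch (not part of the original header): one way the
 * "<Define pipeline stages>" step above might look for NSTAGES == 3.
 * The stage bodies and the MY_NEXT_DROP next index are hypothetical;
 * dispatch_pipeline only requires the signatures shown, with last_stage
 * returning the next-node index chosen for the buffer.
 *
 * static void stage0 (vlib_main_t * vm,
 *                     vlib_node_runtime_t * node, u32 buffer_index)
 * {
 *   vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
 *   vlib_prefetch_buffer_header (b, STORE);
 *   CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, STORE);
 * }
 *
 * static void stage1 (vlib_main_t * vm,
 *                     vlib_node_runtime_t * node, u32 buffer_index)
 * {
 *   vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
 *   <per-packet work on the now-resident buffer>
 * }
 *
 * static u32 last_stage (vlib_main_t * vm,
 *                        vlib_node_runtime_t * node, u32 buffer_index)
 * {
 *   <choose a disposition for the buffer>
 *   return MY_NEXT_DROP;
 * }
 */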

#ifndef NSTAGES
#error files which #include <vnet/pipeline.h> must define NSTAGES
#endif

#ifndef STAGE_INLINE
#define STAGE_INLINE inline
#endif

/*
 * A prefetch stride of 2 is quasi-equivalent to doubling the number
 * of stages with every other pipeline stage empty.
 */

/*
 * This is a typical first pipeline stage, which prefetches
 * buffer metadata and the first line of pkt data.
 * To use it:
 *  #define stage0 generic_stage0
 */
static STAGE_INLINE void
generic_stage0 (vlib_main_t * vm,
                vlib_node_runtime_t * node, u32 buffer_index)
{
  /* generic default stage 0 here */
  vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
  vlib_prefetch_buffer_header (b, STORE);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, STORE);
}

#if NSTAGES == 2

static STAGE_INLINE uword
dispatch_pipeline (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, *to_next, next_index, next0;
  int pi, pi_limit;

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      pi_limit = clib_min (n_left_from, n_left_to_next);

      /* Prologue: run stage0 on the first NSTAGES - 1 packets to fill
         the pipeline. */
      for (pi = 0; pi < NSTAGES - 1; pi++)
        {
          if (pi == pi_limit)
            break;
          stage0 (vm, node, from[pi]);
        }

      /* Steady state: every stage is busy; one packet is enqueued per
         iteration. */
      for (; pi < pi_limit; pi++)
        {
          stage0 (vm, node, from[pi]);
          to_next[0] = from[pi - 1];
          to_next++;
          n_left_to_next--;
          next0 = last_stage (vm, node, from[pi - 1]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           from[pi - 1], next0);
          n_left_from--;
          if ((int) n_left_to_next < 0 && n_left_from > 0)
            vlib_get_next_frame (vm, node, next_index, to_next,
                                 n_left_to_next);
        }

      /* Epilogue: drain the packets still in flight. */
      for (; pi < (pi_limit + (NSTAGES - 1)); pi++)
        {
          if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
            {
              to_next[0] = from[pi - 1];
              to_next++;
              n_left_to_next--;
              next0 = last_stage (vm, node, from[pi - 1]);
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               from[pi - 1], next0);
              n_left_from--;
              if ((int) n_left_to_next < 0 && n_left_from > 0)
                vlib_get_next_frame (vm, node, next_index, to_next,
                                     n_left_to_next);
            }
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      from += pi_limit;
    }
  return frame->n_vectors;
}
#endif

#if NSTAGES == 3
static STAGE_INLINE uword
dispatch_pipeline (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, *to_next, next_index, next0;
  int pi, pi_limit;

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      pi_limit = clib_min (n_left_from, n_left_to_next);

      for (pi = 0; pi < NSTAGES - 1; pi++)
        {
          if (pi == pi_limit)
            break;
          stage0 (vm, node, from[pi]);
          if (pi - 1 >= 0)
            stage1 (vm, node, from[pi - 1]);
        }

      for (; pi < pi_limit; pi++)
        {
          stage0 (vm, node, from[pi]);
          stage1 (vm, node, from[pi - 1]);
          to_next[0] = from[pi - 2];
          to_next++;
          n_left_to_next--;
          next0 = last_stage (vm, node, from[pi - 2]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           from[pi - 2], next0);
          n_left_from--;
          if ((int) n_left_to_next < 0 && n_left_from > 0)
            vlib_get_next_frame (vm, node, next_index, to_next,
                                 n_left_to_next);
        }

      for (; pi < (pi_limit + (NSTAGES - 1)); pi++)
        {
          if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
            stage1 (vm, node, from[pi - 1]);
          if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
            {
              to_next[0] = from[pi - 2];
              to_next++;
              n_left_to_next--;
              next0 = last_stage (vm, node, from[pi - 2]);
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               from[pi - 2], next0);
              n_left_from--;
              if ((int) n_left_to_next < 0 && n_left_from > 0)
                vlib_get_next_frame (vm, node, next_index, to_next,
                                     n_left_to_next);
            }
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      from += pi_limit;
    }
  return frame->n_vectors;
}
#endif

#if NSTAGES == 4
static STAGE_INLINE uword
dispatch_pipeline (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, *to_next, next_index, next0;
  int pi, pi_limit;

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      pi_limit = clib_min (n_left_from, n_left_to_next);

      for (pi = 0; pi < NSTAGES - 1; pi++)
        {
          if (pi == pi_limit)
            break;
          stage0 (vm, node, from[pi]);
          if (pi - 1 >= 0)
            stage1 (vm, node, from[pi - 1]);
          if (pi - 2 >= 0)
            stage2 (vm, node, from[pi - 2]);
        }

      for (; pi < pi_limit; pi++)
        {
          stage0 (vm, node, from[pi]);
          stage1 (vm, node, from[pi - 1]);
          stage2 (vm, node, from[pi - 2]);
          to_next[0] = from[pi - 3];
          to_next++;
          n_left_to_next--;
          next0 = last_stage (vm, node, from[pi - 3]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           from[pi - 3], next0);
          n_left_from--;
          if ((int) n_left_to_next < 0 && n_left_from > 0)
            vlib_get_next_frame (vm, node, next_index, to_next,
                                 n_left_to_next);
        }

      for (; pi < (pi_limit + (NSTAGES - 1)); pi++)
        {
          if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
            stage1 (vm, node, from[pi - 1]);
          if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
            stage2 (vm, node, from[pi - 2]);
          if (((pi - 3) >= 0) && ((pi - 3) < pi_limit))
            {
              to_next[0] = from[pi - 3];
              to_next++;
              n_left_to_next--;
              next0 = last_stage (vm, node, from[pi - 3]);
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               from[pi - 3], next0);
              n_left_from--;
              if ((int) n_left_to_next < 0 && n_left_from > 0)
                vlib_get_next_frame (vm, node, next_index, to_next,
                                     n_left_to_next);
            }
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      from += pi_limit;
    }
  return frame->n_vectors;
}
#endif

#if NSTAGES == 5
static STAGE_INLINE uword
dispatch_pipeline (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, *to_next, next_index, next0;
  int pi, pi_limit;

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      pi_limit = clib_min (n_left_from, n_left_to_next);

      for (pi = 0; pi < NSTAGES - 1; pi++)
        {
          if (pi == pi_limit)
            break;
          stage0 (vm, node, from[pi]);
          if (pi - 1 >= 0)
            stage1 (vm, node, from[pi - 1]);
          if (pi - 2 >= 0)
            stage2 (vm, node, from[pi - 2]);
          if (pi - 3 >= 0)
            stage3 (vm, node, from[pi - 3]);
        }

      for (; pi < pi_limit; pi++)
        {
          stage0 (vm, node, from[pi]);
          stage1 (vm, node, from[pi - 1]);
          stage2 (vm, node, from[pi - 2]);
          stage3 (vm, node, from[pi - 3]);
          to_next[0] = from[pi - 4];
          to_next++;
          n_left_to_next--;
          next0 = last_stage (vm, node, from[pi - 4]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           from[pi - 4], next0);
          n_left_from--;
          if ((int) n_left_to_next < 0 && n_left_from > 0)
            vlib_get_next_frame (vm, node, next_index, to_next,
                                 n_left_to_next);
        }

      for (; pi < (pi_limit + (NSTAGES - 1)); pi++)
        {
          if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
            stage1 (vm, node, from[pi - 1]);
          if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
            stage2 (vm, node, from[pi - 2]);
          if (((pi - 3) >= 0) && ((pi - 3) < pi_limit))
            stage3 (vm, node, from[pi - 3]);
          if (((pi - 4) >= 0) && ((pi - 4) < pi_limit))
            {
              to_next[0] = from[pi - 4];
              to_next++;
              n_left_to_next--;
              next0 = last_stage (vm, node, from[pi - 4]);
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               from[pi - 4], next0);
              n_left_from--;
              if ((int) n_left_to_next < 0 && n_left_from > 0)
                vlib_get_next_frame (vm, node, next_index, to_next,
                                     n_left_to_next);
            }
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      from += pi_limit;
    }
  return frame->n_vectors;
}
#endif

#if NSTAGES == 6
static STAGE_INLINE uword
dispatch_pipeline (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, *to_next, next_index, next0;
  int pi, pi_limit;

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      pi_limit = clib_min (n_left_from, n_left_to_next);

      for (pi = 0; pi < NSTAGES - 1; pi++)
        {
          if (pi == pi_limit)
            break;
          stage0 (vm, node, from[pi]);
          if (pi - 1 >= 0)
            stage1 (vm, node, from[pi - 1]);
          if (pi - 2 >= 0)
            stage2 (vm, node, from[pi - 2]);
          if (pi - 3 >= 0)
            stage3 (vm, node, from[pi - 3]);
          if (pi - 4 >= 0)
            stage4 (vm, node, from[pi - 4]);
        }

      for (; pi < pi_limit; pi++)
        {
          stage0 (vm, node, from[pi]);
          stage1 (vm, node, from[pi - 1]);
          stage2 (vm, node, from[pi - 2]);
          stage3 (vm, node, from[pi - 3]);
          stage4 (vm, node, from[pi - 4]);
          to_next[0] = from[pi - 5];
          to_next++;
          n_left_to_next--;
          next0 = last_stage (vm, node, from[pi - 5]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           from[pi - 5], next0);
          n_left_from--;
          if ((int) n_left_to_next < 0 && n_left_from > 0)
            vlib_get_next_frame (vm, node, next_index, to_next,
                                 n_left_to_next);
        }

      for (; pi < (pi_limit + (NSTAGES - 1)); pi++)
        {
          if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
            stage1 (vm, node, from[pi - 1]);
          if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
            stage2 (vm, node, from[pi - 2]);
          if (((pi - 3) >= 0) && ((pi - 3) < pi_limit))
            stage3 (vm, node, from[pi - 3]);
          if (((pi - 4) >= 0) && ((pi - 4) < pi_limit))
            stage4 (vm, node, from[pi - 4]);
          if (((pi - 5) >= 0) && ((pi - 5) < pi_limit))
            {
              to_next[0] = from[pi - 5];
              to_next++;
              n_left_to_next--;
              next0 = last_stage (vm, node, from[pi - 5]);
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               from[pi - 5], next0);
              n_left_from--;
              if ((int) n_left_to_next < 0 && n_left_from > 0)
                vlib_get_next_frame (vm, node, next_index, to_next,
                                     n_left_to_next);
            }
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      from += pi_limit;
    }
  return frame->n_vectors;
}
#endif
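
/*
 * Note (added for clarity): dispatch_pipeline is only defined for
 * NSTAGES values in the range 2..6.  Any other value compiles this
 * header without defining dispatch_pipeline, so callers of
 * dispatch_pipeline will fail at compile time.
 */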

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */