FD.io VPP  v18.10-34-gcce845e
Vector Packet Processing
ip4_forward.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /*
16  * ip/ip4_forward.h: IP v4 forwarding
17  *
18  * Copyright (c) 2008 Eliot Dresselhaus
19  *
20  * Permission is hereby granted, free of charge, to any person obtaining
21  * a copy of this software and associated documentation files (the
22  * "Software"), to deal in the Software without restriction, including
23  * without limitation the rights to use, copy, modify, merge, publish,
24  * distribute, sublicense, and/or sell copies of the Software, and to
25  * permit persons to whom the Software is furnished to do so, subject to
26  * the following conditions:
27  *
28  * The above copyright notice and this permission notice shall be
29  * included in all copies or substantial portions of the Software.
30  *
31  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38  */
39 
40 #ifndef __included_ip4_forward_h__
41 #define __included_ip4_forward_h__
42 
43 #include <vppinfra/cache.h>
44 #include <vnet/fib/ip4_fib.h>
46 
47 /**
48  * @file
49  * @brief IPv4 Forwarding.
50  *
51  * This file contains the source code for IPv4 forwarding.
52  */
53 
56  vlib_node_runtime_t * node,
57  vlib_frame_t * frame,
58  int lookup_for_responses_to_locally_received_packets)
59 {
60  ip4_main_t *im = &ip4_main;
62  u32 n_left_from, n_left_to_next, *from, *to_next;
63  ip_lookup_next_t next;
64  u32 thread_index = vm->thread_index;
65 
66  from = vlib_frame_vector_args (frame);
67  n_left_from = frame->n_vectors;
68  next = node->cached_next_index;
69 
70  while (n_left_from > 0)
71  {
72  vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
73 
74 #if (CLIB_N_PREFETCHES >= 8)
75  while (n_left_from >= 8 && n_left_to_next >= 4)
76  {
77  vlib_buffer_t *p0, *p1, *p2, *p3;
78  ip4_header_t *ip0, *ip1, *ip2, *ip3;
79  ip_lookup_next_t next0, next1, next2, next3;
80  const load_balance_t *lb0, *lb1, *lb2, *lb3;
81  ip4_fib_mtrie_t *mtrie0, *mtrie1, *mtrie2, *mtrie3;
82  ip4_fib_mtrie_leaf_t leaf0, leaf1, leaf2, leaf3;
83  ip4_address_t *dst_addr0, *dst_addr1, *dst_addr2, *dst_addr3;
84  u32 pi0, pi1, pi2, pi3, lb_index0, lb_index1, lb_index2, lb_index3;
85  flow_hash_config_t flow_hash_config0, flow_hash_config1;
86  flow_hash_config_t flow_hash_config2, flow_hash_config3;
87  u32 hash_c0, hash_c1, hash_c2, hash_c3;
88  const dpo_id_t *dpo0, *dpo1, *dpo2, *dpo3;
89 
90  /* Prefetch next iteration. */
91  {
92  vlib_buffer_t *p4, *p5, *p6, *p7;
93 
94  p4 = vlib_get_buffer (vm, from[4]);
95  p5 = vlib_get_buffer (vm, from[5]);
96  p6 = vlib_get_buffer (vm, from[6]);
97  p7 = vlib_get_buffer (vm, from[7]);
98 
99  vlib_prefetch_buffer_header (p4, LOAD);
100  vlib_prefetch_buffer_header (p5, LOAD);
101  vlib_prefetch_buffer_header (p6, LOAD);
102  vlib_prefetch_buffer_header (p7, LOAD);
103 
104  CLIB_PREFETCH (p4->data, sizeof (ip0[0]), LOAD);
105  CLIB_PREFETCH (p5->data, sizeof (ip0[0]), LOAD);
106  CLIB_PREFETCH (p6->data, sizeof (ip0[0]), LOAD);
107  CLIB_PREFETCH (p7->data, sizeof (ip0[0]), LOAD);
108  }
109 
110  pi0 = to_next[0] = from[0];
111  pi1 = to_next[1] = from[1];
112  pi2 = to_next[2] = from[2];
113  pi3 = to_next[3] = from[3];
114 
115  from += 4;
116  to_next += 4;
117  n_left_to_next -= 4;
118  n_left_from -= 4;
119 
120  p0 = vlib_get_buffer (vm, pi0);
121  p1 = vlib_get_buffer (vm, pi1);
122  p2 = vlib_get_buffer (vm, pi2);
123  p3 = vlib_get_buffer (vm, pi3);
124 
125  ip0 = vlib_buffer_get_current (p0);
126  ip1 = vlib_buffer_get_current (p1);
127  ip2 = vlib_buffer_get_current (p2);
128  ip3 = vlib_buffer_get_current (p3);
129 
130  dst_addr0 = &ip0->dst_address;
131  dst_addr1 = &ip1->dst_address;
132  dst_addr2 = &ip2->dst_address;
133  dst_addr3 = &ip3->dst_address;
134 
139 
140  if (!lookup_for_responses_to_locally_received_packets)
141  {
142  mtrie0 = &ip4_fib_get (vnet_buffer (p0)->ip.fib_index)->mtrie;
143  mtrie1 = &ip4_fib_get (vnet_buffer (p1)->ip.fib_index)->mtrie;
144  mtrie2 = &ip4_fib_get (vnet_buffer (p2)->ip.fib_index)->mtrie;
145  mtrie3 = &ip4_fib_get (vnet_buffer (p3)->ip.fib_index)->mtrie;
146 
147  leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
148  leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, dst_addr1);
149  leaf2 = ip4_fib_mtrie_lookup_step_one (mtrie2, dst_addr2);
150  leaf3 = ip4_fib_mtrie_lookup_step_one (mtrie3, dst_addr3);
151  }
152 
153  if (!lookup_for_responses_to_locally_received_packets)
154  {
155  leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
156  leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 2);
157  leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 2);
158  leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 2);
159  }
160 
161  if (!lookup_for_responses_to_locally_received_packets)
162  {
163  leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
164  leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 3);
165  leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 3);
166  leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 3);
167  }
168 
169  if (lookup_for_responses_to_locally_received_packets)
170  {
171  lb_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_RX];
172  lb_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_RX];
173  lb_index2 = vnet_buffer (p2)->ip.adj_index[VLIB_RX];
174  lb_index3 = vnet_buffer (p3)->ip.adj_index[VLIB_RX];
175  }
176  else
177  {
178  lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
179  lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
180  lb_index2 = ip4_fib_mtrie_leaf_get_adj_index (leaf2);
181  lb_index3 = ip4_fib_mtrie_leaf_get_adj_index (leaf3);
182  }
183 
184  ASSERT (lb_index0 && lb_index1 && lb_index2 && lb_index3);
185  lb0 = load_balance_get (lb_index0);
186  lb1 = load_balance_get (lb_index1);
187  lb2 = load_balance_get (lb_index2);
188  lb3 = load_balance_get (lb_index3);
189 
190  ASSERT (lb0->lb_n_buckets > 0);
191  ASSERT (is_pow2 (lb0->lb_n_buckets));
192  ASSERT (lb1->lb_n_buckets > 0);
193  ASSERT (is_pow2 (lb1->lb_n_buckets));
194  ASSERT (lb2->lb_n_buckets > 0);
195  ASSERT (is_pow2 (lb2->lb_n_buckets));
196  ASSERT (lb3->lb_n_buckets > 0);
197  ASSERT (is_pow2 (lb3->lb_n_buckets));
198 
199  /* Use flow hash to compute multipath adjacency. */
200  hash_c0 = vnet_buffer (p0)->ip.flow_hash = 0;
201  hash_c1 = vnet_buffer (p1)->ip.flow_hash = 0;
202  hash_c2 = vnet_buffer (p2)->ip.flow_hash = 0;
203  hash_c3 = vnet_buffer (p3)->ip.flow_hash = 0;
204  if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
205  {
206  flow_hash_config0 = lb0->lb_hash_config;
207  hash_c0 = vnet_buffer (p0)->ip.flow_hash =
208  ip4_compute_flow_hash (ip0, flow_hash_config0);
209  dpo0 =
211  (hash_c0 &
212  (lb0->lb_n_buckets_minus_1)));
213  }
214  else
215  {
216  dpo0 = load_balance_get_bucket_i (lb0, 0);
217  }
218  if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
219  {
220  flow_hash_config1 = lb1->lb_hash_config;
221  hash_c1 = vnet_buffer (p1)->ip.flow_hash =
222  ip4_compute_flow_hash (ip1, flow_hash_config1);
223  dpo1 =
225  (hash_c1 &
226  (lb1->lb_n_buckets_minus_1)));
227  }
228  else
229  {
230  dpo1 = load_balance_get_bucket_i (lb1, 0);
231  }
232  if (PREDICT_FALSE (lb2->lb_n_buckets > 1))
233  {
234  flow_hash_config2 = lb2->lb_hash_config;
235  hash_c2 = vnet_buffer (p2)->ip.flow_hash =
236  ip4_compute_flow_hash (ip2, flow_hash_config2);
237  dpo2 =
239  (hash_c2 &
240  (lb2->lb_n_buckets_minus_1)));
241  }
242  else
243  {
244  dpo2 = load_balance_get_bucket_i (lb2, 0);
245  }
246  if (PREDICT_FALSE (lb3->lb_n_buckets > 1))
247  {
248  flow_hash_config3 = lb3->lb_hash_config;
249  hash_c3 = vnet_buffer (p3)->ip.flow_hash =
250  ip4_compute_flow_hash (ip3, flow_hash_config3);
251  dpo3 =
253  (hash_c3 &
254  (lb3->lb_n_buckets_minus_1)));
255  }
256  else
257  {
258  dpo3 = load_balance_get_bucket_i (lb3, 0);
259  }
260 
261  next0 = dpo0->dpoi_next_node;
262  vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
263  next1 = dpo1->dpoi_next_node;
264  vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
265  next2 = dpo2->dpoi_next_node;
266  vnet_buffer (p2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
267  next3 = dpo3->dpoi_next_node;
268  vnet_buffer (p3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;
269 
271  (cm, thread_index, lb_index0, 1,
272  vlib_buffer_length_in_chain (vm, p0));
274  (cm, thread_index, lb_index1, 1,
275  vlib_buffer_length_in_chain (vm, p1));
277  (cm, thread_index, lb_index2, 1,
278  vlib_buffer_length_in_chain (vm, p2));
280  (cm, thread_index, lb_index3, 1,
281  vlib_buffer_length_in_chain (vm, p3));
282 
283  vlib_validate_buffer_enqueue_x4 (vm, node, next,
284  to_next, n_left_to_next,
285  pi0, pi1, pi2, pi3,
286  next0, next1, next2, next3);
287  }
288 #elif (CLIB_N_PREFETCHES >= 4)
289  while (n_left_from >= 4 && n_left_to_next >= 2)
290  {
291  vlib_buffer_t *p0, *p1;
292  ip4_header_t *ip0, *ip1;
293  ip_lookup_next_t next0, next1;
294  const load_balance_t *lb0, *lb1;
295  ip4_fib_mtrie_t *mtrie0, *mtrie1;
296  ip4_fib_mtrie_leaf_t leaf0, leaf1;
297  ip4_address_t *dst_addr0, *dst_addr1;
298  u32 pi0, pi1, lb_index0, lb_index1;
299  flow_hash_config_t flow_hash_config0, flow_hash_config1;
300  u32 hash_c0, hash_c1;
301  const dpo_id_t *dpo0, *dpo1;
302 
303  /* Prefetch next iteration. */
304  {
305  vlib_buffer_t *p2, *p3;
306 
307  p2 = vlib_get_buffer (vm, from[2]);
308  p3 = vlib_get_buffer (vm, from[3]);
309 
310  vlib_prefetch_buffer_header (p2, LOAD);
311  vlib_prefetch_buffer_header (p3, LOAD);
312 
313  CLIB_PREFETCH (p2->data, sizeof (ip0[0]), LOAD);
314  CLIB_PREFETCH (p3->data, sizeof (ip0[0]), LOAD);
315  }
316 
317  pi0 = to_next[0] = from[0];
318  pi1 = to_next[1] = from[1];
319 
320  from += 2;
321  to_next += 2;
322  n_left_to_next -= 2;
323  n_left_from -= 2;
324 
325  p0 = vlib_get_buffer (vm, pi0);
326  p1 = vlib_get_buffer (vm, pi1);
327 
328  ip0 = vlib_buffer_get_current (p0);
329  ip1 = vlib_buffer_get_current (p1);
330 
331  dst_addr0 = &ip0->dst_address;
332  dst_addr1 = &ip1->dst_address;
333 
336 
337  if (!lookup_for_responses_to_locally_received_packets)
338  {
339  mtrie0 = &ip4_fib_get (vnet_buffer (p0)->ip.fib_index)->mtrie;
340  mtrie1 = &ip4_fib_get (vnet_buffer (p1)->ip.fib_index)->mtrie;
341 
342  leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
343  leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, dst_addr1);
344  }
345 
346  if (!lookup_for_responses_to_locally_received_packets)
347  {
348  leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
349  leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 2);
350  }
351 
352  if (!lookup_for_responses_to_locally_received_packets)
353  {
354  leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
355  leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 3);
356  }
357 
358  if (lookup_for_responses_to_locally_received_packets)
359  {
360  lb_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_RX];
361  lb_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_RX];
362  }
363  else
364  {
365  lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
366  lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
367  }
368 
369  ASSERT (lb_index0 && lb_index1);
370  lb0 = load_balance_get (lb_index0);
371  lb1 = load_balance_get (lb_index1);
372 
373  ASSERT (lb0->lb_n_buckets > 0);
374  ASSERT (is_pow2 (lb0->lb_n_buckets));
375  ASSERT (lb1->lb_n_buckets > 0);
376  ASSERT (is_pow2 (lb1->lb_n_buckets));
377 
378  /* Use flow hash to compute multipath adjacency. */
379  hash_c0 = vnet_buffer (p0)->ip.flow_hash = 0;
380  hash_c1 = vnet_buffer (p1)->ip.flow_hash = 0;
381  if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
382  {
383  flow_hash_config0 = lb0->lb_hash_config;
384  hash_c0 = vnet_buffer (p0)->ip.flow_hash =
385  ip4_compute_flow_hash (ip0, flow_hash_config0);
386  dpo0 =
388  (hash_c0 &
389  (lb0->lb_n_buckets_minus_1)));
390  }
391  else
392  {
393  dpo0 = load_balance_get_bucket_i (lb0, 0);
394  }
395  if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
396  {
397  flow_hash_config1 = lb1->lb_hash_config;
398  hash_c1 = vnet_buffer (p1)->ip.flow_hash =
399  ip4_compute_flow_hash (ip1, flow_hash_config1);
400  dpo1 =
402  (hash_c1 &
403  (lb1->lb_n_buckets_minus_1)));
404  }
405  else
406  {
407  dpo1 = load_balance_get_bucket_i (lb1, 0);
408  }
409 
410  next0 = dpo0->dpoi_next_node;
411  vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
412  next1 = dpo1->dpoi_next_node;
413  vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
414 
416  (cm, thread_index, lb_index0, 1,
417  vlib_buffer_length_in_chain (vm, p0));
419  (cm, thread_index, lb_index1, 1,
420  vlib_buffer_length_in_chain (vm, p1));
421 
422  vlib_validate_buffer_enqueue_x2 (vm, node, next,
423  to_next, n_left_to_next,
424  pi0, pi1, next0, next1);
425  }
426 #endif
427  while (n_left_from > 0 && n_left_to_next > 0)
428  {
429  vlib_buffer_t *p0;
430  ip4_header_t *ip0;
431  ip_lookup_next_t next0;
432  const load_balance_t *lb0;
433  ip4_fib_mtrie_t *mtrie0;
434  ip4_fib_mtrie_leaf_t leaf0;
435  ip4_address_t *dst_addr0;
436  u32 pi0, lbi0;
437  flow_hash_config_t flow_hash_config0;
438  const dpo_id_t *dpo0;
439  u32 hash_c0;
440 
441  pi0 = from[0];
442  to_next[0] = pi0;
443 
444  p0 = vlib_get_buffer (vm, pi0);
445  ip0 = vlib_buffer_get_current (p0);
446  dst_addr0 = &ip0->dst_address;
448 
449  if (!lookup_for_responses_to_locally_received_packets)
450  {
451  mtrie0 = &ip4_fib_get (vnet_buffer (p0)->ip.fib_index)->mtrie;
452  leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
453  }
454 
455  if (!lookup_for_responses_to_locally_received_packets)
456  leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
457 
458  if (!lookup_for_responses_to_locally_received_packets)
459  leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
460 
461  if (lookup_for_responses_to_locally_received_packets)
462  lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_RX];
463  else
464  {
465  /* Handle default route. */
466  lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
467  }
468 
469  ASSERT (lbi0);
470  lb0 = load_balance_get (lbi0);
471 
472  ASSERT (lb0->lb_n_buckets > 0);
473  ASSERT (is_pow2 (lb0->lb_n_buckets));
474 
475  /* Use flow hash to compute multipath adjacency. */
476  hash_c0 = vnet_buffer (p0)->ip.flow_hash = 0;
477  if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
478  {
479  flow_hash_config0 = lb0->lb_hash_config;
480 
481  hash_c0 = vnet_buffer (p0)->ip.flow_hash =
482  ip4_compute_flow_hash (ip0, flow_hash_config0);
483  dpo0 =
485  (hash_c0 &
486  (lb0->lb_n_buckets_minus_1)));
487  }
488  else
489  {
490  dpo0 = load_balance_get_bucket_i (lb0, 0);
491  }
492 
493  next0 = dpo0->dpoi_next_node;
494  vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
495 
496  vlib_increment_combined_counter (cm, thread_index, lbi0, 1,
498  p0));
499 
500  from += 1;
501  to_next += 1;
502  n_left_to_next -= 1;
503  n_left_from -= 1;
504 
505  if (PREDICT_FALSE (next0 != next))
506  {
507  n_left_to_next += 1;
508  vlib_put_next_frame (vm, node, next, n_left_to_next);
509  next = next0;
510  vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
511  to_next[0] = pi0;
512  to_next += 1;
513  n_left_to_next -= 1;
514  }
515  }
516 
517  vlib_put_next_frame (vm, node, next, n_left_to_next);
518  }
519 
520  if (node->flags & VLIB_NODE_FLAG_TRACE)
521  ip4_forward_next_trace (vm, node, frame, VLIB_TX);
522 
523  return frame->n_vectors;
524 }
525 
526 #endif /* __included_ip4_forward_h__ */
527 
528 /*
529  * fd.io coding-style-patch-verification: ON
530  *
531  * Local Variables:
532  * eval: (c-set-style "gnu")
533  * End:
534  */
u16 lb_n_buckets
number of buckets in the load-balance.
Definition: load_balance.h:94
vlib_combined_counter_main_t lbm_to_counters
Definition: load_balance.h:46
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:204
The multiway-TRIE.
Definition: ip4_mtrie.h:129
static ip4_fib_mtrie_leaf_t ip4_fib_mtrie_lookup_step(const ip4_fib_mtrie_t *m, ip4_fib_mtrie_leaf_t current_leaf, const ip4_address_t *dst_address, u32 dst_address_byte_index)
Lookup step.
Definition: ip4_mtrie.h:202
static u32 ip4_compute_flow_hash(const ip4_header_t *ip, flow_hash_config_t flow_hash_config)
Definition: ip4.h:296
flow_hash_config_t lb_hash_config
the hash config to use when selecting a bucket.
Definition: load_balance.h:134
static const dpo_id_t * load_balance_get_fwd_bucket(const load_balance_t *lb, u16 bucket)
u32 thread_index
Definition: main.h:179
#define vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, next0, next1, next2, next3)
Finish enqueueing four buffers forward in the graph.
Definition: buffer_node.h:138
u32 * fib_index_by_sw_if_index
Table index indexed by software interface.
Definition: ip4.h:112
static uword ip4_lookup_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, int lookup_for_responses_to_locally_received_packets)
Definition: ip4_forward.h:55
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
Definition: buffer_funcs.h:263
ip_lookup_next_t
An adjacency is a representation of an attached L3 peer.
Definition: adj.h:50
#define always_inline
Definition: clib.h:94
u16 lb_n_buckets_minus_1
number of buckets in the load-balance - 1.
Definition: load_balance.h:99
ip4_address_t dst_address
Definition: ip4_packet.h:169
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:187
u32 ip4_fib_mtrie_leaf_t
Definition: ip4_mtrie.h:52
unsigned int u32
Definition: types.h:88
static u32 ip4_fib_mtrie_leaf_get_adj_index(ip4_fib_mtrie_leaf_t n)
From the stored slot value extract the LB index value.
Definition: ip4_mtrie.h:192
The identity of a DPO is a combination of its type and its instance number/index of objects of that t...
Definition: dpo.h:168
static void ip_lookup_set_buffer_fib_index(u32 *fib_index_by_sw_if_index, vlib_buffer_t *b)
Definition: lookup.h:216
static const dpo_id_t * load_balance_get_bucket_i(const load_balance_t *lb, u32 bucket)
Definition: load_balance.h:209
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:205
The FIB DPO provides;.
Definition: load_balance.h:84
#define PREDICT_FALSE(x)
Definition: clib.h:107
load_balance_main_t load_balance_main
The one instance of load-balance main.
Definition: load_balance.c:56
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:364
ip4_fib_mtrie_t mtrie
Mtrie for fast lookups.
Definition: ip4_fib.h:48
u16 n_vectors
Definition: node.h:401
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:79
vlib_main_t * vm
Definition: buffer.c:294
static ip4_fib_t * ip4_fib_get(u32 index)
Get the FIB at the given index.
Definition: ip4_fib.h:113
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:455
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:513
#define ASSERT(truth)
static ip4_fib_mtrie_leaf_t ip4_fib_mtrie_lookup_step_one(const ip4_fib_mtrie_t *m, const ip4_address_t *dst_address)
Lookup step number 1.
Definition: ip4_mtrie.h:224
static load_balance_t * load_balance_get(index_t lbi)
Definition: load_balance.h:200
IPv4 main type.
Definition: ip4.h:96
u32 flow_hash_config_t
A flow hash configuration is a mask of the flow hash options.
Definition: lookup.h:82
static uword is_pow2(uword x)
Definition: clib.h:231
Definition: defs.h:47
index_t dpoi_index
the index of objects of that type
Definition: dpo.h:184
void ip4_forward_next_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vlib_rx_or_tx_t which_adj_index)
Definition: ip4_forward.c:1038
u64 uword
Definition: types.h:112
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:267
A collection of combined counters.
Definition: counter.h:172
#define vnet_buffer(b)
Definition: buffer.h:344
ip4_main_t ip4_main
Global ip4 main structure.
Definition: ip4_forward.c:900
u8 data[0]
Packet data.
Definition: buffer.h:175
u16 flags
Copy of main node flags.
Definition: node.h:507
u16 dpoi_next_node
The next VLIB node to follow.
Definition: dpo.h:180
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:310
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:58
Definition: defs.h:46