/*
 * Copyright (c) 2015-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ip/ip4_forward.h: IP v4 forwarding
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef __included_ip4_forward_h__
#define __included_ip4_forward_h__

#include <vppinfra/cache.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/ip/ip4_inlines.h>

/**
 * @file
 * @brief IPv4 Forwarding.
 *
 * This file contains the source code for IPv4 forwarding.
 */
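
/*
 * ip4_lookup_inline: for each packet in the frame, walk the per-FIB
 * mtrie on the destination address, resolve the result to a load-balance,
 * pick a DPO bucket (flow-hashing only when the path is ECMP), store the
 * adjacency index in the buffer metadata and enqueue the packet to the
 * DPO's next graph node.  The loop is unrolled 4-wide or 2-wide depending
 * on CLIB_N_PREFETCHES.
 */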
always_inline uword
ip4_lookup_inline (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  ip4_main_t *im = &ip4_main;
  vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
  u32 n_left, *from;
  u32 thread_index = vm->thread_index;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next;

  from = vlib_frame_vector_args (frame);
  n_left = frame->n_vectors;
  next = nexts;
  vlib_get_buffers (vm, from, bufs, n_left);

#if (CLIB_N_PREFETCHES >= 8)
  while (n_left >= 4)
    {
      ip4_header_t *ip0, *ip1, *ip2, *ip3;
      const load_balance_t *lb0, *lb1, *lb2, *lb3;
      ip4_fib_mtrie_t *mtrie0, *mtrie1, *mtrie2, *mtrie3;
      ip4_fib_mtrie_leaf_t leaf0, leaf1, leaf2, leaf3;
      ip4_address_t *dst_addr0, *dst_addr1, *dst_addr2, *dst_addr3;
      u32 lb_index0, lb_index1, lb_index2, lb_index3;
      flow_hash_config_t flow_hash_config0, flow_hash_config1;
      flow_hash_config_t flow_hash_config2, flow_hash_config3;
      u32 hash_c0, hash_c1, hash_c2, hash_c3;
      const dpo_id_t *dpo0, *dpo1, *dpo2, *dpo3;

      /* Prefetch next iteration. */
      if (n_left >= 8)
        {
          vlib_prefetch_buffer_header (b[4], LOAD);
          vlib_prefetch_buffer_header (b[5], LOAD);
          vlib_prefetch_buffer_header (b[6], LOAD);
          vlib_prefetch_buffer_header (b[7], LOAD);

          CLIB_PREFETCH (b[4]->data, sizeof (ip0[0]), LOAD);
          CLIB_PREFETCH (b[5]->data, sizeof (ip0[0]), LOAD);
          CLIB_PREFETCH (b[6]->data, sizeof (ip0[0]), LOAD);
          CLIB_PREFETCH (b[7]->data, sizeof (ip0[0]), LOAD);
        }

      ip0 = vlib_buffer_get_current (b[0]);
      ip1 = vlib_buffer_get_current (b[1]);
      ip2 = vlib_buffer_get_current (b[2]);
      ip3 = vlib_buffer_get_current (b[3]);

      dst_addr0 = &ip0->dst_address;
      dst_addr1 = &ip1->dst_address;
      dst_addr2 = &ip2->dst_address;
      dst_addr3 = &ip3->dst_address;

      ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
      ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[1]);
      ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[2]);
      ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[3]);

      mtrie0 = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
      mtrie1 = &ip4_fib_get (vnet_buffer (b[1])->ip.fib_index)->mtrie;
      mtrie2 = &ip4_fib_get (vnet_buffer (b[2])->ip.fib_index)->mtrie;
      mtrie3 = &ip4_fib_get (vnet_buffer (b[3])->ip.fib_index)->mtrie;

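      /* The mtrie is a 16-8-8 stride trie: step one consumes the two
       * most-significant destination-address bytes, the remaining two
       * steps consume address bytes 2 and 3. */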
      leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
      leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, dst_addr1);
      leaf2 = ip4_fib_mtrie_lookup_step_one (mtrie2, dst_addr2);
      leaf3 = ip4_fib_mtrie_lookup_step_one (mtrie3, dst_addr3);

      leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
      leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 2);
      leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 2);
      leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 2);

      leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
      leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 3);
      leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 3);
      leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 3);

      lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
      lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
      lb_index2 = ip4_fib_mtrie_leaf_get_adj_index (leaf2);
      lb_index3 = ip4_fib_mtrie_leaf_get_adj_index (leaf3);

      ASSERT (lb_index0 && lb_index1 && lb_index2 && lb_index3);
      lb0 = load_balance_get (lb_index0);
      lb1 = load_balance_get (lb_index1);
      lb2 = load_balance_get (lb_index2);
      lb3 = load_balance_get (lb_index3);

      ASSERT (lb0->lb_n_buckets > 0);
      ASSERT (is_pow2 (lb0->lb_n_buckets));
      ASSERT (lb1->lb_n_buckets > 0);
      ASSERT (is_pow2 (lb1->lb_n_buckets));
      ASSERT (lb2->lb_n_buckets > 0);
      ASSERT (is_pow2 (lb2->lb_n_buckets));
      ASSERT (lb3->lb_n_buckets > 0);
      ASSERT (is_pow2 (lb3->lb_n_buckets));

      /* Use flow hash to compute multipath adjacency. */
      hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
      hash_c1 = vnet_buffer (b[1])->ip.flow_hash = 0;
      hash_c2 = vnet_buffer (b[2])->ip.flow_hash = 0;
      hash_c3 = vnet_buffer (b[3])->ip.flow_hash = 0;
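      /* Only ECMP load-balances (more than one bucket) need a flow hash;
       * the bucket count is a power of two, so the hash is masked with
       * lb_n_buckets_minus_1 instead of taking a modulo.  The common
       * single-path case goes straight to bucket 0. */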
      if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
        {
          flow_hash_config0 = lb0->lb_hash_config;
          hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
            ip4_compute_flow_hash (ip0, flow_hash_config0);
          dpo0 =
            load_balance_get_fwd_bucket (lb0,
                                         (hash_c0 &
                                          (lb0->lb_n_buckets_minus_1)));
        }
      else
        {
          dpo0 = load_balance_get_bucket_i (lb0, 0);
        }
      if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
        {
          flow_hash_config1 = lb1->lb_hash_config;
          hash_c1 = vnet_buffer (b[1])->ip.flow_hash =
            ip4_compute_flow_hash (ip1, flow_hash_config1);
          dpo1 =
            load_balance_get_fwd_bucket (lb1,
                                         (hash_c1 &
                                          (lb1->lb_n_buckets_minus_1)));
        }
      else
        {
          dpo1 = load_balance_get_bucket_i (lb1, 0);
        }
      if (PREDICT_FALSE (lb2->lb_n_buckets > 1))
        {
          flow_hash_config2 = lb2->lb_hash_config;
          hash_c2 = vnet_buffer (b[2])->ip.flow_hash =
            ip4_compute_flow_hash (ip2, flow_hash_config2);
          dpo2 =
            load_balance_get_fwd_bucket (lb2,
                                         (hash_c2 &
                                          (lb2->lb_n_buckets_minus_1)));
        }
      else
        {
          dpo2 = load_balance_get_bucket_i (lb2, 0);
        }
      if (PREDICT_FALSE (lb3->lb_n_buckets > 1))
        {
          flow_hash_config3 = lb3->lb_hash_config;
          hash_c3 = vnet_buffer (b[3])->ip.flow_hash =
            ip4_compute_flow_hash (ip3, flow_hash_config3);
          dpo3 =
            load_balance_get_fwd_bucket (lb3,
                                         (hash_c3 &
                                          (lb3->lb_n_buckets_minus_1)));
        }
      else
        {
          dpo3 = load_balance_get_bucket_i (lb3, 0);
        }

      next[0] = dpo0->dpoi_next_node;
      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
      next[1] = dpo1->dpoi_next_node;
      vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
      next[2] = dpo2->dpoi_next_node;
      vnet_buffer (b[2])->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
      next[3] = dpo3->dpoi_next_node;
      vnet_buffer (b[3])->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;
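
      /* Per-load-balance combined counters: count one packet and the
       * byte length of the whole buffer chain against the LB entry
       * that forwarded it. */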
      vlib_increment_combined_counter
        (cm, thread_index, lb_index0, 1,
         vlib_buffer_length_in_chain (vm, b[0]));
      vlib_increment_combined_counter
        (cm, thread_index, lb_index1, 1,
         vlib_buffer_length_in_chain (vm, b[1]));
      vlib_increment_combined_counter
        (cm, thread_index, lb_index2, 1,
         vlib_buffer_length_in_chain (vm, b[2]));
      vlib_increment_combined_counter
        (cm, thread_index, lb_index3, 1,
         vlib_buffer_length_in_chain (vm, b[3]));

      b += 4;
      next += 4;
      n_left -= 4;
    }
#elif (CLIB_N_PREFETCHES >= 4)
  while (n_left >= 4)
    {
      ip4_header_t *ip0, *ip1;
      const load_balance_t *lb0, *lb1;
      ip4_fib_mtrie_t *mtrie0, *mtrie1;
      ip4_fib_mtrie_leaf_t leaf0, leaf1;
      ip4_address_t *dst_addr0, *dst_addr1;
      u32 lb_index0, lb_index1;
      flow_hash_config_t flow_hash_config0, flow_hash_config1;
      u32 hash_c0, hash_c1;
      const dpo_id_t *dpo0, *dpo1;

      /* Prefetch next iteration. */
      {
        vlib_prefetch_buffer_header (b[2], LOAD);
        vlib_prefetch_buffer_header (b[3], LOAD);

        CLIB_PREFETCH (b[2]->data, sizeof (ip0[0]), LOAD);
        CLIB_PREFETCH (b[3]->data, sizeof (ip0[0]), LOAD);
      }

      ip0 = vlib_buffer_get_current (b[0]);
      ip1 = vlib_buffer_get_current (b[1]);

      dst_addr0 = &ip0->dst_address;
      dst_addr1 = &ip1->dst_address;

      ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
      ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[1]);

      mtrie0 = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
      mtrie1 = &ip4_fib_get (vnet_buffer (b[1])->ip.fib_index)->mtrie;

      leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
      leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, dst_addr1);

      leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
      leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 2);

      leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
      leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 3);

      lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
      lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);

      ASSERT (lb_index0 && lb_index1);
      lb0 = load_balance_get (lb_index0);
      lb1 = load_balance_get (lb_index1);

      ASSERT (lb0->lb_n_buckets > 0);
      ASSERT (is_pow2 (lb0->lb_n_buckets));
      ASSERT (lb1->lb_n_buckets > 0);
      ASSERT (is_pow2 (lb1->lb_n_buckets));

      /* Use flow hash to compute multipath adjacency. */
      hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
      hash_c1 = vnet_buffer (b[1])->ip.flow_hash = 0;
      if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
        {
          flow_hash_config0 = lb0->lb_hash_config;
          hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
            ip4_compute_flow_hash (ip0, flow_hash_config0);
          dpo0 =
            load_balance_get_fwd_bucket (lb0,
                                         (hash_c0 &
                                          (lb0->lb_n_buckets_minus_1)));
        }
      else
        {
          dpo0 = load_balance_get_bucket_i (lb0, 0);
        }
      if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
        {
          flow_hash_config1 = lb1->lb_hash_config;
          hash_c1 = vnet_buffer (b[1])->ip.flow_hash =
            ip4_compute_flow_hash (ip1, flow_hash_config1);
          dpo1 =
            load_balance_get_fwd_bucket (lb1,
                                         (hash_c1 &
                                          (lb1->lb_n_buckets_minus_1)));
        }
      else
        {
          dpo1 = load_balance_get_bucket_i (lb1, 0);
        }

      next[0] = dpo0->dpoi_next_node;
      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
      next[1] = dpo1->dpoi_next_node;
      vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

      vlib_increment_combined_counter
        (cm, thread_index, lb_index0, 1,
         vlib_buffer_length_in_chain (vm, b[0]));
      vlib_increment_combined_counter
        (cm, thread_index, lb_index1, 1,
         vlib_buffer_length_in_chain (vm, b[1]));

      b += 2;
      next += 2;
      n_left -= 2;
    }
#endif
  while (n_left > 0)
    {
      ip4_header_t *ip0;
      const load_balance_t *lb0;
      ip4_fib_mtrie_t *mtrie0;
      ip4_fib_mtrie_leaf_t leaf0;
      ip4_address_t *dst_addr0;
      u32 lbi0;
      flow_hash_config_t flow_hash_config0;
      const dpo_id_t *dpo0;
      u32 hash_c0;

      ip0 = vlib_buffer_get_current (b[0]);
      dst_addr0 = &ip0->dst_address;
      ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);

      mtrie0 = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
      leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
      leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
      leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
      lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);

      ASSERT (lbi0);
      lb0 = load_balance_get (lbi0);

      ASSERT (lb0->lb_n_buckets > 0);
      ASSERT (is_pow2 (lb0->lb_n_buckets));

      /* Use flow hash to compute multipath adjacency. */
      hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
      if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
        {
          flow_hash_config0 = lb0->lb_hash_config;

          hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
            ip4_compute_flow_hash (ip0, flow_hash_config0);
          dpo0 =
            load_balance_get_fwd_bucket (lb0,
                                         (hash_c0 &
                                          (lb0->lb_n_buckets_minus_1)));
        }
      else
        {
          dpo0 = load_balance_get_bucket_i (lb0, 0);
        }

      next[0] = dpo0->dpoi_next_node;
      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

      vlib_increment_combined_counter (cm, thread_index, lbi0, 1,
                                       vlib_buffer_length_in_chain (vm,
                                                                    b[0]));

      b += 1;
      next += 1;
      n_left -= 1;
    }

  vlib_buffer_enqueue_to_next (vm, node, from, (u16 *) nexts,
                               frame->n_vectors);

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  return frame->n_vectors;
}

#endif /* __included_ip4_forward_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */