FD.io VPP  v16.09
Vector Packet Processing
mcast.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include <vnet/mcast/mcast.h>
16 
17 #include <vlib/vlib.h>
18 #include <vnet/vnet.h>
19 #include <vnet/pg/pg.h>
20 #include <vppinfra/error.h>
21 #include <vnet/ip/ip4_packet.h>
22 #include <vnet/ip/icmp46_packet.h>
23 #include <vnet/ip/ip4.h>
24 
25 typedef struct {
30 
31 /* packet trace format function */
32 static u8 * format_mcast_prep_trace (u8 * s, va_list * args)
33 {
34  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
35  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
36  mcast_prep_trace_t * t = va_arg (*args, mcast_prep_trace_t *);
37 
38  s = format (s, "MCAST_PREP: group %d, next index %d, tx_sw_if_index %d",
40  return s;
41 }
42 
46 
/* Error / counter definitions for the mcast-prep node. */
#define foreach_mcast_prep_error \
_(MCASTS, "Multicast Packets")

typedef enum {
#define _(sym,str) MCAST_PREP_ERROR_##sym,
  foreach_mcast_prep_error
#undef _
  MCAST_PREP_N_ERROR,
} mcast_prep_error_t;

static char * mcast_prep_error_strings[] = {
#define _(sym,string) string,
  foreach_mcast_prep_error
#undef _
};
62 
/* Next-node dispositions for mcast-prep. */
typedef enum {
  MCAST_PREP_NEXT_DROP,
  MCAST_PREP_N_NEXT,
} mcast_prep_next_t;
68 static uword
70  vlib_node_runtime_t * node,
71  vlib_frame_t * frame)
72 {
73  u32 n_left_from, * from, * to_next;
74  mcast_prep_next_t next_index;
75  mcast_main_t * mcm = &mcast_main;
76  vlib_node_t *n = vlib_get_node (vm, mcast_prep_node.index);
77  u32 node_counter_base_index = n->error_heap_index;
78  vlib_error_main_t * em = &vm->error_main;
79  ip4_main_t * im = &ip4_main;
80  ip_lookup_main_t * lm = &im->lookup_main;
81 
82  from = vlib_frame_vector_args (frame);
83  n_left_from = frame->n_vectors;
84  next_index = node->cached_next_index;
85 
86  while (n_left_from > 0)
87  {
88  u32 n_left_to_next;
89 
90  vlib_get_next_frame (vm, node, next_index,
91  to_next, n_left_to_next);
92 
93  while (0 && n_left_from >= 4 && n_left_to_next >= 2)
94  {
95  u32 bi0, bi1;
96  vlib_buffer_t * b0, * b1;
97  u32 next0, next1;
98  u32 sw_if_index0, sw_if_index1;
99 
100  /* Prefetch next iteration. */
101  {
102  vlib_buffer_t * p2, * p3;
103 
104  p2 = vlib_get_buffer (vm, from[2]);
105  p3 = vlib_get_buffer (vm, from[3]);
106 
107  vlib_prefetch_buffer_header (p2, LOAD);
108  vlib_prefetch_buffer_header (p3, LOAD);
109 
112  }
113 
114  /* speculatively enqueue b0 and b1 to the current next frame */
115  to_next[0] = bi0 = from[0];
116  to_next[1] = bi1 = from[1];
117  from += 2;
118  to_next += 2;
119  n_left_from -= 2;
120  n_left_to_next -= 2;
121 
122  b0 = vlib_get_buffer (vm, bi0);
123  b1 = vlib_get_buffer (vm, bi1);
124 
125  sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
126  next0 = 0;
127  sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
128  next1 = 0;
129 
130  /* $$$$ your message in this space. Process 2 x pkts */
131 
133  {
134  if (b0->flags & VLIB_BUFFER_IS_TRACED)
135  {
136  mcast_prep_trace_t *t =
137  vlib_add_trace (vm, node, b0, sizeof (*t));
138  t->sw_if_index = sw_if_index0;
139  t->next_index = next0;
140  }
141  if (b1->flags & VLIB_BUFFER_IS_TRACED)
142  {
143  mcast_prep_trace_t *t =
144  vlib_add_trace (vm, node, b1, sizeof (*t));
145  t->sw_if_index = sw_if_index1;
146  t->next_index = next1;
147  }
148  }
149 
150  /* verify speculative enqueues, maybe switch current next frame */
151  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
152  to_next, n_left_to_next,
153  bi0, bi1, next0, next1);
154  }
155 
156  while (n_left_from > 0 && n_left_to_next > 0)
157  {
158  u32 bi0;
159  vlib_buffer_t * b0;
160  u32 next0, adj_index0;
161  mcast_group_t * g0;
162  ip_adjacency_t * adj0;
163 
164  /* speculatively enqueue b0 to the current next frame */
165  bi0 = from[0];
166  to_next[0] = bi0;
167  from += 1;
168  to_next += 1;
169  n_left_from -= 1;
170  n_left_to_next -= 1;
171 
172  b0 = vlib_get_buffer (vm, bi0);
173 
174  adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
175  adj0 = ip_get_adjacency (lm, adj_index0);
176  vnet_buffer(b0)->mcast.mcast_group_index = adj0->mcast_group_index;
177  g0 = pool_elt_at_index (mcm->groups, adj0->mcast_group_index);
178 
179  /*
180  * Handle the degenerate single-copy case
181  * If we don't change the freelist, the packet will never
182  * make it to the recycle node...
183  */
184  if (PREDICT_TRUE(vec_len (g0->members) > 1))
185  {
186  /* Save the original free list index */
187  vnet_buffer(b0)->mcast.original_free_list_index =
188  b0->free_list_index;
189 
190  /* Swap in the multicast recycle list */
192 
193  /*
194  * Make sure that intermediate "frees" don't screw up
195  */
196  b0->recycle_count = vec_len (g0->members);
197  b0->flags |= VLIB_BUFFER_RECYCLE;
198 
199  /* Set up for the recycle node */
200  vnet_buffer(b0)->mcast.mcast_current_index = 1;
201  }
202 
203  /* Transmit the pkt on the first interface */
205  vnet_buffer(b0)->sw_if_index[VLIB_TX] =
206  g0->members[0].tx_sw_if_index;
207 
209  && (b0->flags & VLIB_BUFFER_IS_TRACED))) {
210  mcast_prep_trace_t *t =
211  vlib_add_trace (vm, node, b0, sizeof (*t));
212  t->next_index = next0;
213  t->sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_TX];
214  t->group_index = vnet_buffer(b0)->mcast.mcast_group_index;
215  }
216 
217  /* verify speculative enqueue, maybe switch current next frame */
218  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
219  to_next, n_left_to_next,
220  bi0, next0);
221  }
222 
223  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
224  }
225 
226  em->counters[node_counter_base_index + MCAST_PREP_ERROR_MCASTS] +=
227  frame->n_vectors;
228 
229  return frame->n_vectors;
230 }
231 
233  .function = mcast_prep_node_fn,
234  .name = "mcast_prep",
235  .vector_size = sizeof (u32),
236  .format_trace = format_mcast_prep_trace,
238 
239  .n_errors = ARRAY_LEN(mcast_prep_error_strings),
240  .error_strings = mcast_prep_error_strings,
241 
242  .n_next_nodes = MCAST_PREP_N_NEXT,
243 
244  /* edit / add dispositions here */
245  .next_nodes = {
246  [MCAST_PREP_NEXT_DROP] = "error-drop",
247  },
248 };
249 
250 typedef struct {
256 
257 static u8 * format_mcast_recycle_trace (u8 * s, va_list * args)
258 {
259  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
260  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
261  mcast_recycle_trace_t * t = va_arg (*args, mcast_recycle_trace_t *);
262 
263  s = format (s,
264 "MCAST_R: group %d, current member %d next (node) index %d, tx_sw_if_index %d",
266  return s;
267 }
268 
/* Error / counter definitions for the mcast-recycle node. */
#define foreach_mcast_recycle_error \
_(RECYCLES, "Multicast Recycles")

typedef enum {
#define _(sym,str) MCAST_RECYCLE_ERROR_##sym,
  foreach_mcast_recycle_error
#undef _
  MCAST_RECYCLE_N_ERROR,
} mcast_recycle_error_t;

static char * mcast_recycle_error_strings[] = {
#define _(sym,string) string,
  foreach_mcast_recycle_error
#undef _
};
284 
/* Next-node dispositions for mcast-recycle. */
typedef enum {
  MCAST_RECYCLE_NEXT_DROP,
  MCAST_RECYCLE_N_NEXT,
} mcast_recycle_next_t;
290 static uword
292  vlib_node_runtime_t * node,
293  vlib_frame_t * frame)
294 {
295  u32 n_left_from, * from, * to_next;
296  mcast_recycle_next_t next_index;
297  mcast_main_t * mcm = &mcast_main;
299  u32 node_counter_base_index = n->error_heap_index;
300  vlib_error_main_t * em = &vm->error_main;
301 
302  from = vlib_frame_vector_args (frame);
303  n_left_from = frame->n_vectors;
304  next_index = node->cached_next_index;
305 
306  while (n_left_from > 0)
307  {
308  u32 n_left_to_next;
309 
310  vlib_get_next_frame (vm, node, next_index,
311  to_next, n_left_to_next);
312 
313  while (0 && n_left_from >= 4 && n_left_to_next >= 2)
314  {
315  u32 bi0, bi1;
316  vlib_buffer_t * b0, * b1;
317  u32 next0, next1;
318  u32 sw_if_index0, sw_if_index1;
319 
320  /* Prefetch next iteration. */
321  {
322  vlib_buffer_t * p2, * p3;
323 
324  p2 = vlib_get_buffer (vm, from[2]);
325  p3 = vlib_get_buffer (vm, from[3]);
326 
327  vlib_prefetch_buffer_header (p2, LOAD);
328  vlib_prefetch_buffer_header (p3, LOAD);
329 
332  }
333 
334  /* speculatively enqueue b0 and b1 to the current next frame */
335  to_next[0] = bi0 = from[0];
336  to_next[1] = bi1 = from[1];
337  from += 2;
338  to_next += 2;
339  n_left_from -= 2;
340  n_left_to_next -= 2;
341 
342  b0 = vlib_get_buffer (vm, bi0);
343  b1 = vlib_get_buffer (vm, bi1);
344 
345  sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
346  next0 = 0;
347  sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
348  next1 = 0;
349 
350  /* $$$$ your message in this space. Process 2 x pkts */
351 
353  {
354  if (b0->flags & VLIB_BUFFER_IS_TRACED)
355  {
357  vlib_add_trace (vm, node, b0, sizeof (*t));
358  t->sw_if_index = sw_if_index0;
359  t->next_index = next0;
360  }
361  if (b1->flags & VLIB_BUFFER_IS_TRACED)
362  {
364  vlib_add_trace (vm, node, b1, sizeof (*t));
365  t->sw_if_index = sw_if_index1;
366  t->next_index = next1;
367  }
368  }
369 
370  /* verify speculative enqueues, maybe switch current next frame */
371  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
372  to_next, n_left_to_next,
373  bi0, bi1, next0, next1);
374  }
375 
376  while (n_left_from > 0 && n_left_to_next > 0)
377  {
378  u32 bi0;
379  vlib_buffer_t * b0;
380  u32 next0;
381  u32 current_member0;
382  mcast_group_t * g0;
383 
384  /* speculatively enqueue b0 to the current next frame */
385  bi0 = from[0];
386  to_next[0] = bi0;
387  from += 1;
388  to_next += 1;
389  n_left_from -= 1;
390  n_left_to_next -= 1;
391 
392  b0 = vlib_get_buffer (vm, bi0);
393 
394  g0 = pool_elt_at_index (mcm->groups,
395  vnet_buffer(b0)->mcast.mcast_group_index);
396 
397  /* No more replicas? */
398  if (b0->recycle_count == 1)
399  {
400  /* Restore the original free list index */
401  b0->free_list_index =
402  vnet_buffer(b0)->mcast.original_free_list_index;
403  b0->flags &= ~(VLIB_BUFFER_RECYCLE);
404  }
405  current_member0 = vnet_buffer(b0)->mcast.mcast_current_index;
406 
407  next0 =
408  g0->members[current_member0].prep_and_recycle_node_next_index;
409  vnet_buffer(b0)->sw_if_index[VLIB_TX] =
410  g0->members[current_member0].tx_sw_if_index;
411 
412  vnet_buffer(b0)->mcast.mcast_current_index =
413  current_member0 + 1;
414 
416  && (b0->flags & VLIB_BUFFER_IS_TRACED))) {
418  vlib_add_trace (vm, node, b0, sizeof (*t));
419  t->next_index = next0;
420  t->sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_TX];
421  t->group_index = vnet_buffer(b0)->mcast.mcast_group_index;
422  t->current_member = current_member0;
423  }
424 
425  /* verify speculative enqueue, maybe switch current next frame */
426  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
427  to_next, n_left_to_next,
428  bi0, next0);
429  }
430  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
431  }
432 
433  em->counters[node_counter_base_index + MCAST_RECYCLE_ERROR_RECYCLES] +=
434  frame->n_vectors;
435 
436  return frame->n_vectors;
437 }
438 
440  .function = mcast_recycle_node_fn,
441  .name = "mcast-recycle",
442  .vector_size = sizeof (u32),
443  .format_trace = format_mcast_recycle_trace,
445 
447  .error_strings = mcast_recycle_error_strings,
448 
449  .n_next_nodes = MCAST_RECYCLE_N_NEXT,
450 
451  /* edit / add dispositions here */
452  .next_nodes = {
453  [MCAST_RECYCLE_NEXT_DROP] = "error-drop",
454  },
455 };
456 
457 /*
458  * fish pkts back from the recycle queue/freelist
459  * un-flatten the context chains
460  */
463 {
464  vlib_frame_t * f = 0;
465  u32 n_left_from;
466  u32 n_left_to_next = 0;
467  u32 n_this_frame = 0;
468  u32 * from;
469  u32 * to_next;
470  u32 bi0, pi0;
471  vlib_buffer_t *b0;
472  vlib_buffer_t *bnext0;
473  int i;
474 
475  /* aligned, unaligned buffers */
476  for (i = 0; i < 2; i++)
477  {
478  if (i == 0)
479  {
480  from = fl->aligned_buffers;
481  n_left_from = vec_len (from);
482  }
483  else
484  {
485  from = fl->unaligned_buffers;
486  n_left_from = vec_len (from);
487  }
488 
489  while (n_left_from > 0)
490  {
491  if (PREDICT_FALSE(n_left_to_next == 0))
492  {
493  if (f)
494  {
495  f->n_vectors = n_this_frame;
497  }
498 
500  to_next = vlib_frame_vector_args (f);
501  n_left_to_next = VLIB_FRAME_SIZE;
502  n_this_frame = 0;
503  }
504 
505  bi0 = from[0];
506  if (PREDICT_TRUE(n_left_from > 1))
507  {
508  pi0 = from[1];
509  vlib_prefetch_buffer_with_index(vm,pi0,LOAD);
510  }
511 
512  bnext0 = b0 = vlib_get_buffer (vm, bi0);
513 
514  while (bnext0->flags & VLIB_BUFFER_NEXT_PRESENT)
515  {
516  from += 1;
517  n_left_from -= 1;
518  bnext0 = vlib_get_buffer (vm, bnext0->next_buffer);
519  }
520  to_next[0] = bi0;
521 
522  if (CLIB_DEBUG > 0)
523  vlib_buffer_set_known_state (vm, bi0, VLIB_BUFFER_KNOWN_ALLOCATED);
524 
525  from++;
526  to_next++;
527  n_this_frame++;
528  n_left_to_next--;
529  n_left_from--;
530  }
531  }
532 
535 
536  if (f)
537  {
538  ASSERT(n_this_frame);
539  f->n_vectors = n_this_frame;
541  }
542 }
543 
545 {
546  mcast_main_t * mcm = &mcast_main;
547  vlib_buffer_main_t * bm = vm->buffer_main;
549 
550  mcm->vlib_main = vm;
551  mcm->vnet_main = vnet_get_main();
553  vlib_buffer_create_free_list (vm, 1024 /* fictional */, "mcast-recycle");
554 
557 
559 
560  return 0;
561 }
562 
VLIB_INIT_FUNCTION (mcast_init);
565 
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:457
mcast_group_t * groups
Definition: mcast.h:38
u32 error_heap_index
Definition: node.h:278
sll srl srl sll sra u16x4 i
Definition: vector_sse2.h:343
#define CLIB_UNUSED(x)
Definition: clib.h:79
vlib_node_registration_t mcast_recycle_node
(constructor) VLIB_REGISTER_NODE (mcast_recycle_node)
Definition: mcast.c:45
mcast_prep_next_t
Definition: mcast.c:63
bad routing header type(not 4)") sr_error (NO_MORE_SEGMENTS
u32 free_list_index
Buffer free list that this buffer was allocated from and will be freed to.
Definition: buffer.h:105
#define PREDICT_TRUE(x)
Definition: clib.h:98
IP unicast adjacency.
Definition: lookup.h:164
u32 mcast_recycle_list_index
Definition: mcast.h:41
struct _vlib_node_registration vlib_node_registration_t
vlib_buffer_main_t * buffer_main
Definition: main.h:104
u32 recycle_count
Used by L2 path recycle code.
Definition: buffer.h:138
ip_lookup_main_t lookup_main
Definition: ip4.h:115
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
mcast_group_member_t * members
Definition: mcast.h:33
#define foreach_mcast_prep_error
Definition: mcast.c:47
vnet_main_t * vnet_get_main(void)
Definition: misc.c:45
#define foreach_mcast_recycle_error
Definition: mcast.c:269
static u8 * format_mcast_prep_trace(u8 *s, va_list *args)
Definition: mcast.c:32
#define vlib_prefetch_buffer_with_index(vm, bi, type)
Prefetch buffer metadata by buffer index The first 64 bytes of buffer contains most header informatio...
Definition: buffer_funcs.h:182
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:111
mcast_recycle_next_t
Definition: mcast.c:285
u16 mcast_group_index
Definition: lookup.h:186
#define VLIB_BUFFER_NEXT_PRESENT
Definition: buffer.h:95
static void mcast_recycle_callback(vlib_main_t *vm, vlib_buffer_free_list_t *fl)
Definition: mcast.c:461
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:369
vlib_error_main_t error_main
Definition: main.h:124
#define PREDICT_FALSE(x)
Definition: clib.h:97
#define VLIB_FRAME_SIZE
Definition: node.h:328
void vlib_put_frame_to_node(vlib_main_t *vm, u32 to_node_index, vlib_frame_t *f)
Definition: main.c:194
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:130
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:348
mcast_prep_error_t
Definition: mcast.c:50
mcast_main_t mcast_main
Definition: mcast.c:43
u64 * counters
Definition: error.h:78
u16 n_vectors
Definition: node.h:344
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:82
u32 prep_and_recycle_node_next_index
Definition: mcast.h:25
static uword mcast_prep_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: mcast.c:69
static char * mcast_recycle_error_strings[]
Definition: mcast.c:279
#define VLIB_BUFFER_RECYCLE
Definition: buffer.h:99
void(* buffers_added_to_freelist_function)(struct vlib_main_t *vm, struct vlib_buffer_free_list_t *fl)
Definition: buffer.h:287
#define ARRAY_LEN(x)
Definition: clib.h:59
clib_error_t * mcast_init(vlib_main_t *vm)
Definition: mcast.c:544
static char * mcast_prep_error_strings[]
Definition: mcast.c:57
u16 cached_next_index
Definition: node.h:462
#define ASSERT(truth)
unsigned int u32
Definition: types.h:88
#define vnet_buffer(b)
Definition: buffer.h:335
u8 * format(u8 *s, char *fmt,...)
Definition: format.c:418
IPv4 main type.
Definition: ip4.h:114
static uword mcast_recycle_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: mcast.c:291
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:114
static u8 * format_mcast_recycle_trace(u8 *s, va_list *args)
Definition: mcast.c:257
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:259
vnet_main_t * vnet_main
Definition: mcast.h:45
#define VLIB_BUFFER_IS_TRACED
Definition: buffer.h:93
u64 uword
Definition: types.h:112
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
Definition: defs.h:47
vlib_node_registration_t mcast_prep_node
(constructor) VLIB_REGISTER_NODE (mcast_prep_node)
Definition: mcast.c:44
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
unsigned char u8
Definition: types.h:56
vlib_buffer_free_list_t * buffer_free_list_pool
Definition: buffer.h:302
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:251
vlib_main_t * vlib_main
Definition: mcast.h:44
mcast_recycle_error_t
Definition: mcast.c:272
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:163
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:143
ip4_main_t ip4_main
Global ip4 main structure.
Definition: ip4_forward.c:1578
u8 data[0]
Packet data.
Definition: buffer.h:151
static vlib_node_t * vlib_get_node(vlib_main_t *vm, u32 i)
Get vlib node by index.
Definition: node_funcs.h:58
vlib_frame_t * vlib_get_frame_to_node(vlib_main_t *vm, u32 to_node_index)
Definition: main.c:185
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:67
u32 flags
buffer flags: VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:85
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:69
u32 vlib_buffer_create_free_list(vlib_main_t *vm, u32 n_data_bytes, char *fmt,...)
Definition: dpdk_buffer.c:366
Definition: defs.h:46
static ip_adjacency_t * ip_get_adjacency(ip_lookup_main_t *lm, u32 adj_index)
Definition: lookup.h:480