FD.io VPP v16.06
Vector Packet Processing
mcast.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/mcast/mcast.h>

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vppinfra/error.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/icmp46_packet.h>
#include <vnet/ip/ip4.h>

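/*
 * Annotation (added commentary, not in the original file): this file
 * implements buffer-level packet replication without cloning. The
 * "mcast_prep" node transmits the first copy and, for groups with more
 * than one member, parks the buffer on a dedicated recycle free list.
 * When the driver frees the buffer after transmit, the free-list
 * callback (mcast_recycle_callback, below) fishes it back and hands it
 * to the "mcast-recycle" node, which transmits the next copy; the cycle
 * repeats until every group member has been served.
 */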
typedef struct {
  u32 sw_if_index;
  u32 next_index;
  u32 group_index;
} mcast_prep_trace_t;

/* packet trace format function */
static u8 * format_mcast_prep_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mcast_prep_trace_t * t = va_arg (*args, mcast_prep_trace_t *);

  s = format (s, "MCAST_PREP: group %d, next index %d, tx_sw_if_index %d",
              t->group_index, t->next_index, t->sw_if_index);
  return s;
}

mcast_main_t mcast_main;
vlib_node_registration_t mcast_prep_node;
vlib_node_registration_t mcast_recycle_node;

#define foreach_mcast_prep_error \
_(MCASTS, "Multicast Packets")

typedef enum {
#define _(sym,str) MCAST_PREP_ERROR_##sym,
  foreach_mcast_prep_error
#undef _
  MCAST_PREP_N_ERROR,
} mcast_prep_error_t;

static char * mcast_prep_error_strings[] = {
#define _(sym,string) string,
  foreach_mcast_prep_error
#undef _
};

typedef enum {
  MCAST_PREP_NEXT_DROP,
  MCAST_PREP_N_NEXT,
} mcast_prep_next_t;

static uword
mcast_prep_node_fn (vlib_main_t * vm,
                    vlib_node_runtime_t * node,
                    vlib_frame_t * frame)
{
  u32 n_left_from, * from, * to_next;
  mcast_prep_next_t next_index;
  mcast_main_t * mcm = &mcast_main;
  vlib_node_t *n = vlib_get_node (vm, mcast_prep_node.index);
  u32 node_counter_base_index = n->error_heap_index;
  vlib_error_main_t * em = &vm->error_main;
  ip4_main_t * im = &ip4_main;
  ip_lookup_main_t * lm = &im->lookup_main;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

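      /*
       * Annotation: the "while (0 && ...)" guard below deliberately
       * disables this dual-buffer template; only the single-buffer loop
       * that follows is live. The template is kept so the node can be
       * dual-looped later without rewriting the scaffolding.
       */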
      while (0 && n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
            CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
          }

          /* speculatively enqueue b0 and b1 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
          next0 = 0;
          sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
          next1 = 0;

          /* $$$$ your message in this space. Process 2 x pkts */

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)))
            {
              if (b0->flags & VLIB_BUFFER_IS_TRACED)
                {
                  mcast_prep_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  t->sw_if_index = sw_if_index0;
                  t->next_index = next0;
                }
              if (b1->flags & VLIB_BUFFER_IS_TRACED)
                {
                  mcast_prep_trace_t *t =
                    vlib_add_trace (vm, node, b1, sizeof (*t));
                  t->sw_if_index = sw_if_index1;
                  t->next_index = next1;
                }
            }

          /* verify speculative enqueues, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0, adj_index0;
          mcast_group_t * g0;
          ip_adjacency_t * adj0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
          adj0 = ip_get_adjacency (lm, adj_index0);
          vnet_buffer(b0)->mcast.mcast_group_index = adj0->mcast_group_index;
          g0 = pool_elt_at_index (mcm->groups, adj0->mcast_group_index);

          /*
           * Handle the degenerate single-copy case
           * If we don't change the freelist, the packet will never
           * make it to the recycle node...
           */
          if (PREDICT_TRUE(vec_len (g0->members) > 1))
            {
              /* Save the original free list index */
              vnet_buffer(b0)->mcast.original_free_list_index =
                b0->free_list_index;

              /* Swap in the multicast recycle list */
              b0->free_list_index = mcm->mcast_recycle_list_index;

              /*
               * Make sure that intermediate "frees" don't screw up
               */
              b0->clone_count = vec_len (g0->members);

              /* Set up for the recycle node */
              vnet_buffer(b0)->mcast.mcast_current_index = 1;
            }
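          /*
           * Annotation: from here on, every post-TX "free" of this
           * buffer lands on the recycle free list instead of its home
           * list; clone_count keeps those intermediate frees from
           * reinitializing the buffer while copies remain outstanding.
           */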

          /* Transmit the pkt on the first interface */
          next0 = g0->members[0].prep_and_recycle_node_next_index;
          vnet_buffer(b0)->sw_if_index[VLIB_TX] =
            g0->members[0].tx_sw_if_index;

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
                            && (b0->flags & VLIB_BUFFER_IS_TRACED))) {
            mcast_prep_trace_t *t =
              vlib_add_trace (vm, node, b0, sizeof (*t));
            t->next_index = next0;
            t->sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_TX];
            t->group_index = vnet_buffer(b0)->mcast.mcast_group_index;
          }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  em->counters[node_counter_base_index + MCAST_PREP_ERROR_MCASTS] +=
    frame->n_vectors;

  return frame->n_vectors;
}

VLIB_REGISTER_NODE (mcast_prep_node) = {
  .function = mcast_prep_node_fn,
  .name = "mcast_prep",
  .vector_size = sizeof (u32),
  .format_trace = format_mcast_prep_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(mcast_prep_error_strings),
  .error_strings = mcast_prep_error_strings,

  .n_next_nodes = MCAST_PREP_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes = {
    [MCAST_PREP_NEXT_DROP] = "error-drop",
  },
};
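/*
 * Annotation: besides the static "error-drop" arc, each group member is
 * reached through members[i].prep_and_recycle_node_next_index. Those
 * next-index slots are presumably created at group-configuration time,
 * e.g. with vlib_node_add_next() from both "mcast_prep" and
 * "mcast-recycle" toward each member's output node; this file does not
 * show that setup.
 */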

typedef struct {
  u32 sw_if_index;
  u32 next_index;
  u32 current_member;
  u32 group_index;
} mcast_recycle_trace_t;

static u8 * format_mcast_recycle_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mcast_recycle_trace_t * t = va_arg (*args, mcast_recycle_trace_t *);

  s = format (s,
"MCAST_R: group %d, current member %d next (node) index %d, tx_sw_if_index %d",
              t->group_index, t->current_member, t->next_index,
              t->sw_if_index);
  return s;
}

#define foreach_mcast_recycle_error \
_(RECYCLES, "Multicast Recycles")

typedef enum {
#define _(sym,str) MCAST_RECYCLE_ERROR_##sym,
  foreach_mcast_recycle_error
#undef _
  MCAST_RECYCLE_N_ERROR,
} mcast_recycle_error_t;

static char * mcast_recycle_error_strings[] = {
#define _(sym,string) string,
  foreach_mcast_recycle_error
#undef _
};

typedef enum {
  MCAST_RECYCLE_NEXT_DROP,
  MCAST_RECYCLE_N_NEXT,
} mcast_recycle_next_t;

static uword
mcast_recycle_node_fn (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
{
  u32 n_left_from, * from, * to_next;
  mcast_recycle_next_t next_index;
  mcast_main_t * mcm = &mcast_main;
  vlib_node_t *n = vlib_get_node (vm, mcast_recycle_node.index);
  u32 node_counter_base_index = n->error_heap_index;
  vlib_error_main_t * em = &vm->error_main;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (0 && n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
            CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
          }

          /* speculatively enqueue b0 and b1 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
          next0 = 0;
          sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
          next1 = 0;

          /* $$$$ your message in this space. Process 2 x pkts */

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)))
            {
              if (b0->flags & VLIB_BUFFER_IS_TRACED)
                {
                  mcast_recycle_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  t->sw_if_index = sw_if_index0;
                  t->next_index = next0;
                }
              if (b1->flags & VLIB_BUFFER_IS_TRACED)
                {
                  mcast_recycle_trace_t *t =
                    vlib_add_trace (vm, node, b1, sizeof (*t));
                  t->sw_if_index = sw_if_index1;
                  t->next_index = next1;
                }
            }

          /* verify speculative enqueues, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          u32 current_member0;
          mcast_group_t * g0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          g0 = pool_elt_at_index (mcm->groups,
                                  vnet_buffer(b0)->mcast.mcast_group_index);

          /* No more replicas? */
          if (b0->clone_count == 1)
            {
              /* Restore the original free list index */
              b0->free_list_index =
                vnet_buffer(b0)->mcast.original_free_list_index;
            }
          current_member0 = vnet_buffer(b0)->mcast.mcast_current_index;

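          /*
           * Annotation: mcast_prep transmits member 0 and sets the
           * cursor to 1, so each pass through this node sends exactly
           * one more copy and advances the cursor.
           */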
          next0 =
            g0->members[current_member0].prep_and_recycle_node_next_index;
          vnet_buffer(b0)->sw_if_index[VLIB_TX] =
            g0->members[current_member0].tx_sw_if_index;

          vnet_buffer(b0)->mcast.mcast_current_index =
            current_member0 + 1;

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
                            && (b0->flags & VLIB_BUFFER_IS_TRACED))) {
            mcast_recycle_trace_t *t =
              vlib_add_trace (vm, node, b0, sizeof (*t));
            t->next_index = next0;
            t->sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_TX];
            t->group_index = vnet_buffer(b0)->mcast.mcast_group_index;
            t->current_member = current_member0;
          }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  em->counters[node_counter_base_index + MCAST_RECYCLE_ERROR_RECYCLES] +=
    frame->n_vectors;

  return frame->n_vectors;
}

VLIB_REGISTER_NODE (mcast_recycle_node) = {
  .function = mcast_recycle_node_fn,
  .name = "mcast-recycle",
  .vector_size = sizeof (u32),
  .format_trace = format_mcast_recycle_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(mcast_recycle_error_strings),
  .error_strings = mcast_recycle_error_strings,

  .n_next_nodes = MCAST_RECYCLE_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes = {
    [MCAST_RECYCLE_NEXT_DROP] = "error-drop",
  },
};

/*
 * fish pkts back from the recycle queue/freelist
 * un-flatten the context chains
 */
static void mcast_recycle_callback (vlib_main_t *vm,
                                    vlib_buffer_free_list_t * fl)
{
  vlib_frame_t * f = 0;
  u32 n_left_from;
  u32 n_left_to_next = 0;
  u32 n_this_frame = 0;
  u32 * from;
  u32 * to_next;
  u32 bi0, pi0;
  vlib_buffer_t *b0;
  vlib_buffer_t *bnext0;
  int i;

  /* aligned, unaligned buffers */
  for (i = 0; i < 2; i++)
    {
      if (i == 0)
        {
          from = fl->aligned_buffers;
          n_left_from = vec_len (from);
        }
      else
        {
          from = fl->unaligned_buffers;
          n_left_from = vec_len (from);
        }

      while (n_left_from > 0)
        {
          if (PREDICT_FALSE(n_left_to_next == 0))
            {
              if (f)
                {
                  f->n_vectors = n_this_frame;
                  vlib_put_frame_to_node (vm, mcast_recycle_node.index, f);
                }

              f = vlib_get_frame_to_node (vm, mcast_recycle_node.index);
              to_next = vlib_frame_vector_args (f);
              n_left_to_next = VLIB_FRAME_SIZE;
              n_this_frame = 0;
            }

          bi0 = from[0];
          if (PREDICT_TRUE(n_left_from > 1))
            {
              pi0 = from[1];
              vlib_prefetch_buffer_with_index(vm,pi0,LOAD);
            }

          bnext0 = b0 = vlib_get_buffer (vm, bi0);

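          /*
           * Annotation: buffer chains arrive "flattened", i.e. every
           * buffer of a chain appears individually on the free list.
           * Walk the chain and consume those entries so that only the
           * head buffer index is handed to the mcast-recycle node.
           */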
          while (bnext0->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              from += 1;
              n_left_from -= 1;
              bnext0 = vlib_get_buffer (vm, bnext0->next_buffer);
            }
          to_next[0] = bi0;

          if (CLIB_DEBUG > 0)
            vlib_buffer_set_known_state (vm, bi0,
                                         VLIB_BUFFER_KNOWN_ALLOCATED);

          from++;
          to_next++;
          n_this_frame++;
          n_left_to_next--;
          n_left_from--;
        }
    }

  vec_reset_length (fl->aligned_buffers);
  vec_reset_length (fl->unaligned_buffers);

  if (f)
    {
      ASSERT(n_this_frame);
      f->n_vectors = n_this_frame;
      vlib_put_frame_to_node (vm, mcast_recycle_node.index, f);
    }
}

clib_error_t *mcast_init (vlib_main_t *vm)
{
  mcast_main_t * mcm = &mcast_main;
  vlib_buffer_main_t * bm = vm->buffer_main;
  vlib_buffer_free_list_t * fl;

  mcm->vlib_main = vm;
  mcm->vnet_main = vnet_get_main();
  mcm->mcast_recycle_list_index =
    vlib_buffer_create_free_list (vm, 1024 /* fictional */, "mcast-recycle");

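  /*
   * Annotation: the 1024-byte size above is notional ("fictional" in
   * the original comment); this free list never allocates fresh
   * buffers. It exists only so that freeing a multicast buffer invokes
   * the recycle callback installed below.
   */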
  fl = pool_elt_at_index (bm->buffer_free_list_pool,
                          mcm->mcast_recycle_list_index);

  fl->buffers_added_to_freelist_function = mcast_recycle_callback;

  return 0;
}

VLIB_INIT_FUNCTION (mcast_init);
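
/*
 * Usage sketch (annotation, not part of the original file): one way a
 * replication group could be built on top of the structures used above.
 * The helper name, the output_node_index parameter, and the use of
 * vlib_node_add_next() are illustrative assumptions; only mcast_main,
 * mcast_group_t, and mcast_group_member_t come from mcast.h.
 *
 *   static void
 *   example_add_group_member (vlib_main_t * vm, mcast_group_t * g,
 *                             u32 tx_sw_if_index, u32 output_node_index)
 *   {
 *     mcast_group_member_t * m;
 *
 *     vec_add2 (g->members, m, 1);
 *     m->tx_sw_if_index = tx_sw_if_index;
 *     // Both nodes must reach the member's output node at the same
 *     // next-index slot, since prep and recycle share this field.
 *     m->prep_and_recycle_node_next_index =
 *       vlib_node_add_next (vm, mcast_prep_node.index, output_node_index);
 *     ASSERT (m->prep_and_recycle_node_next_index ==
 *             vlib_node_add_next (vm, mcast_recycle_node.index,
 *                                 output_node_index));
 *   }
 */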