FD.io VPP  v19.01.3-6-g70449b9b9
Vector Packet Processing
node.c
1 /*
2  * node.c - ipfix probe graph node
3  *
4  * Copyright (c) 2017 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 #include <vlib/vlib.h>
18 #include <vnet/vnet.h>
19 #include <vnet/pg/pg.h>
20 #include <vppinfra/crc32.h>
21 #include <vppinfra/error.h>
22 #include <flowprobe/flowprobe.h>
23 #include <vnet/ip/ip6_packet.h>
24 #include <vlibmemory/api.h>
25 
26 static void flowprobe_export_entry (vlib_main_t * vm, flowprobe_entry_t * e);
27 
28 /**
29  * @file flow record generator graph node
30  */
31 
32 typedef struct
33 {
34  /** interface handle */
35  u32 rx_sw_if_index;
36  u32 tx_sw_if_index;
37  /** packet timestamp */
38  u64 timestamp;
39  /** size of the buffer */
40  u16 buffer_size;
41 
42  /** L2 information */
43  u8 src_mac[6];
44  u8 dst_mac[6];
45  /** Ethertype */
46  u16 ethertype;
47 
48  /** L3 information */
49  ip46_address_t src_address;
50  ip46_address_t dst_address;
51  u8 protocol;
52  u8 tos;
53 
54  /** L4 information */
55  u16 src_port;
56  u16 dst_port;
57 
58  flowprobe_variant_t which;
59 } flowprobe_trace_t;
60 
61 static char *flowprobe_variant_strings[] = {
62  [FLOW_VARIANT_IP4] = "IP4",
63  [FLOW_VARIANT_IP6] = "IP6",
64  [FLOW_VARIANT_L2] = "L2",
65  [FLOW_VARIANT_L2_IP4] = "L2-IP4",
66  [FLOW_VARIANT_L2_IP6] = "L2-IP6",
67 };
68 
69 /* packet trace format function */
70 static u8 *
71 format_flowprobe_trace (u8 * s, va_list * args)
72 {
73  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
74  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
75  flowprobe_trace_t *t = va_arg (*args, flowprobe_trace_t *);
76  u32 indent = format_get_indent (s);
77 
78  s = format (s,
79  "FLOWPROBE[%s]: rx_sw_if_index %d, tx_sw_if_index %d, "
80  "timestamp %lld, size %d", flowprobe_variant_strings[t->which],
81  t->rx_sw_if_index, t->tx_sw_if_index,
82  t->timestamp, t->buffer_size);
83 
84  if (t->which == FLOW_VARIANT_L2)
85  s = format (s, "\n%U -> %U", format_white_space, indent,
86  format_ethernet_address, &t->src_mac,
87  format_ethernet_address, &t->dst_mac);
88 
89  if (t->protocol > 0
90  && (t->which == FLOW_VARIANT_L2_IP4 || t->which == FLOW_VARIANT_IP4
91  || t->which == FLOW_VARIANT_L2_IP6 || t->which == FLOW_VARIANT_IP6))
92  s =
93  format (s, "\n%U%U: %U -> %U", format_white_space, indent,
94  format_ip_protocol, t->protocol,
95  format_ip46_address, &t->src_address, IP46_TYPE_ANY,
96  format_ip46_address, &t->dst_address, IP46_TYPE_ANY);
97  return s;
98 }
99 
100 vlib_node_registration_t flowprobe_ip4_node;
101 vlib_node_registration_t flowprobe_ip6_node;
102 vlib_node_registration_t flowprobe_l2_node;
103 
104 /* Node error counters */
105 #define foreach_flowprobe_error \
106 _(COLLISION, "Hash table collisions") \
107 _(BUFFER, "Buffer allocation error") \
108 _(EXPORTED_PACKETS, "Exported packets") \
109 _(INPATH, "Exported packets in path")
110 
111 typedef enum
112 {
113 #define _(sym,str) FLOWPROBE_ERROR_##sym,
114  foreach_flowprobe_error
115 #undef _
116  FLOWPROBE_N_ERROR,
117 } flowprobe_error_t;
118 
119 static char *flowprobe_error_strings[] = {
120 #define _(sym,string) string,
121  foreach_flowprobe_error
122 #undef _
123 };
124 
125 typedef enum
126 {
127  FLOWPROBE_NEXT_DROP,
128  FLOWPROBE_NEXT_IP4_LOOKUP,
129  FLOWPROBE_N_NEXT,
130 } flowprobe_next_t;
131 
132 #define FLOWPROBE_NEXT_NODES { \
133  [FLOWPROBE_NEXT_DROP] = "error-drop", \
134  [FLOWPROBE_NEXT_IP4_LOOKUP] = "ip4-lookup", \
135 }
136 
137 static inline flowprobe_variant_t
138 flowprobe_get_variant (flowprobe_variant_t which,
139  flowprobe_record_t flags, u16 ethertype)
140 {
141  if (which == FLOW_VARIANT_L2
142  && (flags & FLOW_RECORD_L3 || flags & FLOW_RECORD_L4))
143  return ethertype == ETHERNET_TYPE_IP6 ? FLOW_VARIANT_L2_IP6 : ethertype ==
144  ETHERNET_TYPE_IP4 ? FLOW_VARIANT_L2_IP4 : FLOW_VARIANT_L2;
145  return which;
146 }
147 
148 /*
149  * NTP (RFC 868): 2 208 988 800 is the NTP timestamp of 00:00 1 Jan 1970 GMT
150  */
151 #define NTP_TIMESTAMP 2208988800LU
152 
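/* Worked example of the offset above: a flow whose start is recorded at
 * Unix time 1500000000 is exported with a seconds field of
 * 1500000000 + 2208988800 = 3708988800, i.e. seconds since the NTP era
 * (1 Jan 1900); the nanoseconds word is carried through unchanged. */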
153 static inline u32
154 flowprobe_common_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
155 {
156  u16 start = offset;
157 
158  /* Ingress interface */
159  u32 rx_if = clib_host_to_net_u32 (e->key.rx_sw_if_index);
160  clib_memcpy_fast (to_b->data + offset, &rx_if, sizeof (rx_if));
161  offset += sizeof (rx_if);
162 
163  /* Egress interface */
164  u32 tx_if = clib_host_to_net_u32 (e->key.tx_sw_if_index);
165  clib_memcpy_fast (to_b->data + offset, &tx_if, sizeof (tx_if));
166  offset += sizeof (tx_if);
167 
168  /* packet delta count */
169  u64 packetdelta = clib_host_to_net_u64 (e->packetcount);
170  clib_memcpy_fast (to_b->data + offset, &packetdelta, sizeof (u64));
171  offset += sizeof (u64);
172 
173  /* flowStartNanoseconds */
174  u32 t = clib_host_to_net_u32 (e->flow_start.sec + NTP_TIMESTAMP);
175  clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
176  offset += sizeof (u32);
177  t = clib_host_to_net_u32 (e->flow_start.nsec);
178  clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
179  offset += sizeof (u32);
180 
181  /* flowEndNanoseconds */
182  t = clib_host_to_net_u32 (e->flow_end.sec + NTP_TIMESTAMP);
183  clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
184  offset += sizeof (u32);
185  t = clib_host_to_net_u32 (e->flow_end.nsec);
186  clib_memcpy_fast (to_b->data + offset, &t, sizeof (u32));
187  offset += sizeof (u32);
188 
189  return offset - start;
190 }
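/* For reference, everything appended above is fixed-size: two 4-byte
 * interface indices, an 8-byte packetDeltaCount and two 8-byte NTP
 * timestamps, so flowprobe_common_add() always contributes
 * 4 + 4 + 8 + 8 + 8 = 32 bytes to the record. */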
191 
192 static inline u32
193 flowprobe_l2_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
194 {
195  u16 start = offset;
196 
197  /* src mac address */
198  clib_memcpy_fast (to_b->data + offset, &e->key.src_mac, 6);
199  offset += 6;
200 
201  /* dst mac address */
202  clib_memcpy_fast (to_b->data + offset, &e->key.dst_mac, 6);
203  offset += 6;
204 
205  /* ethertype */
206  clib_memcpy_fast (to_b->data + offset, &e->key.ethertype, 2);
207  offset += 2;
208 
209  return offset - start;
210 }
211 
212 static inline u32
213 flowprobe_l3_ip6_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
214 {
215  u16 start = offset;
216 
217  /* ip6 src address */
218  clib_memcpy_fast (to_b->data + offset, &e->key.src_address,
219  sizeof (ip6_address_t));
220  offset += sizeof (ip6_address_t);
221 
222  /* ip6 dst address */
223  clib_memcpy_fast (to_b->data + offset, &e->key.dst_address,
224  sizeof (ip6_address_t));
225  offset += sizeof (ip6_address_t);
226 
227  /* Protocol */
228  to_b->data[offset++] = e->key.protocol;
229 
230  /* octetDeltaCount */
231  u64 octetdelta = clib_host_to_net_u64 (e->octetcount);
232  clib_memcpy_fast (to_b->data + offset, &octetdelta, sizeof (u64));
233  offset += sizeof (u64);
234 
235  return offset - start;
236 }
237 
238 static inline u32
239 flowprobe_l3_ip4_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
240 {
241  u16 start = offset;
242 
243  /* ip4 src address */
244  clib_memcpy_fast (to_b->data + offset, &e->key.src_address.ip4,
245  sizeof (ip4_address_t));
246  offset += sizeof (ip4_address_t);
247 
248  /* ip4 dst address */
249  clib_memcpy_fast (to_b->data + offset, &e->key.dst_address.ip4,
250  sizeof (ip4_address_t));
251  offset += sizeof (ip4_address_t);
252 
253  /* Protocol */
254  to_b->data[offset++] = e->key.protocol;
255 
256  /* octetDeltaCount */
257  u64 octetdelta = clib_host_to_net_u64 (e->octetcount);
258  clib_memcpy_fast (to_b->data + offset, &octetdelta, sizeof (u64));
259  offset += sizeof (u64);
260 
261  return offset - start;
262 }
263 
264 static inline u32
265 flowprobe_l4_add (vlib_buffer_t * to_b, flowprobe_entry_t * e, u16 offset)
266 {
267  u16 start = offset;
268 
269  /* src port */
270  clib_memcpy_fast (to_b->data + offset, &e->key.src_port, 2);
271  offset += 2;
272 
273  /* dst port */
274  clib_memcpy_fast (to_b->data + offset, &e->key.dst_port, 2);
275  offset += 2;
276 
277  /* tcp control bits */
278  u16 control_bits = htons (e->prot.tcp.flags);
279  clib_memcpy_fast (to_b->data + offset, &control_bits, 2);
280  offset += 2;
281 
282  return offset - start;
283 }
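/* Note on the last field above: the accumulated 8-bit TCP flags are
 * exported as a 16-bit, network-order tcpControlBits element, so this
 * function contributes 2 + 2 + 2 = 6 bytes per record. */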
284 
285 static inline u32
286 flowprobe_hash (flowprobe_key_t * k)
287 {
288  flowprobe_main_t *fm = &flowprobe_main;
289  u32 h = 0;
290 
291 #ifdef clib_crc32c_uses_intrinsics
292  h = clib_crc32c ((u8 *) k, sizeof (*k));
293 #else
294  int i;
295  u64 tmp = 0;
296  for (i = 0; i < sizeof (*k) / 8; i++)
297  tmp ^= ((u64 *) k)[i];
298 
299  h = clib_xxhash (tmp);
300 #endif
301 
302  return h >> (32 - fm->ht_log2len);
303 }
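/* Illustration of the reduction above: the flow table has 2^ht_log2len
 * buckets, so keeping the top ht_log2len bits of the 32-bit hash yields
 * a valid bucket index; with ht_log2len = 10, for example, h >> 22
 * produces an index in [0, 1023]. */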
304 
305 flowprobe_entry_t *
306 flowprobe_lookup (u32 my_cpu_number, flowprobe_key_t * k, u32 * poolindex,
307  bool * collision)
308 {
309  flowprobe_main_t *fm = &flowprobe_main;
310  flowprobe_entry_t *e;
311  u32 h;
312 
313  h = (fm->active_timer) ? flowprobe_hash (k) : 0;
314 
315  /* Lookup in the flow state pool */
316  *poolindex = fm->hash_per_worker[my_cpu_number][h];
317  if (*poolindex != ~0)
318  {
319  e = pool_elt_at_index (fm->pool_per_worker[my_cpu_number], *poolindex);
320  if (e)
321  {
322  /* Verify key or report collision */
323  if (memcmp (k, &e->key, sizeof (flowprobe_key_t)))
324  *collision = true;
325  return e;
326  }
327  }
328 
329  return 0;
330 }
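/* Each bucket holds a single pool index, so two concurrent flows that
 * hash to the same bucket cannot coexist: the lookup above returns the
 * resident entry and sets *collision when its key differs, and the
 * caller flushes and reuses that entry instead of probing further. */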
331 
332 flowprobe_entry_t *
333 flowprobe_create (u32 my_cpu_number, flowprobe_key_t * k, u32 * poolindex)
334 {
335  flowprobe_main_t *fm = &flowprobe_main;
336  u32 h;
337 
338  flowprobe_entry_t *e;
339 
340  /* Get my index */
341  h = (fm->active_timer) ? flowprobe_hash (k) : 0;
342 
343  pool_get (fm->pool_per_worker[my_cpu_number], e);
344  *poolindex = e - fm->pool_per_worker[my_cpu_number];
345  fm->hash_per_worker[my_cpu_number][h] = *poolindex;
346 
347  e->key = *k;
348 
349  if (fm->passive_timer > 0)
350  {
351  e->passive_timer_handle = tw_timer_start_2t_1w_2048sl
352  (fm->timers_per_worker[my_cpu_number], *poolindex, 0,
353  fm->passive_timer);
354  }
355  return e;
356 }
357 
358 static inline void
359 add_to_flow_record_state (vlib_main_t * vm, vlib_node_runtime_t * node,
360  flowprobe_main_t * fm, vlib_buffer_t * b,
361  timestamp_nsec_t timestamp, u16 length,
362  flowprobe_variant_t which, flowprobe_trace_t * t)
363 {
364  if (fm->disabled)
365  return;
366 
367  u32 my_cpu_number = vm->thread_index;
368  u16 octets = 0;
369 
370  flowprobe_record_t flags = fm->context[which].flags;
371  bool collect_ip4 = false, collect_ip6 = false;
372  ASSERT (b);
373  ethernet_header_t *eth = vlib_buffer_get_current (b);
374  u16 ethertype = clib_net_to_host_u16 (eth->type);
375  /* *INDENT-OFF* */
376  flowprobe_key_t k = {};
377  /* *INDENT-ON* */
378  ip4_header_t *ip4 = 0;
379  ip6_header_t *ip6 = 0;
380  udp_header_t *udp = 0;
381  tcp_header_t *tcp = 0;
382  u8 tcp_flags = 0;
383 
384  if (flags & FLOW_RECORD_L3 || flags & FLOW_RECORD_L4)
385  {
386  collect_ip4 = which == FLOW_VARIANT_L2_IP4 || which == FLOW_VARIANT_IP4;
387  collect_ip6 = which == FLOW_VARIANT_L2_IP6 || which == FLOW_VARIANT_IP6;
388  }
389 
390  k.rx_sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
391  k.tx_sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
392 
393  k.which = which;
394 
395  if (flags & FLOW_RECORD_L2)
396  {
397  clib_memcpy_fast (k.src_mac, eth->src_address, 6);
398  clib_memcpy_fast (k.dst_mac, eth->dst_address, 6);
399  k.ethertype = ethertype;
400  }
401  if (collect_ip6 && ethertype == ETHERNET_TYPE_IP6)
402  {
403  ip6 = (ip6_header_t *) (eth + 1);
404  if (flags & FLOW_RECORD_L3)
405  {
406  k.src_address.as_u64[0] = ip6->src_address.as_u64[0];
407  k.src_address.as_u64[1] = ip6->src_address.as_u64[1];
408  k.dst_address.as_u64[0] = ip6->dst_address.as_u64[0];
409  k.dst_address.as_u64[1] = ip6->dst_address.as_u64[1];
410  }
411  k.protocol = ip6->protocol;
412  if (k.protocol == IP_PROTOCOL_UDP)
413  udp = (udp_header_t *) (ip6 + 1);
414  else if (k.protocol == IP_PROTOCOL_TCP)
415  tcp = (tcp_header_t *) (ip6 + 1);
416 
417  octets = clib_net_to_host_u16 (ip6->payload_length)
418  + sizeof (ip6_header_t);
419  }
420  if (collect_ip4 && ethertype == ETHERNET_TYPE_IP4)
421  {
422  ip4 = (ip4_header_t *) (eth + 1);
423  if (flags & FLOW_RECORD_L3)
424  {
425  k.src_address.ip4.as_u32 = ip4->src_address.as_u32;
426  k.dst_address.ip4.as_u32 = ip4->dst_address.as_u32;
427  }
428  k.protocol = ip4->protocol;
429  if ((flags & FLOW_RECORD_L4) && k.protocol == IP_PROTOCOL_UDP)
430  udp = (udp_header_t *) (ip4 + 1);
431  else if ((flags & FLOW_RECORD_L4) && k.protocol == IP_PROTOCOL_TCP)
432  tcp = (tcp_header_t *) (ip4 + 1);
433 
434  octets = clib_net_to_host_u16 (ip4->length);
435  }
436 
437  if (udp)
438  {
439  k.src_port = udp->src_port;
440  k.dst_port = udp->dst_port;
441  }
442  else if (tcp)
443  {
444  k.src_port = tcp->src_port;
445  k.dst_port = tcp->dst_port;
446  tcp_flags = tcp->flags;
447  }
448 
449  if (t)
450  {
451  t->rx_sw_if_index = k.rx_sw_if_index;
452  t->tx_sw_if_index = k.tx_sw_if_index;
453  clib_memcpy_fast (t->src_mac, k.src_mac, 6);
454  clib_memcpy_fast (t->dst_mac, k.dst_mac, 6);
455  t->ethertype = k.ethertype;
456  t->src_address.ip4.as_u32 = k.src_address.ip4.as_u32;
457  t->dst_address.ip4.as_u32 = k.dst_address.ip4.as_u32;
458  t->protocol = k.protocol;
459  t->src_port = k.src_port;
460  t->dst_port = k.dst_port;
461  t->which = k.which;
462  }
463 
464  flowprobe_entry_t *e = 0;
465  f64 now = vlib_time_now (vm);
466  if (fm->active_timer > 0)
467  {
468  u32 poolindex = ~0;
469  bool collision = false;
470 
471  e = flowprobe_lookup (my_cpu_number, &k, &poolindex, &collision);
472  if (collision)
473  {
474  /* Flush data and clean up entry for reuse. */
475  if (e->packetcount)
476  flowprobe_export_entry (vm, e);
477  e->key = k;
478  e->flow_start = timestamp;
479  vlib_node_increment_counter (vm, node->node_index,
480  FLOWPROBE_ERROR_COLLISION, 1);
481  }
482  if (!e) /* Create new entry */
483  {
484  e = flowprobe_create (my_cpu_number, &k, &poolindex);
485  e->last_exported = now;
486  e->flow_start = timestamp;
487  }
488  }
489  else
490  {
491  e = &fm->stateless_entry[my_cpu_number];
492  e->key = k;
493  }
494 
495  if (e)
496  {
497  /* Updating entry */
498  e->packetcount++;
499  e->octetcount += octets;
500  e->last_updated = now;
501  e->flow_end = timestamp;
502  e->prot.tcp.flags |= tcp_flags;
503  if (fm->active_timer == 0
504  || (now > e->last_exported + fm->active_timer))
505  flowprobe_export_entry (vm, e);
506  }
507 }
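/* Summary of the two modes above: with an active timer configured, each
 * packet updates (or creates) a per-worker flow entry that is exported
 * once the active timer expires; with active_timer == 0 the per-worker
 * stateless_entry is overwritten and exported for every packet. */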
508 
509 static u16
510 flowprobe_get_headersize (void)
511 {
512  return sizeof (ip4_header_t) + sizeof (udp_header_t) +
513  sizeof (ipfix_message_header_t) + sizeof (ipfix_set_header_t);
514 }
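/* With the usual fixed-size headers this reserves 20 (IPv4) + 8 (UDP) +
 * 16 (IPFIX message header) + 4 (IPFIX set header) = 48 bytes at the
 * start of every export buffer. */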
515 
516 static void
517 flowprobe_export_send (vlib_main_t * vm, vlib_buffer_t * b0,
518  flowprobe_variant_t which)
519 {
520  flowprobe_main_t *fm = &flowprobe_main;
521  flow_report_main_t *frm = &flow_report_main;
522  vlib_frame_t *f;
523  ip4_ipfix_template_packet_t *tp;
524  ipfix_message_header_t *h = 0;
525  ipfix_set_header_t *s = 0;
526  ip4_header_t *ip;
527  udp_header_t *udp;
528  flowprobe_record_t flags = fm->context[which].flags;
529  u32 my_cpu_number = vm->thread_index;
530 
531  /* Fill in header */
532  flow_report_stream_t *stream;
533 
534  /* Nothing to send */
535  if (fm->context[which].next_record_offset_per_worker[my_cpu_number] <=
536  flowprobe_get_headersize ())
537  return;
538 
539  u32 i, index = vec_len (frm->streams);
540  for (i = 0; i < index; i++)
541  if (frm->streams[i].domain_id == 1)
542  {
543  index = i;
544  break;
545  }
546  if (i == vec_len (frm->streams))
547  {
548  vec_validate (frm->streams, index);
549  frm->streams[index].domain_id = 1;
550  }
551  stream = &frm->streams[index];
552 
553  tp = vlib_buffer_get_current (b0);
554  ip = (ip4_header_t *) & tp->ip4;
555  udp = (udp_header_t *) (ip + 1);
556  h = (ipfix_message_header_t *) (udp + 1);
557  s = (ipfix_set_header_t *) (h + 1);
558 
559  ip->ip_version_and_header_length = 0x45;
560  ip->ttl = 254;
561  ip->protocol = IP_PROTOCOL_UDP;
562  ip->flags_and_fragment_offset = 0;
563  ip->src_address.as_u32 = frm->src_address.as_u32;
564  ip->dst_address.as_u32 = frm->ipfix_collector.as_u32;
565  udp->src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
566  udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
567  udp->checksum = 0;
568 
569  /* FIXUP: message header export_time */
570  h->export_time = (u32)
571  (((f64) frm->unix_time_0) +
572  (vlib_time_now (frm->vlib_main) - frm->vlib_time_0));
573  h->export_time = clib_host_to_net_u32 (h->export_time);
574  h->domain_id = clib_host_to_net_u32 (stream->domain_id);
575 
576  /* FIXUP: message header sequence_number */
577  h->sequence_number = stream->sequence_number++;
578  h->sequence_number = clib_host_to_net_u32 (h->sequence_number);
579 
580  s->set_id_length = ipfix_set_id_length (fm->template_reports[flags],
581  b0->current_length -
582  (sizeof (*ip) + sizeof (*udp) +
583  sizeof (*h)));
584  h->version_length = version_length (b0->current_length -
585  (sizeof (*ip) + sizeof (*udp)));
586 
587  ip->length = clib_host_to_net_u16 (b0->current_length);
588 
589  ip->checksum = ip4_header_checksum (ip);
590  udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
591 
592  if (frm->udp_checksum)
593  {
594  /* RFC 7011 section 10.3.2. */
595  udp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip);
596  if (udp->checksum == 0)
597  udp->checksum = 0xffff;
598  }
599 
600  ASSERT (ip->checksum == ip4_header_checksum (ip));
601 
602  /* Find or allocate a frame */
603  f = fm->context[which].frames_per_worker[my_cpu_number];
604  if (PREDICT_FALSE (f == 0))
605  {
606  u32 *to_next;
607  f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
608  fm->context[which].frames_per_worker[my_cpu_number] = f;
609  u32 bi0 = vlib_get_buffer_index (vm, b0);
610 
611  /* Enqueue the buffer */
612  to_next = vlib_frame_vector_args (f);
613  to_next[0] = bi0;
614  f->n_vectors = 1;
615  }
616 
617  vlib_put_frame_to_node (vm, ip4_lookup_node.index, f);
618  vlib_node_increment_counter (vm, flowprobe_l2_node.index,
619  FLOWPROBE_ERROR_EXPORTED_PACKETS, 1);
620 
621  fm->context[which].frames_per_worker[my_cpu_number] = 0;
622  fm->context[which].buffers_per_worker[my_cpu_number] = 0;
623  fm->context[which].next_record_offset_per_worker[my_cpu_number] =
624  flowprobe_get_headersize ();
625 }
626 
627 static vlib_buffer_t *
628 flowprobe_get_buffer (vlib_main_t * vm, flowprobe_variant_t which)
629 {
630  flowprobe_main_t *fm = &flowprobe_main;
631  flow_report_main_t *frm = &flow_report_main;
632  vlib_buffer_t *b0;
633  u32 bi0;
634  vlib_buffer_free_list_t *fl;
635  u32 my_cpu_number = vm->thread_index;
636 
637  /* Find or allocate a buffer */
638  b0 = fm->context[which].buffers_per_worker[my_cpu_number];
639 
640  /* Need to allocate a buffer? */
641  if (PREDICT_FALSE (b0 == 0))
642  {
643  if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
644  {
645  vlib_node_increment_counter (vm, flowprobe_l2_node.index,
646  FLOWPROBE_ERROR_BUFFER, 1);
647  return 0;
648  }
649 
650  /* Initialize the buffer */
651  b0 = fm->context[which].buffers_per_worker[my_cpu_number] =
652  vlib_get_buffer (vm, bi0);
653  fl =
654  vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
655  vlib_buffer_init_for_free_list (b0, fl);
656  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
657 
658  b0->current_data = 0;
659  b0->current_length = flowprobe_get_headersize ();
660  b0->flags |=
661  (VLIB_BUFFER_TOTAL_LENGTH_VALID | VNET_BUFFER_F_FLOW_REPORT);
662  vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
663  vnet_buffer (b0)->sw_if_index[VLIB_TX] = frm->fib_index;
664  fm->context[which].next_record_offset_per_worker[my_cpu_number] =
665  b0->current_length;
666  }
667 
668  return b0;
669 }
670 
671 static void
672 flowprobe_export_entry (vlib_main_t * vm, flowprobe_entry_t * e)
673 {
674  u32 my_cpu_number = vm->thread_index;
675  flowprobe_main_t *fm = &flowprobe_main;
676  flow_report_main_t *frm = &flow_report_main;
677  vlib_buffer_t *b0;
678  bool collect_ip4 = false, collect_ip6 = false;
679  flowprobe_variant_t which = e->key.which;
680  flowprobe_record_t flags = fm->context[which].flags;
681  u16 offset =
682  fm->context[which].next_record_offset_per_worker[my_cpu_number];
683 
684  if (offset < flowprobe_get_headersize ())
685  offset = flowprobe_get_headersize ();
686 
687  b0 = flowprobe_get_buffer (vm, which);
688  /* No available buffer, what to do... */
689  if (b0 == 0)
690  return;
691 
692  if (flags & FLOW_RECORD_L3)
693  {
694  collect_ip4 = which == FLOW_VARIANT_L2_IP4 || which == FLOW_VARIANT_IP4;
695  collect_ip6 = which == FLOW_VARIANT_L2_IP6 || which == FLOW_VARIANT_IP6;
696  }
697 
698  offset += flowprobe_common_add (b0, e, offset);
699 
700  if (flags & FLOW_RECORD_L2)
701  offset += flowprobe_l2_add (b0, e, offset);
702  if (collect_ip6)
703  offset += flowprobe_l3_ip6_add (b0, e, offset);
704  if (collect_ip4)
705  offset += flowprobe_l3_ip4_add (b0, e, offset);
706  if (flags & FLOW_RECORD_L4)
707  offset += flowprobe_l4_add (b0, e, offset);
708 
709  /* Reset per flow-export counters */
710  e->packetcount = 0;
711  e->octetcount = 0;
712  e->last_exported = vlib_time_now (vm);
713 
714  b0->current_length = offset;
715 
716  fm->context[which].next_record_offset_per_worker[my_cpu_number] = offset;
717  /* Time to flush the buffer? */
718  if (offset + fm->template_size[flags] > frm->path_mtu)
719  flowprobe_export_send (vm, b0, which);
720 }
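/* The flush check above keeps each per-worker IPFIX datagram under the
 * exporter path MTU: once appending another record of this template
 * could exceed frm->path_mtu, the buffer is handed to
 * flowprobe_export_send().  With a 1450-byte path MTU, for example, the
 * 48-byte header block leaves roughly 1400 bytes for flow records. */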
721 
722 uword
723 flowprobe_node_fn (vlib_main_t * vm,
724  vlib_node_runtime_t * node, vlib_frame_t * frame,
725  flowprobe_variant_t which)
726 {
727  u32 n_left_from, *from, *to_next;
728  flowprobe_next_t next_index;
729  flowprobe_main_t *fm = &flowprobe_main;
730  timestamp_nsec_t timestamp;
731 
732  unix_time_now_nsec_fraction (&timestamp.sec, &timestamp.nsec);
733 
734  from = vlib_frame_vector_args (frame);
735  n_left_from = frame->n_vectors;
736  next_index = node->cached_next_index;
737 
738  while (n_left_from > 0)
739  {
740  u32 n_left_to_next;
741 
742  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
743 
744  while (n_left_from >= 4 && n_left_to_next >= 2)
745  {
746  u32 next0 = FLOWPROBE_NEXT_DROP;
747  u32 next1 = FLOWPROBE_NEXT_DROP;
748  u16 len0, len1;
749  u32 bi0, bi1;
750  vlib_buffer_t *b0, *b1;
751 
752  /* Prefetch next iteration. */
753  {
754  vlib_buffer_t *p2, *p3;
755 
756  p2 = vlib_get_buffer (vm, from[2]);
757  p3 = vlib_get_buffer (vm, from[3]);
758 
759  vlib_prefetch_buffer_header (p2, LOAD);
760  vlib_prefetch_buffer_header (p3, LOAD);
761 
762  CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
763  CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
764  }
765 
766  /* speculatively enqueue b0 and b1 to the current next frame */
767  to_next[0] = bi0 = from[0];
768  to_next[1] = bi1 = from[1];
769  from += 2;
770  to_next += 2;
771  n_left_from -= 2;
772  n_left_to_next -= 2;
773 
774  b0 = vlib_get_buffer (vm, bi0);
775  b1 = vlib_get_buffer (vm, bi1);
776 
777  vnet_feature_next (&next0, b0);
778  vnet_feature_next (&next1, b1);
779 
780  len0 = vlib_buffer_length_in_chain (vm, b0);
781  ethernet_header_t *eh0 = vlib_buffer_get_current (b0);
782  u16 ethertype0 = clib_net_to_host_u16 (eh0->type);
783 
784  if (PREDICT_TRUE ((b0->flags & VNET_BUFFER_F_FLOW_REPORT) == 0))
785  add_to_flow_record_state (vm, node, fm, b0, timestamp, len0,
786  flowprobe_get_variant
787  (which, fm->context[which].flags,
788  ethertype0), 0);
789 
790  len1 = vlib_buffer_length_in_chain (vm, b1);
791  ethernet_header_t *eh1 = vlib_buffer_get_current (b1);
792  u16 ethertype1 = clib_net_to_host_u16 (eh1->type);
793 
794  if (PREDICT_TRUE ((b1->flags & VNET_BUFFER_F_FLOW_REPORT) == 0))
795  add_to_flow_record_state (vm, node, fm, b1, timestamp, len1,
796  flowprobe_get_variant
797  (which, fm->context[which].flags,
798  ethertype1), 0);
799 
800  /* verify speculative enqueues, maybe switch current next frame */
801  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
802  to_next, n_left_to_next,
803  bi0, bi1, next0, next1);
804  }
805 
806  while (n_left_from > 0 && n_left_to_next > 0)
807  {
808  u32 bi0;
809  vlib_buffer_t *b0;
810  u32 next0 = FLOWPROBE_NEXT_DROP;
811  u16 len0;
812 
813  /* speculatively enqueue b0 to the current next frame */
814  bi0 = from[0];
815  to_next[0] = bi0;
816  from += 1;
817  to_next += 1;
818  n_left_from -= 1;
819  n_left_to_next -= 1;
820 
821  b0 = vlib_get_buffer (vm, bi0);
822 
823  vnet_feature_next (&next0, b0);
824 
825  len0 = vlib_buffer_length_in_chain (vm, b0);
826  ethernet_header_t *eh0 = vlib_buffer_get_current (b0);
827  u16 ethertype0 = clib_net_to_host_u16 (eh0->type);
828 
829  if (PREDICT_TRUE ((b0->flags & VNET_BUFFER_F_FLOW_REPORT) == 0))
830  {
831  flowprobe_trace_t *t = 0;
832  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
833  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
834  t = vlib_add_trace (vm, node, b0, sizeof (*t));
835 
836  add_to_flow_record_state (vm, node, fm, b0, timestamp, len0,
837  flowprobe_get_variant
838  (which, fm->context[which].flags,
839  ethertype0), t);
840  }
841 
842  /* verify speculative enqueue, maybe switch current next frame */
843  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
844  to_next, n_left_to_next,
845  bi0, next0);
846  }
847 
848  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
849  }
850  return frame->n_vectors;
851 }
852 
853 static uword
854 flowprobe_ip4_node_fn (vlib_main_t * vm,
855  vlib_node_runtime_t * node, vlib_frame_t * frame)
856 {
857  return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_IP4);
858 }
859 
860 static uword
861 flowprobe_ip6_node_fn (vlib_main_t * vm,
862  vlib_node_runtime_t * node, vlib_frame_t * frame)
863 {
864  return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_IP6);
865 }
866 
867 static uword
868 flowprobe_l2_node_fn (vlib_main_t * vm,
869  vlib_node_runtime_t * node, vlib_frame_t * frame)
870 {
871  return flowprobe_node_fn (vm, node, frame, FLOW_VARIANT_L2);
872 }
873 
874 static inline void
875 flush_record (flowprobe_variant_t which)
876 {
877  vlib_main_t *vm = vlib_get_main ();
878  vlib_buffer_t *b = flowprobe_get_buffer (vm, which);
879  if (b)
880  flowprobe_export_send (vm, b, which);
881 }
882 
883 void
884 flowprobe_flush_callback_ip4 (void)
885 {
886  flush_record (FLOW_VARIANT_IP4);
887 }
888 
889 void
890 flowprobe_flush_callback_ip6 (void)
891 {
892  flush_record (FLOW_VARIANT_IP6);
893 }
894 
895 void
896 flowprobe_flush_callback_l2 (void)
897 {
898  flush_record (FLOW_VARIANT_L2);
899  flush_record (FLOW_VARIANT_L2_IP4);
900  flush_record (FLOW_VARIANT_L2_IP6);
901 }
902 
903 
904 static void
905 flowprobe_delete_by_index (u32 my_cpu_number, u32 poolindex)
906 {
907  flowprobe_main_t *fm = &flowprobe_main;
908  flowprobe_entry_t *e;
909  u32 h;
910 
911  e = pool_elt_at_index (fm->pool_per_worker[my_cpu_number], poolindex);
912 
913  /* Get my index */
914  h = flowprobe_hash (&e->key);
915 
916  /* Reset hash */
917  fm->hash_per_worker[my_cpu_number][h] = ~0;
918 
919  pool_put_index (fm->pool_per_worker[my_cpu_number], poolindex);
920 }
921 
922 
923 /* Per worker process processing the active/passive expired entries */
924 static uword
925 flowprobe_walker_process (vlib_main_t * vm,
926  vlib_node_runtime_t * rt, vlib_frame_t * f)
927 {
928  flowprobe_main_t *fm = &flowprobe_main;
929  flow_report_main_t *frm = &flow_report_main;
930  flowprobe_entry_t *e;
931 
932  /*
933  * $$$$ Remove this check from here and track FRM status and disable
934  * this process if required.
935  */
936  if (frm->ipfix_collector.as_u32 == 0 || frm->src_address.as_u32 == 0)
937  {
938  fm->disabled = true;
939  return 0;
940  }
941  fm->disabled = false;
942 
943  u32 cpu_index = os_get_thread_index ();
944  u32 *to_be_removed = 0, *i;
945 
946  /*
947  * Tick the timer when required and process the vector of expired
948  * timers
949  */
950  f64 start_time = vlib_time_now (vm);
951  u32 count = 0;
952 
953  tw_timer_expire_timers_2t_1w_2048sl (fm->timers_per_worker[cpu_index],
954  start_time);
955 
956  vec_foreach (i, fm->expired_passive_per_worker[cpu_index])
957  {
958  u32 exported = 0;
959  f64 now = vlib_time_now (vm);
960  if (now > start_time + 100e-6
961  || exported > FLOW_MAXIMUM_EXPORT_ENTRIES - 1)
962  break;
963 
964  if (pool_is_free_index (fm->pool_per_worker[cpu_index], *i))
965  {
966  clib_warning ("Element %d is already freed\n", *i);
967  continue;
968  }
969  else
970  e = pool_elt_at_index (fm->pool_per_worker[cpu_index], *i);
971 
972  /* Check the last-update timestamp. If the entry has been idle longer
973  * than the passive timeout, delete it; otherwise restart the timer
974  * with the time remaining. Allow the timer to fire up to 10% early.
975  */
976  if ((now - e->last_updated) < (u64) (fm->passive_timer * 0.9))
977  {
978  u64 delta = fm->passive_timer - (now - e->last_updated);
979  e->passive_timer_handle = tw_timer_start_2t_1w_2048sl
980  (fm->timers_per_worker[cpu_index], *i, 0, delta);
981  }
982  else /* Nuke entry */
983  {
984  vec_add1 (to_be_removed, *i);
985  }
986  /* If anything to report send it to the exporter */
987  if (e->packetcount && now > e->last_exported + fm->active_timer)
988  {
989  exported++;
990  flowprobe_export_entry (vm, e);
991  }
992  count++;
993  }
994  if (count)
995  vec_delete (fm->expired_passive_per_worker[cpu_index], count, 0);
996 
997  vec_foreach (i, to_be_removed) flowprobe_delete_by_index (cpu_index, *i);
998  vec_free (to_be_removed);
999 
1000  return 0;
1001 }
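/* The walker above bounds its work per interrupt: the loop breaks once
 * roughly 100 microseconds (start_time + 100e-6) have elapsed or once
 * the export-count check against FLOW_MAXIMUM_EXPORT_ENTRIES trips, so
 * expired flows may be drained over several invocations. */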
1002 
1003 /* *INDENT-OFF* */
1004 VLIB_REGISTER_NODE (flowprobe_ip4_node) = {
1005  .function = flowprobe_ip4_node_fn,
1006  .name = "flowprobe-ip4",
1007  .vector_size = sizeof (u32),
1008  .format_trace = format_flowprobe_trace,
1009  .type = VLIB_NODE_TYPE_INTERNAL,
1010  .n_errors = ARRAY_LEN(flowprobe_error_strings),
1011  .error_strings = flowprobe_error_strings,
1012  .n_next_nodes = FLOWPROBE_N_NEXT,
1013  .next_nodes = FLOWPROBE_NEXT_NODES,
1014 };
1015 VLIB_REGISTER_NODE (flowprobe_ip6_node) = {
1016  .function = flowprobe_ip6_node_fn,
1017  .name = "flowprobe-ip6",
1018  .vector_size = sizeof (u32),
1019  .format_trace = format_flowprobe_trace,
1020  .type = VLIB_NODE_TYPE_INTERNAL,
1021  .n_errors = ARRAY_LEN(flowprobe_error_strings),
1022  .error_strings = flowprobe_error_strings,
1023  .n_next_nodes = FLOWPROBE_N_NEXT,
1024  .next_nodes = FLOWPROBE_NEXT_NODES,
1025 };
1026 VLIB_REGISTER_NODE (flowprobe_l2_node) = {
1027  .function = flowprobe_l2_node_fn,
1028  .name = "flowprobe-l2",
1029  .vector_size = sizeof (u32),
1030  .format_trace = format_flowprobe_trace,
1031  .type = VLIB_NODE_TYPE_INTERNAL,
1032  .n_errors = ARRAY_LEN(flowprobe_error_strings),
1033  .error_strings = flowprobe_error_strings,
1034  .n_next_nodes = FLOWPROBE_N_NEXT,
1035  .next_nodes = FLOWPROBE_NEXT_NODES,
1036 };
1037 VLIB_REGISTER_NODE (flowprobe_walker_node) = {
1038  .function = flowprobe_walker_process,
1039  .name = "flowprobe-walker",
1040  .type = VLIB_NODE_TYPE_INPUT,
1041  .state = VLIB_NODE_STATE_INTERRUPT,
1042 };
1043 /* *INDENT-ON* */
1044 
1045 /*
1046  * fd.io coding-style-patch-verification: ON
1047  *
1048  * Local Variables:
1049  * eval: (c-set-style "gnu")
1050  * End:
1051  */
Definition: ip6_packet.h:378