FD.io VPP  v18.07-rc0-415-g6c78436
Vector Packet Processing
stats.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include <vpp/stats/stats.h>
16 #include <signal.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/mfib/mfib_entry.h>
20 #include <vnet/dpo/load_balance.h>
21 #include <vnet/udp/udp_encap.h>
22 #include <vnet/bier/bier_fmask.h>
23 #include <vnet/bier/bier_table.h>
24 #include <vnet/fib/fib_api.h>
25 
26 #define STATS_DEBUG 0
27 
28 stats_main_t stats_main;
29 
30 #include <vnet/ip/ip.h>
31 
32 #include <vpp/api/vpe_msg_enum.h>
33 
34 #define f64_endian(a)
35 #define f64_print(a,b)
36 
37 #define vl_typedefs /* define message structures */
38 #include <vpp/api/vpe_all_api_h.h>
39 #undef vl_typedefs
40 
41 #define vl_endianfun /* define message structures */
42 #include <vpp/api/vpe_all_api_h.h>
43 #undef vl_endianfun
44 
45 /* instantiate all the print functions we know about */
46 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
47 #define vl_printfun
48 #include <vpp/api/vpe_all_api_h.h>
49 #undef vl_printfun
50 
51 #define foreach_stats_msg \
52 _(WANT_STATS, want_stats) \
53 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
54 _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
55 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
56 _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
57 _(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
58 _(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
59 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
60 _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
61 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
62 _(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
63 _(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats) \
64 _(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats) \
65 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
66 _(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
67 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
68 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
69 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \
70 _(STATS_GET_POLLER_DELAY, stats_get_poller_delay) \
71 _(WANT_UDP_ENCAP_STATS, want_udp_encap_stats) \
72 _(WANT_BIER_NEIGHBOR_STATS, want_bier_neighbor_stats)
73 
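/*
 * foreach_stats_msg is an X-macro list; each entry is expanded through a
 * caller-supplied _(UPPER, lower) definition. A minimal illustrative sketch
 * of the usual handler-registration expansion (the vl_api_* names follow the
 * standard VPP conventions and are shown for illustration only):
 *
 *   #define _(N, n)                                             \
 *     vl_msg_api_set_handlers (VL_API_##N, #n,                  \
 *                              vl_api_##n##_t_handler,          \
 *                              vl_noop_handler,                 \
 *                              vl_api_##n##_t_endian,           \
 *                              vl_api_##n##_t_print,            \
 *                              sizeof (vl_api_##n##_t), 0);
 *   foreach_stats_msg;
 *   #undef _
 */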
74 #define vl_msg_name_crc_list
75 #include <vpp/stats/stats.api.h>
76 #undef vl_msg_name_crc_list
77 
78 static void
79 setup_message_id_table (api_main_t * am)
80 {
81 #define _(id,n,crc) \
82  vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
83  foreach_vl_msg_name_crc_stats;
84 #undef _
85 }
86 
87 /* These constants ensure msg sizes <= 1024, aka ring allocation */
88 #define SIMPLE_COUNTER_BATCH_SIZE 126
89 #define COMBINED_COUNTER_BATCH_SIZE 63
90 #define IP4_FIB_COUNTER_BATCH_SIZE 48
91 #define IP6_FIB_COUNTER_BATCH_SIZE 30
92 #define IP4_MFIB_COUNTER_BATCH_SIZE 24
93 #define IP6_MFIB_COUNTER_BATCH_SIZE 15
94 #define UDP_ENCAP_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_udp_encap_counter_t))
95 #define BIER_NEIGHBOR_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_bier_neighbor_counter_t))
96 
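/*
 * Quick check of the batch sizes above, assuming 8-byte simple counters and
 * a 16-byte vlib_counter_t for combined counters:
 *
 *   SIMPLE_COUNTER_BATCH_SIZE:   126 * 8  = 1008 bytes of payload
 *   COMBINED_COUNTER_BATCH_SIZE:  63 * 16 = 1008 bytes of payload
 *
 * both of which leave room for the message header inside a 1024-byte ring
 * slot. The UDP encap and BIER sizes compute the same bound directly from
 * sizeof() of the per-entry counter structure.
 */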
97 /* 5ms */
98 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
99 /* ns/us us/ms */
100 
101 u8 *
102 format_vnet_interface_combined_counters (u8 * s, va_list * args)
103 {
104  stats_main_t *sm = &stats_main;
105  vl_api_vnet_interface_combined_counters_t *mp =
106  va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
107 
108  char *counter_name;
109  u32 count, sw_if_index;
110  int i;
111  count = ntohl (mp->count);
112  sw_if_index = ntohl (mp->first_sw_if_index);
113 
114  vlib_counter_t *vp;
115  u64 packets, bytes;
116  vp = (vlib_counter_t *) mp->data;
117 
118  switch (mp->vnet_counter_type)
119  {
120  case VNET_INTERFACE_COUNTER_RX:
121  counter_name = "rx";
122  break;
123  case VNET_INTERFACE_COUNTER_TX:
124  counter_name = "tx";
125  break;
126  default:
127  counter_name = "bogus";
128  break;
129  }
130  for (i = 0; i < count; i++)
131  {
132  packets = clib_mem_unaligned (&vp->packets, u64);
133  packets = clib_net_to_host_u64 (packets);
134  bytes = clib_mem_unaligned (&vp->bytes, u64);
135  bytes = clib_net_to_host_u64 (bytes);
136  vp++;
137  s = format (s, "%U.%s.packets %lld\n",
138  format_vnet_sw_if_index_name,
139  sm->vnet_main, sw_if_index, counter_name, packets);
140  s = format (s, "%U.%s.bytes %lld\n",
141  format_vnet_sw_if_index_name,
142  sm->vnet_main, sw_if_index, counter_name, bytes);
143  sw_if_index++;
144  }
145  return s;
146 }
147 
148 u8 *
149 format_vnet_interface_simple_counters (u8 * s, va_list * args)
150 {
151  stats_main_t *sm = &stats_main;
152  vl_api_vnet_interface_simple_counters_t *mp =
153  va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
154  char *counter_name;
155  u32 count, sw_if_index;
156  count = ntohl (mp->count);
157  sw_if_index = ntohl (mp->first_sw_if_index);
158  u64 *vp, v;
159  vp = (u64 *) mp->data;
160  int i;
161 
162  switch (mp->vnet_counter_type)
163  {
164  case VNET_INTERFACE_COUNTER_DROP:
165  counter_name = "drop";
166  break;
167  case VNET_INTERFACE_COUNTER_PUNT:
168  counter_name = "punt";
169  break;
170  case VNET_INTERFACE_COUNTER_IP4:
171  counter_name = "ip4";
172  break;
173  case VNET_INTERFACE_COUNTER_IP6:
174  counter_name = "ip6";
175  break;
176  case VNET_INTERFACE_COUNTER_RX_NO_BUF:
177  counter_name = "rx-no-buff";
178  break;
179  case VNET_INTERFACE_COUNTER_RX_MISS:
180  counter_name = "rx-miss";
181  break;
182  case VNET_INTERFACE_COUNTER_RX_ERROR:
183  counter_name = "rx-error (fifo-full)";
184  break;
185  case VNET_INTERFACE_COUNTER_TX_ERROR:
186  counter_name = "tx-error (fifo-full)";
187  break;
188  default:
189  counter_name = "bogus";
190  break;
191  }
192  for (i = 0; i < count; i++)
193  {
194  v = clib_mem_unaligned (vp, u64);
195  v = clib_net_to_host_u64 (v);
196  vp++;
197  s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
198  sm->vnet_main, sw_if_index, counter_name, v);
199  sw_if_index++;
200  }
201 
202  return s;
203 }
204 
205 static void
206 dslock (stats_main_t * sm, int release_hint, int tag)
207 {
208  u32 thread_index;
209  data_structure_lock_t *l = sm->data_structure_lock;
210 
211  if (PREDICT_FALSE (l == 0))
212  return;
213 
214  thread_index = vlib_get_thread_index ();
215  if (l->lock && l->thread_index == thread_index)
216  {
217  l->count++;
218  return;
219  }
220 
221  if (release_hint)
222  l->release_hint++;
223 
224  while (__sync_lock_test_and_set (&l->lock, 1))
225  /* zzzz */ ;
226  l->tag = tag;
227  l->thread_index = thread_index;
228  l->count = 1;
229 }
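/*
 * dslock()/dsunlock() implement a small recursive spinlock around the
 * stats_main data-structure lock: a thread that already owns the lock only
 * bumps the nesting count, and release_hint asks the current holder to drop
 * the lock at its next convenient point. A minimal usage sketch via the
 * exported wrappers defined below:
 *
 *   stats_dslock_with_hint (1, 10);   // take the lock (hint=1, tag=10)
 *   ... walk interface or FIB state ...
 *   stats_dsunlock (1, 10);           // arguments are informational only
 */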
230 
231 void
232 stats_dslock_with_hint (int hint, int tag)
233 {
234  stats_main_t *sm = &stats_main;
235  dslock (sm, hint, tag);
236 }
237 
238 static void
239 dsunlock (stats_main_t * sm)
240 {
241  u32 thread_index;
242  data_structure_lock_t *l = sm->data_structure_lock;
243 
244  if (PREDICT_FALSE (l == 0))
245  return;
246 
247  thread_index = vlib_get_thread_index ();
248  ASSERT (l->lock && l->thread_index == thread_index);
249  l->count--;
250  if (l->count == 0)
251  {
252  l->tag = -l->tag;
253  l->release_hint = 0;
254  CLIB_MEMORY_BARRIER ();
255  l->lock = 0;
256  }
257 }
258 
259 void
260 stats_dsunlock (int hint, int tag)
261 {
262  stats_main_t *sm = &stats_main;
263  dsunlock (sm);
264 }
265 
266 static vpe_client_registration_t *
267 get_client_for_stat (u32 reg, u32 item, u32 client_index)
268 {
269  stats_main_t *sm = &stats_main;
270  vpe_client_stats_registration_t *registration;
271  uword *p;
272 
273  /* Is there anything listening for item in that reg */
274  p = hash_get (sm->stats_registration_hash[reg], item);
275 
276  if (!p)
277  return 0; // Fail
278 
279  /* If there is, is our client_index one of them */
280  registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
281  p = hash_get (registration->client_hash, client_index);
282 
283  if (!p)
284  return 0; // Fail
285 
286  return pool_elt_at_index (registration->clients, p[0]);
287 
288 }
289 
290 static int
291 set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client)
292 {
293  stats_main_t *sm = &stats_main;
294  vpe_client_stats_registration_t *registration;
295  vpe_client_registration_t *cr;
296  uword *p;
297 
298  /* Is there anything listening for item in that reg */
299  p = hash_get (sm->stats_registration_hash[reg], item);
300 
301  if (!p)
302  {
303  pool_get (sm->stats_registrations[reg], registration);
304  registration->item = item;
305  registration->client_hash = NULL;
306  registration->clients = NULL;
307  hash_set (sm->stats_registration_hash[reg], item,
308  registration - sm->stats_registrations[reg]);
309  }
310  else
311  {
312  registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
313  }
314 
315  p = hash_get (registration->client_hash, client->client_index);
316 
317  if (!p)
318  {
319  pool_get (registration->clients, cr);
320  cr->client_index = client->client_index;
321  cr->client_pid = client->client_pid;
322  hash_set (registration->client_hash, cr->client_index,
323  cr - registration->clients);
324  }
325 
326  return 1; //At least one client is doing something ... poll
327 }
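/*
 * Registrations form a two-level structure: for each stats type 'reg',
 * stats_registration_hash[reg] maps an item (a sw_if_index, FIB index, or
 * ~0 for "all") to an index into the stats_registrations[reg] pool; each
 * registration in turn owns a pool of vpe_client_registration_t plus a
 * client_hash keyed by client_index. get_client_for_stat(),
 * set_client_for_stat() and the clear_* helpers below are the only
 * accessors.
 */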
328 
329 static void
330 clear_one_client (u32 reg_index, u32 reg, u32 item, u32 client_index)
331 {
332  stats_main_t *sm = &stats_main;
333  vpe_client_stats_registration_t *registration;
334  vpe_client_registration_t *client;
335  uword *p;
336 
337  registration = pool_elt_at_index (sm->stats_registrations[reg], reg_index);
338  p = hash_get (registration->client_hash, client_index);
339 
340  if (p)
341  {
342  client = pool_elt_at_index (registration->clients, p[0]);
343  hash_unset (registration->client_hash, client->client_index);
344  pool_put (registration->clients, client);
345 
346  /* Now check if that was the last client for that item */
347  if (0 == pool_elts (registration->clients))
348  {
349  hash_unset (sm->stats_registration_hash[reg], item);
350  hash_free (registration->client_hash);
351  pool_free (registration->clients);
352  pool_put (sm->stats_registrations[reg], registration);
353  }
354  }
355 }
356 
357 int
358 clear_client_for_stat (u32 reg, u32 item, u32 client_index)
359 {
360  stats_main_t *sm = &stats_main;
361  uword *p;
362  int i, elts;
363 
364  /* Clear the client first */
365  /* Is there anything listening for item in that reg */
366  p = hash_get (sm->stats_registration_hash[reg], item);
367 
368  if (!p)
369  goto exit;
370 
371  /* If there is, is our client_index one of them */
372  clear_one_client (p[0], reg, item, client_index);
373 
374 exit:
375  elts = 0;
376  /* Now check if that was the last item in any of the listened to stats */
377  for (i = 0; i < STATS_REG_N_IDX; i++)
378  {
379  elts += pool_elts (sm->stats_registrations[i]);
380  }
381  return elts;
382 }
383 
384 static int
385 clear_client_for_all_stats (u32 client_index)
386 {
387  stats_main_t *sm = &stats_main;
388  u32 reg_index, item, reg;
389  int i, elts;
390 
391  /* *INDENT-OFF* */
392  for (reg = 0; reg < STATS_REG_N_IDX; reg++)
393  {
394  hash_foreach(item, reg_index, sm->stats_registration_hash[reg],
395  ({
396  clear_one_client(reg_index, reg, item, client_index);
397  }));
398  }
399  /* *INDENT-ON* */
400 
401  elts = 0;
402  /* Now check if that was the last item in any of the listened to stats */
403  for (i = 0; i < STATS_REG_N_IDX; i++)
404  {
405  elts += pool_elts (sm->stats_registrations[i]);
406  }
407  return elts;
408 }
409 
410 static clib_error_t *
411 want_stats_reaper (u32 client_index)
412 {
413  stats_main_t *sm = &stats_main;
414 
415  sm->enable_poller = clear_client_for_all_stats (client_index);
416 
417  return (NULL);
418 }
419 
420 VL_MSG_API_REAPER_FUNCTION (want_stats_reaper);
421 
422 
423 /*
424  * Return a copy of the clients list.
425  */
426 vpe_client_registration_t *
427 get_clients_for_stat (u32 reg, u32 item)
428 {
429  stats_main_t *sm = &stats_main;
430  vpe_client_registration_t *client, *clients = 0;
431  vpe_client_stats_registration_t *registration;
432  uword *p;
433 
434  /* Is there anything listening for item in that reg */
435  p = hash_get (sm->stats_registration_hash[reg], item);
436 
437  if (!p)
438  return 0; // Fail
439 
440  /* If there is, is our client_index one of them */
441  registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
442 
443  vec_reset_length (clients);
444 
445  /* *INDENT-OFF* */
446  pool_foreach (client, registration->clients,
447  ({
448  vec_add1 (clients, *client);}
449  ));
450  /* *INDENT-ON* */
451  return clients;
452 }
453 
454 
455 static void
456 clear_client_reg (u32 ** registrations)
457 {
458  /* When registrations[x] is a vector of pool indices
459  here is a good place to clean up the pools
460  */
461 #define stats_reg(n) vec_free(registrations[IDX_##n]);
462 #include <vpp/stats/stats.reg>
463 #undef stats_reg
464 
465  vec_free (registrations);
466 }
467 
468 u32 **
469 init_client_reg (u32 ** registrations)
470 {
471 
472  /*
473  Initialise the stats registrations for each
474  type of stat a client can register for as well as
475  a vector of "interested" indexes.
476  Initially this is a u32 of either sw_if_index or fib_index
477  but eventually this should migrate to a pool_index (u32)
478  with a type specific pool that can include more complex things
479  such as timing and structured events.
480  */
481  vec_validate (registrations, STATS_REG_N_IDX);
482 #define stats_reg(n) \
483  vec_reset_length(registrations[IDX_##n]);
484 #include <vpp/stats/stats.reg>
485 #undef stats_reg
486 
487  /*
488  When registrations[x] is a vector of pool indices, here
489  is a good place to init the pools.
490  */
491  return registrations;
492 }
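/*
 * <vpp/stats/stats.reg> supplies the stats_reg(n) X-macro entries consumed
 * above, with one IDX_<n> enum value per statistic type. An illustrative
 * expansion for one representative entry:
 *
 *   stats_reg (PER_INTERFACE_SIMPLE_COUNTERS)
 *     -> vec_reset_length (registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]);
 */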
493 
494 u32 **
495 enable_all_client_reg (u32 ** registrations)
496 {
497 
498  /*
499  Enable all stats known by adding
500  ~0 to the index vector. Eventually this
501  should be deprecated.
502  */
503 #define stats_reg(n) \
504  vec_add1(registrations[IDX_##n], ~0);
505 #include <vpp/stats/stats.reg>
506 #undef stats_reg
507  return registrations;
508 }
509 
510 static void
511 do_simple_interface_counters (stats_main_t * sm)
512 {
513  vl_api_vnet_interface_simple_counters_t *mp = 0;
514  vnet_interface_main_t *im = sm->interface_main;
515  api_main_t *am = sm->api_main;
516  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
517  svm_queue_t *q = shmem_hdr->vl_input_queue;
518  vlib_simple_counter_main_t *cm;
519  u32 items_this_message = 0;
520  u64 v, *vp = 0;
521  int i, n_counts;
522 
523  /*
524  * Prevent interface registration from expanding / moving the vectors...
525  * That tends never to happen, so we can hold this lock for a while.
526  */
527  vnet_interface_counter_lock (im);
528 
529  vec_foreach (cm, im->sw_if_counters)
530  {
531  n_counts = vlib_simple_counter_n_counters (cm);
532  for (i = 0; i < n_counts; i++)
533  {
534  if (mp == 0)
535  {
536  items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
537  n_counts - i);
538 
539  mp = vl_msg_api_alloc_as_if_client
540  (sizeof (*mp) + items_this_message * sizeof (v));
541  mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
542  mp->vnet_counter_type = cm - im->sw_if_counters;
543  mp->first_sw_if_index = htonl (i);
544  mp->count = 0;
545  vp = (u64 *) mp->data;
546  }
547  v = vlib_get_simple_counter (cm, i);
548  clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
549  vp++;
550  mp->count++;
551  if (mp->count == items_this_message)
552  {
553  mp->count = htonl (items_this_message);
554  /* Send to the main thread... */
555  vl_msg_api_send_shmem (q, (u8 *) & mp);
556  mp = 0;
557  }
558  }
559  ASSERT (mp == 0);
560  }
561  vnet_interface_counter_unlock (im);
562 }
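/*
 * The scan above batches at most SIMPLE_COUNTER_BATCH_SIZE values per
 * message: a message is allocated lazily, u64 counters are appended in
 * network byte order, and the message is shipped to the main thread's
 * shared-memory input queue each time the batch fills. The interface
 * counter lock is held for the whole scan so the counter vectors cannot be
 * reallocated underneath it.
 */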
563 
564 void
565 handle_client_registration (vpe_client_registration_t * client, u32 stat,
566  u32 item, int enable_disable)
567 {
568  stats_main_t *sm = &stats_main;
569  vpe_client_registration_t *rp, _rp;
570 
571  rp = get_client_for_stat (stat, item, client->client_index);
572 
573  /* Disable case */
574  if (enable_disable == 0)
575  {
576  if (!rp) // No client to disable
577  {
578  clib_warning ("pid %d: already disabled for stats...",
579  client->client_pid);
580  return;
581  }
582  sm->enable_poller =
583  clear_client_for_stat (stat, item, client->client_index);
584  return;
585  }
586  /* Enable case */
587  if (!rp)
588  {
589  rp = &_rp;
590  rp->client_index = client->client_index;
591  rp->client_pid = client->client_pid;
592  sm->enable_poller = set_client_for_stat (stat, item, rp);
593  }
594 }
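/*
 * handle_client_registration() folds both directions of the various want_*
 * requests into one helper: enable_disable == 0 removes the
 * (stat, item, client) registration, anything else adds it. Either way
 * sm->enable_poller ends up recording whether at least one registration of
 * any kind still exists.
 */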
595 
596 
597 /**********************************
598  * ALL Interface Combined stats - to be deprecated
599  **********************************/
600 
601 /*
602  * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
603  */
604 static void
605  vl_api_want_interface_combined_stats_t_handler
606  (vl_api_want_interface_combined_stats_t * mp)
607 {
608  stats_main_t *sm = &stats_main;
609  vpe_client_registration_t rp;
610  vl_api_want_interface_combined_stats_reply_t *rmp;
611  uword *p;
612  i32 retval = 0;
613  vl_api_registration_t *reg;
614  u32 swif;
615 
616  swif = ~0; //Using same mechanism as _per_interface_
617  rp.client_index = mp->client_index;
618  rp.client_pid = mp->pid;
619 
620  handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
621  mp->enable_disable);
622 
623 reply:
624  reg = vl_api_client_index_to_registration (mp->client_index);
625  if (!reg)
626  {
627  sm->enable_poller =
628  clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
629  mp->client_index);
630  return;
631  }
632 
633  rmp = vl_msg_api_alloc (sizeof (*rmp));
634  rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
635  rmp->context = mp->context;
636  rmp->retval = retval;
637 
638  vl_api_send_msg (reg, (u8 *) rmp);
639 }
640 
641 static void
642  vl_api_vnet_interface_combined_counters_t_handler
643  (vl_api_vnet_interface_combined_counters_t * mp)
644 {
645  vpe_client_registration_t *clients, client;
646  stats_main_t *sm = &stats_main;
647  vl_api_registration_t *reg, *reg_prev = NULL;
648  vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
649  u32 mp_size;
650  int i;
651 
652  mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
653 
654  clients =
655  get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
656  ~0 /*flag for all */ );
657 
658  for (i = 0; i < vec_len (clients); i++)
659  {
660  client = clients[i];
661  reg = vl_api_client_index_to_registration (client.client_index);
662  if (reg)
663  {
664  if (reg_prev && vl_api_can_send_msg (reg_prev))
665  {
666  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
667  clib_memcpy (mp_copy, mp, mp_size);
668  vl_api_send_msg (reg_prev, (u8 *) mp);
669  mp = mp_copy;
670  }
671  reg_prev = reg;
672  }
673  }
674  vec_free (clients);
675 #if STATS_DEBUG > 0
676  fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
677 #endif
678 
679  if (reg_prev && vl_api_can_send_msg (reg_prev))
680  {
681  vl_api_send_msg (reg_prev, (u8 *) mp);
682  }
683  else
684  {
685  vl_msg_api_free (mp);
686  }
687 }
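/*
 * The handler above uses the fan-out pattern shared by the
 * *_counters_t_handler functions in this file: walk the registered clients
 * and, for every client except the last reachable one, send a freshly
 * allocated copy of the message; the original message goes to the final
 * client, or is freed if no client can receive it, saving one allocation
 * per broadcast.
 */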
688 
689 static void
690 do_combined_interface_counters (stats_main_t * sm)
691 {
692  vl_api_vnet_interface_combined_counters_t *mp = 0;
693  vnet_interface_main_t *im = sm->interface_main;
694  api_main_t *am = sm->api_main;
695  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
696  svm_queue_t *q = shmem_hdr->vl_input_queue;
697  vlib_combined_counter_main_t *cm;
698  u32 items_this_message = 0;
699  vlib_counter_t v, *vp = 0;
700  int i, n_counts;
701 
702  vnet_interface_counter_lock (im);
703 
704  vec_foreach (cm, im->combined_sw_if_counters)
705  {
706  n_counts = vlib_combined_counter_n_counters (cm);
707  for (i = 0; i < n_counts; i++)
708  {
709  if (mp == 0)
710  {
711  items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
712  n_counts - i);
713 
714  mp = vl_msg_api_alloc_as_if_client
715  (sizeof (*mp) + items_this_message * sizeof (v));
716  mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
717  mp->vnet_counter_type = cm - im->combined_sw_if_counters;
718  mp->first_sw_if_index = htonl (i);
719  mp->count = 0;
720  vp = (vlib_counter_t *) mp->data;
721  }
722  vlib_get_combined_counter (cm, i, &v);
723  clib_mem_unaligned (&vp->packets, u64)
724  = clib_host_to_net_u64 (v.packets);
725  clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
726  vp++;
727  mp->count++;
728  if (mp->count == items_this_message)
729  {
730  mp->count = htonl (items_this_message);
731  /* Send to the main thread... */
732  vl_msg_api_send_shmem (q, (u8 *) & mp);
733  mp = 0;
734  }
735  }
736  ASSERT (mp == 0);
737  }
738  vnet_interface_counter_unlock (im);
739 }
740 
741 /**********************************
742  * Per Interface Combined stats
743  **********************************/
744 
745 /* Request from client registering interfaces it wants */
746 static void
747  vl_api_want_per_interface_combined_stats_t_handler
748  (vl_api_want_per_interface_combined_stats_t * mp)
749 {
750  stats_main_t *sm = &stats_main;
751  vpe_client_registration_t rp;
752  vl_api_want_per_interface_combined_stats_reply_t *rmp;
754  uword *p;
755  i32 retval = 0;
756  vl_api_registration_t *reg;
757  u32 i, swif, num = 0;
758 
759  num = ntohl (mp->num);
760 
761  /*
762  * Validate sw_if_indexes before registering
763  */
764  for (i = 0; i < num; i++)
765  {
766  swif = ntohl (mp->sw_ifs[i]);
767 
768  /*
769  * Check its a real sw_if_index that the client is allowed to see
770  */
771  if (swif != ~0)
772  {
773  if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
774  {
775  retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
776  goto reply;
777  }
778  }
779  }
780 
781  for (i = 0; i < num; i++)
782  {
783  swif = ntohl (mp->sw_ifs[i]);
784 
785  rp.client_index = mp->client_index;
786  rp.client_pid = mp->pid;
787  handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
788  swif, ntohl (mp->enable_disable));
789  }
790 
791 reply:
792  reg = vl_api_client_index_to_registration (mp->client_index);
793  if (!reg)
794  {
795  for (i = 0; i < num; i++)
796  {
797  swif = ntohl (mp->sw_ifs[i]);
798 
799  sm->enable_poller =
800  clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
801  mp->client_index);
802  }
803  return;
804  }
805 
806  rmp = vl_msg_api_alloc (sizeof (*rmp));
807  rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
808  rmp->context = mp->context;
809  rmp->retval = retval;
810 
811  vl_api_send_msg (reg, (u8 *) rmp);
812 }
813 
814 /* Per Interface Combined distribution to client */
815 static void
817 {
820  api_main_t *am = sm->api_main;
822  vl_api_registration_t *vl_reg;
826  u32 i, j;
829  u32 *sw_if_index = 0;
830 
832 
834 
835  /* *INDENT-OFF* */
836  pool_foreach (reg,
837  sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
838  ({ vec_add1 (sm->regs_tmp, reg); }));
839  /* *INDENT-ON* */
840 
841  for (i = 0; i < vec_len (sm->regs_tmp); i++)
842  {
843  reg = sm->regs_tmp[i];
844  if (reg->item == ~0)
845  {
849  continue;
850  }
852 
853  /* *INDENT-OFF* */
854  pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
855  client);}));
856  /* *INDENT-ON* */
857 
858  for (j = 0; j < vec_len (sm->clients_tmp); j++)
859  {
860  client = sm->clients_tmp[j];
861 
863 
864  //Client may have disconnected abruptly; clean up so we don't poll for nothing.
865  if (!vl_reg)
866  {
867  sm->enable_poller =
868  clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
869  reg->item, client->client_index);
870  continue;
871  }
872  mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
873  memset (mp, 0, sizeof (*mp));
874 
875  mp->_vl_msg_id =
876  ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
877 
878  /*
879  * count will eventually be used to optimise the batching
880  * of per client messages for each stat. For now setting this to 1 then
881  * iterate. This will not affect API.
882  *
883  * FIXME instead of enqueueing here, this should be sent to a batch
884  * storer for per-client transmission. Each "mp" sent would be a single entry
885  * and if a client is listening to other sw_if_indexes for same, it would be
886  * appended to that *mp
887  *
888  *
889  * FIXME(s):
890  * - capturing the timestamp of the counters "when VPP knew them" is important.
891  * Less so is that the timing of the delivery to the control plane be in the same
892  * timescale.
893 
894  * i.e. As long as the control plane can delta messages from VPP and work out
895  * velocity etc based on the timestamp, it can do so in a more "batch mode".
896 
897  * It would be beneficial to keep a "per-client" message queue, and then
898  * batch all the stat messages for a client into one message, with
899  * discrete timestamps.
900 
901  * Given this particular API is for "per interface" one assumes that the scale
902  * is less than the ~0 case, which the prior API is suited for.
903  */
904 
905  /*
906  * 1 message per api call for now
907  */
908  mp->count = htonl (1);
909  mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
910 
910  vp = (vl_api_vnet_combined_counter_t *) mp->data;
911 
912  vp->sw_if_index = htonl (reg->item);
913 
914  im = &vnet_get_main ()->interface_main;
915 
916 #define _(X, x) \
917  cm = im->combined_sw_if_counters + X; \
918  vlib_get_combined_counter (cm, reg->item, &v); \
919  clib_mem_unaligned (&vp->x##_packets, u64) = \
920  clib_host_to_net_u64 (v.packets); \
921  clib_mem_unaligned (&vp->x##_bytes, u64) = \
922  clib_host_to_net_u64 (v.bytes);
923 
924 
925  _(VNET_INTERFACE_COUNTER_RX, rx);
926  _(VNET_INTERFACE_COUNTER_TX, tx);
927  _(VNET_INTERFACE_COUNTER_RX_UNICAST, rx_unicast);
928  _(VNET_INTERFACE_COUNTER_TX_UNICAST, tx_unicast);
929  _(VNET_INTERFACE_COUNTER_RX_MULTICAST, rx_multicast);
930  _(VNET_INTERFACE_COUNTER_TX_MULTICAST, tx_multicast);
931  _(VNET_INTERFACE_COUNTER_RX_BROADCAST, rx_broadcast);
932  _(VNET_INTERFACE_COUNTER_TX_BROADCAST, tx_broadcast);
933 
934 #undef _
935 
936  vl_api_send_msg (vl_reg, (u8 *) mp);
937  }
938  }
939 
941 }
942 
943 /**********************************
944  * Per Interface simple stats
945  **********************************/
946 
947 /* Request from client registering interfaces it wants */
948 static void
949  vl_api_want_per_interface_simple_stats_t_handler
950  (vl_api_want_per_interface_simple_stats_t * mp)
951 {
952  stats_main_t *sm = &stats_main;
953  vpe_client_registration_t rp;
954  vl_api_want_per_interface_simple_stats_reply_t *rmp;
956  uword *p;
957  i32 retval = 0;
958  vl_api_registration_t *reg;
959  u32 i, swif, num = 0;
960 
961  num = ntohl (mp->num);
962 
963  for (i = 0; i < num; i++)
964  {
965  swif = ntohl (mp->sw_ifs[i]);
966 
967  /* Check its a real sw_if_index that the client is allowed to see */
968  if (swif != ~0)
969  {
970  if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
971  {
972  retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
973  goto reply;
974  }
975  }
976  }
977 
978  for (i = 0; i < num; i++)
979  {
980  swif = ntohl (mp->sw_ifs[i]);
981 
982  rp.client_index = mp->client_index;
983  rp.client_pid = mp->pid;
984  handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
985  swif, ntohl (mp->enable_disable));
986  }
987 
988 reply:
989  reg = vl_api_client_index_to_registration (mp->client_index);
990 
991  /* Client may have disconnected abruptly, clean up */
992  if (!reg)
993  {
994  for (i = 0; i < num; i++)
995  {
996  swif = ntohl (mp->sw_ifs[i]);
997  sm->enable_poller =
998  clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
999  mp->client_index);
1000  }
1001 
1002  return;
1003  }
1004 
1005 
1006  rmp = vl_msg_api_alloc (sizeof (*rmp));
1007  rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
1008  rmp->context = mp->context;
1009  rmp->retval = retval;
1010 
1011  vl_api_send_msg (reg, (u8 *) rmp);
1012 }
1013 
1014 /* Per Interface Simple distribution to client */
1015 static void
1017 {
1020  api_main_t *am = sm->api_main;
1022  vl_api_registration_t *vl_reg;
1024  u32 i, j, size;
1026  vpe_client_registration_t *client;
1027  u32 timestamp, count;
1029  counter_t v;
1030 
1032 
1033  vec_reset_length (sm->regs_tmp);
1034 
1035  /* *INDENT-OFF* */
1036  pool_foreach (reg,
1037  sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS],
1038  ({ vec_add1 (sm->regs_tmp, reg); }));
1039  /* *INDENT-ON* */
1040 
1041  for (i = 0; i < vec_len (sm->regs_tmp); i++)
1042  {
1043  reg = sm->regs_tmp[i];
1044  if (reg->item == ~0)
1045  {
1049  continue;
1050  }
1052 
1053  /* *INDENT-OFF* */
1054  pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
1055  client);}));
1056  /* *INDENT-ON* */
1057 
1058  for (j = 0; j < vec_len (sm->clients_tmp); j++)
1059  {
1060  client = sm->clients_tmp[j];
1062 
1063  /* Client may have disconnected abruptly, clean up */
1064  if (!vl_reg)
1065  {
1066  sm->enable_poller =
1067  clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
1068  reg->item, client->client_index);
1069  continue;
1070  }
1071 
1072  mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
1073  memset (mp, 0, sizeof (*mp));
1074  mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
1075 
1076  /*
1077  * count will eventually be used to optimise the batching
1078  * of per client messages for each stat. For now setting this to 1 then
1079  * iterate. This will not affect API.
1080  *
1081  * FIXME instead of enqueueing here, this should be sent to a batch
1082  * storer for per-client transmission. Each "mp" sent would be a single entry
1083  * and if a client is listening to other sw_if_indexes for same, it would be
1084  * appended to that *mp
1085  *
1086  *
1087  * FIXME(s):
1088  * - capturing the timestamp of the counters "when VPP knew them" is important.
1089  * Less so is that the timing of the delivery to the control plane be in the same
1090  * timescale.
1091 
1092  * i.e. As long as the control plane can delta messages from VPP and work out
1093  * velocity etc based on the timestamp, it can do so in a more "batch mode".
1094 
1095  * It would be beneficial to keep a "per-client" message queue, and then
1096  * batch all the stat messages for a client into one message, with
1097  * discrete timestamps.
1098 
1099  * Given this particular API is for "per interface" one assumes that the scale
1100  * is less than the ~0 case, which the prior API is suited for.
1101  */
1102 
1103  /*
1104  * 1 message per api call for now
1105  */
1106  mp->count = htonl (1);
1107  mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
1108  vp = (vl_api_vnet_simple_counter_t *) mp->data;
1109 
1110  vp->sw_if_index = htonl (reg->item);
1111 
1112  // VNET_INTERFACE_COUNTER_DROP
1114  v = vlib_get_simple_counter (cm, reg->item);
1115  clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
1116 
1117  // VNET_INTERFACE_COUNTER_PUNT
1119  v = vlib_get_simple_counter (cm, reg->item);
1120  clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
1121 
1122  // VNET_INTERFACE_COUNTER_IP4
1124  v = vlib_get_simple_counter (cm, reg->item);
1125  clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
1126 
1127  //VNET_INTERFACE_COUNTER_IP6
1129  v = vlib_get_simple_counter (cm, reg->item);
1130  clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
1131 
1132  //VNET_INTERFACE_COUNTER_RX_NO_BUF
1134  v = vlib_get_simple_counter (cm, reg->item);
1136  clib_host_to_net_u64 (v);
1137 
1138  //VNET_INTERFACE_COUNTER_RX_MISS
1140  v = vlib_get_simple_counter (cm, reg->item);
1141  clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
1142 
1143  //VNET_INTERFACE_COUNTER_RX_ERROR
1145  v = vlib_get_simple_counter (cm, reg->item);
1146  clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
1147 
1148  //VNET_INTERFACE_COUNTER_TX_ERROR
1150  v = vlib_get_simple_counter (cm, reg->item);
1151  clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
1152 
1153  //VNET_INTERFACE_COUNTER_MPLS
1155  v = vlib_get_simple_counter (cm, reg->item);
1156  clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
1157 
1158  vl_api_send_msg (vl_reg, (u8 *) mp);
1159  }
1160  }
1161 
1163 }
1164 
1165 /**********************************
1166  * Per FIB IP4 stats
1167  **********************************/
1168 
1169 static void
1171 {
1172  struct timespec _req, *req = &_req;
1173  struct timespec _rem, *rem = &_rem;
1174 
1175  req->tv_sec = sec;
1176  req->tv_nsec = nsec;
1177  while (1)
1178  {
1179  if (nanosleep (req, rem) == 0)
1180  break;
1181  *req = *rem;
1182  if (errno == EINTR)
1183  continue;
1184  clib_unix_warning ("nanosleep");
1185  break;
1186  }
1187 }
1188 
1189 /**
1190  * @brief The context passed when collecting adjacency counters
1191  */
1192 typedef struct ip4_nbr_stats_ctx_t_
1193 {
1194  /**
1195  * The SW IF index all these adjs belong to
1196  */
1198 
1199  /**
1200  * A vector of ip4 nbr counters
1201  */
1204 
1205 static adj_walk_rc_t
1207 {
1208  vl_api_ip4_nbr_counter_t *vl_counter;
1209  vlib_counter_t adj_counter;
1211  ip_adjacency_t *adj;
1212 
1213  ctx = arg;
1214  vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
1215 
1216  if (0 != adj_counter.packets)
1217  {
1218  vec_add2 (ctx->counters, vl_counter, 1);
1219  adj = adj_get (ai);
1220 
1221  vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
1222  vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
1223  vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
1224  vl_counter->link_type = adj->ia_link;
1225  }
1226  return (ADJ_WALK_RC_CONTINUE);
1227 }
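/*
 * ip4_nbr_stats_cb() is the adj_nbr_walk() callback used by
 * do_ip4_nbr_counters() below: it reads the combined adjacency counter for
 * each neighbour adjacency and appends a vl_api_ip4_nbr_counter_t to the
 * walk context only when the packet count is non-zero, so idle neighbours
 * never generate message traffic.
 */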
1228 
1229 #define MIN(x,y) (((x)<(y))?(x):(y))
1230 
1231 static void
1233 {
1234  u8 pause = 0;
1235 
1236  svm_queue_lock (q);
1237  pause = svm_queue_is_full (q);
1238 
1239  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1240  svm_queue_unlock (q);
1241  dsunlock (sm);
1242 
1243  if (pause)
1244  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1246 }
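/*
 * send_and_pause() samples queue occupancy before posting: it enqueues the
 * message under the queue lock, drops the data-structure lock, and then
 * sleeps for STATS_RELEASE_DELAY_NS only if the main thread's input queue
 * was already full, giving the main thread time to drain the queue and take
 * the lock it may be waiting for.
 */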
1247 
1248 static void
1250 {
1251  api_main_t *am = sm->api_main;
1253  svm_queue_t *q = shmem_hdr->vl_input_queue;
1255  int first = 0;
1256 
1257  /*
1258  * If the walk context has counters, which may be left over from the last
1259  * suspend, then we continue from there.
1260  */
1261  while (0 != vec_len (ctx->counters))
1262  {
1263  u32 n_items = MIN (vec_len (ctx->counters),
1265  u8 pause = 0;
1266 
1267  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1268 
1269  mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1270  (n_items *
1271  sizeof
1273  mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
1274  mp->count = ntohl (n_items);
1275  mp->sw_if_index = ntohl (ctx->sw_if_index);
1276  mp->begin = first;
1277  first = 0;
1278 
1279  /*
1280  * copy the counters from the back of the context, then we can easily
1281  * 'erase' them by resetting the vector length.
1282  * The order we push the stats to the caller is not important.
1283  */
1284  clib_memcpy (mp->c,
1285  &ctx->counters[vec_len (ctx->counters) - n_items],
1286  n_items * sizeof (*ctx->counters));
1287 
1288  _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
1289 
1290  /*
1291  * send to the shm q
1292  */
1293  send_and_pause (sm, q, (u8 *) & mp);
1294  }
1295 }
1296 
1297 static void
1299 {
1300  vnet_main_t *vnm = vnet_get_main ();
1302  vnet_sw_interface_t *si;
1303 
1305  .sw_if_index = 0,
1306  .counters = NULL,
1307  };
1308 
1309  /* *INDENT-OFF* */
1310  pool_foreach (si, im->sw_interfaces,
1311  ({
1312  /*
1313  * update the interface we are now concerned with
1314  */
1315  ctx.sw_if_index = si->sw_if_index;
1316 
1317  /*
1318  * we are about to walk another interface, so we shouldn't have any pending
1319  * stats to export.
1320  */
1321  ASSERT(ctx.counters == NULL);
1322 
1323  /*
1324  * visit each neighbour adjacency on the interface and collect
1325  * its current stats.
1326  * Because we hold the lock the walk is synchronous, so it is safe with
1327  * respect to routing updates. Its work is bounded by the number of
1328  * adjacencies on an interface, which is typically not huge.
1329  */
1330  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1331  adj_nbr_walk (si->sw_if_index,
1332  FIB_PROTOCOL_IP4,
1333  ip4_nbr_stats_cb,
1334  &ctx);
1335  dsunlock (sm);
1336 
1337  /*
1338  * if this interface has some adjacencies with counters then ship them,
1339  * else continue to the next interface.
1340  */
1341  if (NULL != ctx.counters)
1342  {
1343  ip4_nbr_ship(sm, &ctx);
1344  }
1345  }));
1346  /* *INDENT-ON* */
1347 }
1348 
1349 /**
1350  * @brief The context passed when collecting adjacency counters
1351  */
1352 typedef struct ip6_nbr_stats_ctx_t_
1353 {
1354  /**
1355  * The SW IF index all these adjs belong to
1356  */
1358 
1359  /**
1360  * A vector of ip6 nbr counters
1361  */
1364 
1365 static adj_walk_rc_t
1367  void *arg)
1368 {
1369  vl_api_ip6_nbr_counter_t *vl_counter;
1370  vlib_counter_t adj_counter;
1372  ip_adjacency_t *adj;
1373 
1374  ctx = arg;
1375  vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
1376 
1377  if (0 != adj_counter.packets)
1378  {
1379  vec_add2(ctx->counters, vl_counter, 1);
1380  adj = adj_get(ai);
1381 
1382  vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
1383  vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
1384  vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
1385  vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
1386  vl_counter->link_type = adj->ia_link;
1387  }
1388  return (ADJ_WALK_RC_CONTINUE);
1389 }
1390 
1391 #define MIN(x,y) (((x)<(y))?(x):(y))
1392 
1393 static void
1396 {
1397  api_main_t *am = sm->api_main;
1399  svm_queue_t *q = shmem_hdr->vl_input_queue;
1401  int first = 0;
1402 
1403  /*
1404  * If the walk context has counters, which may be left over from the last
1405  * suspend, then we continue from there.
1406  */
1407  while (0 != vec_len(ctx->counters))
1408  {
1409  u32 n_items = MIN (vec_len (ctx->counters),
1411  u8 pause = 0;
1412 
1413  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1414 
1415  mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1416  (n_items *
1417  sizeof
1419  mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
1420  mp->count = ntohl (n_items);
1421  mp->sw_if_index = ntohl (ctx->sw_if_index);
1422  mp->begin = first;
1423  first = 0;
1424 
1425  /*
1426  * copy the counters from the back of the context, then we can easily
1427  * 'erase' them by resetting the vector length.
1428  * The order we push the stats to the caller is not important.
1429  */
1430  clib_memcpy (mp->c,
1431  &ctx->counters[vec_len (ctx->counters) - n_items],
1432  n_items * sizeof (*ctx->counters));
1433 
1434  _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
1435 
1436  /*
1437  * send to the shm q
1438  */
1439  send_and_pause(sm, q, (u8 *) & mp);
1440  }
1441 }
1442 
1443 static void
1445 {
1446  vnet_main_t *vnm = vnet_get_main ();
1448  vnet_sw_interface_t *si;
1449 
1451  .sw_if_index = 0,
1452  .counters = NULL,
1453  };
1454 
1455  /* *INDENT-OFF* */
1456  pool_foreach (si, im->sw_interfaces,
1457  ({
1458  /*
1459  * update the interface we are now concerned with
1460  */
1461  ctx.sw_if_index = si->sw_if_index;
1462 
1463  /*
1464  * we are about to walk another interface, so we shouldn't have any pending
1465  * stats to export.
1466  */
1467  ASSERT(ctx.counters == NULL);
1468 
1469  /*
1470  * visit each neighbour adjacency on the interface and collect
1471  * its current stats.
1472  * Because we hold the lock the walk is synchronous, so it is safe with
1473  * respect to routing updates. Its work is bounded by the number of
1474  * adjacencies on an interface, which is typically not huge.
1475  */
1476  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1477  adj_nbr_walk (si->sw_if_index,
1478  FIB_PROTOCOL_IP6,
1479  ip6_nbr_stats_cb,
1480  &ctx);
1481  dsunlock (sm);
1482 
1483  /*
1484  * if this interface has some adjacencies with counters then ship them,
1485  * else continue to the next interface.
1486  */
1487  if (NULL != ctx.counters)
1488  {
1489  ip6_nbr_ship(sm, &ctx);
1490  }
1491  }));
1492  /* *INDENT-ON* */
1493 }
1494 
1495 static void
1497 {
1498  ip4_main_t *im4 = &ip4_main;
1499  api_main_t *am = sm->api_main;
1501  svm_queue_t *q = shmem_hdr->vl_input_queue;
1502  ip4_route_t *r;
1503  fib_table_t *fib;
1504  ip4_fib_t *v4_fib;
1505  do_ip46_fibs_t *do_fibs;
1507  u32 items_this_message;
1508  vl_api_ip4_fib_counter_t *ctrp = 0;
1509  u32 start_at_fib_index = 0;
1510  int i, j, k;
1511 
1512  do_fibs = &sm->do_ip46_fibs;
1513 
1514 again:
1515  vec_reset_length (do_fibs->fibs);
1516  /* *INDENT-OFF* */
1517  pool_foreach (fib, im4->fibs,
1518  ({vec_add1(do_fibs->fibs,fib);}));
1519 
1520  /* *INDENT-ON* */
1521 
1522  for (j = 0; j < vec_len (do_fibs->fibs); j++)
1523  {
1524  fib = do_fibs->fibs[j];
1525  /* We may have bailed out due to control-plane activity */
1526  while ((fib - im4->fibs) < start_at_fib_index)
1527  continue;
1528 
1529  v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
1530 
1531  if (mp == 0)
1532  {
1533  items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1535  (sizeof (*mp) +
1536  items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1537  mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1538  mp->count = 0;
1539  mp->vrf_id = ntohl (fib->ft_table_id);
1540  ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1541  }
1542  else
1543  {
1544  /* happens if the last FIB was empty... */
1545  ASSERT (mp->count == 0);
1546  mp->vrf_id = ntohl (fib->ft_table_id);
1547  }
1548 
1549  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1550 
1551  vec_reset_length (do_fibs->ip4routes);
1552  vec_reset_length (do_fibs->results);
1553 
1554  for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
1555  {
1556  uword *hash = v4_fib->fib_entry_by_dst_address[i];
1557  hash_pair_t *p;
1558  ip4_route_t x;
1559 
1560  vec_reset_length (do_fibs->pvec);
1561 
1562  x.address_length = i;
1563 
1564  hash_foreach_pair (p, hash, (
1565  {
1566  vec_add1 (do_fibs->pvec, p);}
1567  ));
1568  for (k = 0; k < vec_len (do_fibs->pvec); k++)
1569  {
1570  p = do_fibs->pvec[k];
1571  x.address.data_u32 = p->key;
1572  x.index = p->value[0];
1573 
1574  vec_add1 (do_fibs->ip4routes, x);
1576  {
1577  start_at_fib_index = fib - im4->fibs;
1578  dsunlock (sm);
1579  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1581  mp->count = 0;
1582  ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1583  goto again;
1584  }
1585  }
1586  }
1587 
1588  vec_foreach (r, do_fibs->ip4routes)
1589  {
1590  vlib_counter_t c;
1591  const dpo_id_t *dpo_id;
1592  u32 index;
1593 
1594  dpo_id = fib_entry_contribute_ip_forwarding (r->index);
1595  index = (u32) dpo_id->dpoi_index;
1596 
1598  index, &c);
1599  /*
1600  * If it has actually
1601  * seen at least one packet, send it.
1602  */
1603  if (c.packets > 0)
1604  {
1605 
1606  /* already in net byte order */
1607  ctrp->address = r->address.as_u32;
1608  ctrp->address_length = r->address_length;
1609  ctrp->packets = clib_host_to_net_u64 (c.packets);
1610  ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1611  mp->count++;
1612  ctrp++;
1613 
1614  if (mp->count == items_this_message)
1615  {
1616  mp->count = htonl (items_this_message);
1617  /*
1618  * If the main thread's input queue is stuffed,
1619  * drop the data structure lock (which the main thread
1620  * may want), and take a pause.
1621  */
1622  svm_queue_lock (q);
1623  if (svm_queue_is_full (q))
1624  {
1625  dsunlock (sm);
1626  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1627  svm_queue_unlock (q);
1628  mp = 0;
1629  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1631  goto again;
1632  }
1633  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1634  svm_queue_unlock (q);
1635 
1636  items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1638  (sizeof (*mp) +
1639  items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1640  mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1641  mp->count = 0;
1642  mp->vrf_id = ntohl (fib->ft_table_id);
1643  ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1644  }
1645  } /* for each (mp or single) adj */
1647  {
1648  start_at_fib_index = fib - im4->fibs;
1649  dsunlock (sm);
1650  ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1651  mp->count = 0;
1652  ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1653  goto again;
1654  }
1655  } /* vec_foreach (routes) */
1656 
1657  dsunlock (sm);
1658 
1659  /* Flush any data from this fib */
1660  if (mp->count)
1661  {
1662  mp->count = htonl (mp->count);
1663  vl_msg_api_send_shmem (q, (u8 *) & mp);
1664  mp = 0;
1665  }
1666  }
1667 
1668  /* If e.g. the last FIB had no reportable routes, free the buffer */
1669  if (mp)
1670  vl_msg_api_free (mp);
1671 }
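/*
 * The "again:" pattern above keeps the FIB scan cooperative: when the data
 * structure lock's release hint is set or the main thread's input queue
 * fills, the code drops the lock, sleeps for STATS_RELEASE_DELAY_NS and
 * restarts the current FIB from start_at_fib_index instead of stalling
 * while holding the lock. The IP4/IP6 mfib and IP6 fib scans below repeat
 * the same structure.
 */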
1672 
1673 static int
1675 {
1676  stats_main_t *sm = ctx;
1677  do_ip46_fibs_t *do_fibs;
1678  mfib_entry_t *entry;
1679 
1680  do_fibs = &sm->do_ip46_fibs;
1681  entry = mfib_entry_get (fei);
1682 
1683  vec_add1 (do_fibs->mroutes, entry->mfe_prefix);
1684 
1685  return (1);
1686 }
1687 
1688 static void
1690 {
1691  ip4_main_t *im4 = &ip4_main;
1692  api_main_t *am = sm->api_main;
1694  svm_queue_t *q = shmem_hdr->vl_input_queue;
1695  mfib_prefix_t *pfx;
1696  mfib_table_t *mfib;
1697  do_ip46_fibs_t *do_fibs;
1699  u32 items_this_message;
1700  vl_api_ip4_mfib_counter_t *ctrp = 0;
1701  u32 start_at_mfib_index = 0;
1702  int i, j, k;
1703 
1704  do_fibs = &sm->do_ip46_fibs;
1705 
1706  vec_reset_length (do_fibs->mfibs);
1707  /* *INDENT-OFF* */
1708  pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1709  /* *INDENT-ON* */
1710 
1711  for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1712  {
1713  mfib = do_fibs->mfibs[j];
1714  /* We may have bailed out due to control-plane activity */
1715  while ((mfib - im4->mfibs) < start_at_mfib_index)
1716  continue;
1717 
1718  if (mp == 0)
1719  {
1720  items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1722  (sizeof (*mp) +
1723  items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1724  mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1725  mp->count = 0;
1726  mp->vrf_id = ntohl (mfib->mft_table_id);
1727  ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1728  }
1729  else
1730  {
1731  /* happens if the last MFIB was empty... */
1732  ASSERT (mp->count == 0);
1733  mp->vrf_id = ntohl (mfib->mft_table_id);
1734  }
1735 
1736  vec_reset_length (do_fibs->mroutes);
1737 
1738  /*
1739  * walk the table with table updates blocked
1740  */
1741  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1742 
1743  mfib_table_walk (mfib->mft_index,
1745  dsunlock (sm);
1746 
1747  vec_foreach (pfx, do_fibs->mroutes)
1748  {
1749  const dpo_id_t *dpo_id;
1750  fib_node_index_t mfei;
1751  vlib_counter_t c;
1752  u32 index;
1753 
1754  /*
1755  * re-lookup the entry, since we suspend during the collection
1756  */
1757  mfei = mfib_table_lookup (mfib->mft_index, pfx);
1758 
1759  if (FIB_NODE_INDEX_INVALID == mfei)
1760  continue;
1761 
1762  dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1763  index = (u32) dpo_id->dpoi_index;
1764 
1766  dpo_id->dpoi_index, &c);
1767  /*
1768  * If it has seen at least one packet, send it.
1769  */
1770  if (c.packets > 0)
1771  {
1772  /* already in net byte order */
1773  memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
1774  memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
1775  ctrp->group_length = pfx->fp_len;
1776  ctrp->packets = clib_host_to_net_u64 (c.packets);
1777  ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1778  mp->count++;
1779  ctrp++;
1780 
1781  if (mp->count == items_this_message)
1782  {
1783  mp->count = htonl (items_this_message);
1784  /*
1785  * If the main thread's input queue is stuffed,
1786  * drop the data structure lock (which the main thread
1787  * may want), and take a pause.
1788  */
1789  svm_queue_lock (q);
1790 
1791  while (svm_queue_is_full (q))
1792  {
1793  svm_queue_unlock (q);
1794  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1796  svm_queue_lock (q);
1797  }
1798  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1799  svm_queue_unlock (q);
1800 
1801  items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1803  (sizeof (*mp) +
1804  items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1805  mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1806  mp->count = 0;
1807  mp->vrf_id = ntohl (mfib->mft_table_id);
1808  ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1809  }
1810  }
1811  }
1812 
1813  /* Flush any data from this mfib */
1814  if (mp->count)
1815  {
1816  mp->count = htonl (mp->count);
1817  vl_msg_api_send_shmem (q, (u8 *) & mp);
1818  mp = 0;
1819  }
1820  }
1821 
1822  /* If e.g. the last FIB had no reportable routes, free the buffer */
1823  if (mp)
1824  vl_msg_api_free (mp);
1825 }
1826 
1827 static void
1829 {
1830  ip6_main_t *im6 = &ip6_main;
1831  api_main_t *am = sm->api_main;
1833  svm_queue_t *q = shmem_hdr->vl_input_queue;
1834  mfib_prefix_t *pfx;
1835  mfib_table_t *mfib;
1836  do_ip46_fibs_t *do_fibs;
1838  u32 items_this_message;
1839  vl_api_ip6_mfib_counter_t *ctrp = 0;
1840  u32 start_at_mfib_index = 0;
1841  int i, j, k;
1842 
1843  do_fibs = &sm->do_ip46_fibs;
1844 
1845  vec_reset_length (do_fibs->mfibs);
1846  /* *INDENT-OFF* */
1847  pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1848  /* *INDENT-ON* */
1849 
1850  for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1851  {
1852  mfib = do_fibs->mfibs[j];
1853  /* We may have bailed out due to control-plane activity */
1854  while ((mfib - im6->mfibs) < start_at_mfib_index)
1855  continue;
1856 
1857  if (mp == 0)
1858  {
1859  items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1861  (sizeof (*mp) +
1862  items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1863  mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1864  mp->count = 0;
1865  mp->vrf_id = ntohl (mfib->mft_table_id);
1866  ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1867  }
1868  else
1869  {
1870  /* happens if the last MFIB was empty... */
1871  ASSERT (mp->count == 0);
1872  mp->vrf_id = ntohl (mfib->mft_table_id);
1873  }
1874 
1875  vec_reset_length (do_fibs->mroutes);
1876 
1877  /*
1878  * walk the table with table updates blocked
1879  */
1880  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1881 
1882  mfib_table_walk (mfib->mft_index,
1884  dsunlock (sm);
1885 
1886  vec_foreach (pfx, do_fibs->mroutes)
1887  {
1888  const dpo_id_t *dpo_id;
1889  fib_node_index_t mfei;
1890  vlib_counter_t c;
1891  u32 index;
1892 
1893  /*
1894  * re-lookup the entry, since we suspend during the collection
1895  */
1896  mfei = mfib_table_lookup (mfib->mft_index, pfx);
1897 
1898  if (FIB_NODE_INDEX_INVALID == mfei)
1899  continue;
1900 
1901  dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1902  index = (u32) dpo_id->dpoi_index;
1903 
1905  dpo_id->dpoi_index, &c);
1906  /*
1907  * If it has seen at least one packet, send it.
1908  */
1909  if (c.packets > 0)
1910  {
1911  /* already in net byte order */
1912  memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
1913  memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
1914  ctrp->group_length = pfx->fp_len;
1915  ctrp->packets = clib_host_to_net_u64 (c.packets);
1916  ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1917  mp->count++;
1918  ctrp++;
1919 
1920  if (mp->count == items_this_message)
1921  {
1922  mp->count = htonl (items_this_message);
1923  /*
1924  * If the main thread's input queue is stuffed,
1925  * drop the data structure lock (which the main thread
1926  * may want), and take a pause.
1927  */
1928  svm_queue_lock (q);
1929 
1930  while (svm_queue_is_full (q))
1931  {
1932  svm_queue_unlock (q);
1933  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1935  svm_queue_lock (q);
1936  }
1937  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1938  svm_queue_unlock (q);
1939 
1940  items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1942  (sizeof (*mp) +
1943  items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1944  mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1945  mp->count = 0;
1946  mp->vrf_id = ntohl (mfib->mft_table_id);
1947  ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1948  }
1949  }
1950  }
1951 
1952  /* Flush any data from this mfib */
1953  if (mp->count)
1954  {
1955  mp->count = htonl (mp->count);
1956  vl_msg_api_send_shmem (q, (u8 *) & mp);
1957  mp = 0;
1958  }
1959  }
1960 
1961  /* If e.g. the last FIB had no reportable routes, free the buffer */
1962  if (mp)
1963  vl_msg_api_free (mp);
1964 }
1965 
1966 typedef struct
1967 {
1972 
1973 static void
1974 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
1975 {
1976  add_routes_in_fib_arg_t *ap = arg;
1977  stats_main_t *sm = ap->sm;
1978 
1980  clib_longjmp (&sm->jmp_buf, 1);
1981 
1982  if (kvp->key[2] >> 32 == ap->fib_index)
1983  {
1985  ip6_route_t *r;
1986  addr = (ip6_address_t *) kvp;
1987  vec_add2 (*ap->routep, r, 1);
1988  r->address = addr[0];
1989  r->address_length = kvp->key[2] & 0xFF;
1990  r->index = kvp->value;
1991  }
1992 }
1993 
1994 static void
1996 {
1997  ip6_main_t *im6 = &ip6_main;
1998  api_main_t *am = sm->api_main;
2000  svm_queue_t *q = shmem_hdr->vl_input_queue;
2001  ip6_route_t *r;
2002  fib_table_t *fib;
2003  do_ip46_fibs_t *do_fibs;
2005  u32 items_this_message;
2006  vl_api_ip6_fib_counter_t *ctrp = 0;
2007  u32 start_at_fib_index = 0;
2008  BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
2009  add_routes_in_fib_arg_t _a, *a = &_a;
2010  int i;
2011 
2012  do_fibs = &sm->do_ip46_fibs;
2013 again:
2014  vec_reset_length (do_fibs->fibs);
2015  /* *INDENT-OFF* */
2016  pool_foreach (fib, im6->fibs,
2017  ({vec_add1(do_fibs->fibs,fib);}));
2018  /* *INDENT-ON* */
2019 
2020 
2021  for (i = 0; i < vec_len (do_fibs->fibs); i++)
2022  {
2023  fib = do_fibs->fibs[i];
2024  /* We may have bailed out due to control-plane activity */
2025  while ((fib - im6->fibs) < start_at_fib_index)
2026  continue;
2027 
2028  if (mp == 0)
2029  {
2030  items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2032  (sizeof (*mp) +
2033  items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2034  mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2035  mp->count = 0;
2036  mp->vrf_id = ntohl (fib->ft_table_id);
2037  ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2038  }
2039 
2040  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2041 
2042  vec_reset_length (do_fibs->ip6routes);
2043  vec_reset_length (do_fibs->results);
2044 
2045  a->fib_index = fib - im6->fibs;
2046  a->routep = &do_fibs->ip6routes;
2047  a->sm = sm;
2048 
2049  if (clib_setjmp (&sm->jmp_buf, 0) == 0)
2050  {
2051  start_at_fib_index = fib - im6->fibs;
2053  }
2054  else
2055  {
2056  dsunlock (sm);
2057  ip46_fib_stats_delay (sm, 0 /* sec */ ,
2059  mp->count = 0;
2060  ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2061  goto again;
2062  }
2063 
2064  vec_foreach (r, do_fibs->ip6routes)
2065  {
2066  vlib_counter_t c;
2067 
2069  r->index, &c);
2070  /*
2071  * If it has actually
2072  * seen at least one packet, send it.
2073  */
2074  if (c.packets > 0)
2075  {
2076  /* already in net byte order */
2077  ctrp->address[0] = r->address.as_u64[0];
2078  ctrp->address[1] = r->address.as_u64[1];
2079  ctrp->address_length = (u8) r->address_length;
2080  ctrp->packets = clib_host_to_net_u64 (c.packets);
2081  ctrp->bytes = clib_host_to_net_u64 (c.bytes);
2082  mp->count++;
2083  ctrp++;
2084 
2085  if (mp->count == items_this_message)
2086  {
2087  mp->count = htonl (items_this_message);
2088  /*
2089  * If the main thread's input queue is stuffed,
2090  * drop the data structure lock (which the main thread
2091  * may want), and take a pause.
2092  */
2093  svm_queue_lock (q);
2094  if (svm_queue_is_full (q))
2095  {
2096  dsunlock (sm);
2097  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2098  svm_queue_unlock (q);
2099  mp = 0;
2100  ip46_fib_stats_delay (sm, 0 /* sec */ ,
2102  goto again;
2103  }
2104  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2105  svm_queue_unlock (q);
2106 
2107  items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2109  (sizeof (*mp) +
2110  items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2111  mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2112  mp->count = 0;
2113  mp->vrf_id = ntohl (fib->ft_table_id);
2114  ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2115  }
2116  }
2117 
2119  {
2120  start_at_fib_index = fib - im6->fibs;
2121  dsunlock (sm);
2122  ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
2123  mp->count = 0;
2124  ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2125  goto again;
2126  }
2127  } /* vec_foreach (routes) */
2128 
2129  dsunlock (sm);
2130 
2131  /* Flush any data from this fib */
2132  if (mp->count)
2133  {
2134  mp->count = htonl (mp->count);
2135  vl_msg_api_send_shmem (q, (u8 *) & mp);
2136  mp = 0;
2137  }
2138  }
2139 
2140  /* If e.g. the last FIB had no reportable routes, free the buffer */
2141  if (mp)
2142  vl_msg_api_free (mp);
2143 }
2144 
2146 {
2149 
2150 static walk_rc_t
2152 {
2153  udp_encap_stats_walk_t *ctx = arg;
2155  udp_encap_t *ue;
2156 
2157  ue = udp_encap_get (uei);
2158  vec_add2 (ctx->stats, stat, 1);
2159 
2160  stat->id = ue->ue_id;
2161  udp_encap_get_stats (ue->ue_id, &stat->packets, &stat->bytes);
2162 
2163  return (WALK_CONTINUE);
2164 }
2165 
2166 static void
2168 {
2171  stats_main_t *sm;
2172  api_main_t *am;
2173  svm_queue_t *q;
2174 
2175  mp = NULL;
2176  sm = &stats_main;
2177  am = sm->api_main;
2178  shmem_hdr = am->shmem_hdr;
2179  q = shmem_hdr->vl_input_queue;
2180 
2181  /*
2182  * If the walk context has counters, which may be left over from the last
2183  * suspend, then we continue from there.
2184  */
2185  while (0 != vec_len (ctx->stats))
2186  {
2187  u32 n_items = MIN (vec_len (ctx->stats),
2189  u8 pause = 0;
2190 
2191  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2192 
2193  mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
2194  (n_items *
2195  sizeof
2197  mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
2198  mp->count = ntohl (n_items);
2199 
2200  /*
2201  * copy the counters from the back of the context, then we can easily
2202  * 'erase' them by resetting the vector length.
2203  * The order we push the stats to the caller is not important.
2204  */
2205  clib_memcpy (mp->c,
2206  &ctx->stats[vec_len (ctx->stats) - n_items],
2207  n_items * sizeof (*ctx->stats));
2208 
2209  _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
2210 
2211  /*
2212  * send to the shm q
2213  */
2214  send_and_pause (sm, q, (u8 *) & mp);
2215  }
2216 }
2217 
2218 static void
2220 {
2222 
2224  .stats = NULL,
2225  };
2226 
2227  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2229  dsunlock (sm);
2230 
2231  udp_encap_ship (&ctx);
2232 }
2233 
2235 {
2238 
2239 static walk_rc_t
2241 {
2245  bier_table_id_t btid;
2246 
2247  vec_add2 (ctx->stats, stat, 1);
2248 
2249  bier_fmask_encode (bfmi, &btid, &rpath);
2250 
2251  stat->tbl_id.bt_set = btid.bti_set;
2252  stat->tbl_id.bt_sub_domain = btid.bti_sub_domain;
2253  stat->tbl_id.bt_hdr_len_id = btid.bti_hdr_len;
2254  fib_api_path_encode (&rpath, &stat->path);
2255  bier_fmask_get_stats (bfmi, &stat->packets, &stat->bytes);
2256 
2257  return (WALK_CONTINUE);
2258 }
2259 
2260 static void
2262 {
2265  stats_main_t *sm;
2266  api_main_t *am;
2267  svm_queue_t *q;
2268 
2269  mp = NULL;
2270  sm = &stats_main;
2271  am = sm->api_main;
2272  shmem_hdr = am->shmem_hdr;
2273  q = shmem_hdr->vl_input_queue;
2274 
2275  /*
2276  * If the walk context has counters, which may be left over from the last
2277  * suspend, then we continue from there.
2278  */
2279  while (0 != vec_len (ctx->stats))
2280  {
2281       u32 n_items = MIN (vec_len (ctx->stats),
2282 			 BIER_NEIGHBOR_COUNTER_BATCH_SIZE);
2283       u8 pause = 0;
2284 
2285  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2286 
2287  mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
2288  (n_items *
2289 					   sizeof
2290 					   (vl_api_bier_neighbor_counter_t)));
2291       mp->_vl_msg_id = ntohs (VL_API_VNET_BIER_NEIGHBOR_COUNTERS);
2292  mp->count = ntohl (n_items);
2293 
2294  /*
2295  * copy the counters from the back of the context, then we can easily
2296  * 'erase' them by resetting the vector length.
2297  * The order we push the stats to the caller is not important.
2298  */
2299  clib_memcpy (mp->c,
2300  &ctx->stats[vec_len (ctx->stats) - n_items],
2301  n_items * sizeof (*ctx->stats));
2302 
2303  _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
2304 
2305  /*
2306  * send to the shm q
2307  */
2308  send_and_pause (sm, q, (u8 *) & mp);
2309  }
2310 }
2311 
2312 static void
2313 do_bier_neighbor_counters (stats_main_t * sm)
2314 {
2315   vl_api_bier_neighbor_counter_t *stat;
2316 
2317   bier_neighbor_stats_walk_t ctx = {
2318     .stats = NULL,
2319   };
2320 
2321   dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2322   bier_fmask_db_walk (bier_neighbor_stats_walk_cb, &ctx);
2323   dsunlock (sm);
2324 
2325  bier_neighbor_ship (&ctx);
2326 }
2327 
2328 int
2329 stats_set_poller_delay (u32 poller_delay_sec)
2330 {
2331  stats_main_t *sm = &stats_main;
2332  if (!poller_delay_sec)
2333  {
2334  return VNET_API_ERROR_INVALID_ARGUMENT;
2335  }
2336  else
2337  {
2338  sm->stats_poll_interval_in_seconds = poller_delay_sec;
2339  return 0;
2340  }
2341 }
2342 
2343 /*
2344  * Accept connection on the socket and exchange the fd for the shared
2345  * memory segment.
2346  */
2347 static clib_error_t *
2348 stats_socket_accept_ready (clib_file_t * uf)
2349 {
2350  stats_main_t *sm = &stats_main;
2351  ssvm_private_t *ssvmp = &sm->stat_segment;
2352  clib_error_t *err;
2353  clib_socket_t client = { 0 };
2354 
2355  err = clib_socket_accept (sm->socket, &client);
2356  if (err)
2357  {
2358  clib_error_report (err);
2359  return err;
2360  }
2361 
2362  /* Send the fd across and close */
2363  err = clib_socket_sendmsg (&client, 0, 0, &ssvmp->fd, 1);
2364  if (err)
2365  clib_error_report (err);
2366  clib_socket_close (&client);
2367 
2368  return 0;
2369 }
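
The accept handler above hands the stats shared-memory segment to a connecting client by passing the segment's file descriptor over the Unix socket. The following is a rough, illustrative sketch of the peer side of that exchange; it is not part of stats.c, it assumes the listener is the SOCK_SEQPACKET socket set up below and that exactly one descriptor is passed per accept, and the helper name and error handling are invented for the example:

/*
 * Hypothetical client-side counterpart to stats_socket_accept_ready():
 * connect to the stats listener and receive the memfd via SCM_RIGHTS.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/un.h>

static int
recv_stat_segment_fd (const char *sock_path)
{
  struct sockaddr_un sun = {.sun_family = AF_UNIX };
  char data;
  struct iovec iov = {.iov_base = &data,.iov_len = sizeof (data) };
  union
  {
    struct cmsghdr align;
    char buf[CMSG_SPACE (sizeof (int))];
  } u;
  struct msghdr mh = { 0 };
  struct cmsghdr *cmsg;
  int s, fd = -1;

  strncpy (sun.sun_path, sock_path, sizeof (sun.sun_path) - 1);
  if ((s = socket (AF_UNIX, SOCK_SEQPACKET, 0)) < 0)
    return -1;
  if (connect (s, (struct sockaddr *) &sun, sizeof (sun)) < 0)
    {
      close (s);
      return -1;
    }

  mh.msg_iov = &iov;
  mh.msg_iovlen = 1;
  mh.msg_control = u.buf;
  mh.msg_controllen = sizeof (u.buf);

  /* The descriptor arrives as SCM_RIGHTS ancillary data; payload is ignored */
  if (recvmsg (s, &mh, 0) >= 0
      && (cmsg = CMSG_FIRSTHDR (&mh)) != NULL
      && cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS)
    memcpy (&fd, CMSG_DATA (cmsg), sizeof (fd));

  close (s);
  return fd;			/* caller maps / ssvm-attaches this fd */
}

A real consumer would then attach the returned descriptor to read the stat segment; that attach step lives outside this file.
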
2370 
2371 static void
2372 stats_segment_socket_init (void)
2373 {
2374  stats_main_t *sm = &stats_main;
2375  clib_error_t *error;
2376  clib_socket_t *s = clib_mem_alloc (sizeof (clib_socket_t));
2377 
2378   s->config = (char *) sm->socket_name;
2379   s->flags = CLIB_SOCKET_F_IS_SERVER | CLIB_SOCKET_F_SEQPACKET |
2380     CLIB_SOCKET_F_ALLOW_GROUP_WRITE | CLIB_SOCKET_F_PASSCRED;
2381   if ((error = clib_socket_init (s)))
2382  {
2383  clib_error_report (error);
2384  return;
2385  }
2386 
2387  clib_file_t template = { 0 };
2388  clib_file_main_t *fm = &file_main;
2389  template.read_function = stats_socket_accept_ready;
2390  template.file_descriptor = s->fd;
2391  template.description =
2392  format (0, "stats segment listener %s", STAT_SEGMENT_SOCKET_FILE);
2393  clib_file_add (fm, &template);
2394 
2395  sm->socket = s;
2396 }
2397 
2398 static clib_error_t *
2399 stats_config (vlib_main_t * vm, unformat_input_t * input)
2400 {
2401  stats_main_t *sm = &stats_main;
2402  u32 sec;
2403 
2404  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
2405  {
2406  if (unformat (input, "socket-name %s", &sm->socket_name))
2407  ;
2408  else if (unformat (input, "default"))
2409  sm->socket_name = format (0, "%s", STAT_SEGMENT_SOCKET_FILE);
2410  else if (unformat (input, "interval %u", &sec))
2411  {
2412  int rv = stats_set_poller_delay (sec);
2413  if (rv)
2414  {
2415  return clib_error_return (0,
2416  "`stats_set_poller_delay' API call failed, rv=%d:%U",
2417  (int) rv, format_vnet_api_errno, rv);
2418  }
2419  }
2420  else
2421  {
2422  return clib_error_return (0, "unknown input '%U'",
2423  format_unformat_error, input);
2424  }
2425  }
2426 
2427   if (sm->socket_name)
2428     stats_segment_socket_init ();
2429 
2430  return 0;
2431 }
2432 
2433 /* stats { ... } configuration. */
2434 /*?
2435  *
2436  * @cfgcmd{interval, &lt;seconds&gt;}
2437  * Configure stats poller delay to be @c seconds.
2438  *
2439 ?*/
2440 VLIB_CONFIG_FUNCTION (stats_config, "stats");
2441 
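For context, a hedged example of the startup-configuration stanza that stats_config () above parses. The socket path is illustrative only; `default` may be given instead of `socket-name` to select STAT_SEGMENT_SOCKET_FILE, and `interval` must be non-zero or stats_set_poller_delay () rejects it with VNET_API_ERROR_INVALID_ARGUMENT:

  stats {
    socket-name /run/vpp/stats.sock
    interval 10
  }
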
2442 static void
2443   vl_api_stats_get_poller_delay_t_handler
2444   (vl_api_stats_get_poller_delay_t * mp)
2445 {
2446   stats_main_t *sm = &stats_main;
2447   vl_api_registration_t *reg;
2448   reg = vl_api_client_index_to_registration (mp->client_index);
2449   if (!reg)
2450     return;
2451   vl_api_stats_get_poller_delay_reply_t *rmp;
2452 
2453  rmp = vl_msg_api_alloc (sizeof (*rmp));
2454  rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
2455  rmp->context = mp->context;
2456  rmp->retval = 0;
2457  rmp->delay = clib_host_to_net_u32 (sm->stats_poll_interval_in_seconds);
2458 
2459  vl_api_send_msg (reg, (u8 *) rmp);
2460 
2461 }
2462 
2463 static void
2464 stats_thread_fn (void *arg)
2465 {
2466   stats_main_t *sm = &stats_main;
2467   vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
2468   vlib_thread_main_t *tm = vlib_get_thread_main ();
2469 
2470  /* stats thread wants no signals. */
2471  {
2472  sigset_t s;
2473  sigfillset (&s);
2474  pthread_sigmask (SIG_SETMASK, &s, 0);
2475  }
2476 
2477  if (vec_len (tm->thread_prefix))
2478  vlib_set_thread_name ((char *)
2479  format (0, "%v_stats%c", tm->thread_prefix, '\0'));
2480 
2481   clib_mem_set_heap (w->thread_mheap);
2482 
2483  while (1)
2484     {
2485       ip46_fib_stats_delay (sm, sm->stats_poll_interval_in_seconds,
2486 			    0 /* nsec */ );
2487 
2488       /* Always update stats segment data */
2489       do_stat_segment_updates (sm);
2490 
2491  if (!(sm->enable_poller))
2492  continue;
2493 
2494  if (pool_elts
2495 	  (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
2496 	do_combined_per_interface_counters (sm);
2497 
2498  if (pool_elts
2499 	  (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
2500 	do_simple_per_interface_counters (sm);
2501 
2502  if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
2503  do_ip4_fib_counters (sm);
2504 
2505  if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
2506  do_ip6_fib_counters (sm);
2507 
2508  if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
2509  do_ip4_mfib_counters (sm);
2510 
2511  if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
2512  do_ip6_mfib_counters (sm);
2513 
2514  if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
2515  do_ip4_nbr_counters (sm);
2516 
2517  if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
2518  do_ip6_nbr_counters (sm);
2519 
2520       if (pool_elts (sm->stats_registrations[IDX_BIER_NEIGHBOR_COUNTERS]))
2521 	do_bier_neighbor_counters (sm);
2522     }
2523 }
2524 
2525 static void
2526   vl_api_vnet_interface_simple_counters_t_handler
2527   (vl_api_vnet_interface_simple_counters_t * mp)
2528 {
2529   vpe_client_registration_t *clients, client;
2530   stats_main_t *sm = &stats_main;
2531   vl_api_registration_t *reg, *reg_prev = NULL;
2532   vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
2533   u32 mp_size;
2534  int i;
2535 
2536  mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
2537 
2538  clients =
2539  get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2540  ~0 /*flag for all */ );
2541 
2542  for (i = 0; i < vec_len (clients); i++)
2543  {
2544       client = clients[i];
2545       reg = vl_api_client_index_to_registration (client.client_index);
2546       if (reg)
2547  {
2548  if (reg_prev && vl_api_can_send_msg (reg_prev))
2549  {
2550  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2551  clib_memcpy (mp_copy, mp, mp_size);
2552  vl_api_send_msg (reg_prev, (u8 *) mp);
2553  mp = mp_copy;
2554  }
2555  reg_prev = reg;
2556  }
2557  else
2558  {
2559  sm->enable_poller =
2560  clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
2561  client.client_index);
2562  continue;
2563  }
2564  }
2565  vec_free (clients);
2566 
2567 #if STATS_DEBUG > 0
2568  fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
2569 #endif
2570 
2571  if (reg_prev && vl_api_can_send_msg (reg_prev))
2572  {
2573  vl_api_send_msg (reg_prev, (u8 *) mp);
2574  }
2575  else
2576  {
2577  vl_msg_api_free (mp);
2578  }
2579 }
2580 
2581 static void
2582 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
2583 {
2584   stats_main_t *sm = &stats_main;
2585   vl_api_registration_t *reg, *reg_prev = NULL;
2586   vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
2587   u32 mp_size;
2588  vpe_client_registration_t *clients, client;
2589  int i;
2590 
2591  mp_size = sizeof (*mp_copy) +
2592  ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
2593 
2594  clients =
2595  get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
2596 
2597  for (i = 0; i < vec_len (clients); i++)
2598  {
2599       client = clients[i];
2600       reg = vl_api_client_index_to_registration (client.client_index);
2601       if (reg)
2602  {
2603  if (reg_prev && vl_api_can_send_msg (reg_prev))
2604  {
2605  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2606  clib_memcpy (mp_copy, mp, mp_size);
2607  vl_api_send_msg (reg_prev, (u8 *) mp);
2608  mp = mp_copy;
2609  }
2610  reg_prev = reg;
2611  }
2612  else
2613  {
2614  sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2615  ~0, client.client_index);
2616  continue;
2617  }
2618  }
2619  vec_free (clients);
2620 
2621  if (reg_prev && vl_api_can_send_msg (reg_prev))
2622  {
2623  vl_api_send_msg (reg_prev, (u8 *) mp);
2624  }
2625  else
2626  {
2627  vl_msg_api_free (mp);
2628  }
2629 }
2630 
2631 static void
2632 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
2633 {
2634   stats_main_t *sm = &stats_main;
2635   vl_api_registration_t *reg, *reg_prev = NULL;
2636   vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
2637   u32 mp_size;
2638  vpe_client_registration_t *clients, client;
2639  int i;
2640 
2641  mp_size = sizeof (*mp_copy) +
2642  ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
2643 
2644  clients =
2645  get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
2646 
2647  for (i = 0; i < vec_len (clients); i++)
2648  {
2649       client = clients[i];
2650       reg = vl_api_client_index_to_registration (client.client_index);
2651       if (reg)
2652  {
2653  if (reg_prev && vl_api_can_send_msg (reg_prev))
2654  {
2655  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2656  clib_memcpy (mp_copy, mp, mp_size);
2657  vl_api_send_msg (reg_prev, (u8 *) mp);
2658  mp = mp_copy;
2659  }
2660  reg_prev = reg;
2661  }
2662  else
2663  {
2664  sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
2665  ~0, client.client_index);
2666  continue;
2667  }
2668  }
2669  vec_free (clients);
2670 
2671  /* *INDENT-ON* */
2672  if (reg_prev && vl_api_can_send_msg (reg_prev))
2673  {
2674  vl_api_send_msg (reg_prev, (u8 *) mp);
2675  }
2676  else
2677  {
2678  vl_msg_api_free (mp);
2679  }
2680 }
2681 
2682 static void
2683 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
2684 {
2685   stats_main_t *sm = &stats_main;
2686   vl_api_registration_t *reg, *reg_prev = NULL;
2687   vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
2688   u32 mp_size;
2689  vpe_client_registration_t *clients, client;
2690  int i;
2691 
2692  mp_size = sizeof (*mp_copy) +
2693  ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
2694 
2695  clients =
2696  get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
2697 
2698  for (i = 0; i < vec_len (clients); i++)
2699  {
2700       client = clients[i];
2701       reg = vl_api_client_index_to_registration (client.client_index);
2702       if (reg)
2703  {
2704  if (reg_prev && vl_api_can_send_msg (reg_prev))
2705  {
2706  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2707  clib_memcpy (mp_copy, mp, mp_size);
2708  vl_api_send_msg (reg_prev, (u8 *) mp);
2709  mp = mp_copy;
2710  }
2711  reg_prev = reg;
2712  }
2713  else
2714  {
2715  sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2716  ~0, client.client_index);
2717  continue;
2718  }
2719  }
2720  vec_free (clients);
2721 
2722  /* *INDENT-ON* */
2723  if (reg_prev && vl_api_can_send_msg (reg_prev))
2724  {
2725  vl_api_send_msg (reg_prev, (u8 *) mp);
2726  }
2727  else
2728  {
2729  vl_msg_api_free (mp);
2730  }
2731 }
2732 
2733 static void
2734 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
2735 {
2736   stats_main_t *sm = &stats_main;
2737   vl_api_registration_t *reg, *reg_prev = NULL;
2738   vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
2739   u32 mp_size;
2740  vpe_client_registration_t *clients, client;
2741  int i;
2742 
2743  mp_size = sizeof (*mp_copy) +
2744  ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
2745 
2746  clients =
2747  get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
2748 
2749  for (i = 0; i < vec_len (clients); i++)
2750  {
2751       client = clients[i];
2752       reg = vl_api_client_index_to_registration (client.client_index);
2753       if (reg)
2754  {
2755  if (reg_prev && vl_api_can_send_msg (reg_prev))
2756  {
2757  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2758  clib_memcpy (mp_copy, mp, mp_size);
2759  vl_api_send_msg (reg_prev, (u8 *) mp);
2760  mp = mp_copy;
2761  }
2762  reg_prev = reg;
2763  }
2764  else
2765  {
2766  sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
2767  ~0, client.client_index);
2768  continue;
2769  }
2770  }
2771  vec_free (clients);
2772 
2773  /* *INDENT-ON* */
2774  if (reg_prev && vl_api_can_send_msg (reg_prev))
2775  {
2776  vl_api_send_msg (reg_prev, (u8 *) mp);
2777  }
2778  else
2779  {
2780  vl_msg_api_free (mp);
2781  }
2782 }
2783 
2784 static void
2785 vl_api_want_udp_encap_stats_t_handler (vl_api_want_udp_encap_stats_t * mp)
2786 {
2787   stats_main_t *sm = &stats_main;
2788   vpe_client_registration_t rp;
2789   vl_api_want_udp_encap_stats_reply_t *rmp;
2790  uword *p;
2791  i32 retval = 0;
2792  vl_api_registration_t *reg;
2793  u32 fib;
2794 
2795  fib = ~0; //Using same mechanism as _per_interface_
2796  rp.client_index = mp->client_index;
2797  rp.client_pid = mp->pid;
2798 
2799  handle_client_registration (&rp, IDX_UDP_ENCAP_COUNTERS, fib, mp->enable);
2800 
2801 reply:
2802   reg = vl_api_client_index_to_registration (mp->client_index);
2803 
2804  if (!reg)
2805  {
2806  sm->enable_poller = clear_client_for_stat (IDX_UDP_ENCAP_COUNTERS,
2807  fib, mp->client_index);
2808  return;
2809  }
2810 
2811  rmp = vl_msg_api_alloc (sizeof (*rmp));
2812  rmp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS_REPLY);
2813  rmp->context = mp->context;
2814  rmp->retval = retval;
2815 
2816  vl_api_send_msg (reg, (u8 *) rmp);
2817 }
2818 
2819 static void
2820 vl_api_want_bier_neighbor_stats_t_handler (vl_api_want_bier_neighbor_stats_t *
2821 					   mp)
2822 {
2823   stats_main_t *sm = &stats_main;
2824   vpe_client_registration_t rp;
2825   vl_api_want_bier_neighbor_stats_reply_t *rmp;
2826  uword *p;
2827  i32 retval = 0;
2828  vl_api_registration_t *reg;
2829  u32 fib;
2830 
2831  fib = ~0; //Using same mechanism as _per_interface_
2832  rp.client_index = mp->client_index;
2833  rp.client_pid = mp->pid;
2834 
2835  handle_client_registration (&rp, IDX_BIER_NEIGHBOR_COUNTERS, fib,
2836  mp->enable);
2837 
2838 reply:
2839   reg = vl_api_client_index_to_registration (mp->client_index);
2840 
2841  if (!reg)
2842  {
2843  sm->enable_poller = clear_client_for_stat (IDX_BIER_NEIGHBOR_COUNTERS,
2844  fib, mp->client_index);
2845  return;
2846  }
2847 
2848  rmp = vl_msg_api_alloc (sizeof (*rmp));
2849  rmp->_vl_msg_id = ntohs (VL_API_WANT_BIER_NEIGHBOR_STATS_REPLY);
2850  rmp->context = mp->context;
2851  rmp->retval = retval;
2852 
2853  vl_api_send_msg (reg, (u8 *) rmp);
2854 }
2855 
2856 static void
2857 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
2858 {
2859   stats_main_t *sm = &stats_main;
2860   vpe_client_registration_t rp;
2861   vl_api_want_stats_reply_t *rmp;
2862  uword *p;
2863  i32 retval = 0;
2864  u32 item;
2865  vl_api_registration_t *reg;
2866 
2867  item = ~0; //"ALL THE THINGS IN THE THINGS
2868  rp.client_index = mp->client_index;
2869  rp.client_pid = mp->pid;
2870 
2871  handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2872  item, mp->enable_disable);
2873 
2874  handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
2875  item, mp->enable_disable);
2876 
2877  handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
2878  item, mp->enable_disable);
2879 
2880  handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
2881  item, mp->enable_disable);
2882 
2883  handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
2884  item, mp->enable_disable);
2885 
2886  handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
2887  item, mp->enable_disable);
2888 
2889 reply:
2890   reg = vl_api_client_index_to_registration (mp->client_index);
2891   if (!reg)
2892  return;
2893 
2894  rmp = vl_msg_api_alloc (sizeof (*rmp));
2895  rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
2896  rmp->context = mp->context;
2897  rmp->retval = retval;
2898 
2899  vl_api_send_msg (reg, (u8 *) rmp);
2900 }
2901 
2902 static void
2903   vl_api_want_interface_simple_stats_t_handler
2904   (vl_api_want_interface_simple_stats_t * mp)
2905 {
2906   stats_main_t *sm = &stats_main;
2907   vpe_client_registration_t rp;
2908   vl_api_want_interface_simple_stats_reply_t *rmp;
2909  uword *p;
2910  i32 retval = 0;
2911  u32 swif;
2912  vl_api_registration_t *reg;
2913 
2914  swif = ~0; //Using same mechanism as _per_interface_
2915  rp.client_index = mp->client_index;
2916  rp.client_pid = mp->pid;
2917 
2918  handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2919  mp->enable_disable);
2920 
2921 reply:
2922   reg = vl_api_client_index_to_registration (mp->client_index);
2923 
2924  if (!reg)
2925  {
2926  sm->enable_poller =
2927  clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2928  mp->client_index);
2929  return;
2930  }
2931 
2932  rmp = vl_msg_api_alloc (sizeof (*rmp));
2933  rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
2934  rmp->context = mp->context;
2935  rmp->retval = retval;
2936 
2937  vl_api_send_msg (reg, (u8 *) rmp);
2938 }
2939 
2940 
2941 static void
2942 vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
2943 {
2944   stats_main_t *sm = &stats_main;
2945   vpe_client_registration_t rp;
2946   vl_api_want_ip4_fib_stats_reply_t *rmp;
2947  uword *p;
2948  i32 retval = 0;
2949  vl_api_registration_t *reg;
2950  u32 fib;
2951 
2952  fib = ~0; //Using same mechanism as _per_interface_
2953  rp.client_index = mp->client_index;
2954  rp.client_pid = mp->pid;
2955 
2956  handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
2957  mp->enable_disable);
2958 
2959 reply:
2960   reg = vl_api_client_index_to_registration (mp->client_index);
2961 
2962  if (!reg)
2963  {
2964  sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2965  fib, mp->client_index);
2966  return;
2967  }
2968 
2969  rmp = vl_msg_api_alloc (sizeof (*rmp));
2970  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
2971  rmp->context = mp->context;
2972  rmp->retval = retval;
2973 
2974  vl_api_send_msg (reg, (u8 *) rmp);
2975 }
2976 
2977 static void
2978 vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
2979 {
2980   stats_main_t *sm = &stats_main;
2981   vpe_client_registration_t rp;
2982   vl_api_want_ip4_mfib_stats_reply_t *rmp;
2983  uword *p;
2984  i32 retval = 0;
2985  vl_api_registration_t *reg;
2986  u32 mfib;
2987 
2988  mfib = ~0; //Using same mechanism as _per_interface_
2989  rp.client_index = mp->client_index;
2990  rp.client_pid = mp->pid;
2991 
2992  handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
2993  mp->enable_disable);
2994 
2995 reply:
2996   reg = vl_api_client_index_to_registration (mp->client_index);
2997   if (!reg)
2998  {
2999  sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
3000  mfib, mp->client_index);
3001  return;
3002  }
3003 
3004  rmp = vl_msg_api_alloc (sizeof (*rmp));
3005  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
3006  rmp->context = mp->context;
3007  rmp->retval = retval;
3008 
3009  vl_api_send_msg (reg, (u8 *) rmp);
3010 }
3011 
3012 static void
3013 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
3014 {
3015   stats_main_t *sm = &stats_main;
3016   vpe_client_registration_t rp;
3017   vl_api_want_ip4_fib_stats_reply_t *rmp;
3018  uword *p;
3019  i32 retval = 0;
3020  vl_api_registration_t *reg;
3021  u32 fib;
3022 
3023  fib = ~0; //Using same mechanism as _per_interface_
3024  rp.client_index = mp->client_index;
3025  rp.client_pid = mp->pid;
3026 
3027  handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
3028  mp->enable_disable);
3029 
3030 reply:
3031   reg = vl_api_client_index_to_registration (mp->client_index);
3032   if (!reg)
3033  {
3034  sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
3035  fib, mp->client_index);
3036  return;
3037  }
3038 
3039  rmp = vl_msg_api_alloc (sizeof (*rmp));
3040  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
3041  rmp->context = mp->context;
3042  rmp->retval = retval;
3043 
3044  vl_api_send_msg (reg, (u8 *) rmp);
3045 }
3046 
3047 static void
3048 vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
3049 {
3050   stats_main_t *sm = &stats_main;
3051   vpe_client_registration_t rp;
3052   vl_api_want_ip4_mfib_stats_reply_t *rmp;
3053  uword *p;
3054  i32 retval = 0;
3055  vl_api_registration_t *reg;
3056  u32 mfib;
3057 
3058  mfib = ~0; //Using same mechanism as _per_interface_
3059  rp.client_index = mp->client_index;
3060  rp.client_pid = mp->pid;
3061 
3062  handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
3063  mp->enable_disable);
3064 
3065 reply:
3066   reg = vl_api_client_index_to_registration (mp->client_index);
3067   if (!reg)
3068  {
3069  sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
3070  mfib, mp->client_index);
3071  return;
3072  }
3073 
3074  rmp = vl_msg_api_alloc (sizeof (*rmp));
3075  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
3076  rmp->context = mp->context;
3077  rmp->retval = retval;
3078 
3079  vl_api_send_msg (reg, (u8 *) rmp);
3080 }
3081 
3082 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
3083 static void
3084 vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
3085 {
3086 }
3087 
3088 static void
3089 vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
3090 {
3091 }
3092 
3093 static void
3094 vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
3095 {
3096   stats_main_t *sm = &stats_main;
3097   vnet_interface_main_t *im = sm->interface_main;
3098   vl_api_vnet_get_summary_stats_reply_t *rmp;
3099   vlib_combined_counter_main_t *cm;
3100   vlib_counter_t v;
3101  int i, which;
3102  u64 total_pkts[VLIB_N_RX_TX];
3103  u64 total_bytes[VLIB_N_RX_TX];
3104  vl_api_registration_t *reg;
3105 
3106   reg = vl_api_client_index_to_registration (mp->client_index);
3107   if (!reg)
3108  return;
3109 
3110  rmp = vl_msg_api_alloc (sizeof (*rmp));
3111  rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
3112  rmp->context = mp->context;
3113  rmp->retval = 0;
3114 
3115  memset (total_pkts, 0, sizeof (total_pkts));
3116  memset (total_bytes, 0, sizeof (total_bytes));
3117 
3118   vnet_interface_counter_lock (im);
3119 
3120   vec_foreach (cm, im->combined_sw_if_counters)
3121   {
3122  which = cm - im->combined_sw_if_counters;
3123 
3124  for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
3125  {
3126  vlib_get_combined_counter (cm, i, &v);
3127  total_pkts[which] += v.packets;
3128  total_bytes[which] += v.bytes;
3129  }
3130   }
3131   vnet_interface_counter_unlock (im);
3132 
3133  rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
3134  rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
3135  rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
3136  rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
3137  rmp->vector_rate =
3138  clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
3139 
3140  vl_api_send_msg (reg, (u8 *) rmp);
3141 }
3142 
3143 int
3144 stats_memclnt_delete_callback (u32 client_index)
3145 {
3146   vpe_client_stats_registration_t *rp;
3147   stats_main_t *sm = &stats_main;
3148  uword *p;
3149 
3150  // FIXME
3151  /* p = hash_get (sm->stats_registration_hash, client_index); */
3152  /* if (p) */
3153  /* { */
3154  /* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
3155  /* pool_put (sm->stats_registrations, rp); */
3156  /* hash_unset (sm->stats_registration_hash, client_index); */
3157  /* } */
3158 
3159  return 0;
3160 }
3161 
3162 #define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
3163 #define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
3164 #define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
3165 #define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
3166 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
3167 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
3168 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
3169 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
3170 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
3171 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
3172 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
3173 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
3174 
3175 static clib_error_t *
3176 stats_init (vlib_main_t * vm)
3177 {
3178  stats_main_t *sm = &stats_main;
3179  api_main_t *am = &api_main;
3180  void *vlib_worker_thread_bootstrap_fn (void *arg);
3181 
3182  sm->vlib_main = vm;
3183   sm->vnet_main = vnet_get_main ();
3184   sm->interface_main = &vnet_get_main ()->interface_main;
3185   sm->api_main = am;
3187   sm->data_structure_lock =
3188     clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
3189 			    CLIB_CACHE_LINE_BYTES);
3190   memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
3191 
3192 #define _(N,n) \
3193  vl_msg_api_set_handlers(VL_API_##N, #n, \
3194  vl_api_##n##_t_handler, \
3195  vl_noop_handler, \
3196  vl_api_##n##_t_endian, \
3197  vl_api_##n##_t_print, \
3198     sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
3199   foreach_stats_msg;
3200 #undef _
3201 
3202  /* tell the msg infra not to free these messages... */
3203  am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
3204  am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
3205  am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
3206  am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
3207  am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
3208  am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
3209 
3210  /*
3211  * Set up the (msg_name, crc, message-id) table
3212    */
3213   setup_message_id_table (am);
3214 
3215   vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
3216   vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
3217 #define stats_reg(n) \
3218  sm->stats_registrations[IDX_##n] = 0; \
3219  sm->stats_registration_hash[IDX_##n] = 0;
3220 #include <vpp/stats/stats.reg>
3221 #undef stats_reg
3222 
3223  return 0;
3224 }
3225 
3226 VLIB_INIT_FUNCTION (stats_init);
3227 
3228 /* *INDENT-OFF* */
3229 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
3230  .name = "stats",
3231  .function = stats_thread_fn,
3232  .fixed_count = 1,
3233  .count = 1,
3234  .no_data_structure_clone = 1,
3235  .use_pthreads = 1,
3236 };
3237 /* *INDENT-ON* */
3238 
3239 /*
3240  * fd.io coding-style-patch-verification: ON
3241  *
3242  * Local Variables:
3243  * eval: (c-set-style "gnu")
3244  * End:
3245  */