FD.io VPP  v21.01.1
Vector Packet Processing
load_balance.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include <vnet/dpo/load_balance.h>
18 #include <vnet/dpo/drop_dpo.h>
19 #include <vppinfra/math.h> /* for fabs */
20 #include <vnet/adj/adj.h>
21 #include <vnet/adj/adj_internal.h>
22 #include <vnet/fib/fib_urpf_list.h>
23 #include <vnet/bier/bier_fwd.h>
24 #include <vnet/fib/mpls_fib.h>
25 #include <vnet/ip/ip4_inlines.h>
26 #include <vnet/ip/ip6_inlines.h>
27 
28 /*
29  * distribution error tolerance for load-balancing
30  */
32 
34 
35 /**
36  * the logger
37  */
39 
/**
 * Debug-log macro for a load-balance object.
 *
 * Logs through the load-balance logger class, prefixing the message with
 * the formatted load-balance (looked up by its pool index) so every log
 * entry identifies the object it refers to.
 *
 * Wrapped in do { } while (0) so the expansion is a single statement and
 * is safe in un-braced if/else bodies; the previous bare-brace form would
 * mis-parse `if (x) LB_DBG(...); else ...`.
 */
#define LB_DBG(_lb, _fmt, _args...)                                  \
do {                                                                 \
    vlib_log_debug(load_balance_logger,                              \
                   "lb:[%U]:" _fmt,                                  \
                   format_load_balance, load_balance_get_index(_lb), \
                   LOAD_BALANCE_FORMAT_NONE,                         \
                   ##_args);                                         \
} while (0)
48 
49 /**
50  * Pool of all DPOs. It's not static so the DP can have fast access
51  */
53 
/**
 * The one instance of load-balance main.
 *
 * Holds the two combined (packet + byte) counter sets, indexed by
 * load-balance pool index and exported to the stats segment:
 *  - lbm_to_counters:  traffic forwarded *to* the route ("/net/route/to")
 *  - lbm_via_counters: traffic forwarded *via* the route ("/net/route/via")
 */
load_balance_main_t load_balance_main = {
    .lbm_to_counters = {
        .name = "route-to",
        .stat_segment_name = "/net/route/to",
    },
    .lbm_via_counters = {
        .name = "route-via",
        .stat_segment_name = "/net/route/via",
    }
};
67 
68 f64
70 {
72 }
73 
74 static inline index_t
76 {
77  return (lb - load_balance_pool);
78 }
79 
80 static inline dpo_id_t*
82 {
83  if (LB_HAS_INLINE_BUCKETS(lb))
84  {
85  return (lb->lb_buckets_inline);
86  }
87  else
88  {
89  return (lb->lb_buckets);
90  }
91 }
92 
93 static load_balance_t *
95 {
96  load_balance_t *lb;
97  u8 need_barrier_sync = 0;
99  ASSERT (vm->thread_index == 0);
100 
101  pool_get_aligned_will_expand (load_balance_pool, need_barrier_sync,
103  if (need_barrier_sync)
105 
106  pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
107  clib_memset(lb, 0, sizeof(*lb));
108 
109  lb->lb_map = INDEX_INVALID;
110  lb->lb_urpf = INDEX_INVALID;
111 
112  if (need_barrier_sync == 0)
113  {
114  need_barrier_sync += vlib_validate_combined_counter_will_expand
115  (&(load_balance_main.lbm_to_counters),
117  need_barrier_sync += vlib_validate_combined_counter_will_expand
118  (&(load_balance_main.lbm_via_counters),
120  if (need_barrier_sync)
122  }
123 
124  vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
128  vlib_zero_combined_counter(&(load_balance_main.lbm_to_counters),
130  vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
132 
133  if (need_barrier_sync)
135 
136  return (lb);
137 }
138 
139 static u8*
142  u32 indent,
143  u8 *s)
144 {
145  vlib_counter_t to, via;
146  load_balance_t *lb;
147  dpo_id_t *buckets;
148  u32 i;
149 
150  lb = load_balance_get(lbi);
151  vlib_get_combined_counter(&(load_balance_main.lbm_to_counters), lbi, &to);
152  vlib_get_combined_counter(&(load_balance_main.lbm_via_counters), lbi, &via);
153  buckets = load_balance_get_buckets(lb);
154 
155  s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE);
156  s = format(s, "[proto:%U ", format_dpo_proto, lb->lb_proto);
157  s = format(s, "index:%d buckets:%d ", lbi, lb->lb_n_buckets);
158  s = format(s, "uRPF:%d ", lb->lb_urpf);
159  if (lb->lb_flags)
160  {
161  load_balance_attr_t attr;
162 
163  s = format(s, "flags:[");
164 
166  {
167  if (lb->lb_flags & (1 << attr))
168  {
169  s = format (s, "%s", load_balance_attr_names[attr]);
170  }
171  }
172  s = format(s, "] ");
173  }
174  s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes);
175  if (0 != via.packets)
176  {
177  s = format(s, " via:[%Ld:%Ld]",
178  via.packets, via.bytes);
179  }
180  s = format(s, "]");
181 
182  if (INDEX_INVALID != lb->lb_map)
183  {
184  s = format(s, "\n%U%U",
185  format_white_space, indent+4,
186  format_load_balance_map, lb->lb_map, indent+4);
187  }
188  for (i = 0; i < lb->lb_n_buckets; i++)
189  {
190  s = format(s, "\n%U[%d] %U",
191  format_white_space, indent+2,
192  i,
194  &buckets[i], indent+6);
195  }
196  return (s);
197 }
198 
199 u8*
200 format_load_balance (u8 * s, va_list * args)
201 {
202  index_t lbi = va_arg(*args, index_t);
204 
205  return (load_balance_format(lbi, flags, 0, s));
206 }
207 
208 static u8*
209 format_load_balance_dpo (u8 * s, va_list * args)
210 {
211  index_t lbi = va_arg(*args, index_t);
212  u32 indent = va_arg(*args, u32);
213 
214  return (load_balance_format(lbi, LOAD_BALANCE_FORMAT_DETAIL, indent, s));
215 }
216 
219 {
220  switch (lb_proto)
221  {
222  case DPO_PROTO_IP4:
223  case DPO_PROTO_IP6:
224  return (IP_FLOW_HASH_DEFAULT);
225 
226  case DPO_PROTO_MPLS:
227  return (MPLS_FLOW_HASH_DEFAULT);
228 
229  case DPO_PROTO_ETHERNET:
230  case DPO_PROTO_BIER:
231  case DPO_PROTO_NSH:
232  break;
233  }
234 
235  return (0);
236 }
237 
238 static load_balance_t *
240  dpo_proto_t lb_proto,
241  flow_hash_config_t fhc)
242 {
243  load_balance_t *lb;
244 
245  lb = load_balance_alloc_i();
246  lb->lb_hash_config = fhc;
247  lb->lb_n_buckets = num_buckets;
248  lb->lb_n_buckets_minus_1 = num_buckets-1;
249  lb->lb_proto = lb_proto;
250 
251  if (!LB_HAS_INLINE_BUCKETS(lb))
252  {
254  lb->lb_n_buckets - 1,
256  }
257 
258  LB_DBG(lb, "create");
259 
260  return (lb);
261 }
262 
263 index_t
265  dpo_proto_t lb_proto,
266  flow_hash_config_t fhc)
267 {
268  return (load_balance_get_index(load_balance_create_i(n_buckets, lb_proto, fhc)));
269 }
270 
271 static inline void
273  u32 bucket,
274  dpo_id_t *buckets,
275  const dpo_id_t *next)
276 {
277  dpo_stack(DPO_LOAD_BALANCE, lb->lb_proto, &buckets[bucket], next);
278 }
279 
280 void
282  u32 bucket,
283  const dpo_id_t *next)
284 {
285  load_balance_t *lb;
286  dpo_id_t *buckets;
287 
288  lb = load_balance_get(lbi);
289  buckets = load_balance_get_buckets(lb);
290 
291  ASSERT(bucket < lb->lb_n_buckets);
292 
293  load_balance_set_bucket_i(lb, bucket, buckets, next);
294 }
295 
296 int
298 {
299  load_balance_t *lb;
300 
301  if (DPO_LOAD_BALANCE != dpo->dpoi_type)
302  return (0);
303 
304  lb = load_balance_get(dpo->dpoi_index);
305 
306  if (1 == lb->lb_n_buckets)
307  {
308  return (dpo_is_drop(load_balance_get_bucket_i(lb, 0)));
309  }
310  return (0);
311 }
312 
313 u16
315 {
316  load_balance_t *lb;
317 
318  lb = load_balance_get(lbi);
319 
320  return (lb->lb_n_buckets);
321 }
322 
323 void
326 {
327  load_balance_t *lb;
328 
329  lb = load_balance_get(lbi);
331 }
332 
333 
334 void
336  index_t urpf)
337 {
338  load_balance_t *lb;
339  index_t old;
340 
341  lb = load_balance_get(lbi);
342 
343  /*
344  * packets in flight we see this change. but it's atomic, so :P
345  */
346  old = lb->lb_urpf;
347  lb->lb_urpf = urpf;
348 
350  fib_urpf_list_lock(urpf);
351 }
352 
353 index_t
355 {
356  load_balance_t *lb;
357 
358  lb = load_balance_get(lbi);
359 
360  return (lb->lb_urpf);
361 }
362 
363 const dpo_id_t *
365  u32 bucket)
366 {
367  load_balance_t *lb;
368 
369  lb = load_balance_get(lbi);
370 
371  return (load_balance_get_bucket_i(lb, bucket));
372 }
373 
374 static int
376  const load_balance_path_t * n2)
377 {
378  return ((int) n1->path_weight - (int) n2->path_weight);
379 }
380 
381 /* Given next hop vector is over-written with normalized one with sorted weights and
382  with weights corresponding to the number of adjacencies for each next hop.
383  Returns number of adjacencies in block. */
384 u32
386  load_balance_path_t ** normalized_next_hops,
387  u32 *sum_weight_in,
389 {
391  uword n_nhs, n_adj, n_adj_left, i, sum_weight;
392  f64 norm, error;
393 
394  n_nhs = vec_len (raw_next_hops);
395  ASSERT (n_nhs > 0);
396  if (n_nhs == 0)
397  return 0;
398 
399  /* Allocate enough space for 2 copies; we'll use second copy to save original weights. */
400  nhs = *normalized_next_hops;
401  vec_validate (nhs, 2*n_nhs - 1);
402 
403  /* Fast path: 1 next hop in block. */
404  n_adj = n_nhs;
405  if (n_nhs == 1)
406  {
407  nhs[0] = raw_next_hops[0];
408  nhs[0].path_weight = 1;
409  _vec_len (nhs) = 1;
410  sum_weight = 1;
411  goto done;
412  }
413 
414  else if (n_nhs == 2)
415  {
416  int cmp = next_hop_sort_by_weight (&raw_next_hops[0], &raw_next_hops[1]) < 0;
417 
418  /* Fast sort. */
419  nhs[0] = raw_next_hops[cmp];
420  nhs[1] = raw_next_hops[cmp ^ 1];
421 
422  /* Fast path: equal cost multipath with 2 next hops. */
423  if (nhs[0].path_weight == nhs[1].path_weight)
424  {
425  nhs[0].path_weight = nhs[1].path_weight = 1;
426  _vec_len (nhs) = 2;
427  sum_weight = 2;
428  goto done;
429  }
430  }
431  else
432  {
433  clib_memcpy_fast (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
434  qsort (nhs, n_nhs, sizeof (nhs[0]), (void *) next_hop_sort_by_weight);
435  }
436 
437  /* Find total weight to normalize weights. */
438  sum_weight = 0;
439  for (i = 0; i < n_nhs; i++)
440  sum_weight += nhs[i].path_weight;
441 
442  /* In the unlikely case that all weights are given as 0, set them all to 1. */
443  if (sum_weight == 0)
444  {
445  for (i = 0; i < n_nhs; i++)
446  nhs[i].path_weight = 1;
447  sum_weight = n_nhs;
448  }
449 
450  /* Save copies of all next hop weights to avoid being overwritten in loop below. */
451  for (i = 0; i < n_nhs; i++)
452  nhs[n_nhs + i].path_weight = nhs[i].path_weight;
453 
454  /* Try larger and larger power of 2 sized adjacency blocks until we
455  find one where traffic flows to within 1% of specified weights. */
456  for (n_adj = max_pow2 (n_nhs); ; n_adj *= 2)
457  {
458  error = 0;
459 
460  norm = n_adj / ((f64) sum_weight);
461  n_adj_left = n_adj;
462  for (i = 0; i < n_nhs; i++)
463  {
464  f64 nf = nhs[n_nhs + i].path_weight * norm; /* use saved weights */
465  word n = flt_round_nearest (nf);
466 
467  n = n > n_adj_left ? n_adj_left : n;
468  n_adj_left -= n;
469  error += fabs (nf - n);
470  nhs[i].path_weight = n;
471 
472  if (0 == nhs[i].path_weight)
473  {
474  /*
475  * when the weight skew is high (norm is small) and n == nf.
476  * without this correction the path with a low weight would have
477  * no representation in the load-balanace - don't want that.
478  * If the weight skew is high so the load-balance has many buckets
479  * to allow it. pays ya money takes ya choice.
480  */
481  error = n_adj;
482  break;
483  }
484  }
485 
486  nhs[0].path_weight += n_adj_left;
487 
488  /* Less than 5% average error per adjacency with this size adjacency block? */
489  if (error <= multipath_next_hop_error_tolerance*n_adj)
490  {
491  /* Truncate any next hops with zero weight. */
492  _vec_len (nhs) = i;
493  break;
494  }
495  }
496 
497 done:
498  /* Save vector for next call. */
499  *normalized_next_hops = nhs;
500  *sum_weight_in = sum_weight;
501  return n_adj;
502 }
503 
504 static load_balance_path_t *
506  dpo_proto_t drop_proto)
507 {
508  if (0 == vec_len(nhs))
509  {
510  load_balance_path_t *new_nhs = NULL, *nh;
511 
512  /*
513  * we need something for the load-balance. so use the drop
514  */
515  vec_add2(new_nhs, nh, 1);
516 
517  nh->path_weight = 1;
518  dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));
519 
520  return (new_nhs);
521  }
522 
523  return (NULL);
524 }
525 
526 /*
527  * Fill in adjacencies in block based on corresponding
528  * next hop adjacencies.
529  */
530 static void
533  dpo_id_t *buckets,
534  u32 n_buckets)
535 {
537  u16 ii, bucket;
538 
539  bucket = 0;
540 
541  /*
542  * the next-hops have normalised weights. that means their sum is the number
543  * of buckets we need to fill.
544  */
545  vec_foreach (nh, nhs)
546  {
547  for (ii = 0; ii < nh->path_weight; ii++)
548  {
549  ASSERT(bucket < n_buckets);
550  load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
551  }
552  }
553 }
554 static void
557  dpo_id_t *buckets,
558  u32 n_buckets)
559 {
560  load_balance_path_t *nh, *fwding_paths;
561  u16 ii, bucket, fpath;
562 
563  fpath = bucket = 0;
564  fwding_paths = NULL;
565 
566  vec_foreach (nh, nhs)
567  {
568  if (!dpo_is_drop(&nh->path_dpo))
569  {
570  vec_add1(fwding_paths, *nh);
571  }
572  }
573  if (vec_len(fwding_paths) == 0)
574  fwding_paths = vec_dup(nhs);
575 
576  /*
577  * the next-hops have normalised weights. that means their sum is the number
578  * of buckets we need to fill.
579  */
580  vec_foreach (nh, nhs)
581  {
582  for (ii = 0; ii < nh->path_weight; ii++)
583  {
584  ASSERT(bucket < n_buckets);
585  if (!dpo_is_drop(&nh->path_dpo))
586  {
587  load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
588  }
589  else
590  {
591  /* fill the bucks from the next up path */
592  load_balance_set_bucket_i(lb, bucket++, buckets, &fwding_paths[fpath].path_dpo);
593  fpath = (fpath + 1) % vec_len(fwding_paths);
594  }
595  }
596  }
597 
598  vec_free(fwding_paths);
599 }
600 
601 static void
604  dpo_id_t *buckets,
605  u32 n_buckets,
607 {
608  if (flags & LOAD_BALANCE_FLAG_STICKY)
609  {
610  load_balance_fill_buckets_sticky(lb, nhs, buckets, n_buckets);
611  }
612  else
613  {
614  load_balance_fill_buckets_norm(lb, nhs, buckets, n_buckets);
615  }
616 }
617 
618 static inline void
620  u32 n_buckets)
621 {
622  lb->lb_n_buckets = n_buckets;
623  lb->lb_n_buckets_minus_1 = n_buckets-1;
624 }
625 
626 void
628  const load_balance_path_t * raw_nhs,
630 {
631  load_balance_path_t *nh, *nhs, *fixed_nhs;
632  u32 sum_of_weights, n_buckets, ii;
633  index_t lbmi, old_lbmi;
634  load_balance_t *lb;
635  dpo_id_t *tmp_dpo;
636 
637  nhs = NULL;
638 
640  lb = load_balance_get(dpo->dpoi_index);
641  lb->lb_flags = flags;
642  fixed_nhs = load_balance_multipath_next_hop_fixup(raw_nhs, lb->lb_proto);
643  n_buckets =
644  ip_multipath_normalize_next_hops((NULL == fixed_nhs ?
645  raw_nhs :
646  fixed_nhs),
647  &nhs,
648  &sum_of_weights,
650 
651  ASSERT (n_buckets >= vec_len (raw_nhs));
652 
653  /*
654  * Save the old load-balance map used, and get a new one if required.
655  */
656  old_lbmi = lb->lb_map;
657  if (flags & LOAD_BALANCE_FLAG_USES_MAP)
658  {
659  lbmi = load_balance_map_add_or_lock(n_buckets, sum_of_weights, nhs);
660  }
661  else
662  {
663  lbmi = INDEX_INVALID;
664  }
665 
666  if (0 == lb->lb_n_buckets)
667  {
668  /*
669  * first time initialisation. no packets inflight, so we can write
670  * at leisure.
671  */
672  load_balance_set_n_buckets(lb, n_buckets);
673 
674  if (!LB_HAS_INLINE_BUCKETS(lb))
676  lb->lb_n_buckets - 1,
678 
681  n_buckets, flags);
682  lb->lb_map = lbmi;
683  }
684  else
685  {
686  /*
687  * This is a modification of an existing load-balance.
688  * We need to ensure that packets inflight see a consistent state, that
689  * is the number of reported buckets the LB has (read from
690  * lb_n_buckets_minus_1) is not more than it actually has. So if the
691  * number of buckets is increasing, we must update the bucket array first,
692  * then the reported number. vice-versa if the number of buckets goes down.
693  */
694  if (n_buckets == lb->lb_n_buckets)
695  {
696  /*
697  * no change in the number of buckets. we can simply fill what
698  * is new over what is old.
699  */
702  n_buckets, flags);
703  lb->lb_map = lbmi;
704  }
705  else if (n_buckets > lb->lb_n_buckets)
706  {
707  /*
708  * we have more buckets. the old load-balance map (if there is one)
709  * will remain valid, i.e. mapping to indices within range, so we
710  * update it last.
711  */
712  if (n_buckets > LB_NUM_INLINE_BUCKETS &&
714  {
715  /*
716  * the new increased number of buckets is crossing the threshold
717  * from the inline storage to out-line. Alloc the outline buckets
718  * first, then fixup the number. then reset the inlines.
719  */
720  ASSERT(NULL == lb->lb_buckets);
722  n_buckets - 1,
724 
726  lb->lb_buckets,
727  n_buckets, flags);
729  load_balance_set_n_buckets(lb, n_buckets);
730 
732 
733  for (ii = 0; ii < LB_NUM_INLINE_BUCKETS; ii++)
734  {
735  dpo_reset(&lb->lb_buckets_inline[ii]);
736  }
737  }
738  else
739  {
740  if (n_buckets <= LB_NUM_INLINE_BUCKETS)
741  {
742  /*
743  * we are not crossing the threshold and it's still inline buckets.
744  * we can write the new on the old..
745  */
748  n_buckets, flags);
750  load_balance_set_n_buckets(lb, n_buckets);
751  }
752  else
753  {
754  /*
755  * we are not crossing the threshold. We need a new bucket array to
756  * hold the increased number of choices.
757  */
758  dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;
759 
760  new_buckets = NULL;
761  old_buckets = load_balance_get_buckets(lb);
762 
763  vec_validate_aligned(new_buckets,
764  n_buckets - 1,
766 
767  load_balance_fill_buckets(lb, nhs, new_buckets,
768  n_buckets, flags);
770  lb->lb_buckets = new_buckets;
772  load_balance_set_n_buckets(lb, n_buckets);
773 
774  vec_foreach(tmp_dpo, old_buckets)
775  {
776  dpo_reset(tmp_dpo);
777  }
778  vec_free(old_buckets);
779  }
780  }
781 
782  /*
783  * buckets fixed. ready for the MAP update.
784  */
785  lb->lb_map = lbmi;
786  }
787  else
788  {
789  /*
790  * bucket size shrinkage.
791  * Any map we have will be based on the old
792  * larger number of buckets, so will be translating to indices
793  * out of range. So the new MAP must be installed first.
794  */
795  lb->lb_map = lbmi;
797 
798 
799  if (n_buckets <= LB_NUM_INLINE_BUCKETS &&
801  {
802  /*
803  * the new decreased number of buckets is crossing the threshold
804  * from out-line storage to inline:
805  * 1 - Fill the inline buckets,
806  * 2 - fixup the number (and this point the inline buckets are
807  * used).
808  * 3 - free the outline buckets
809  */
811  lb->lb_buckets_inline,
812  n_buckets, flags);
814  load_balance_set_n_buckets(lb, n_buckets);
816 
817  vec_foreach(tmp_dpo, lb->lb_buckets)
818  {
819  dpo_reset(tmp_dpo);
820  }
821  vec_free(lb->lb_buckets);
822  }
823  else
824  {
825  /*
826  * not crossing the threshold.
827  * 1 - update the number to the smaller size
828  * 2 - write the new buckets
829  * 3 - reset those no longer used.
830  */
831  dpo_id_t *buckets;
832  u32 old_n_buckets;
833 
834  old_n_buckets = lb->lb_n_buckets;
835  buckets = load_balance_get_buckets(lb);
836 
837  load_balance_set_n_buckets(lb, n_buckets);
839 
840  load_balance_fill_buckets(lb, nhs, buckets,
841  n_buckets, flags);
842 
843  for (ii = n_buckets; ii < old_n_buckets; ii++)
844  {
845  dpo_reset(&buckets[ii]);
846  }
847  }
848  }
849  }
850 
851  vec_foreach (nh, nhs)
852  {
853  dpo_reset(&nh->path_dpo);
854  }
855  vec_free(nhs);
856  vec_free(fixed_nhs);
857 
858  load_balance_map_unlock(old_lbmi);
859 }
860 
861 static void
863 {
864  load_balance_t *lb;
865 
866  lb = load_balance_get(dpo->dpoi_index);
867 
868  lb->lb_locks++;
869 }
870 
871 static void
873 {
874  dpo_id_t *buckets;
875  int i;
876 
877  buckets = load_balance_get_buckets(lb);
878 
879  for (i = 0; i < lb->lb_n_buckets; i++)
880  {
881  dpo_reset(&buckets[i]);
882  }
883 
884  LB_DBG(lb, "destroy");
885  if (!LB_HAS_INLINE_BUCKETS(lb))
886  {
887  vec_free(lb->lb_buckets);
888  }
889 
892 
893  pool_put(load_balance_pool, lb);
894 }
895 
896 static void
898 {
899  load_balance_t *lb;
900 
901  lb = load_balance_get(dpo->dpoi_index);
902 
903  lb->lb_locks--;
904 
905  if (0 == lb->lb_locks)
906  {
908  }
909 }
910 
911 static void
913 {
914  fib_show_memory_usage("load-balance",
915  pool_elts(load_balance_pool),
916  pool_len(load_balance_pool),
917  sizeof(load_balance_t));
919 }
920 
921 const static dpo_vft_t lb_vft = {
923  .dv_unlock = load_balance_unlock,
924  .dv_format = format_load_balance_dpo,
925  .dv_mem_show = load_balance_mem_show,
926 };
927 
928 /**
929  * @brief The per-protocol VLIB graph nodes that are assigned to a load-balance
930  * object.
931  *
932  * this means that these graph nodes are ones from which a load-balance is the
933  * parent object in the DPO-graph.
934  *
935  * We do not list all the load-balance nodes, such as the *-lookup. instead
936  * we are relying on the correct use of the .sibling_of field when setting
937  * up these sibling nodes.
938  */
/* NULL-terminated list: graph node(s) for which an IPv4 load-balance is
 * the parent DPO */
const static char* const load_balance_ip4_nodes[] =
{
    "ip4-load-balance",
    NULL,
};
/* NULL-terminated list: graph node(s) for which an IPv6 load-balance is
 * the parent DPO */
const static char* const load_balance_ip6_nodes[] =
{
    "ip6-load-balance",
    NULL,
};
/* NULL-terminated list: graph node(s) for which an MPLS load-balance is
 * the parent DPO */
const static char* const load_balance_mpls_nodes[] =
{
    "mpls-load-balance",
    NULL,
};
/* NULL-terminated list: graph node(s) for which an L2 load-balance is
 * the parent DPO */
const static char* const load_balance_l2_nodes[] =
{
    "l2-load-balance",
    NULL,
};
/* NULL-terminated list: graph node(s) for which an NSH load-balance is
 * the parent DPO */
const static char* const load_balance_nsh_nodes[] =
{
    "nsh-load-balance",
    NULL
};
/* NULL-terminated list: graph node(s) for which a BIER load-balance is
 * the parent DPO */
const static char* const load_balance_bier_nodes[] =
{
    "bier-load-balance",
    NULL,
};
969 const static char* const * const load_balance_nodes[DPO_PROTO_NUM] =
970 {
977 };
978 
979 void
981 {
982  index_t lbi;
983 
985 
986  /*
987  * Special LB with index zero. we need to define this since the v4 mtrie
988  * assumes an index of 0 implies the ply is empty. therefore all 'real'
989  * adjs need a non-zero index.
990  * This should never be used, but just in case, stack it on a drop.
991  */
992  lbi = load_balance_create(1, DPO_PROTO_IP4, 0);
994 
996  vlib_log_register_class("dpo", "load-balance");
997 
999 }
1000 
1001 static clib_error_t *
1003  unformat_input_t * input,
1004  vlib_cli_command_t * cmd)
1005 {
1006  index_t lbi = INDEX_INVALID;
1007 
1008  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1009  {
1010  if (unformat (input, "%d", &lbi))
1011  ;
1012  else
1013  break;
1014  }
1015 
1016  if (INDEX_INVALID != lbi)
1017  {
1018  if (pool_is_free_index(load_balance_pool, lbi))
1019  {
1020  vlib_cli_output (vm, "no such load-balance:%d", lbi);
1021  }
1022  else
1023  {
1024  vlib_cli_output (vm, "%U", format_load_balance, lbi,
1026  }
1027  }
1028  else
1029  {
1030  load_balance_t *lb;
1031 
1032  pool_foreach (lb, load_balance_pool)
1033  {
1037  }
1038  }
1039 
1040  return 0;
1041 }
1042 
/*
 * CLI registration for "show load-balance [<index>]": with an index,
 * load_balance_show dumps that one object (or reports it as missing);
 * with no index it walks the whole pool.
 */
VLIB_CLI_COMMAND (load_balance_show_command, static) = {
    .path = "show load-balance",
    .short_help = "show load-balance [<index>]",
    .function = load_balance_show,
};
1048 
1049 
1052 {
1053  ip4_header_t *iph = (ip4_header_t *) data;
1054 
1055  if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
1057  else
1059 }
1060 
1063 {
1064  return (*((u64 *) m) & 0xffffffffffff);
1065 }
1066 
1069 {
1070  ethernet_header_t *eh;
1071  u64 a, b, c;
1072  uword is_ip, eh_size;
1073  u16 eh_type;
1074 
1075  eh = vlib_buffer_get_current (b0);
1076  eh_type = clib_net_to_host_u16 (eh->type);
1077  eh_size = ethernet_buffer_header_size (b0);
1078 
1079  is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);
1080 
1081  /* since we have 2 cache lines, use them */
1082  if (is_ip)
1083  a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
1084  else
1085  a = eh->type;
1086 
1087  b = mac_to_u64 ((u8 *) eh->dst_address);
1088  c = mac_to_u64 ((u8 *) eh->src_address);
1089  hash_mix64 (a, b, c);
1090 
1091  return (u32) c;
1092 }
1093 
1095 {
1098 
1102  vlib_frame_t * frame,
1103  int is_l2)
1104 {
1105  u32 n_left_from, next_index, *from, *to_next;
1106 
1107  from = vlib_frame_vector_args (frame);
1108  n_left_from = frame->n_vectors;
1109 
1110  next_index = node->cached_next_index;
1111 
1112  while (n_left_from > 0)
1113  {
1114  u32 n_left_to_next;
1115 
1116  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1117 
1118  while (n_left_from > 0 && n_left_to_next > 0)
1119  {
1120  vlib_buffer_t *b0;
1121  u32 bi0, lbi0, next0;
1122  const dpo_id_t *dpo0;
1123  const load_balance_t *lb0;
1124 
1125  bi0 = from[0];
1126  to_next[0] = bi0;
1127  from += 1;
1128  to_next += 1;
1129  n_left_from -= 1;
1130  n_left_to_next -= 1;
1131 
1132  b0 = vlib_get_buffer (vm, bi0);
1133 
1134  /* lookup dst + src mac */
1135  lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
1136  lb0 = load_balance_get(lbi0);
1137 
1138  if (is_l2)
1139  {
1140  vnet_buffer(b0)->ip.flow_hash = l2_flow_hash(b0);
1141  }
1142  else
1143  {
1144  /* it's BIER */
1145  const bier_hdr_t *bh0 = vlib_buffer_get_current(b0);
1146  vnet_buffer(b0)->ip.flow_hash = bier_compute_flow_hash(bh0);
1147  }
1148 
1149  dpo0 = load_balance_get_bucket_i(lb0,
1150  vnet_buffer(b0)->ip.flow_hash &
1151  (lb0->lb_n_buckets_minus_1));
1152 
1153  next0 = dpo0->dpoi_next_node;
1154  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
1155 
1156  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1157  {
1158  load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
1159  sizeof (*tr));
1160  tr->lb_index = lbi0;
1161  }
1162  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1163  n_left_to_next, bi0, next0);
1164  }
1165 
1166  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1167  }
1168 
1169  return frame->n_vectors;
1170 }
1171 
1172 static uword
1175  vlib_frame_t * frame)
1176 {
1177  return (load_balance_inline(vm, node, frame, 1));
1178 }
1179 
1180 static u8 *
1181 format_l2_load_balance_trace (u8 * s, va_list * args)
1182 {
1183  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1184  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1185  load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);
1186 
1187  s = format (s, "L2-load-balance: index %d", t->lb_index);
1188  return s;
1189 }
1190 
1191 /**
1192  * @brief
1193  */
1195  .function = l2_load_balance,
1196  .name = "l2-load-balance",
1197  .vector_size = sizeof (u32),
1198 
1199  .format_trace = format_l2_load_balance_trace,
1200  .n_next_nodes = 1,
1201  .next_nodes = {
1202  [0] = "error-drop",
1203  },
1204 };
1205 
1206 static uword
1209  vlib_frame_t * frame)
1210 {
1211  u32 n_left_from, next_index, *from, *to_next;
1212 
1213  from = vlib_frame_vector_args (frame);
1214  n_left_from = frame->n_vectors;
1215 
1216  next_index = node->cached_next_index;
1217 
1218  while (n_left_from > 0)
1219  {
1220  u32 n_left_to_next;
1221 
1222  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1223 
1224  while (n_left_from > 0 && n_left_to_next > 0)
1225  {
1226  vlib_buffer_t *b0;
1227  u32 bi0, lbi0, next0, *nsh0;
1228  const dpo_id_t *dpo0;
1229  const load_balance_t *lb0;
1230 
1231  bi0 = from[0];
1232  to_next[0] = bi0;
1233  from += 1;
1234  to_next += 1;
1235  n_left_from -= 1;
1236  n_left_to_next -= 1;
1237 
1238  b0 = vlib_get_buffer (vm, bi0);
1239 
1240  lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
1241  lb0 = load_balance_get(lbi0);
1242 
1243  /* SPI + SI are the second word of the NSH header */
1244  nsh0 = vlib_buffer_get_current (b0);
1245  vnet_buffer(b0)->ip.flow_hash = nsh0[1] % lb0->lb_n_buckets;
1246 
1247  dpo0 = load_balance_get_bucket_i(lb0,
1248  vnet_buffer(b0)->ip.flow_hash &
1249  (lb0->lb_n_buckets_minus_1));
1250 
1251  next0 = dpo0->dpoi_next_node;
1252  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
1253 
1254  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1255  {
1256  load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
1257  sizeof (*tr));
1258  tr->lb_index = lbi0;
1259  }
1260  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1261  n_left_to_next, bi0, next0);
1262  }
1263 
1264  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1265  }
1266 
1267  return frame->n_vectors;
1268 }
1269 
1270 static u8 *
1271 format_nsh_load_balance_trace (u8 * s, va_list * args)
1272 {
1273  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1274  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1275  load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);
1276 
1277  s = format (s, "NSH-load-balance: index %d", t->lb_index);
1278  return s;
1279 }
1280 
1281 /**
1282  * @brief
1283  */
1285  .function = nsh_load_balance,
1286  .name = "nsh-load-balance",
1287  .vector_size = sizeof (u32),
1288 
1289  .format_trace = format_nsh_load_balance_trace,
1290  .n_next_nodes = 1,
1291  .next_nodes = {
1292  [0] = "error-drop",
1293  },
1294 };
1295 
1296 static u8 *
1297 format_bier_load_balance_trace (u8 * s, va_list * args)
1298 {
1299  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1300  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1301  load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);
1302 
1303  s = format (s, "BIER-load-balance: index %d", t->lb_index);
1304  return s;
1305 }
1306 
1307 static uword
1310  vlib_frame_t * frame)
1311 {
1312  return (load_balance_inline(vm, node, frame, 0));
1313 }
1314 
1315 /**
1316  * @brief
1317  */
1319  .function = bier_load_balance,
1320  .name = "bier-load-balance",
1321  .vector_size = sizeof (u32),
1322 
1323  .format_trace = format_bier_load_balance_trace,
1324  .sibling_of = "mpls-load-balance",
1325 };
vlib_log_class_t vlib_log_register_class(char *class, char *subclass)
Definition: log.c:338
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:509
u16 lb_n_buckets
number of buckets in the load-balance.
Definition: load_balance.h:116
vlib_log_class_t load_balance_logger
the logger
Definition: load_balance.c:38
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
static const char *const load_balance_ip6_nodes[]
Definition: load_balance.c:944
dpo_id_t * lb_buckets
Vector of buckets containing the next DPOs, sized as lbo_num.
Definition: load_balance.h:166
dpo_lock_fn_t dv_lock
A reference counting lock function.
Definition: dpo.h:411
static u8 * format_l2_load_balance_trace(u8 *s, va_list *args)
void load_balance_map_unlock(index_t lbmi)
vlib_combined_counter_main_t lbm_to_counters
Definition: load_balance.h:46
index_t load_balance_map_add_or_lock(u32 n_buckets, u32 sum_of_weights, const load_balance_path_t *paths)
fib_entry_flag_t lb_fib_entry_flags
Flags from the load-balance&#39;s associated fib_entry_t.
Definition: load_balance.h:138
static index_t load_balance_get_index(const load_balance_t *lb)
Definition: load_balance.c:75
static const char *const *const load_balance_nodes[DPO_PROTO_NUM]
Definition: load_balance.c:969
#define CLIB_UNUSED(x)
Definition: clib.h:87
A virtual function table regisitered for a DPO type.
Definition: dpo.h:406
static const char *const load_balance_mpls_nodes[]
Definition: load_balance.c:949
void vlib_validate_combined_counter(vlib_combined_counter_main_t *cm, u32 index)
validate a combined counter
Definition: counter.c:108
u16 load_balance_n_buckets(index_t lbi)
Definition: load_balance.c:314
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:105
a
Definition: bitmap.h:544
u8 * format_dpo_type(u8 *s, va_list *args)
format a DPO type
Definition: dpo.c:138
dpo_id_t path_dpo
ID of the Data-path object.
Definition: load_balance.h:66
static void load_balance_set_n_buckets(load_balance_t *lb, u32 n_buckets)
Definition: load_balance.c:619
#define pool_foreach(VAR, POOL)
Iterate through pool.
Definition: pool.h:527
vl_api_fib_path_nh_t nh
Definition: fib_types.api:126
#define LOAD_BALANCE_ATTR_NAMES
Definition: load_balance.h:87
static u32 ip_flow_hash(void *data)
unsigned long u64
Definition: types.h:89
enum load_balance_format_flags_t_ load_balance_format_flags_t
Flags controlling load-balance formatting/display.
static u32 ip4_compute_flow_hash(const ip4_header_t *ip, flow_hash_config_t flow_hash_config)
Definition: ip4_inlines.h:51
int vlib_validate_combined_counter_will_expand(vlib_combined_counter_main_t *cm, u32 index)
Definition: counter.c:124
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
void load_balance_set_urpf(index_t lbi, index_t urpf)
Definition: load_balance.c:335
#define ethernet_buffer_header_size(b)
Determine the size of the Ethernet headers of the current frame in the buffer.
Definition: ethernet.h:463
flow_hash_config_t lb_hash_config
the hash config to use when selecting a bucket.
Definition: load_balance.h:161
u8 src_address[6]
Definition: packet.h:56
u32 thread_index
Definition: main.h:250
#define pool_get_aligned_will_expand(P, YESNO, A)
See if pool_get will expand the pool or not.
Definition: pool.h:257
void dpo_copy(dpo_id_t *dst, const dpo_id_t *src)
atomic copy a data-plane object.
Definition: dpo.c:262
u32 index_t
A Data-Path Object is an object that represents actions that are applied to packets are they are swit...
Definition: dpo.h:41
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:592
static const char * load_balance_attr_names[]
Definition: load_balance.c:33
Combined counter to hold both packets and byte differences.
Definition: counter_types.h:26
static const char *const load_balance_ip4_nodes[]
The per-protocol VLIB graph nodes that are assigned to a load-balance object.
Definition: load_balance.c:939
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:630
static u8 * format_load_balance_dpo(u8 *s, va_list *args)
Definition: load_balance.c:209
vlib_main_t * vm
Definition: in2out_ed.c:1580
load_balance_flags_t lb_flags
Flags concenring the LB&#39;s creation and modification.
Definition: load_balance.h:133
#define FOR_EACH_LOAD_BALANCE_ATTR(_attr)
Definition: load_balance.h:92
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
Definition: vec.h:520
static u8 * load_balance_format(index_t lbi, load_balance_format_flags_t flags, u32 indent, u8 *s)
Definition: load_balance.c:140
static load_balance_t * load_balance_alloc_i(void)
Definition: load_balance.c:94
index_t load_balance_get_urpf(index_t lbi)
Definition: load_balance.c:354
unsigned char u8
Definition: types.h:56
static const char *const load_balance_l2_nodes[]
Definition: load_balance.c:954
#define pool_len(p)
Number of elements in pool vector.
Definition: pool.h:139
u8 data[128]
Definition: ipsec_types.api:90
double f64
Definition: types.h:142
#define vlib_worker_thread_barrier_sync(X)
Definition: threads.h:205
index_t load_balance_create(u32 n_buckets, dpo_proto_t lb_proto, flow_hash_config_t fhc)
Definition: load_balance.c:264
const dpo_id_t * drop_dpo_get(dpo_proto_t proto)
Definition: drop_dpo.c:25
u32 vlib_log_class_t
Definition: vlib.h:51
void dpo_register(dpo_type_t type, const dpo_vft_t *vft, const char *const *const *nodes)
For a given DPO type Register:
Definition: dpo.c:327
i64 word
Definition: types.h:111
load_balance_t * load_balance_pool
Pool of all DPOs.
Definition: load_balance.c:52
void load_balance_map_module_init(void)
Make/add a new or lock an existing Load-balance map.
static dpo_id_t * load_balance_get_buckets(load_balance_t *lb)
Definition: load_balance.c:81
void load_balance_module_init(void)
Definition: load_balance.c:980
u16 lb_n_buckets_minus_1
number of buckets in the load-balance - 1.
Definition: load_balance.h:121
u8 dst_address[6]
Definition: packet.h:55
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
static int next_hop_sort_by_weight(const load_balance_path_t *n1, const load_balance_path_t *n2)
Definition: load_balance.c:375
description fragment has unexpected format
Definition: map.api:433
static void load_balance_mem_show(void)
Definition: load_balance.c:912
void fib_urpf_list_lock(index_t ui)
Definition: fib_urpf_list.c:87
static load_balance_t * load_balance_create_i(u32 num_buckets, dpo_proto_t lb_proto, flow_hash_config_t fhc)
Definition: load_balance.c:239
void fib_show_memory_usage(const char *name, u32 in_use_elts, u32 allocd_elts, size_t size_elt)
Show the memory usage for a type.
Definition: fib_node.c:220
const cJSON *const b
Definition: cJSON.h:255
void load_balance_multipath_update(const dpo_id_t *dpo, const load_balance_path_t *raw_nhs, load_balance_flags_t flags)
Definition: load_balance.c:627
unsigned int u32
Definition: types.h:88
f64 load_balance_get_multipath_tolerance(void)
Definition: load_balance.c:69
enum dpo_proto_t_ dpo_proto_t
Data path protocol.
static void load_balance_lock(dpo_id_t *dpo)
Definition: load_balance.c:862
static void load_balance_fill_buckets(load_balance_t *lb, load_balance_path_t *nhs, dpo_id_t *buckets, u32 n_buckets, load_balance_flags_t flags)
Definition: load_balance.c:602
int load_balance_is_drop(const dpo_id_t *dpo)
Definition: load_balance.c:297
static void load_balance_unlock(dpo_id_t *dpo)
Definition: load_balance.c:897
The identity of a DPO is a combination of its type and its instance number/index of objects of that t...
Definition: dpo.h:170
Definition: cJSON.c:84
static load_balance_path_t * load_balance_multipath_next_hop_fixup(const load_balance_path_t *nhs, dpo_proto_t drop_proto)
Definition: load_balance.c:505
static void vlib_zero_combined_counter(vlib_combined_counter_main_t *cm, u32 index)
Clear a combined counter Clears the set of per-thread counters.
Definition: counter.h:304
static uword bier_load_balance(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
counter_t packets
packet counter
Definition: counter_types.h:28
u8 * format_load_balance(u8 *s, va_list *args)
Definition: load_balance.c:200
dpo_type_t dpoi_type
the type
Definition: dpo.h:176
static const dpo_id_t * load_balance_get_bucket_i(const load_balance_t *lb, u32 bucket)
Definition: load_balance.h:229
dpo_proto_t lb_proto
The protocol of packets that traverse this LB.
Definition: load_balance.h:128
static_always_inline u32 bier_compute_flow_hash(const bier_hdr_t *hdr)
Definition: bier_fwd.h:23
struct _unformat_input_t unformat_input_t
unsigned short u16
Definition: types.h:57
flow_hash_config_t load_balance_get_default_flow_hash(dpo_proto_t lb_proto)
Definition: load_balance.c:218
void load_balance_set_fib_entry_flags(index_t lbi, fib_entry_flag_t flags)
Definition: load_balance.c:324
load-balancing over a choice of [un]equal cost paths
Definition: dpo.h:102
static u8 * format_bier_load_balance_trace(u8 *s, va_list *args)
#define MPLS_FLOW_HASH_DEFAULT
There are no options for controlling the MPLS flow hash.
Definition: mpls_fib.h:39
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:233
#define pool_put(P, E)
Free an object E in pool P.
Definition: pool.h:301
#define vec_dup(V)
Return copy of vector (no header, no alignment)
Definition: vec.h:429
The FIB DPO provieds;.
Definition: load_balance.h:106
u8 n_nhs
Definition: gbp.api:312
#define PREDICT_FALSE(x)
Definition: clib.h:121
#define always_inline
Definition: ipsec.h:28
u8 * format_load_balance_map(u8 *s, va_list *ap)
#define LB_DBG(_lb, _fmt, _args...)
Definition: load_balance.c:40
vl_api_gbp_next_hop_t nhs[8]
Definition: gbp.api:313
static const char *const load_balance_bier_nodes[]
Definition: load_balance.c:964
#define IP_FLOW_HASH_DEFAULT
Default: 5-tuple without the "reverse" bit.
Definition: ip_flow_hash.h:29
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:224
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:391
const dpo_id_t * load_balance_get_bucket(index_t lbi, u32 bucket)
Definition: load_balance.c:364
The load-balance object represents an ECMP choice.
Definition: load_balance.h:44
vlib_node_registration_t l2_load_balance_node
(constructor) VLIB_REGISTER_NODE (l2_load_balance_node)
dpo_id_t lb_buckets_inline[LB_NUM_INLINE_BUCKETS]
The rest of the cache line is used for buckets.
Definition: load_balance.h:174
#define pool_get_aligned(P, E, A)
Allocate an object E from a pool P with alignment A.
Definition: pool.h:245
static void load_balance_fill_buckets_norm(load_balance_t *lb, load_balance_path_t *nhs, dpo_id_t *buckets, u32 n_buckets)
Definition: load_balance.c:531
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:170
#define UNFORMAT_END_OF_INPUT
Definition: format.h:144
#define hash_mix64(a0, b0, c0)
Definition: hash.h:531
svmdb_client_t * c
u16 n_vectors
Definition: node.h:397
static void vlib_get_combined_counter(const vlib_combined_counter_main_t *cm, u32 index, vlib_counter_t *result)
Get the value of a combined counter, never called in the speed path Scrapes the entire set of per-thr...
Definition: counter.h:278
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:317
#define vec_free(V)
Free vector&#39;s memory (no header).
Definition: vec.h:380
static const char *const load_balance_nsh_nodes[]
Definition: load_balance.c:959
void load_balance_map_show_mem(void)
enum load_balance_attr_t_ load_balance_attr_t
Flags controlling load-balance creation and modification.
static f64 fabs(f64 x)
Definition: math.h:50
static uword max_pow2(uword x)
Definition: clib.h:244
static u8 * format_nsh_load_balance_trace(u8 *s, va_list *args)
#define pool_is_free_index(P, I)
Use free bitmap to query whether given index is free.
Definition: pool.h:298
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:483
#define LB_NUM_INLINE_BUCKETS
The number of buckets that a load-balance object can have and still fit in one cache-line.
Definition: load_balance.h:56
vlib_combined_counter_main_t lbm_via_counters
Definition: load_balance.h:47
enum fib_entry_flag_t_ fib_entry_flag_t
vlib_main_t vlib_node_runtime_t * node
Definition: in2out_ed.c:1580
static uword nsh_load_balance(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
#define VLIB_CLI_COMMAND(x,...)
Definition: cli.h:158
vlib_node_registration_t bier_load_balance_node
(constructor) VLIB_REGISTER_NODE (bier_load_balance_node)
vlib_node_registration_t nsh_load_balance_node
(constructor) VLIB_REGISTER_NODE (nsh_load_balance_node)
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:511
#define ASSERT(truth)
index_t lb_urpf
This is the index of the uRPF list for this LB.
Definition: load_balance.h:156
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
Definition: cli.c:696
static load_balance_t * load_balance_get(index_t lbi)
Definition: load_balance.h:220
static clib_error_t * load_balance_show(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
u32 lb_locks
The number of locks, which is approximately the number of users, of this load-balance.
Definition: load_balance.h:146
static u64 mac_to_u64(u8 *m)
#define LB_HAS_INLINE_BUCKETS(_lb)
Definition: load_balance.h:225
void load_balance_set_bucket(index_t lbi, u32 bucket, const dpo_id_t *next)
Definition: load_balance.c:281
u8 * format_dpo_id(u8 *s, va_list *args)
Format a DPO_id_t oject.
Definition: dpo.c:148
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:23
static u32 ip6_compute_flow_hash(const ip6_header_t *ip, flow_hash_config_t flow_hash_config)
Definition: ip6_inlines.h:49
enum load_balance_flags_t_ load_balance_flags_t
counter_t bytes
byte counter
Definition: counter_types.h:29
static uword l2_load_balance(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: defs.h:47
#define DPO_PROTO_NUM
Definition: dpo.h:70
A BIER header of variable length The encoding follows: https://tools.ietf.org/html/draft-ietf-bier-mp...
Definition: bier_types.h:321
static word flt_round_nearest(f64 x)
Definition: clib.h:301
void qsort(void *base, uword n, uword size, int(*compar)(const void *, const void *))
Definition: qsort.c:56
vl_api_address_t ip
Definition: l2.api:501
index_t dpoi_index
the index of objects of that type
Definition: dpo.h:188
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u32 path_weight
weight for the path.
Definition: load_balance.h:76
#define INDEX_INVALID
Invalid index - used when no index is known blazoned capitals INVALID speak volumes where ~0 does not...
Definition: dpo.h:47
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: in2out_ed.c:1581
static void load_balance_destroy(load_balance_t *lb)
Definition: load_balance.c:872
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
static uword load_balance_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, int is_l2)
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:297
char * name
The counter collection&#39;s name.
Definition: counter.h:212
void fib_urpf_list_unlock(index_t ui)
Definition: fib_urpf_list.c:68
One path from an [EU]CMP set that the client wants to add to a load-balance object.
Definition: load_balance.h:62
u8 * format_dpo_proto(u8 *s, va_list *args)
format a DPO protocol
Definition: dpo.c:178
static void load_balance_fill_buckets_sticky(load_balance_t *lb, load_balance_path_t *nhs, dpo_id_t *buckets, u32 n_buckets)
Definition: load_balance.c:555
static u32 l2_flow_hash(vlib_buffer_t *b0)
#define vnet_buffer(b)
Definition: buffer.h:417
index_t lb_map
index of the load-balance map, INVALID if this LB does not use one
Definition: load_balance.h:151
void vlib_worker_thread_barrier_release(vlib_main_t *vm)
Definition: threads.c:1561
const f64 multipath_next_hop_error_tolerance
Definition: load_balance.c:31
u32 flow_hash_config_t
A flow hash configuration is a mask of the flow hash options.
Definition: ip_flow_hash.h:43
int dpo_is_drop(const dpo_id_t *dpo)
The Drop DPO will drop all packets, no questions asked.
Definition: drop_dpo.c:33
u32 ip_multipath_normalize_next_hops(const load_balance_path_t *raw_next_hops, load_balance_path_t **normalized_next_hops, u32 *sum_weight_in, f64 multipath_next_hop_error_tolerance)
Definition: load_balance.c:385
void dpo_reset(dpo_id_t *dpo)
reset a DPO ID The DPO will be unlocked.
Definition: dpo.c:232
#define vec_foreach(var, vec)
Vector iterator.
#define CLIB_MEMORY_BARRIER()
Definition: clib.h:133
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:634
u16 dpoi_next_node
The next VLIB node to follow.
Definition: dpo.h:184
u8 ip_version_and_header_length
Definition: ip4_packet.h:93
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
struct load_balance_trace_t_ load_balance_trace_t
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
uword unformat(unformat_input_t *i, const char *fmt,...)
Definition: unformat.c:978
static uword unformat_check_input(unformat_input_t *i)
Definition: format.h:170
void dpo_stack(dpo_type_t child_type, dpo_proto_t child_proto, dpo_id_t *dpo, const dpo_id_t *parent)
Stack one DPO object on another, and thus establish a child-parent relationship.
Definition: dpo.c:521
static void load_balance_set_bucket_i(load_balance_t *lb, u32 bucket, dpo_id_t *buckets, const dpo_id_t *next)
Definition: load_balance.c:272
static uword pool_elts(void *v)
Number of active elements in a pool.
Definition: pool.h:127