sr_policy_rewrite.c (FD.io VPP v19.08.3-2-gbabecb413)
1 /*
2  * sr_policy_rewrite.c: ipv6 sr policy creation
3  *
4  * Copyright (c) 2016 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 /**
19  * @file
20  * @brief SR policy creation and application
21  *
22  * Create an SR policy.
23  * An SR policy can be either of 'default' type or 'spray' type.
24  * An SR policy has a list of SID lists attached.
25  * If the SR policy is of the default type it will load balance among them.
26  * An SR policy has an associated BindingSID.
27  * If a packet arrives with IPv6 DA == BindingSID, then the SR policy
28  * associated with that BindingSID is applied to the packet.
29  *
30  * SR policies can be applied either by using IPv6 encapsulation or
31  * SRH insertion. Both methods can be found in this file.
32  *
33  * Traffic input is usually IPv6 packets. However it is also possible to have
34  * IPv4 packets or L2 frames (which are encapsulated into IPv6 with an SRH).
35  *
36  * This file provides the appropriate VPP graph nodes to do any of these
37  * methods.
38  *
39  */
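/*
 * Illustrative CLI usage (not part of the original source; addresses are
 * placeholders, the commands themselves come from the VLIB_CLI_COMMAND
 * registrations further down in this file):
 *
 *   set sr encaps source addr 2001:db8::1
 *   sr policy add bsid 2001:db8::100 next 2001:db8::2 next 2001:db8::3 encap
 *   sr policy mod bsid 2001:db8::100 add sl next 2001:db8::4 weight 2
 *   show sr policies
 */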
40 
41 #include <vlib/vlib.h>
42 #include <vnet/vnet.h>
43 #include <vnet/srv6/sr.h>
44 #include <vnet/ip/ip.h>
45 #include <vnet/srv6/sr_packet.h>
46 #include <vnet/ip/ip6_packet.h>
47 #include <vnet/fib/ip6_fib.h>
48 #include <vnet/dpo/dpo.h>
49 #include <vnet/dpo/replicate_dpo.h>
50 
51 #include <vppinfra/error.h>
52 #include <vppinfra/elog.h>
53 
54 /**
55  * @brief SR policy rewrite trace
56  */
57 typedef struct
58 {
59   ip6_address_t src, dst;
60 } sr_policy_rewrite_trace_t;
61 
62 /* Graph arcs */
63 #define foreach_sr_policy_rewrite_next \
64 _(IP6_LOOKUP, "ip6-lookup") \
65 _(ERROR, "error-drop")
66 
67 typedef enum
68 {
69 #define _(s,n) SR_POLICY_REWRITE_NEXT_##s,
70   foreach_sr_policy_rewrite_next
71 #undef _
72   SR_POLICY_REWRITE_N_NEXT,
73 } sr_policy_rewrite_next_t;
74 
75 /* SR rewrite errors */
76 #define foreach_sr_policy_rewrite_error \
77 _(INTERNAL_ERROR, "Segment Routing undefined error") \
78 _(BSID_ZERO, "BSID with SL = 0") \
79 _(COUNTER_TOTAL, "SR steered IPv6 packets") \
80 _(COUNTER_ENCAP, "SR: Encaps packets") \
81 _(COUNTER_INSERT, "SR: SRH inserted packets") \
82 _(COUNTER_BSID, "SR: BindingSID steered packets")
83 
84 typedef enum
85 {
86 #define _(sym,str) SR_POLICY_REWRITE_ERROR_##sym,
87   foreach_sr_policy_rewrite_error
88 #undef _
89   SR_POLICY_REWRITE_N_ERROR,
90 } sr_policy_rewrite_error_t;
91 
92 static char *sr_policy_rewrite_error_strings[] = {
93 #define _(sym,string) string,
94   foreach_sr_policy_rewrite_error
95 #undef _
96 };
97 
98 /**
99  * @brief Dynamically added SR SL DPO type
100  */
101 static dpo_type_t sr_pr_encaps_dpo_type;
102 static dpo_type_t sr_pr_insert_dpo_type;
103 static dpo_type_t sr_pr_bsid_encaps_dpo_type;
104 static dpo_type_t sr_pr_bsid_insert_dpo_type;
105 
106 /**
107  * @brief IPv6 SA for encapsulated packets
108  */
109 static ip6_address_t sr_pr_encaps_src;
110 
111 /******************* SR rewrite set encaps IPv6 source addr *******************/
112 /* Note: This is temporary. We don't know whether to follow this path or
113    take the ip address of a loopback interface or even the OIF */
114 
115 void
116 sr_set_source (ip6_address_t * address)
117 {
118  clib_memcpy_fast (&sr_pr_encaps_src, address, sizeof (sr_pr_encaps_src));
119 }
120 
121 static clib_error_t *
122 set_sr_src_command_fn (vlib_main_t * vm, unformat_input_t * input,
123                        vlib_cli_command_t * cmd)
124 {
125   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
126     {
127  if (unformat
128  (input, "addr %U", unformat_ip6_address, &sr_pr_encaps_src))
129  return 0;
130  else
131  return clib_error_return (0, "No address specified");
132  }
133  return clib_error_return (0, "No address specified");
134 }
135 
136 /* *INDENT-OFF* */
137 VLIB_CLI_COMMAND (set_sr_src_command, static) = {
138  .path = "set sr encaps source",
139  .short_help = "set sr encaps source addr <ip6_addr>",
140  .function = set_sr_src_command_fn,
141 };
142 /* *INDENT-ON* */
143 
144 /*********************** SR rewrite string computation ************************/
145 /**
146  * @brief SR rewrite string computation for IPv6 encapsulation (inline)
147  *
148  * @param sl is a vector of IPv6 addresses composing the Segment List
149  *
150  * @return precomputed rewrite string for encapsulation
151  */
152 static inline u8 *
153 compute_rewrite_encaps (ip6_address_t * sl)
154 {
155  ip6_header_t *iph;
156  ip6_sr_header_t *srh;
157  ip6_address_t *addrp, *this_address;
158  u32 header_length = 0;
159  u8 *rs = NULL;
160 
161  header_length = 0;
162  header_length += IPv6_DEFAULT_HEADER_LENGTH;
163  if (vec_len (sl) > 1)
164  {
165  header_length += sizeof (ip6_sr_header_t);
166  header_length += vec_len (sl) * sizeof (ip6_address_t);
167  }
168 
169  vec_validate (rs, header_length - 1);
170 
171   iph = (ip6_header_t *) rs;
172   iph->ip_version_traffic_class_and_flow_label =
173     clib_host_to_net_u32 (0 | ((6 & 0xF) << 28));
174  iph->src_address.as_u64[0] = sr_pr_encaps_src.as_u64[0];
175  iph->src_address.as_u64[1] = sr_pr_encaps_src.as_u64[1];
176  iph->payload_length = header_length - IPv6_DEFAULT_HEADER_LENGTH;
177   iph->protocol = IP_PROTOCOL_IPV6;
178   iph->hop_limit = IPv6_DEFAULT_HOP_LIMIT;
179 
180  if (vec_len (sl) > 1)
181  {
182  srh = (ip6_sr_header_t *) (iph + 1);
183  iph->protocol = IP_PROTOCOL_IPV6_ROUTE;
184   srh->protocol = IP_PROTOCOL_IPV6;
185   srh->type = ROUTING_HEADER_TYPE_SR;
186   srh->segments_left = vec_len (sl) - 1;
187  srh->last_entry = vec_len (sl) - 1;
188  srh->length = ((sizeof (ip6_sr_header_t) +
189  (vec_len (sl) * sizeof (ip6_address_t))) / 8) - 1;
190  srh->flags = 0x00;
191  srh->tag = 0x0000;
192  addrp = srh->segments + vec_len (sl) - 1;
193  vec_foreach (this_address, sl)
194  {
195  clib_memcpy_fast (addrp->as_u8, this_address->as_u8,
196  sizeof (ip6_address_t));
197  addrp--;
198  }
199  }
200  iph->dst_address.as_u64[0] = sl->as_u64[0];
201  iph->dst_address.as_u64[1] = sl->as_u64[1];
202  return rs;
203 }
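/*
 * Illustrative sizing example (derived from the computation above, not part
 * of the original source): for a Segment List of three SIDs the rewrite is
 * 40 (IPv6) + 8 (SRH) + 3 * 16 (segments) = 96 bytes, with
 * srh->length = ((8 + 48) / 8) - 1 = 6, segments_left = last_entry = 2,
 * and the outer IPv6 DA preloaded with the first segment (sl[0]).
 */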
204 
205 /**
206  * @brief SR rewrite string computation for SRH insertion (inline)
207  *
208  * @param sl is a vector of IPv6 addresses composing the Segment List
209  *
210  * @return precomputed rewrite string for SRH insertion
211  */
212 static inline u8 *
213 compute_rewrite_insert (ip6_address_t * sl)
214 {
215  ip6_sr_header_t *srh;
216  ip6_address_t *addrp, *this_address;
217  u32 header_length = 0;
218  u8 *rs = NULL;
219 
220  header_length = 0;
221  header_length += sizeof (ip6_sr_header_t);
222  header_length += (vec_len (sl) + 1) * sizeof (ip6_address_t);
223 
224  vec_validate (rs, header_length - 1);
225 
226   srh = (ip6_sr_header_t *) rs;
227   srh->type = ROUTING_HEADER_TYPE_SR;
228   srh->segments_left = vec_len (sl);
229  srh->last_entry = vec_len (sl);
230  srh->length = ((sizeof (ip6_sr_header_t) +
231  ((vec_len (sl) + 1) * sizeof (ip6_address_t))) / 8) - 1;
232  srh->flags = 0x00;
233  srh->tag = 0x0000;
234  addrp = srh->segments + vec_len (sl);
235  vec_foreach (this_address, sl)
236  {
237  clib_memcpy_fast (addrp->as_u8, this_address->as_u8,
238  sizeof (ip6_address_t));
239  addrp--;
240  }
241  return rs;
242 }
243 
244 /**
245  * @brief SR rewrite string computation for SRH insertion with BSID (inline)
246  *
247  * @param sl is a vector of IPv6 addresses composing the Segment List
248  *
249  * @return precomputed rewrite string for SRH insertion with BSID
250  */
251 static inline u8 *
252 compute_rewrite_bsid (ip6_address_t * sl)
253 {
254  ip6_sr_header_t *srh;
255  ip6_address_t *addrp, *this_address;
256  u32 header_length = 0;
257  u8 *rs = NULL;
258 
259  header_length = 0;
260  header_length += sizeof (ip6_sr_header_t);
261  header_length += vec_len (sl) * sizeof (ip6_address_t);
262 
263  vec_validate (rs, header_length - 1);
264 
265   srh = (ip6_sr_header_t *) rs;
266   srh->type = ROUTING_HEADER_TYPE_SR;
267   srh->segments_left = vec_len (sl) - 1;
268  srh->last_entry = vec_len (sl) - 1;
269  srh->length = ((sizeof (ip6_sr_header_t) +
270  (vec_len (sl) * sizeof (ip6_address_t))) / 8) - 1;
271  srh->flags = 0x00;
272  srh->tag = 0x0000;
273  addrp = srh->segments + vec_len (sl) - 1;
274  vec_foreach (this_address, sl)
275  {
276  clib_memcpy_fast (addrp->as_u8, this_address->as_u8,
277  sizeof (ip6_address_t));
278  addrp--;
279  }
280  return rs;
281 }
282 
283 /*************************** SR LB helper functions **************************/
284 /**
285  * @brief Creates a Segment List and adds it to an SR policy
286  *
287  * Creates a Segment List and adds it to the SR policy. Note that Segment Lists
288  * are not necessarily unique: there may be two Segment Lists within the same
289  * SR Policy with exactly the same segments and the same weight.
290  *
291  * @param sr_policy is the SR policy where the SL will be added
292  * @param sl is a vector of IPv6 addresses composing the Segment List
293  * @param weight is the weight of the SegmentList (for load-balancing purposes)
294  * @param is_encap represents the mode (SRH insertion vs Encapsulation)
295  *
296  * @return pointer to the just created segment list
297  */
298 static inline ip6_sr_sl_t *
299 create_sl (ip6_sr_policy_t * sr_policy, ip6_address_t * sl, u32 weight,
300            u8 is_encap)
301 {
302  ip6_sr_main_t *sm = &sr_main;
303  ip6_sr_sl_t *segment_list;
304 
305  pool_get (sm->sid_lists, segment_list);
306  clib_memset (segment_list, 0, sizeof (*segment_list));
307 
308  vec_add1 (sr_policy->segments_lists, segment_list - sm->sid_lists);
309 
310  /* Fill in segment list */
311  segment_list->weight =
312  (weight != (u32) ~ 0 ? weight : SR_SEGMENT_LIST_WEIGHT_DEFAULT);
313  segment_list->segments = vec_dup (sl);
314 
315  if (is_encap)
316  {
317  segment_list->rewrite = compute_rewrite_encaps (sl);
318  segment_list->rewrite_bsid = segment_list->rewrite;
319  }
320  else
321  {
322  segment_list->rewrite = compute_rewrite_insert (sl);
323  segment_list->rewrite_bsid = compute_rewrite_bsid (sl);
324  }
325 
326  /* Create DPO */
327  dpo_reset (&segment_list->bsid_dpo);
328  dpo_reset (&segment_list->ip6_dpo);
329  dpo_reset (&segment_list->ip4_dpo);
330 
331   if (is_encap)
332     {
333       dpo_set (&segment_list->bsid_dpo, sr_pr_bsid_encaps_dpo_type,
334                DPO_PROTO_IP6, segment_list - sm->sid_lists);
335       dpo_set (&segment_list->ip6_dpo, sr_pr_encaps_dpo_type, DPO_PROTO_IP6,
336                segment_list - sm->sid_lists);
337       dpo_set (&segment_list->ip4_dpo, sr_pr_encaps_dpo_type, DPO_PROTO_IP4,
338                segment_list - sm->sid_lists);
339     }
340   else
341     {
342       dpo_set (&segment_list->bsid_dpo, sr_pr_bsid_insert_dpo_type,
343                DPO_PROTO_IP6, segment_list - sm->sid_lists);
344       dpo_set (&segment_list->ip6_dpo, sr_pr_insert_dpo_type, DPO_PROTO_IP6,
345                segment_list - sm->sid_lists);
346     }
347 
348  return segment_list;
349 }
350 
351 /**
352  * @brief Updates the Load Balancer after an SR Policy change
353  *
354  * @param sr_policy is the modified SR Policy
355  */
356 static inline void
357 update_lb (ip6_sr_policy_t * sr_policy)
358 {
359  flow_hash_config_t fhc;
360  u32 *sl_index;
361  ip6_sr_sl_t *segment_list;
362  ip6_sr_main_t *sm = &sr_main;
363   load_balance_path_t path;
364   path.path_index = FIB_NODE_INDEX_INVALID;
365   load_balance_path_t *ip4_path_vector = 0;
366  load_balance_path_t *ip6_path_vector = 0;
367  load_balance_path_t *b_path_vector = 0;
368 
369  /* In case LB does not exist, create it */
370  if (!dpo_id_is_valid (&sr_policy->bsid_dpo))
371  {
372       fib_prefix_t pfx = {
373         .fp_proto = FIB_PROTOCOL_IP6,
374         .fp_len = 128,
375  .fp_addr = {
376  .ip6 = sr_policy->bsid,
377  }
378  };
379 
380  /* Add FIB entry for BSID */
381       fhc = fib_table_get_flow_hash_config (sr_policy->fib_table,
382                                             FIB_PROTOCOL_IP6);
383 
384       dpo_set (&sr_policy->bsid_dpo, DPO_LOAD_BALANCE, DPO_PROTO_IP6,
385                load_balance_create (0, DPO_PROTO_IP6, fhc));
386 
387       dpo_set (&sr_policy->ip6_dpo, DPO_LOAD_BALANCE, DPO_PROTO_IP6,
388                load_balance_create (0, DPO_PROTO_IP6, fhc));
389 
390       /* Update FIB entry's to point to the LB DPO in the main FIB and hidden one */
391       fib_table_entry_special_dpo_update (fib_table_find (FIB_PROTOCOL_IP6,
392                                                           sr_policy->fib_table),
393                                           &pfx, FIB_SOURCE_SR,
394                                           FIB_ENTRY_FLAG_EXCLUSIVE,
395                                           &sr_policy->bsid_dpo);
396 
397       fib_table_entry_special_dpo_update (sm->fib_table_ip6,
398                                           &pfx,
399                                           FIB_SOURCE_SR,
400                                           FIB_ENTRY_FLAG_EXCLUSIVE,
401                                           &sr_policy->ip6_dpo);
402 
403  if (sr_policy->is_encap)
404  {
405           dpo_set (&sr_policy->ip4_dpo, DPO_LOAD_BALANCE, DPO_PROTO_IP4,
406                    load_balance_create (0, DPO_PROTO_IP4, fhc));
407 
408           fib_table_entry_special_dpo_update (sm->fib_table_ip4,
409                                               &pfx,
410                                               FIB_SOURCE_SR,
411                                               FIB_ENTRY_FLAG_EXCLUSIVE,
412                                               &sr_policy->ip4_dpo);
413  }
414 
415  }
416 
417  /* Create the LB path vector */
418  //path_vector = vec_new(load_balance_path_t, vec_len(sr_policy->segments_lists));
419  vec_foreach (sl_index, sr_policy->segments_lists)
420  {
421  segment_list = pool_elt_at_index (sm->sid_lists, *sl_index);
422  path.path_dpo = segment_list->bsid_dpo;
423  path.path_weight = segment_list->weight;
424  vec_add1 (b_path_vector, path);
425  path.path_dpo = segment_list->ip6_dpo;
426  vec_add1 (ip6_path_vector, path);
427  if (sr_policy->is_encap)
428  {
429  path.path_dpo = segment_list->ip4_dpo;
430  vec_add1 (ip4_path_vector, path);
431  }
432  }
433 
434  /* Update LB multipath */
435   load_balance_multipath_update (&sr_policy->bsid_dpo, b_path_vector,
436                                  LOAD_BALANCE_FLAG_NONE);
437   load_balance_multipath_update (&sr_policy->ip6_dpo, ip6_path_vector,
438                                  LOAD_BALANCE_FLAG_NONE);
439   if (sr_policy->is_encap)
440     load_balance_multipath_update (&sr_policy->ip4_dpo, ip4_path_vector,
441                                    LOAD_BALANCE_FLAG_NONE);
442 
443  /* Cleanup */
444  vec_free (b_path_vector);
445  vec_free (ip6_path_vector);
446  vec_free (ip4_path_vector);
447 
448 }
449 
450 /**
451  * @brief Updates the Replicate DPO after an SR Policy change
452  *
453  * @param sr_policy is the modified SR Policy (type spray)
454  */
455 static inline void
456 update_replicate (ip6_sr_policy_t * sr_policy)
457 {
458  u32 *sl_index;
459  ip6_sr_sl_t *segment_list;
460  ip6_sr_main_t *sm = &sr_main;
461   load_balance_path_t path;
462   path.path_index = FIB_NODE_INDEX_INVALID;
463   load_balance_path_t *b_path_vector = 0;
464  load_balance_path_t *ip6_path_vector = 0;
465  load_balance_path_t *ip4_path_vector = 0;
466 
467  /* In case LB does not exist, create it */
468  if (!dpo_id_is_valid (&sr_policy->bsid_dpo))
469  {
470       dpo_set (&sr_policy->bsid_dpo, DPO_REPLICATE,
471                DPO_PROTO_IP6, replicate_create (0, DPO_PROTO_IP6));
472 
473       dpo_set (&sr_policy->ip6_dpo, DPO_REPLICATE,
474                DPO_PROTO_IP6, replicate_create (0, DPO_PROTO_IP6));
475 
476  /* Update FIB entry's DPO to point to SR without LB */
477       fib_prefix_t pfx = {
478         .fp_proto = FIB_PROTOCOL_IP6,
479         .fp_len = 128,
480  .fp_addr = {
481  .ip6 = sr_policy->bsid,
482  }
483  };
484       fib_table_entry_special_dpo_update (fib_table_find (FIB_PROTOCOL_IP6,
485                                                           sr_policy->fib_table),
486                                           &pfx, FIB_SOURCE_SR,
487                                           FIB_ENTRY_FLAG_EXCLUSIVE,
488                                           &sr_policy->bsid_dpo);
489 
490       fib_table_entry_special_dpo_update (sm->fib_table_ip6,
491                                           &pfx,
492                                           FIB_SOURCE_SR,
493                                           FIB_ENTRY_FLAG_EXCLUSIVE,
494                                           &sr_policy->ip6_dpo);
495 
496  if (sr_policy->is_encap)
497  {
498           dpo_set (&sr_policy->ip4_dpo, DPO_REPLICATE, DPO_PROTO_IP4,
499                    replicate_create (0, DPO_PROTO_IP4));
500 
501           fib_table_entry_special_dpo_update (sm->fib_table_ip4,
502                                               &pfx,
503                                               FIB_SOURCE_SR,
504                                               FIB_ENTRY_FLAG_EXCLUSIVE,
505  &sr_policy->ip4_dpo);
506  }
507 
508  }
509 
510  /* Create the replicate path vector */
511  path.path_weight = 1;
512  vec_foreach (sl_index, sr_policy->segments_lists)
513  {
514  segment_list = pool_elt_at_index (sm->sid_lists, *sl_index);
515  path.path_dpo = segment_list->bsid_dpo;
516  vec_add1 (b_path_vector, path);
517  path.path_dpo = segment_list->ip6_dpo;
518  vec_add1 (ip6_path_vector, path);
519  if (sr_policy->is_encap)
520  {
521  path.path_dpo = segment_list->ip4_dpo;
522  vec_add1 (ip4_path_vector, path);
523  }
524  }
525 
526  /* Update replicate multipath */
527  replicate_multipath_update (&sr_policy->bsid_dpo, b_path_vector);
528  replicate_multipath_update (&sr_policy->ip6_dpo, ip6_path_vector);
529  if (sr_policy->is_encap)
530  replicate_multipath_update (&sr_policy->ip4_dpo, ip4_path_vector);
531 }
532 
533 /******************************* SR rewrite API *******************************/
534 /* Three functions for handling sr policies:
535  * -> sr_policy_add
536  * -> sr_policy_del
537  * -> sr_policy_mod
538  * All of them are API. CLI function on sr_policy_command_fn */
539 
540 /**
541  * @brief Create a new SR policy
542  *
543  * @param bsid is the bindingSID of the SR Policy
544  * @param segments is a vector of IPv6 addresses composing the segment list
545  * @param weight is the weight of the sid list. optional.
546  * @param behavior is the behavior of the SR policy. (default/spray)
547  * @param fib_table is the VRF where to install the FIB entry for the BSID
548  * @param is_encap (bool) whether SR policy should behave as Encap/SRH Insertion
549  *
550  * @return 0 if correct, else error
551  */
552 int
553 sr_policy_add (ip6_address_t * bsid, ip6_address_t * segments,
554                u32 weight, u8 behavior, u32 fib_table, u8 is_encap)
555 {
556  ip6_sr_main_t *sm = &sr_main;
557  ip6_sr_policy_t *sr_policy = 0;
558  uword *p;
559 
560  /* Search for existing keys (BSID) */
561  p = mhash_get (&sm->sr_policies_index_hash, bsid);
562  if (p)
563  {
564  /* Add SR policy that already exists; complain */
565  return -12;
566  }
567 
568  /* Search collision in FIB entries */
569   /* Explanation: It might be possible that some other entity has already
570    * created a route for the BSID. This in theory is impossible, but in
571    * practice we could see it. Assert it and scream if needed */
572   fib_prefix_t pfx = {
573     .fp_proto = FIB_PROTOCOL_IP6,
574     .fp_len = 128,
575  .fp_addr = {
576  .ip6 = *bsid,
577  }
578  };
579 
580  /* Lookup the FIB index associated to the table selected */
581  u32 fib_index = fib_table_find (FIB_PROTOCOL_IP6,
582  (fib_table != (u32) ~ 0 ? fib_table : 0));
583  if (fib_index == ~0)
584  return -13;
585 
586  /* Lookup whether there exists an entry for the BSID */
587  fib_node_index_t fei = fib_table_lookup_exact_match (fib_index, &pfx);
588  if (FIB_NODE_INDEX_INVALID != fei)
589  return -12; //There is an entry for such lookup
590 
591  /* Add an SR policy object */
592  pool_get (sm->sr_policies, sr_policy);
593  clib_memset (sr_policy, 0, sizeof (*sr_policy));
594  clib_memcpy_fast (&sr_policy->bsid, bsid, sizeof (ip6_address_t));
595  sr_policy->type = behavior;
596  sr_policy->fib_table = (fib_table != (u32) ~ 0 ? fib_table : 0); //Is default FIB 0 ?
597  sr_policy->is_encap = is_encap;
598 
599  /* Copy the key */
600  mhash_set (&sm->sr_policies_index_hash, bsid, sr_policy - sm->sr_policies,
601  NULL);
602 
603  /* Create a segment list and add the index to the SR policy */
604  create_sl (sr_policy, segments, weight, is_encap);
605 
606   /* If the FIBs don't exist, create them */
607  if (sm->fib_table_ip6 == (u32) ~ 0)
608  {
609       sm->fib_table_ip6 = fib_table_create_and_lock (FIB_PROTOCOL_IP6,
610                                                      FIB_SOURCE_SR,
611                                                      "SRv6 steering of IP6 prefixes through BSIDs");
612       sm->fib_table_ip4 = fib_table_create_and_lock (FIB_PROTOCOL_IP6,
613                                                      FIB_SOURCE_SR,
614                                                      "SRv6 steering of IP4 prefixes through BSIDs");
615  }
616 
617  /* Create IPv6 FIB for the BindingSID attached to the DPO of the only SL */
618  if (sr_policy->type == SR_POLICY_TYPE_DEFAULT)
619  update_lb (sr_policy);
620  else if (sr_policy->type == SR_POLICY_TYPE_SPRAY)
621  update_replicate (sr_policy);
622  return 0;
623 }
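/*
 * Illustrative call of sr_policy_add() (a sketch, not in the original source;
 * the addresses and error handling are placeholders):
 *
 *   ip6_address_t bsid, seg;          // e.g. bsid = 2001:db8::100, seg = 2001:db8::2
 *   ip6_address_t *segments = 0;
 *   vec_add1 (segments, seg);
 *   int rv = sr_policy_add (&bsid, segments,
 *                           (u32) ~0,              // default weight
 *                           SR_POLICY_TYPE_DEFAULT,
 *                           (u32) ~0,              // default FIB table
 *                           1);                    // is_encap
 *   vec_free (segments);
 */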
624 
625 /**
626  * @brief Delete a SR policy
627  *
628  * @param bsid is the bindingSID of the SR Policy
629  * @param index is the index of the SR policy
630  *
631  * @return 0 if correct, else error
632  */
633 int
634 sr_policy_del (ip6_address_t * bsid, u32 index)
635 {
636  ip6_sr_main_t *sm = &sr_main;
637  ip6_sr_policy_t *sr_policy = 0;
638  ip6_sr_sl_t *segment_list;
639  u32 *sl_index;
640  uword *p;
641 
642  if (bsid)
643  {
644  p = mhash_get (&sm->sr_policies_index_hash, bsid);
645  if (p)
646  sr_policy = pool_elt_at_index (sm->sr_policies, p[0]);
647  else
648  return -1;
649  }
650  else
651  {
652  sr_policy = pool_elt_at_index (sm->sr_policies, index);
653  if (!sr_policy)
654  return -1;
655  }
656 
657  /* Remove BindingSID FIB entry */
658   fib_prefix_t pfx = {
659     .fp_proto = FIB_PROTOCOL_IP6,
660     .fp_len = 128,
661  .fp_addr = {
662  .ip6 = sr_policy->bsid,
663  }
664  ,
665  };
666 
667   fib_table_entry_special_remove (fib_table_find (FIB_PROTOCOL_IP6,
668                                                   sr_policy->fib_table),
669                                   &pfx, FIB_SOURCE_SR);
670 
671   fib_table_entry_special_remove (sm->fib_table_ip6, &pfx, FIB_SOURCE_SR);
672 
673   if (sr_policy->is_encap)
674     fib_table_entry_special_remove (sm->fib_table_ip4, &pfx, FIB_SOURCE_SR);
675 
676  if (dpo_id_is_valid (&sr_policy->bsid_dpo))
677  {
678  dpo_reset (&sr_policy->bsid_dpo);
679  dpo_reset (&sr_policy->ip4_dpo);
680  dpo_reset (&sr_policy->ip6_dpo);
681  }
682 
683  /* Clean SID Lists */
684  vec_foreach (sl_index, sr_policy->segments_lists)
685  {
686  segment_list = pool_elt_at_index (sm->sid_lists, *sl_index);
687  vec_free (segment_list->segments);
688  vec_free (segment_list->rewrite);
689  if (!sr_policy->is_encap)
690  vec_free (segment_list->rewrite_bsid);
691  pool_put_index (sm->sid_lists, *sl_index);
692  }
693 
694  /* Remove SR policy entry */
695  mhash_unset (&sm->sr_policies_index_hash, &sr_policy->bsid, NULL);
696  pool_put (sm->sr_policies, sr_policy);
697 
698  /* If FIB empty unlock it */
699  if (!pool_elts (sm->sr_policies) && !pool_elts (sm->steer_policies))
700     {
701       fib_table_unlock (sm->fib_table_ip6, FIB_PROTOCOL_IP6, FIB_SOURCE_SR);
702       fib_table_unlock (sm->fib_table_ip4, FIB_PROTOCOL_IP6, FIB_SOURCE_SR);
703       sm->fib_table_ip6 = (u32) ~ 0;
704  sm->fib_table_ip4 = (u32) ~ 0;
705  }
706 
707  return 0;
708 }
709 
710 /**
711  * @brief Modify an existing SR policy
712  *
713  * The possible modifications are adding a new Segment List, modifying an
714  * existing Segment List (modify the weight only) and delete a given
715  * Segment List from the SR Policy.
716  *
717  * @param bsid is the bindingSID of the SR Policy
718  * @param index is the index of the SR policy
719  * @param fib_table is the VRF where to install the FIB entry for the BSID
720  * @param operation is the operation to perform (1 add SL, 2 delete SL, 3 modify SL weight)
721  * @param segments is a vector of IPv6 addresses composing the segment list
722  * @param sl_index is the index of the Segment List to modify/delete
723  * @param weight is the weight of the sid list. optional.
724  * @param is_encap Mode. Encapsulation or SRH insertion.
725  *
726  * @return 0 if correct, else error
727  */
728 int
729 sr_policy_mod (ip6_address_t * bsid, u32 index, u32 fib_table,
730  u8 operation, ip6_address_t * segments, u32 sl_index,
731  u32 weight)
732 {
733  ip6_sr_main_t *sm = &sr_main;
734  ip6_sr_policy_t *sr_policy = 0;
735  ip6_sr_sl_t *segment_list;
736  u32 *sl_index_iterate;
737  uword *p;
738 
739  if (bsid)
740  {
741  p = mhash_get (&sm->sr_policies_index_hash, bsid);
742  if (p)
743  sr_policy = pool_elt_at_index (sm->sr_policies, p[0]);
744  else
745  return -1;
746  }
747  else
748  {
749  sr_policy = pool_elt_at_index (sm->sr_policies, index);
750  if (!sr_policy)
751  return -1;
752  }
753 
754  if (operation == 1) /* Add SR List to an existing SR policy */
755  {
756  /* Create the new SL */
757  segment_list =
758  create_sl (sr_policy, segments, weight, sr_policy->is_encap);
759 
760  /* Create a new LB DPO */
761  if (sr_policy->type == SR_POLICY_TYPE_DEFAULT)
762  update_lb (sr_policy);
763  else if (sr_policy->type == SR_POLICY_TYPE_SPRAY)
764  update_replicate (sr_policy);
765  }
766  else if (operation == 2) /* Delete SR List from an existing SR policy */
767  {
768  /* Check that currently there are more than one SID list */
769  if (vec_len (sr_policy->segments_lists) == 1)
770  return -21;
771 
772  /* Check that the SR list does exist and is assigned to the sr policy */
773  vec_foreach (sl_index_iterate, sr_policy->segments_lists)
774  if (*sl_index_iterate == sl_index)
775  break;
776 
777  if (*sl_index_iterate != sl_index)
778  return -22;
779 
780  /* Remove the lucky SR list that is being kicked out */
781  segment_list = pool_elt_at_index (sm->sid_lists, sl_index);
782  vec_free (segment_list->segments);
783  vec_free (segment_list->rewrite);
784  if (!sr_policy->is_encap)
785  vec_free (segment_list->rewrite_bsid);
786  pool_put_index (sm->sid_lists, sl_index);
787  vec_del1 (sr_policy->segments_lists,
788  sl_index_iterate - sr_policy->segments_lists);
789 
790  /* Create a new LB DPO */
791  if (sr_policy->type == SR_POLICY_TYPE_DEFAULT)
792  update_lb (sr_policy);
793  else if (sr_policy->type == SR_POLICY_TYPE_SPRAY)
794  update_replicate (sr_policy);
795  }
796  else if (operation == 3) /* Modify the weight of an existing SR List */
797  {
798  /* Find the corresponding SL */
799  vec_foreach (sl_index_iterate, sr_policy->segments_lists)
800  if (*sl_index_iterate == sl_index)
801  break;
802 
803  if (*sl_index_iterate != sl_index)
804  return -32;
805 
806  /* Change the weight */
807  segment_list = pool_elt_at_index (sm->sid_lists, sl_index);
808  segment_list->weight = weight;
809 
810  /* Update LB */
811  if (sr_policy->type == SR_POLICY_TYPE_DEFAULT)
812  update_lb (sr_policy);
813  }
814  else /* Incorrect op. */
815  return -1;
816 
817  return 0;
818 }
819 
820 /**
821  * @brief CLI for 'sr policies' command family
822  */
823 static clib_error_t *
824 sr_policy_command_fn (vlib_main_t * vm, unformat_input_t * input,
825                       vlib_cli_command_t * cmd)
826 {
827  int rv = -1;
828  char is_del = 0, is_add = 0, is_mod = 0;
829  char policy_set = 0;
830  ip6_address_t bsid, next_address;
831  u32 sr_policy_index = (u32) ~ 0, sl_index = (u32) ~ 0;
832  u32 weight = (u32) ~ 0, fib_table = (u32) ~ 0;
833  ip6_address_t *segments = 0, *this_seg;
834  u8 operation = 0;
835  char is_encap = 1;
836  char is_spray = 0;
837 
838   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
839     {
840  if (!is_add && !is_mod && !is_del && unformat (input, "add"))
841  is_add = 1;
842  else if (!is_add && !is_mod && !is_del && unformat (input, "del"))
843  is_del = 1;
844  else if (!is_add && !is_mod && !is_del && unformat (input, "mod"))
845  is_mod = 1;
846  else if (!policy_set
847  && unformat (input, "bsid %U", unformat_ip6_address, &bsid))
848  policy_set = 1;
849  else if (!is_add && !policy_set
850  && unformat (input, "index %d", &sr_policy_index))
851  policy_set = 1;
852  else if (unformat (input, "weight %d", &weight));
853  else
854  if (unformat (input, "next %U", unformat_ip6_address, &next_address))
855  {
856  vec_add2 (segments, this_seg, 1);
857  clib_memcpy_fast (this_seg->as_u8, next_address.as_u8,
858  sizeof (*this_seg));
859  }
860  else if (unformat (input, "add sl"))
861  operation = 1;
862  else if (unformat (input, "del sl index %d", &sl_index))
863  operation = 2;
864  else if (unformat (input, "mod sl index %d", &sl_index))
865  operation = 3;
866  else if (fib_table == (u32) ~ 0
867  && unformat (input, "fib-table %d", &fib_table));
868  else if (unformat (input, "encap"))
869  is_encap = 1;
870  else if (unformat (input, "insert"))
871  is_encap = 0;
872  else if (unformat (input, "spray"))
873  is_spray = 1;
874  else
875  break;
876  }
877 
878  if (!is_add && !is_mod && !is_del)
879  return clib_error_return (0, "Incorrect CLI");
880 
881  if (!policy_set)
882  return clib_error_return (0, "No SR policy BSID or index specified");
883 
884  if (is_add)
885  {
886  if (vec_len (segments) == 0)
887  return clib_error_return (0, "No Segment List specified");
888  rv = sr_policy_add (&bsid, segments, weight,
889  (is_spray ? SR_POLICY_TYPE_SPRAY :
890  SR_POLICY_TYPE_DEFAULT), fib_table, is_encap);
891  vec_free (segments);
892  }
893  else if (is_del)
894  rv = sr_policy_del ((sr_policy_index != (u32) ~ 0 ? NULL : &bsid),
895  sr_policy_index);
896  else if (is_mod)
897  {
898  if (!operation)
899  return clib_error_return (0, "No SL modification specified");
900  if (operation != 1 && sl_index == (u32) ~ 0)
901  return clib_error_return (0, "No Segment List index specified");
902  if (operation == 1 && vec_len (segments) == 0)
903  return clib_error_return (0, "No Segment List specified");
904  if (operation == 3 && weight == (u32) ~ 0)
905  return clib_error_return (0, "No new weight for the SL specified");
906  rv = sr_policy_mod ((sr_policy_index != (u32) ~ 0 ? NULL : &bsid),
907  sr_policy_index, fib_table, operation, segments,
908  sl_index, weight);
909  vec_free (segments);
910  }
911 
912  switch (rv)
913  {
914  case 0:
915  break;
916  case 1:
917  return 0;
918  case -12:
919  return clib_error_return (0,
920  "There is already a FIB entry for the BindingSID address.\n"
921  "The SR policy could not be created.");
922  case -13:
923  return clib_error_return (0, "The specified FIB table does not exist.");
924  case -21:
925  return clib_error_return (0,
926  "The selected SR policy only contains ONE segment list. "
927  "Please remove the SR policy instead");
928  case -22:
929  return clib_error_return (0,
930  "Could not delete the segment list. "
931  "It is not associated with that SR policy.");
932  case -32:
933  return clib_error_return (0,
934  "Could not modify the segment list. "
935  "The given SL is not associated with such SR policy.");
936  default:
937  return clib_error_return (0, "BUG: sr policy returns %d", rv);
938  }
939  return 0;
940 }
941 
942 /* *INDENT-OFF* */
943 VLIB_CLI_COMMAND (sr_policy_command, static) = {
944  .path = "sr policy",
945  .short_help = "sr policy [add||del||mod] [bsid 2001::1||index 5] "
946  "next A:: next B:: next C:: (weight 1) (fib-table 2) (encap|insert)",
947  .long_help =
948  "Manipulation of SR policies.\n"
949  "A Segment Routing policy may contain several SID lists. Each SID list has\n"
950  "an associated weight (default 1), which will result in wECMP (uECMP).\n"
951  "Segment Routing policies might be of type encapsulation or srh insertion\n"
952  "Each SR policy will be associated with a unique BindingSID.\n"
953  "A BindingSID is a locally allocated SegmentID. For every packet that arrives\n"
954  "with IPv6_DA:BSID such traffic will be steered into the SR policy.\n"
955  "The add command will create a SR policy with its first segment list (sl)\n"
956  "The mod command allows you to add, remove, or modify the existing segment lists\n"
957  "within an SR policy.\n"
958  "The del command allows you to delete a SR policy along with all its associated\n"
959  "SID lists.\n",
960  .function = sr_policy_command_fn,
961 };
962 /* *INDENT-ON* */
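/*
 * Example invocations of the command registered above (illustrative only;
 * the addresses are placeholders):
 *
 *   sr policy add bsid 2001:db8::100 next 2001:db8::2 next 2001:db8::3 encap
 *   sr policy mod bsid 2001:db8::100 add sl next 2001:db8::4 weight 2
 *   sr policy mod index 0 mod sl index 1 weight 3
 *   sr policy del bsid 2001:db8::100
 */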
963 
964 /**
965  * @brief CLI to display onscreen all the SR policies
966  */
967 static clib_error_t *
968 show_sr_policies_command_fn (vlib_main_t * vm, unformat_input_t * input,
969                              vlib_cli_command_t * cmd)
970 {
971  ip6_sr_main_t *sm = &sr_main;
972  u32 *sl_index;
973  ip6_sr_sl_t *segment_list = 0;
974  ip6_sr_policy_t *sr_policy = 0;
975   ip6_sr_policy_t **vec_policies = 0;
976   ip6_address_t *addr;
977   u8 *s;
978  int i = 0;
979 
980  vlib_cli_output (vm, "SR policies:");
981 
982  /* *INDENT-OFF* */
983  pool_foreach (sr_policy, sm->sr_policies,
984  {vec_add1 (vec_policies, sr_policy); } );
985  /* *INDENT-ON* */
986 
987  vec_foreach_index (i, vec_policies)
988  {
989  sr_policy = vec_policies[i];
990  vlib_cli_output (vm, "[%u].-\tBSID: %U",
991  (u32) (sr_policy - sm->sr_policies),
992  format_ip6_address, &sr_policy->bsid);
993  vlib_cli_output (vm, "\tBehavior: %s",
994  (sr_policy->is_encap ? "Encapsulation" :
995  "SRH insertion"));
996  vlib_cli_output (vm, "\tType: %s",
997  (sr_policy->type ==
998  SR_POLICY_TYPE_DEFAULT ? "Default" : "Spray"));
999  vlib_cli_output (vm, "\tFIB table: %u",
1000  (sr_policy->fib_table !=
1001  (u32) ~ 0 ? sr_policy->fib_table : 0));
1002  vlib_cli_output (vm, "\tSegment Lists:");
1003  vec_foreach (sl_index, sr_policy->segments_lists)
1004  {
1005  s = NULL;
1006  s = format (s, "\t[%u].- ", *sl_index);
1007  segment_list = pool_elt_at_index (sm->sid_lists, *sl_index);
1008  s = format (s, "< ");
1009  vec_foreach (addr, segment_list->segments)
1010  {
1011  s = format (s, "%U, ", format_ip6_address, addr);
1012  }
1013  s = format (s, "\b\b > ");
1014  s = format (s, "weight: %u", segment_list->weight);
1015  vlib_cli_output (vm, " %v", s);
1016  }
1017  vlib_cli_output (vm, "-----------");
1018  }
1019  return 0;
1020 }
1021 
1022 /* *INDENT-OFF* */
1023 VLIB_CLI_COMMAND (show_sr_policies_command, static) = {
1024  .path = "show sr policies",
1025  .short_help = "show sr policies",
1026  .function = show_sr_policies_command_fn,
1027 };
1028 /* *INDENT-ON* */
1029 
1030 /*************************** SR rewrite graph node ****************************/
1031 /**
1032  * @brief Trace for the SR Policy Rewrite graph node
1033  */
1034 static u8 *
1035 format_sr_policy_rewrite_trace (u8 * s, va_list * args)
1036 {
1037  //TODO
1038  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1039  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1040   sr_policy_rewrite_trace_t *t = va_arg (*args, sr_policy_rewrite_trace_t *);
1041 
1042   s = format
1043     (s, "SR-policy-rewrite: src %U dst %U",
1044      format_ip6_address, &t->src, format_ip6_address, &t->dst);
1045 
1046  return s;
1047 }
1048 
1049 /**
1050  * @brief IPv6 encapsulation processing as per RFC2473
1051  */
1052 static_always_inline void
1053 encaps_processing_v6 (vlib_node_runtime_t * node,
1054                       vlib_buffer_t * b0,
1055  ip6_header_t * ip0, ip6_header_t * ip0_encap)
1056 {
1057  u32 new_l0;
1058 
1059  ip0_encap->hop_limit -= 1;
1060  new_l0 =
1061  ip0->payload_length + sizeof (ip6_header_t) +
1062  clib_net_to_host_u16 (ip0_encap->payload_length);
1063  ip0->payload_length = clib_host_to_net_u16 (new_l0);
1064   ip0->ip_version_traffic_class_and_flow_label =
1065     ip0_encap->ip_version_traffic_class_and_flow_label;
1066 }
1067 
1068 /**
1069  * @brief Graph node for applying a SR policy into an IPv6 packet. Encapsulation
1070  */
1071 static uword
1072 sr_policy_rewrite_encaps (vlib_main_t * vm, vlib_node_runtime_t * node,
1073                           vlib_frame_t * from_frame)
1074 {
1075  ip6_sr_main_t *sm = &sr_main;
1076  u32 n_left_from, next_index, *from, *to_next;
1077 
1078  from = vlib_frame_vector_args (from_frame);
1079  n_left_from = from_frame->n_vectors;
1080 
1081  next_index = node->cached_next_index;
1082 
1083  int encap_pkts = 0, bsid_pkts = 0;
1084 
1085  while (n_left_from > 0)
1086  {
1087  u32 n_left_to_next;
1088 
1089  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1090 
1091  /* Quad - Loop */
1092  while (n_left_from >= 8 && n_left_to_next >= 4)
1093  {
1094  u32 bi0, bi1, bi2, bi3;
1095  vlib_buffer_t *b0, *b1, *b2, *b3;
1096  u32 next0, next1, next2, next3;
1097  next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1098  ip6_header_t *ip0, *ip1, *ip2, *ip3;
1099  ip6_header_t *ip0_encap, *ip1_encap, *ip2_encap, *ip3_encap;
1100  ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
1101 
1102  /* Prefetch next iteration. */
1103  {
1104  vlib_buffer_t *p4, *p5, *p6, *p7;
1105 
1106  p4 = vlib_get_buffer (vm, from[4]);
1107  p5 = vlib_get_buffer (vm, from[5]);
1108  p6 = vlib_get_buffer (vm, from[6]);
1109  p7 = vlib_get_buffer (vm, from[7]);
1110 
1111  /* Prefetch the buffer header and packet for the N+2 loop iteration */
1112  vlib_prefetch_buffer_header (p4, LOAD);
1113  vlib_prefetch_buffer_header (p5, LOAD);
1114  vlib_prefetch_buffer_header (p6, LOAD);
1115  vlib_prefetch_buffer_header (p7, LOAD);
1116 
1116 
1117             CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
1118             CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
1119             CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
1120             CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
1121           }
1122 
1123  to_next[0] = bi0 = from[0];
1124  to_next[1] = bi1 = from[1];
1125  to_next[2] = bi2 = from[2];
1126  to_next[3] = bi3 = from[3];
1127  from += 4;
1128  to_next += 4;
1129  n_left_from -= 4;
1130  n_left_to_next -= 4;
1131 
1132  b0 = vlib_get_buffer (vm, bi0);
1133  b1 = vlib_get_buffer (vm, bi1);
1134  b2 = vlib_get_buffer (vm, bi2);
1135  b3 = vlib_get_buffer (vm, bi3);
1136 
1137           sl0 =
1138             pool_elt_at_index (sm->sid_lists,
1139                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1140           sl1 =
1141             pool_elt_at_index (sm->sid_lists,
1142                                vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
1143           sl2 =
1144             pool_elt_at_index (sm->sid_lists,
1145                                vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
1146           sl3 =
1147             pool_elt_at_index (sm->sid_lists,
1148                                vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
1149 
1150           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1151                   vec_len (sl0->rewrite));
1152           ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1153                   vec_len (sl1->rewrite));
1154           ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1155                   vec_len (sl2->rewrite));
1156           ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1157                   vec_len (sl3->rewrite));
1157  vec_len (sl3->rewrite));
1158 
1159  ip0_encap = vlib_buffer_get_current (b0);
1160  ip1_encap = vlib_buffer_get_current (b1);
1161  ip2_encap = vlib_buffer_get_current (b2);
1162  ip3_encap = vlib_buffer_get_current (b3);
1163 
1164  clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
1165  sl0->rewrite, vec_len (sl0->rewrite));
1166  clib_memcpy_fast (((u8 *) ip1_encap) - vec_len (sl1->rewrite),
1167  sl1->rewrite, vec_len (sl1->rewrite));
1168  clib_memcpy_fast (((u8 *) ip2_encap) - vec_len (sl2->rewrite),
1169  sl2->rewrite, vec_len (sl2->rewrite));
1170  clib_memcpy_fast (((u8 *) ip3_encap) - vec_len (sl3->rewrite),
1171  sl3->rewrite, vec_len (sl3->rewrite));
1172 
1173  vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1174  vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite));
1175  vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite));
1176  vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite));
1177 
1178  ip0 = vlib_buffer_get_current (b0);
1179  ip1 = vlib_buffer_get_current (b1);
1180  ip2 = vlib_buffer_get_current (b2);
1181  ip3 = vlib_buffer_get_current (b3);
1182 
1183  encaps_processing_v6 (node, b0, ip0, ip0_encap);
1184  encaps_processing_v6 (node, b1, ip1, ip1_encap);
1185  encaps_processing_v6 (node, b2, ip2, ip2_encap);
1186  encaps_processing_v6 (node, b3, ip3, ip3_encap);
1187 
1188  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
1189  {
1190  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1191                 {
1192                   sr_policy_rewrite_trace_t *tr =
1193                     vlib_add_trace (vm, node, b0, sizeof (*tr));
1194                   clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1195                                     sizeof (tr->src.as_u8));
1196                   clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1197                                     sizeof (tr->dst.as_u8));
1198                 }
1199 
1200               if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
1201                 {
1202                   sr_policy_rewrite_trace_t *tr =
1203                     vlib_add_trace (vm, node, b1, sizeof (*tr));
1204                   clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
1205                                     sizeof (tr->src.as_u8));
1206                   clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
1207                                     sizeof (tr->dst.as_u8));
1208                 }
1209 
1210               if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
1211                 {
1212                   sr_policy_rewrite_trace_t *tr =
1213                     vlib_add_trace (vm, node, b2, sizeof (*tr));
1214                   clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
1215                                     sizeof (tr->src.as_u8));
1216                   clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
1217                                     sizeof (tr->dst.as_u8));
1218                 }
1219 
1220               if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
1221                 {
1222                   sr_policy_rewrite_trace_t *tr =
1223                     vlib_add_trace (vm, node, b3, sizeof (*tr));
1224                   clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
1225                                     sizeof (tr->src.as_u8));
1226                   clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
1227                                     sizeof (tr->dst.as_u8));
1228                 }
1229  }
1230 
1231  encap_pkts += 4;
1232  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
1233  n_left_to_next, bi0, bi1, bi2, bi3,
1234  next0, next1, next2, next3);
1235  }
1236 
1237  /* Single loop for potentially the last three packets */
1238  while (n_left_from > 0 && n_left_to_next > 0)
1239  {
1240  u32 bi0;
1241  vlib_buffer_t *b0;
1242  ip6_header_t *ip0 = 0, *ip0_encap = 0;
1243  ip6_sr_sl_t *sl0;
1244  u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1245 
1246  bi0 = from[0];
1247  to_next[0] = bi0;
1248  from += 1;
1249  to_next += 1;
1250  n_left_from -= 1;
1251  n_left_to_next -= 1;
1252  b0 = vlib_get_buffer (vm, bi0);
1253 
1254           sl0 =
1255             pool_elt_at_index (sm->sid_lists,
1256                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1257           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1258                   vec_len (sl0->rewrite));
1259 
1260  ip0_encap = vlib_buffer_get_current (b0);
1261 
1262  clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
1263  sl0->rewrite, vec_len (sl0->rewrite));
1264  vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1265 
1266  ip0 = vlib_buffer_get_current (b0);
1267 
1268  encaps_processing_v6 (node, b0, ip0, ip0_encap);
1269 
1270  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
1271  PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1272  {
1273               sr_policy_rewrite_trace_t *tr =
1274                 vlib_add_trace (vm, node, b0, sizeof (*tr));
1275               clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1276                                 sizeof (tr->src.as_u8));
1277               clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1278                                 sizeof (tr->dst.as_u8));
1279  }
1280 
1281  encap_pkts++;
1282  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1283  n_left_to_next, bi0, next0);
1284  }
1285 
1286  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1287  }
1288 
1289  /* Update counters */
1290   vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
1291                                SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
1292                                encap_pkts);
1293   vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
1294                                SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
1295                                bsid_pkts);
1296 
1297  return from_frame->n_vectors;
1298 }
1299 
1300 /* *INDENT-OFF* */
1302  .function = sr_policy_rewrite_encaps,
1303  .name = "sr-pl-rewrite-encaps",
1304  .vector_size = sizeof (u32),
1305   .format_trace = format_sr_policy_rewrite_trace,
1306   .type = VLIB_NODE_TYPE_INTERNAL,
1307   .n_errors = SR_POLICY_REWRITE_N_ERROR,
1308  .error_strings = sr_policy_rewrite_error_strings,
1309  .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
1310  .next_nodes = {
1311 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
1312     foreach_sr_policy_rewrite_next
1313 #undef _
1314  },
1315 };
1316 /* *INDENT-ON* */
1317 
1318 /**
1319  * @brief IPv4 encapsulation processing as per RFC2473
1320  */
1321 static_always_inline void
1322 encaps_processing_v4 (vlib_node_runtime_t * node,
1323                       vlib_buffer_t * b0,
1324  ip6_header_t * ip0, ip4_header_t * ip0_encap)
1325 {
1326  u32 new_l0;
1327  ip6_sr_header_t *sr0;
1328 
1329  u32 checksum0;
1330 
1331  /* Inner IPv4: Decrement TTL & update checksum */
1332  ip0_encap->ttl -= 1;
1333  checksum0 = ip0_encap->checksum + clib_host_to_net_u16 (0x0100);
1334  checksum0 += checksum0 >= 0xffff;
1335  ip0_encap->checksum = checksum0;
1336 
1337  /* Outer IPv6: Update length, FL, proto */
1338  new_l0 = ip0->payload_length + clib_net_to_host_u16 (ip0_encap->length);
1339  ip0->payload_length = clib_host_to_net_u16 (new_l0);
1340   ip0->ip_version_traffic_class_and_flow_label =
1341     clib_host_to_net_u32 (0 | ((6 & 0xF) << 28) |
1342  ((ip0_encap->tos & 0xFF) << 20));
1343  if (ip0->protocol == IP_PROTOCOL_IPV6_ROUTE)
1344  {
1345  sr0 = (void *) (ip0 + 1);
1346  sr0->protocol = IP_PROTOCOL_IP_IN_IP;
1347  }
1348  else
1349  ip0->protocol = IP_PROTOCOL_IP_IN_IP;
1350 }
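/*
 * Note on the checksum update above (explanatory, not in the original
 * source): decrementing the IPv4 TTL lowers the 16-bit header word that
 * holds TTL/protocol by 0x0100, so the one's-complement checksum is adjusted
 * incrementally by adding host-to-net 0x0100 and folding the carry
 * (checksum0 += checksum0 >= 0xffff), in the spirit of RFC 1624, instead of
 * recomputing the whole header checksum.
 */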
1351 
1352 /**
1353  * @brief Graph node for applying a SR policy into an IPv4 packet. Encapsulation
1354  */
1355 static uword
1356 sr_policy_rewrite_encaps_v4 (vlib_main_t * vm, vlib_node_runtime_t * node,
1357                              vlib_frame_t * from_frame)
1358 {
1359  ip6_sr_main_t *sm = &sr_main;
1360  u32 n_left_from, next_index, *from, *to_next;
1361 
1362  from = vlib_frame_vector_args (from_frame);
1363  n_left_from = from_frame->n_vectors;
1364 
1365  next_index = node->cached_next_index;
1366 
1367  int encap_pkts = 0, bsid_pkts = 0;
1368 
1369  while (n_left_from > 0)
1370  {
1371  u32 n_left_to_next;
1372 
1373  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1374 
1375  /* Quad - Loop */
1376  while (n_left_from >= 8 && n_left_to_next >= 4)
1377  {
1378  u32 bi0, bi1, bi2, bi3;
1379  vlib_buffer_t *b0, *b1, *b2, *b3;
1380  u32 next0, next1, next2, next3;
1381  next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1382  ip6_header_t *ip0, *ip1, *ip2, *ip3;
1383  ip4_header_t *ip0_encap, *ip1_encap, *ip2_encap, *ip3_encap;
1384  ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
1385 
1386  /* Prefetch next iteration. */
1387  {
1388  vlib_buffer_t *p4, *p5, *p6, *p7;
1389 
1390  p4 = vlib_get_buffer (vm, from[4]);
1391  p5 = vlib_get_buffer (vm, from[5]);
1392  p6 = vlib_get_buffer (vm, from[6]);
1393  p7 = vlib_get_buffer (vm, from[7]);
1394 
1395  /* Prefetch the buffer header and packet for the N+2 loop iteration */
1396  vlib_prefetch_buffer_header (p4, LOAD);
1397  vlib_prefetch_buffer_header (p5, LOAD);
1398  vlib_prefetch_buffer_header (p6, LOAD);
1399  vlib_prefetch_buffer_header (p7, LOAD);
1400 
1400 
1401             CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
1402             CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
1403             CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
1404             CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
1405           }
1406 
1407  to_next[0] = bi0 = from[0];
1408  to_next[1] = bi1 = from[1];
1409  to_next[2] = bi2 = from[2];
1410  to_next[3] = bi3 = from[3];
1411  from += 4;
1412  to_next += 4;
1413  n_left_from -= 4;
1414  n_left_to_next -= 4;
1415 
1416  b0 = vlib_get_buffer (vm, bi0);
1417  b1 = vlib_get_buffer (vm, bi1);
1418  b2 = vlib_get_buffer (vm, bi2);
1419  b3 = vlib_get_buffer (vm, bi3);
1420 
1421           sl0 =
1422             pool_elt_at_index (sm->sid_lists,
1423                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1424           sl1 =
1425             pool_elt_at_index (sm->sid_lists,
1426                                vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
1427           sl2 =
1428             pool_elt_at_index (sm->sid_lists,
1429                                vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
1430           sl3 =
1431             pool_elt_at_index (sm->sid_lists,
1432                                vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
1433           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1434                   vec_len (sl0->rewrite));
1435           ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1436                   vec_len (sl1->rewrite));
1437           ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1438                   vec_len (sl2->rewrite));
1439           ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1440                   vec_len (sl3->rewrite));
1441 
1442  ip0_encap = vlib_buffer_get_current (b0);
1443  ip1_encap = vlib_buffer_get_current (b1);
1444  ip2_encap = vlib_buffer_get_current (b2);
1445  ip3_encap = vlib_buffer_get_current (b3);
1446 
1447  clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
1448  sl0->rewrite, vec_len (sl0->rewrite));
1449  clib_memcpy_fast (((u8 *) ip1_encap) - vec_len (sl1->rewrite),
1450  sl1->rewrite, vec_len (sl1->rewrite));
1451  clib_memcpy_fast (((u8 *) ip2_encap) - vec_len (sl2->rewrite),
1452  sl2->rewrite, vec_len (sl2->rewrite));
1453  clib_memcpy_fast (((u8 *) ip3_encap) - vec_len (sl3->rewrite),
1454  sl3->rewrite, vec_len (sl3->rewrite));
1455 
1456  vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1457  vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite));
1458  vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite));
1459  vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite));
1460 
1461  ip0 = vlib_buffer_get_current (b0);
1462  ip1 = vlib_buffer_get_current (b1);
1463  ip2 = vlib_buffer_get_current (b2);
1464  ip3 = vlib_buffer_get_current (b3);
1465 
1466  encaps_processing_v4 (node, b0, ip0, ip0_encap);
1467  encaps_processing_v4 (node, b1, ip1, ip1_encap);
1468  encaps_processing_v4 (node, b2, ip2, ip2_encap);
1469  encaps_processing_v4 (node, b3, ip3, ip3_encap);
1470 
1471  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
1472  {
1473  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1474                 {
1475                   sr_policy_rewrite_trace_t *tr =
1476                     vlib_add_trace (vm, node, b0, sizeof (*tr));
1477                   clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1478                                     sizeof (tr->src.as_u8));
1479                   clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1480                                     sizeof (tr->dst.as_u8));
1481                 }
1482 
1483               if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
1484                 {
1485                   sr_policy_rewrite_trace_t *tr =
1486                     vlib_add_trace (vm, node, b1, sizeof (*tr));
1487                   clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
1488                                     sizeof (tr->src.as_u8));
1489                   clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
1490                                     sizeof (tr->dst.as_u8));
1491                 }
1492 
1493               if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
1494                 {
1495                   sr_policy_rewrite_trace_t *tr =
1496                     vlib_add_trace (vm, node, b2, sizeof (*tr));
1497                   clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
1498                                     sizeof (tr->src.as_u8));
1499                   clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
1500                                     sizeof (tr->dst.as_u8));
1501                 }
1502 
1503               if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
1504                 {
1505                   sr_policy_rewrite_trace_t *tr =
1506                     vlib_add_trace (vm, node, b3, sizeof (*tr));
1507                   clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
1508                                     sizeof (tr->src.as_u8));
1509                   clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
1510                                     sizeof (tr->dst.as_u8));
1511                 }
1512  }
1513 
1514  encap_pkts += 4;
1515  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
1516  n_left_to_next, bi0, bi1, bi2, bi3,
1517  next0, next1, next2, next3);
1518  }
1519 
1520  /* Single loop for potentially the last three packets */
1521  while (n_left_from > 0 && n_left_to_next > 0)
1522  {
1523  u32 bi0;
1524  vlib_buffer_t *b0;
1525  ip6_header_t *ip0 = 0;
1526  ip4_header_t *ip0_encap = 0;
1527  ip6_sr_sl_t *sl0;
1528  u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1529 
1530  bi0 = from[0];
1531  to_next[0] = bi0;
1532  from += 1;
1533  to_next += 1;
1534  n_left_from -= 1;
1535  n_left_to_next -= 1;
1536  b0 = vlib_get_buffer (vm, bi0);
1537 
1538           sl0 =
1539             pool_elt_at_index (sm->sid_lists,
1540                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1541           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1542                   vec_len (sl0->rewrite));
1543 
1544  ip0_encap = vlib_buffer_get_current (b0);
1545 
1546  clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
1547  sl0->rewrite, vec_len (sl0->rewrite));
1548  vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1549 
1550  ip0 = vlib_buffer_get_current (b0);
1551 
1552  encaps_processing_v4 (node, b0, ip0, ip0_encap);
1553 
1554  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
1555  PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1556  {
1557               sr_policy_rewrite_trace_t *tr =
1558                 vlib_add_trace (vm, node, b0, sizeof (*tr));
1559               clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1560                                 sizeof (tr->src.as_u8));
1561               clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1562                                 sizeof (tr->dst.as_u8));
1563  }
1564 
1565  encap_pkts++;
1566  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1567  n_left_to_next, bi0, next0);
1568  }
1569 
1570  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1571  }
1572 
1573  /* Update counters */
1574   vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_v4_node.index,
1575                                SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
1576                                encap_pkts);
1577   vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_v4_node.index,
1578                                SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
1579                                bsid_pkts);
1580 
1581  return from_frame->n_vectors;
1582 }
1583 
1584 /* *INDENT-OFF* */
1585 VLIB_REGISTER_NODE (sr_policy_rewrite_encaps_v4_node) = {
1586   .function = sr_policy_rewrite_encaps_v4,
1587  .name = "sr-pl-rewrite-encaps-v4",
1588  .vector_size = sizeof (u32),
1589   .format_trace = format_sr_policy_rewrite_trace,
1590   .type = VLIB_NODE_TYPE_INTERNAL,
1591   .n_errors = SR_POLICY_REWRITE_N_ERROR,
1592  .error_strings = sr_policy_rewrite_error_strings,
1593  .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
1594  .next_nodes = {
1595 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
1596     foreach_sr_policy_rewrite_next
1597 #undef _
1598  },
1599 };
1600 /* *INDENT-ON* */
1601 
1602 always_inline u32
1603 ip_flow_hash (void *data)
1604 {
1605   ip4_header_t *iph = (ip4_header_t *) data;
1606 
1607   if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
1608     return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
1609   else
1610     return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
1611 }
1612 
1613 always_inline u64
1614 mac_to_u64 (u8 * m)
1615 {
1616  return (*((u64 *) m) & 0xffffffffffff);
1617 }
1618 
1619 always_inline u32
1620 l2_flow_hash (vlib_buffer_t * b0)
1621 {
1622  ethernet_header_t *eh;
1623  u64 a, b, c;
1624  uword is_ip, eh_size;
1625  u16 eh_type;
1626 
1627  eh = vlib_buffer_get_current (b0);
1628  eh_type = clib_net_to_host_u16 (eh->type);
1629  eh_size = ethernet_buffer_header_size (b0);
1630 
1631  is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);
1632 
1633  /* since we have 2 cache lines, use them */
1634  if (is_ip)
1635  a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
1636  else
1637  a = eh->type;
1638 
1639  b = mac_to_u64 ((u8 *) eh->dst_address);
1640  c = mac_to_u64 ((u8 *) eh->src_address);
1641  hash_mix64 (a, b, c);
1642 
1643  return (u32) c;
1644 }
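/*
 * Note (explanatory, not in the original source): the hash computed above is
 * later masked with (vec_len (segments_lists) - 1) to pick a Segment List for
 * an L2 frame, so the spread across SID lists is uniform only when the number
 * of lists is a power of two; with other counts some lists are never selected
 * by this fast path.
 */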
1645 
1646 /**
1647  * @brief Graph node for applying a SR policy into a L2 frame
1648  */
1649 static uword
1650 sr_policy_rewrite_encaps_l2 (vlib_main_t * vm, vlib_node_runtime_t * node,
1651                              vlib_frame_t * from_frame)
1652 {
1653  ip6_sr_main_t *sm = &sr_main;
1654  u32 n_left_from, next_index, *from, *to_next;
1655 
1656  from = vlib_frame_vector_args (from_frame);
1657  n_left_from = from_frame->n_vectors;
1658 
1659  next_index = node->cached_next_index;
1660 
1661  int encap_pkts = 0, bsid_pkts = 0;
1662 
1663  while (n_left_from > 0)
1664  {
1665  u32 n_left_to_next;
1666 
1667  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1668 
1669  /* Quad - Loop */
1670  while (n_left_from >= 8 && n_left_to_next >= 4)
1671  {
1672  u32 bi0, bi1, bi2, bi3;
1673  vlib_buffer_t *b0, *b1, *b2, *b3;
1674  u32 next0, next1, next2, next3;
1675  next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1676  ethernet_header_t *en0, *en1, *en2, *en3;
1677  ip6_header_t *ip0, *ip1, *ip2, *ip3;
1678  ip6_sr_header_t *sr0, *sr1, *sr2, *sr3;
1679  ip6_sr_policy_t *sp0, *sp1, *sp2, *sp3;
1680  ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
1681 
1682  /* Prefetch next iteration. */
1683  {
1684  vlib_buffer_t *p4, *p5, *p6, *p7;
1685 
1686  p4 = vlib_get_buffer (vm, from[4]);
1687  p5 = vlib_get_buffer (vm, from[5]);
1688  p6 = vlib_get_buffer (vm, from[6]);
1689  p7 = vlib_get_buffer (vm, from[7]);
1690 
1691  /* Prefetch the buffer header and packet for the N+2 loop iteration */
1692  vlib_prefetch_buffer_header (p4, LOAD);
1693  vlib_prefetch_buffer_header (p5, LOAD);
1694  vlib_prefetch_buffer_header (p6, LOAD);
1695  vlib_prefetch_buffer_header (p7, LOAD);
1696 
1696 
1697             CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
1698             CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
1699             CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
1700             CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
1701           }
1702 
1703  to_next[0] = bi0 = from[0];
1704  to_next[1] = bi1 = from[1];
1705  to_next[2] = bi2 = from[2];
1706  to_next[3] = bi3 = from[3];
1707  from += 4;
1708  to_next += 4;
1709  n_left_from -= 4;
1710  n_left_to_next -= 4;
1711 
1712  b0 = vlib_get_buffer (vm, bi0);
1713  b1 = vlib_get_buffer (vm, bi1);
1714  b2 = vlib_get_buffer (vm, bi2);
1715  b3 = vlib_get_buffer (vm, bi3);
1716 
1717  sp0 = pool_elt_at_index (sm->sr_policies,
1719  (b0)->sw_if_index
1720  [VLIB_RX]]);
1721 
1722  sp1 = pool_elt_at_index (sm->sr_policies,
1724  (b1)->sw_if_index
1725  [VLIB_RX]]);
1726 
1727  sp2 = pool_elt_at_index (sm->sr_policies,
1729  (b2)->sw_if_index
1730  [VLIB_RX]]);
1731 
1732  sp3 = pool_elt_at_index (sm->sr_policies,
1734  (b3)->sw_if_index
1735  [VLIB_RX]]);
1736 
1737  if (vec_len (sp0->segments_lists) == 1)
1738  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = sp0->segments_lists[0];
1739  else
1740  {
1741  vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0);
1742  vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
1743  sp0->segments_lists[(vnet_buffer (b0)->ip.flow_hash &
1744  (vec_len (sp0->segments_lists) - 1))];
1745  }
1746 
1747  if (vec_len (sp1->segments_lists) == 1)
1748             vnet_buffer (b1)->ip.adj_index[VLIB_TX] = sp1->segments_lists[0];
1749  else
1750  {
1751  vnet_buffer (b1)->ip.flow_hash = l2_flow_hash (b1);
1752  vnet_buffer (b1)->ip.adj_index[VLIB_TX] =
1753  sp1->segments_lists[(vnet_buffer (b1)->ip.flow_hash &
1754  (vec_len (sp1->segments_lists) - 1))];
1755  }
1756 
1757  if (vec_len (sp2->segments_lists) == 1)
1758             vnet_buffer (b2)->ip.adj_index[VLIB_TX] = sp2->segments_lists[0];
1759  else
1760  {
1761  vnet_buffer (b2)->ip.flow_hash = l2_flow_hash (b2);
1762  vnet_buffer (b2)->ip.adj_index[VLIB_TX] =
1763  sp2->segments_lists[(vnet_buffer (b2)->ip.flow_hash &
1764  (vec_len (sp2->segments_lists) - 1))];
1765  }
1766 
1767  if (vec_len (sp3->segments_lists) == 1)
1768             vnet_buffer (b3)->ip.adj_index[VLIB_TX] = sp3->segments_lists[0];
1769  else
1770  {
1771  vnet_buffer (b3)->ip.flow_hash = l2_flow_hash (b3);
1772  vnet_buffer (b3)->ip.adj_index[VLIB_TX] =
1773  sp3->segments_lists[(vnet_buffer (b3)->ip.flow_hash &
1774  (vec_len (sp3->segments_lists) - 1))];
1775  }
1776 
1777           sl0 =
1778             pool_elt_at_index (sm->sid_lists,
1779                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1780           sl1 =
1781             pool_elt_at_index (sm->sid_lists,
1782                                vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
1783           sl2 =
1784             pool_elt_at_index (sm->sid_lists,
1785                                vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
1786           sl3 =
1787             pool_elt_at_index (sm->sid_lists,
1788                                vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
1789 
1790           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1791                   vec_len (sl0->rewrite));
1792           ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1793                   vec_len (sl1->rewrite));
1794           ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1795                   vec_len (sl2->rewrite));
1796           ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1797                   vec_len (sl3->rewrite));
1798 
1799  en0 = vlib_buffer_get_current (b0);
1800  en1 = vlib_buffer_get_current (b1);
1801  en2 = vlib_buffer_get_current (b2);
1802  en3 = vlib_buffer_get_current (b3);
1803 
1804  clib_memcpy_fast (((u8 *) en0) - vec_len (sl0->rewrite),
1805  sl0->rewrite, vec_len (sl0->rewrite));
1806  clib_memcpy_fast (((u8 *) en1) - vec_len (sl1->rewrite),
1807  sl1->rewrite, vec_len (sl1->rewrite));
1808  clib_memcpy_fast (((u8 *) en2) - vec_len (sl2->rewrite),
1809  sl2->rewrite, vec_len (sl2->rewrite));
1810  clib_memcpy_fast (((u8 *) en3) - vec_len (sl3->rewrite),
1811  sl3->rewrite, vec_len (sl3->rewrite));
1812 
1813  vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1814  vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite));
1815  vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite));
1816  vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite));
1817 
1818  ip0 = vlib_buffer_get_current (b0);
1819  ip1 = vlib_buffer_get_current (b1);
1820  ip2 = vlib_buffer_get_current (b2);
1821  ip3 = vlib_buffer_get_current (b3);
1822 
1823  ip0->payload_length =
1824  clib_host_to_net_u16 (b0->current_length - sizeof (ip6_header_t));
1825  ip1->payload_length =
1826  clib_host_to_net_u16 (b1->current_length - sizeof (ip6_header_t));
1827  ip2->payload_length =
1828  clib_host_to_net_u16 (b2->current_length - sizeof (ip6_header_t));
1829  ip3->payload_length =
1830  clib_host_to_net_u16 (b3->current_length - sizeof (ip6_header_t));
1831 
1832  if (ip0->protocol == IP_PROTOCOL_IPV6_ROUTE)
1833  {
1834  sr0 = (void *) (ip0 + 1);
1835  sr0->protocol = IP_PROTOCOL_IP6_NONXT;
1836  }
1837  else
1838  ip0->protocol = IP_PROTOCOL_IP6_NONXT;
1839 
1840  if (ip1->protocol == IP_PROTOCOL_IPV6_ROUTE)
1841  {
1842  sr1 = (void *) (ip1 + 1);
1843  sr1->protocol = IP_PROTOCOL_IP6_NONXT;
1844  }
1845  else
1846  ip1->protocol = IP_PROTOCOL_IP6_NONXT;
1847 
1848  if (ip2->protocol == IP_PROTOCOL_IPV6_ROUTE)
1849  {
1850  sr2 = (void *) (ip2 + 1);
1851  sr2->protocol = IP_PROTOCOL_IP6_NONXT;
1852  }
1853  else
1854  ip2->protocol = IP_PROTOCOL_IP6_NONXT;
1855 
1856  if (ip3->protocol == IP_PROTOCOL_IPV6_ROUTE)
1857  {
1858  sr3 = (void *) (ip3 + 1);
1859  sr3->protocol = IP_PROTOCOL_IP6_NONXT;
1860  }
1861  else
1862  ip3->protocol = IP_PROTOCOL_IP6_NONXT;
1863 
1864  /* Which Traffic class and flow label do I set ? */
1865  //ip0->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32(0|((6&0xF)<<28)|((ip0_encap->tos&0xFF)<<20));
1866 
1867  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
1868  {
1869  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1870                 {
1871                   sr_policy_rewrite_trace_t *tr =
1872                     vlib_add_trace (vm, node, b0, sizeof (*tr));
1873                   clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1874                                     sizeof (tr->src.as_u8));
1875                   clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1876                                     sizeof (tr->dst.as_u8));
1877                 }
1878 
1879               if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
1880                 {
1881                   sr_policy_rewrite_trace_t *tr =
1882                     vlib_add_trace (vm, node, b1, sizeof (*tr));
1883                   clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
1884                                     sizeof (tr->src.as_u8));
1885                   clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
1886                                     sizeof (tr->dst.as_u8));
1887                 }
1888 
1889               if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
1890                 {
1891                   sr_policy_rewrite_trace_t *tr =
1892                     vlib_add_trace (vm, node, b2, sizeof (*tr));
1893                   clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
1894                                     sizeof (tr->src.as_u8));
1895                   clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
1896                                     sizeof (tr->dst.as_u8));
1897                 }
1898 
1899               if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
1900                 {
1901                   sr_policy_rewrite_trace_t *tr =
1902                     vlib_add_trace (vm, node, b3, sizeof (*tr));
1903                   clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
1904                                     sizeof (tr->src.as_u8));
1905                   clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
1906                                     sizeof (tr->dst.as_u8));
1907                 }
1908  }
1909 
1910  encap_pkts += 4;
1911  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
1912  n_left_to_next, bi0, bi1, bi2, bi3,
1913  next0, next1, next2, next3);
1914  }
1915 
1916  /* Single loop for potentially the last three packets */
1917  while (n_left_from > 0 && n_left_to_next > 0)
1918  {
1919  u32 bi0;
1920  vlib_buffer_t *b0;
1921  ip6_header_t *ip0 = 0;
1922  ip6_sr_header_t *sr0;
1923  ethernet_header_t *en0;
1924  ip6_sr_policy_t *sp0;
1925  ip6_sr_sl_t *sl0;
1926  u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1927 
1928  bi0 = from[0];
1929  to_next[0] = bi0;
1930  from += 1;
1931  to_next += 1;
1932  n_left_from -= 1;
1933  n_left_to_next -= 1;
1934  b0 = vlib_get_buffer (vm, bi0);
1935 
1936  /* Find the SR policy */
1937  sp0 = pool_elt_at_index (sm->sr_policies,
1939  (b0)->sw_if_index
1940  [VLIB_RX]]);
1941 
1942  /* In case there is more than one SL, LB among them */
1943  if (vec_len (sp0->segments_lists) == 1)
1944  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = sp0->segments_lists[0];
1945  else
1946  {
1947  vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0);
1948  vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
1949  sp0->segments_lists[(vnet_buffer (b0)->ip.flow_hash &
1950  (vec_len (sp0->segments_lists) - 1))];
1951  }
1952           sl0 =
1953             pool_elt_at_index (sm->sid_lists,
1954                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1955           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1956                   vec_len (sl0->rewrite));
1957 
1958  en0 = vlib_buffer_get_current (b0);
1959 
1960  clib_memcpy_fast (((u8 *) en0) - vec_len (sl0->rewrite),
1961  sl0->rewrite, vec_len (sl0->rewrite));
1962 
1963  vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1964 
1965  ip0 = vlib_buffer_get_current (b0);
1966 
1967  ip0->payload_length =
1968  clib_host_to_net_u16 (b0->current_length - sizeof (ip6_header_t));
1969 
1970  if (ip0->protocol == IP_PROTOCOL_IPV6_ROUTE)
1971  {
1972  sr0 = (void *) (ip0 + 1);
1973  sr0->protocol = IP_PROTOCOL_IP6_NONXT;
1974  }
1975  else
1976  ip0->protocol = IP_PROTOCOL_IP6_NONXT;
1977 
1978  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
1979  PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1980  {
1981               sr_policy_rewrite_trace_t *tr =
1982                 vlib_add_trace (vm, node, b0, sizeof (*tr));
1983               clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1984                                 sizeof (tr->src.as_u8));
1985               clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1986                                 sizeof (tr->dst.as_u8));
1987  }
1988 
1989  encap_pkts++;
1990  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1991  n_left_to_next, bi0, next0);
1992  }
1993 
1994  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1995  }
1996 
1997  /* Update counters */
1998   vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_l2_node.index,
1999                                SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
2000                                encap_pkts);
2001   vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_l2_node.index,
2002                                SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
2003                                bsid_pkts);
2004 
2005  return from_frame->n_vectors;
2006 }
2007 
2008 /* *INDENT-OFF* */
2009 VLIB_REGISTER_NODE (sr_policy_rewrite_encaps_l2_node) = {
2010   .function = sr_policy_rewrite_encaps_l2,
2011  .name = "sr-pl-rewrite-encaps-l2",
2012  .vector_size = sizeof (u32),
2013   .format_trace = format_sr_policy_rewrite_trace,
2014   .type = VLIB_NODE_TYPE_INTERNAL,
2015   .n_errors = SR_POLICY_REWRITE_N_ERROR,
2016  .error_strings = sr_policy_rewrite_error_strings,
2017  .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
2018  .next_nodes = {
2019 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
2020     foreach_sr_policy_rewrite_next
2021 #undef _
2022  },
2023 };
2024 /* *INDENT-ON* */
2025 
2026 /**
2027  * @brief Graph node for applying a SR policy into a packet. SRH insertion.
2028  */
2029 static uword
2030 sr_policy_rewrite_insert (vlib_main_t * vm, vlib_node_runtime_t * node,
2031                           vlib_frame_t * from_frame)
2032 {
2033  ip6_sr_main_t *sm = &sr_main;
2034  u32 n_left_from, next_index, *from, *to_next;
2035 
2036  from = vlib_frame_vector_args (from_frame);
2037  n_left_from = from_frame->n_vectors;
2038 
2039  next_index = node->cached_next_index;
2040 
2041  int insert_pkts = 0, bsid_pkts = 0;
2042 
2043  while (n_left_from > 0)
2044  {
2045  u32 n_left_to_next;
2046 
2047  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2048 
2049  /* Quad - Loop */
2050  while (n_left_from >= 8 && n_left_to_next >= 4)
2051  {
2052  u32 bi0, bi1, bi2, bi3;
2053  vlib_buffer_t *b0, *b1, *b2, *b3;
2054  u32 next0, next1, next2, next3;
2055  next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
2056  ip6_header_t *ip0, *ip1, *ip2, *ip3;
2057  ip6_sr_header_t *sr0, *sr1, *sr2, *sr3;
2058  ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
2059  u16 new_l0, new_l1, new_l2, new_l3;
2060 
2061  /* Prefetch next iteration. */
2062  {
2063  vlib_buffer_t *p4, *p5, *p6, *p7;
2064 
2065  p4 = vlib_get_buffer (vm, from[4]);
2066  p5 = vlib_get_buffer (vm, from[5]);
2067  p6 = vlib_get_buffer (vm, from[6]);
2068  p7 = vlib_get_buffer (vm, from[7]);
2069 
2070  /* Prefetch the buffer header and packet for the N+2 loop iteration */
2071  vlib_prefetch_buffer_header (p4, LOAD);
2072  vlib_prefetch_buffer_header (p5, LOAD);
2073  vlib_prefetch_buffer_header (p6, LOAD);
2074  vlib_prefetch_buffer_header (p7, LOAD);
2075 
2076  CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
2077  CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
2078  CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
2079  CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
2080  }
2081 
2082  to_next[0] = bi0 = from[0];
2083  to_next[1] = bi1 = from[1];
2084  to_next[2] = bi2 = from[2];
2085  to_next[3] = bi3 = from[3];
2086  from += 4;
2087  to_next += 4;
2088  n_left_from -= 4;
2089  n_left_to_next -= 4;
2090 
2091  b0 = vlib_get_buffer (vm, bi0);
2092  b1 = vlib_get_buffer (vm, bi1);
2093  b2 = vlib_get_buffer (vm, bi2);
2094  b3 = vlib_get_buffer (vm, bi3);
2095 
2096  sl0 =
2097  pool_elt_at_index (sm->sid_lists,
2098  vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
2099  sl1 =
2100  pool_elt_at_index (sm->sid_lists,
2101  vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
2102  sl2 =
2103  pool_elt_at_index (sm->sid_lists,
2104  vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
2105  sl3 =
2106  pool_elt_at_index (sm->sid_lists,
2107  vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
2108  ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2109  vec_len (sl0->rewrite));
2110  ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2111  vec_len (sl1->rewrite));
2112  ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2113  vec_len (sl2->rewrite));
2114  ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2115  vec_len (sl3->rewrite));
2116 
2117  ip0 = vlib_buffer_get_current (b0);
2118  ip1 = vlib_buffer_get_current (b1);
2119  ip2 = vlib_buffer_get_current (b2);
2120  ip3 = vlib_buffer_get_current (b3);
2121 
2122  if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2123  sr0 =
2124  (ip6_sr_header_t *) (((void *) (ip0 + 1)) +
2125  ip6_ext_header_len (ip0 + 1));
2126  else
2127  sr0 = (ip6_sr_header_t *) (ip0 + 1);
2128 
2129  if (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2130  sr1 =
2131  (ip6_sr_header_t *) (((void *) (ip1 + 1)) +
2132  ip6_ext_header_len (ip1 + 1));
2133  else
2134  sr1 = (ip6_sr_header_t *) (ip1 + 1);
2135 
2136  if (ip2->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2137  sr2 =
2138  (ip6_sr_header_t *) (((void *) (ip2 + 1)) +
2139  ip6_ext_header_len (ip2 + 1));
2140  else
2141  sr2 = (ip6_sr_header_t *) (ip2 + 1);
2142 
2143  if (ip3->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2144  sr3 =
2145  (ip6_sr_header_t *) (((void *) (ip3 + 1)) +
2146  ip6_ext_header_len (ip3 + 1));
2147  else
2148  sr3 = (ip6_sr_header_t *) (ip3 + 1);
2149 
2150  clib_memcpy_fast ((u8 *) ip0 - vec_len (sl0->rewrite), (u8 *) ip0,
2151  (void *) sr0 - (void *) ip0);
2152  clib_memcpy_fast ((u8 *) ip1 - vec_len (sl1->rewrite), (u8 *) ip1,
2153  (void *) sr1 - (void *) ip1);
2154  clib_memcpy_fast ((u8 *) ip2 - vec_len (sl2->rewrite), (u8 *) ip2,
2155  (void *) sr2 - (void *) ip2);
2156  clib_memcpy_fast ((u8 *) ip3 - vec_len (sl3->rewrite), (u8 *) ip3,
2157  (void *) sr3 - (void *) ip3);
2158 
2159  clib_memcpy_fast (((u8 *) sr0 - vec_len (sl0->rewrite)),
2160  sl0->rewrite, vec_len (sl0->rewrite));
2161  clib_memcpy_fast (((u8 *) sr1 - vec_len (sl1->rewrite)),
2162  sl1->rewrite, vec_len (sl1->rewrite));
2163  clib_memcpy_fast (((u8 *) sr2 - vec_len (sl2->rewrite)),
2164  sl2->rewrite, vec_len (sl2->rewrite));
2165  clib_memcpy_fast (((u8 *) sr3 - vec_len (sl3->rewrite)),
2166  sl3->rewrite, vec_len (sl3->rewrite));
2167 
2168  vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
2169  vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite));
2170  vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite));
2171  vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite));
2172 
2173  ip0 = ((void *) ip0) - vec_len (sl0->rewrite);
2174  ip1 = ((void *) ip1) - vec_len (sl1->rewrite);
2175  ip2 = ((void *) ip2) - vec_len (sl2->rewrite);
2176  ip3 = ((void *) ip3) - vec_len (sl3->rewrite);
2177 
2178  ip0->hop_limit -= 1;
2179  ip1->hop_limit -= 1;
2180  ip2->hop_limit -= 1;
2181  ip3->hop_limit -= 1;
2182 
2183  new_l0 =
2184  clib_net_to_host_u16 (ip0->payload_length) +
2185  vec_len (sl0->rewrite);
2186  new_l1 =
2187  clib_net_to_host_u16 (ip1->payload_length) +
2188  vec_len (sl1->rewrite);
2189  new_l2 =
2190  clib_net_to_host_u16 (ip2->payload_length) +
2191  vec_len (sl2->rewrite);
2192  new_l3 =
2193  clib_net_to_host_u16 (ip3->payload_length) +
2194  vec_len (sl3->rewrite);
2195 
2196  ip0->payload_length = clib_host_to_net_u16 (new_l0);
2197  ip1->payload_length = clib_host_to_net_u16 (new_l1);
2198  ip2->payload_length = clib_host_to_net_u16 (new_l2);
2199  ip3->payload_length = clib_host_to_net_u16 (new_l3);
2200 
2201  sr0 = ((void *) sr0) - vec_len (sl0->rewrite);
2202  sr1 = ((void *) sr1) - vec_len (sl1->rewrite);
2203  sr2 = ((void *) sr2) - vec_len (sl2->rewrite);
2204  sr3 = ((void *) sr3) - vec_len (sl3->rewrite);
2205 
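	  /* Preserve the original DA as segment 0 (the final SID of the SRH)
	     and retarget the DA to the currently active segment. */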
2206  sr0->segments->as_u64[0] = ip0->dst_address.as_u64[0];
2207  sr0->segments->as_u64[1] = ip0->dst_address.as_u64[1];
2208  sr1->segments->as_u64[0] = ip1->dst_address.as_u64[0];
2209  sr1->segments->as_u64[1] = ip1->dst_address.as_u64[1];
2210  sr2->segments->as_u64[0] = ip2->dst_address.as_u64[0];
2211  sr2->segments->as_u64[1] = ip2->dst_address.as_u64[1];
2212  sr3->segments->as_u64[0] = ip3->dst_address.as_u64[0];
2213  sr3->segments->as_u64[1] = ip3->dst_address.as_u64[1];
2214 
2215  ip0->dst_address.as_u64[0] =
2216  (sr0->segments + sr0->segments_left)->as_u64[0];
2217  ip0->dst_address.as_u64[1] =
2218  (sr0->segments + sr0->segments_left)->as_u64[1];
2219  ip1->dst_address.as_u64[0] =
2220  (sr1->segments + sr1->segments_left)->as_u64[0];
2221  ip1->dst_address.as_u64[1] =
2222  (sr1->segments + sr1->segments_left)->as_u64[1];
2223  ip2->dst_address.as_u64[0] =
2224  (sr2->segments + sr2->segments_left)->as_u64[0];
2225  ip2->dst_address.as_u64[1] =
2226  (sr2->segments + sr2->segments_left)->as_u64[1];
2227  ip3->dst_address.as_u64[0] =
2228  (sr3->segments + sr3->segments_left)->as_u64[0];
2229  ip3->dst_address.as_u64[1] =
2230  (sr3->segments + sr3->segments_left)->as_u64[1];
2231 
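	  /* Splice the new SRH into the next-header chain: either directly
	     after the fixed IPv6 header, or after the preceding extension
	     header when one is present. */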
2232  ip6_ext_header_t *ip_ext;
2233  if (ip0 + 1 == (void *) sr0)
2234  {
2235  sr0->protocol = ip0->protocol;
2236  ip0->protocol = IP_PROTOCOL_IPV6_ROUTE;
2237  }
2238  else
2239  {
2240  ip_ext = (void *) (ip0 + 1);
2241  sr0->protocol = ip_ext->next_hdr;
2242  ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2243  }
2244 
2245  if (ip1 + 1 == (void *) sr1)
2246  {
2247  sr1->protocol = ip1->protocol;
2248  ip1->protocol = IP_PROTOCOL_IPV6_ROUTE;
2249  }
2250  else
2251  {
2252  ip_ext = (void *) (ip1 + 1);
2253  sr1->protocol = ip_ext->next_hdr;
2254  ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2255  }
2256 
2257  if (ip2 + 1 == (void *) sr2)
2258  {
2259  sr2->protocol = ip2->protocol;
2260  ip2->protocol = IP_PROTOCOL_IPV6_ROUTE;
2261  }
2262  else
2263  {
2264  ip_ext = (void *) (ip2 + 1);
2265  sr2->protocol = ip_ext->next_hdr;
2266  ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2267  }
2268 
2269  if (ip3 + 1 == (void *) sr3)
2270  {
2271  sr3->protocol = ip3->protocol;
2272  ip3->protocol = IP_PROTOCOL_IPV6_ROUTE;
2273  }
2274  else
2275  {
2276  ip_ext = (void *) (ip3 + 1);
2277  sr3->protocol = ip_ext->next_hdr;
2278  ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2279  }
2280 
2281  insert_pkts += 4;
2282 
2283  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
2284  {
2285  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2286  {
2287  sr_policy_rewrite_trace_t *tr =
2288  vlib_add_trace (vm, node, b0, sizeof (*tr));
2289  clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
2290  sizeof (tr->src.as_u8));
2291  clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
2292  sizeof (tr->dst.as_u8));
2293  }
2294 
2295  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
2296  {
2297  sr_policy_rewrite_trace_t *tr =
2298  vlib_add_trace (vm, node, b1, sizeof (*tr));
2299  clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
2300  sizeof (tr->src.as_u8));
2301  clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
2302  sizeof (tr->dst.as_u8));
2303  }
2304 
2305  if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
2306  {
2307  sr_policy_rewrite_trace_t *tr =
2308  vlib_add_trace (vm, node, b2, sizeof (*tr));
2309  clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
2310  sizeof (tr->src.as_u8));
2311  clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
2312  sizeof (tr->dst.as_u8));
2313  }
2314 
2315  if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
2316  {
2317  sr_policy_rewrite_trace_t *tr =
2318  vlib_add_trace (vm, node, b3, sizeof (*tr));
2319  clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
2320  sizeof (tr->src.as_u8));
2321  clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
2322  sizeof (tr->dst.as_u8));
2323  }
2324  }
2325 
2326  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
2327  n_left_to_next, bi0, bi1, bi2, bi3,
2328  next0, next1, next2, next3);
2329  }
2330 
2331  /* Single loop for potentially the last three packets */
2332  while (n_left_from > 0 && n_left_to_next > 0)
2333  {
2334  u32 bi0;
2335  vlib_buffer_t *b0;
2336  ip6_header_t *ip0 = 0;
2337  ip6_sr_header_t *sr0 = 0;
2338  ip6_sr_sl_t *sl0;
2339  u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
2340  u16 new_l0 = 0;
2341 
2342  bi0 = from[0];
2343  to_next[0] = bi0;
2344  from += 1;
2345  to_next += 1;
2346  n_left_from -= 1;
2347  n_left_to_next -= 1;
2348 
2349  b0 = vlib_get_buffer (vm, bi0);
2350  sl0 =
2351  pool_elt_at_index (sm->sid_lists,
2352  vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
2353  ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2354  vec_len (sl0->rewrite));
2355 
2356  ip0 = vlib_buffer_get_current (b0);
2357 
2358  if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2359  sr0 =
2360  (ip6_sr_header_t *) (((void *) (ip0 + 1)) +
2361  ip6_ext_header_len (ip0 + 1));
2362  else
2363  sr0 = (ip6_sr_header_t *) (ip0 + 1);
2364 
2365  clib_memcpy_fast ((u8 *) ip0 - vec_len (sl0->rewrite), (u8 *) ip0,
2366  (void *) sr0 - (void *) ip0);
2367  clib_memcpy_fast (((u8 *) sr0 - vec_len (sl0->rewrite)),
2368  sl0->rewrite, vec_len (sl0->rewrite));
2369 
2370  vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
2371 
2372  ip0 = ((void *) ip0) - vec_len (sl0->rewrite);
2373  ip0->hop_limit -= 1;
2374  new_l0 =
2375  clib_net_to_host_u16 (ip0->payload_length) +
2376  vec_len (sl0->rewrite);
2377  ip0->payload_length = clib_host_to_net_u16 (new_l0);
2378 
2379  sr0 = ((void *) sr0) - vec_len (sl0->rewrite);
2380  sr0->segments->as_u64[0] = ip0->dst_address.as_u64[0];
2381  sr0->segments->as_u64[1] = ip0->dst_address.as_u64[1];
2382 
2383  ip0->dst_address.as_u64[0] =
2384  (sr0->segments + sr0->segments_left)->as_u64[0];
2385  ip0->dst_address.as_u64[1] =
2386  (sr0->segments + sr0->segments_left)->as_u64[1];
2387 
2388  if (ip0 + 1 == (void *) sr0)
2389  {
2390  sr0->protocol = ip0->protocol;
2391  ip0->protocol = IP_PROTOCOL_IPV6_ROUTE;
2392  }
2393  else
2394  {
2395  ip6_ext_header_t *ip_ext = (void *) (ip0 + 1);
2396  sr0->protocol = ip_ext->next_hdr;
2397  ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2398  }
2399 
2400  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
2401  PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2402  {
2403  sr_policy_rewrite_trace_t *tr =
2404  vlib_add_trace (vm, node, b0, sizeof (*tr));
2405  clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
2406  sizeof (tr->src.as_u8));
2407  clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
2408  sizeof (tr->dst.as_u8));
2409  }
2410 
2411  insert_pkts++;
2412 
2413  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2414  n_left_to_next, bi0, next0);
2415  }
2416 
2417  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2418  }
2419 
2420  /* Update counters */
2421  vlib_node_increment_counter (vm, sr_policy_rewrite_insert_node.index,
2422  SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
2423  insert_pkts);
2424  vlib_node_increment_counter (vm, sr_policy_rewrite_insert_node.index,
2425  SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
2426  bsid_pkts);
2427  return from_frame->n_vectors;
2428 }
2429 
2430 /* *INDENT-OFF* */
2431 VLIB_REGISTER_NODE (sr_policy_rewrite_insert_node) = {
2432  .function = sr_policy_rewrite_insert,
2433  .name = "sr-pl-rewrite-insert",
2434  .vector_size = sizeof (u32),
2435  .format_trace = format_sr_policy_rewrite_trace,
2436  .type = VLIB_NODE_TYPE_INTERNAL,
2437  .n_errors = SR_POLICY_REWRITE_N_ERROR,
2438  .error_strings = sr_policy_rewrite_error_strings,
2439  .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
2440  .next_nodes = {
2441 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
2442  foreach_sr_policy_rewrite_next
2443 #undef _
2444  },
2445 };
2446 /* *INDENT-ON* */
2447 
2448 /**
2449  * @brief Graph node for applying an SR policy into a packet. BSID - SRH insertion.
2450  */
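 /* BindingSID variant of SRH insertion: the packet was steered here because
    its DA matched a BSID, so the incoming DA is not preserved as a segment;
    the separately precomputed rewrite_bsid string is spliced in and the DA is
    rewritten to the active segment of that SID list. */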
2451 static uword
2452 sr_policy_rewrite_b_insert (vlib_main_t * vm, vlib_node_runtime_t * node,
2453  vlib_frame_t * from_frame)
2454 {
2455  ip6_sr_main_t *sm = &sr_main;
2456  u32 n_left_from, next_index, *from, *to_next;
2457 
2458  from = vlib_frame_vector_args (from_frame);
2459  n_left_from = from_frame->n_vectors;
2460 
2461  next_index = node->cached_next_index;
2462 
2463  int insert_pkts = 0, bsid_pkts = 0;
2464 
2465  while (n_left_from > 0)
2466  {
2467  u32 n_left_to_next;
2468 
2469  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2470 
2471  /* Quad - Loop */
2472  while (n_left_from >= 8 && n_left_to_next >= 4)
2473  {
2474  u32 bi0, bi1, bi2, bi3;
2475  vlib_buffer_t *b0, *b1, *b2, *b3;
2476  u32 next0, next1, next2, next3;
2477  next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
2478  ip6_header_t *ip0, *ip1, *ip2, *ip3;
2479  ip6_sr_header_t *sr0, *sr1, *sr2, *sr3;
2480  ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
2481  u16 new_l0, new_l1, new_l2, new_l3;
2482 
2483  /* Prefetch next iteration. */
2484  {
2485  vlib_buffer_t *p4, *p5, *p6, *p7;
2486 
2487  p4 = vlib_get_buffer (vm, from[4]);
2488  p5 = vlib_get_buffer (vm, from[5]);
2489  p6 = vlib_get_buffer (vm, from[6]);
2490  p7 = vlib_get_buffer (vm, from[7]);
2491 
2492  /* Prefetch the buffer header and packet for the N+2 loop iteration */
2493  vlib_prefetch_buffer_header (p4, LOAD);
2494  vlib_prefetch_buffer_header (p5, LOAD);
2495  vlib_prefetch_buffer_header (p6, LOAD);
2496  vlib_prefetch_buffer_header (p7, LOAD);
2497 
2498  CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
2499  CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
2500  CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
2501  CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
2502  }
2503 
2504  to_next[0] = bi0 = from[0];
2505  to_next[1] = bi1 = from[1];
2506  to_next[2] = bi2 = from[2];
2507  to_next[3] = bi3 = from[3];
2508  from += 4;
2509  to_next += 4;
2510  n_left_from -= 4;
2511  n_left_to_next -= 4;
2512 
2513  b0 = vlib_get_buffer (vm, bi0);
2514  b1 = vlib_get_buffer (vm, bi1);
2515  b2 = vlib_get_buffer (vm, bi2);
2516  b3 = vlib_get_buffer (vm, bi3);
2517 
2518  sl0 =
2519  pool_elt_at_index (sm->sid_lists,
2520  vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
2521  sl1 =
2522  pool_elt_at_index (sm->sid_lists,
2523  vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
2524  sl2 =
2525  pool_elt_at_index (sm->sid_lists,
2526  vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
2527  sl3 =
2528  pool_elt_at_index (sm->sid_lists,
2529  vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
2530  ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2531  vec_len (sl0->rewrite_bsid));
2532  ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2533  vec_len (sl1->rewrite_bsid));
2534  ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2535  vec_len (sl2->rewrite_bsid));
2536  ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2537  vec_len (sl3->rewrite_bsid));
2538 
2539  ip0 = vlib_buffer_get_current (b0);
2540  ip1 = vlib_buffer_get_current (b1);
2541  ip2 = vlib_buffer_get_current (b2);
2542  ip3 = vlib_buffer_get_current (b3);
2543 
2544  if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2545  sr0 =
2546  (ip6_sr_header_t *) (((void *) (ip0 + 1)) +
2547  ip6_ext_header_len (ip0 + 1));
2548  else
2549  sr0 = (ip6_sr_header_t *) (ip0 + 1);
2550 
2551  if (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2552  sr1 =
2553  (ip6_sr_header_t *) (((void *) (ip1 + 1)) +
2554  ip6_ext_header_len (ip1 + 1));
2555  else
2556  sr1 = (ip6_sr_header_t *) (ip1 + 1);
2557 
2558  if (ip2->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2559  sr2 =
2560  (ip6_sr_header_t *) (((void *) (ip2 + 1)) +
2561  ip6_ext_header_len (ip2 + 1));
2562  else
2563  sr2 = (ip6_sr_header_t *) (ip2 + 1);
2564 
2565  if (ip3->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2566  sr3 =
2567  (ip6_sr_header_t *) (((void *) (ip3 + 1)) +
2568  ip6_ext_header_len (ip3 + 1));
2569  else
2570  sr3 = (ip6_sr_header_t *) (ip3 + 1);
2571 
2572  clib_memcpy_fast ((u8 *) ip0 - vec_len (sl0->rewrite_bsid),
2573  (u8 *) ip0, (void *) sr0 - (void *) ip0);
2574  clib_memcpy_fast ((u8 *) ip1 - vec_len (sl1->rewrite_bsid),
2575  (u8 *) ip1, (void *) sr1 - (void *) ip1);
2576  clib_memcpy_fast ((u8 *) ip2 - vec_len (sl2->rewrite_bsid),
2577  (u8 *) ip2, (void *) sr2 - (void *) ip2);
2578  clib_memcpy_fast ((u8 *) ip3 - vec_len (sl3->rewrite_bsid),
2579  (u8 *) ip3, (void *) sr3 - (void *) ip3);
2580 
2581  clib_memcpy_fast (((u8 *) sr0 - vec_len (sl0->rewrite_bsid)),
2582  sl0->rewrite_bsid, vec_len (sl0->rewrite_bsid));
2583  clib_memcpy_fast (((u8 *) sr1 - vec_len (sl1->rewrite_bsid)),
2584  sl1->rewrite_bsid, vec_len (sl1->rewrite_bsid));
2585  clib_memcpy_fast (((u8 *) sr2 - vec_len (sl2->rewrite_bsid)),
2586  sl2->rewrite_bsid, vec_len (sl2->rewrite_bsid));
2587  clib_memcpy_fast (((u8 *) sr3 - vec_len (sl3->rewrite_bsid)),
2588  sl3->rewrite_bsid, vec_len (sl3->rewrite_bsid));
2589 
2590  vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite_bsid));
2591  vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite_bsid));
2592  vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite_bsid));
2593  vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite_bsid));
2594 
2595  ip0 = ((void *) ip0) - vec_len (sl0->rewrite_bsid);
2596  ip1 = ((void *) ip1) - vec_len (sl1->rewrite_bsid);
2597  ip2 = ((void *) ip2) - vec_len (sl2->rewrite_bsid);
2598  ip3 = ((void *) ip3) - vec_len (sl3->rewrite_bsid);
2599 
2600  ip0->hop_limit -= 1;
2601  ip1->hop_limit -= 1;
2602  ip2->hop_limit -= 1;
2603  ip3->hop_limit -= 1;
2604 
2605  new_l0 =
2606  clib_net_to_host_u16 (ip0->payload_length) +
2607  vec_len (sl0->rewrite_bsid);
2608  new_l1 =
2609  clib_net_to_host_u16 (ip1->payload_length) +
2610  vec_len (sl1->rewrite_bsid);
2611  new_l2 =
2612  clib_net_to_host_u16 (ip2->payload_length) +
2613  vec_len (sl2->rewrite_bsid);
2614  new_l3 =
2615  clib_net_to_host_u16 (ip3->payload_length) +
2616  vec_len (sl3->rewrite_bsid);
2617 
2618  ip0->payload_length = clib_host_to_net_u16 (new_l0);
2619  ip1->payload_length = clib_host_to_net_u16 (new_l1);
2620  ip2->payload_length = clib_host_to_net_u16 (new_l2);
2621  ip3->payload_length = clib_host_to_net_u16 (new_l3);
2622 
2623  sr0 = ((void *) sr0) - vec_len (sl0->rewrite_bsid);
2624  sr1 = ((void *) sr1) - vec_len (sl1->rewrite_bsid);
2625  sr2 = ((void *) sr2) - vec_len (sl2->rewrite_bsid);
2626  sr3 = ((void *) sr3) - vec_len (sl3->rewrite_bsid);
2627 
2628  ip0->dst_address.as_u64[0] =
2629  (sr0->segments + sr0->segments_left)->as_u64[0];
2630  ip0->dst_address.as_u64[1] =
2631  (sr0->segments + sr0->segments_left)->as_u64[1];
2632  ip1->dst_address.as_u64[0] =
2633  (sr1->segments + sr1->segments_left)->as_u64[0];
2634  ip1->dst_address.as_u64[1] =
2635  (sr1->segments + sr1->segments_left)->as_u64[1];
2636  ip2->dst_address.as_u64[0] =
2637  (sr2->segments + sr2->segments_left)->as_u64[0];
2638  ip2->dst_address.as_u64[1] =
2639  (sr2->segments + sr2->segments_left)->as_u64[1];
2640  ip3->dst_address.as_u64[0] =
2641  (sr3->segments + sr3->segments_left)->as_u64[0];
2642  ip3->dst_address.as_u64[1] =
2643  (sr3->segments + sr3->segments_left)->as_u64[1];
2644 
2645  ip6_ext_header_t *ip_ext;
2646  if (ip0 + 1 == (void *) sr0)
2647  {
2648  sr0->protocol = ip0->protocol;
2649  ip0->protocol = IP_PROTOCOL_IPV6_ROUTE;
2650  }
2651  else
2652  {
2653  ip_ext = (void *) (ip0 + 1);
2654  sr0->protocol = ip_ext->next_hdr;
2655  ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2656  }
2657 
2658  if (ip1 + 1 == (void *) sr1)
2659  {
2660  sr1->protocol = ip1->protocol;
2661  ip1->protocol = IP_PROTOCOL_IPV6_ROUTE;
2662  }
2663  else
2664  {
2665  ip_ext = (void *) (ip1 + 1);
2666  sr1->protocol = ip_ext->next_hdr;
2667  ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2668  }
2669 
2670  if (ip2 + 1 == (void *) sr2)
2671  {
2672  sr2->protocol = ip2->protocol;
2673  ip2->protocol = IP_PROTOCOL_IPV6_ROUTE;
2674  }
2675  else
2676  {
2677  ip_ext = (void *) (ip2 + 1);
2678  sr2->protocol = ip_ext->next_hdr;
2679  ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2680  }
2681 
2682  if (ip3 + 1 == (void *) sr3)
2683  {
2684  sr3->protocol = ip3->protocol;
2685  ip3->protocol = IP_PROTOCOL_IPV6_ROUTE;
2686  }
2687  else
2688  {
2689  ip_ext = (void *) (ip3 + 1);
2690  sr3->protocol = ip_ext->next_hdr;
2691  ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2692  }
2693 
2694  insert_pkts += 4;
2695 
2696  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
2697  {
2698  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2699  {
2700  sr_policy_rewrite_trace_t *tr =
2701  vlib_add_trace (vm, node, b0, sizeof (*tr));
2702  clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
2703  sizeof (tr->src.as_u8));
2704  clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
2705  sizeof (tr->dst.as_u8));
2706  }
2707 
2708  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
2709  {
2710  sr_policy_rewrite_trace_t *tr =
2711  vlib_add_trace (vm, node, b1, sizeof (*tr));
2712  clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
2713  sizeof (tr->src.as_u8));
2714  clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
2715  sizeof (tr->dst.as_u8));
2716  }
2717 
2718  if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
2719  {
2720  sr_policy_rewrite_trace_t *tr =
2721  vlib_add_trace (vm, node, b2, sizeof (*tr));
2722  clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
2723  sizeof (tr->src.as_u8));
2724  clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
2725  sizeof (tr->dst.as_u8));
2726  }
2727 
2728  if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
2729  {
2730  sr_policy_rewrite_trace_t *tr =
2731  vlib_add_trace (vm, node, b3, sizeof (*tr));
2732  clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
2733  sizeof (tr->src.as_u8));
2734  clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
2735  sizeof (tr->dst.as_u8));
2736  }
2737  }
2738 
2739  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
2740  n_left_to_next, bi0, bi1, bi2, bi3,
2741  next0, next1, next2, next3);
2742  }
2743 
2744  /* Single loop for potentially the last three packets */
2745  while (n_left_from > 0 && n_left_to_next > 0)
2746  {
2747  u32 bi0;
2748  vlib_buffer_t *b0;
2749  ip6_header_t *ip0 = 0;
2750  ip6_sr_header_t *sr0 = 0;
2751  ip6_sr_sl_t *sl0;
2752  u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
2753  u16 new_l0 = 0;
2754 
2755  bi0 = from[0];
2756  to_next[0] = bi0;
2757  from += 1;
2758  to_next += 1;
2759  n_left_from -= 1;
2760  n_left_to_next -= 1;
2761 
2762  b0 = vlib_get_buffer (vm, bi0);
2763  sl0 =
2764  pool_elt_at_index (sm->sid_lists,
2765  vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
2766  ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2767  vec_len (sl0->rewrite_bsid));
2768 
2769  ip0 = vlib_buffer_get_current (b0);
2770 
2771  if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2772  sr0 =
2773  (ip6_sr_header_t *) (((void *) (ip0 + 1)) +
2774  ip6_ext_header_len (ip0 + 1));
2775  else
2776  sr0 = (ip6_sr_header_t *) (ip0 + 1);
2777 
2778  clib_memcpy_fast ((u8 *) ip0 - vec_len (sl0->rewrite_bsid),
2779  (u8 *) ip0, (void *) sr0 - (void *) ip0);
2780  clib_memcpy_fast (((u8 *) sr0 - vec_len (sl0->rewrite_bsid)),
2781  sl0->rewrite_bsid, vec_len (sl0->rewrite_bsid));
2782 
2783  vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite_bsid));
2784 
2785  ip0 = ((void *) ip0) - vec_len (sl0->rewrite_bsid);
2786  ip0->hop_limit -= 1;
2787  new_l0 =
2788  clib_net_to_host_u16 (ip0->payload_length) +
2789  vec_len (sl0->rewrite_bsid);
2790  ip0->payload_length = clib_host_to_net_u16 (new_l0);
2791 
2792  sr0 = ((void *) sr0) - vec_len (sl0->rewrite_bsid);
2793 
2794  ip0->dst_address.as_u64[0] =
2795  (sr0->segments + sr0->segments_left)->as_u64[0];
2796  ip0->dst_address.as_u64[1] =
2797  (sr0->segments + sr0->segments_left)->as_u64[1];
2798 
2799  if (ip0 + 1 == (void *) sr0)
2800  {
2801  sr0->protocol = ip0->protocol;
2802  ip0->protocol = IP_PROTOCOL_IPV6_ROUTE;
2803  }
2804  else
2805  {
2806  ip6_ext_header_t *ip_ext = (void *) (ip0 + 1);
2807  sr0->protocol = ip_ext->next_hdr;
2808  ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2809  }
2810 
2811  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
2812  PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2813  {
2814  sr_policy_rewrite_trace_t *tr =
2815  vlib_add_trace (vm, node, b0, sizeof (*tr));
2816  clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
2817  sizeof (tr->src.as_u8));
2818  clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
2819  sizeof (tr->dst.as_u8));
2820  }
2821 
2822  insert_pkts++;
2823 
2824  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2825  n_left_to_next, bi0, next0);
2826  }
2827 
2828  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2829  }
2830 
2831  /* Update counters */
2832  vlib_node_increment_counter (vm, sr_policy_rewrite_b_insert_node.index,
2833  SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
2834  insert_pkts);
2835  vlib_node_increment_counter (vm, sr_policy_rewrite_b_insert_node.index,
2836  SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
2837  bsid_pkts);
2838  return from_frame->n_vectors;
2839 }
2840 
2841 /* *INDENT-OFF* */
2842 VLIB_REGISTER_NODE (sr_policy_rewrite_b_insert_node) = {
2843  .function = sr_policy_rewrite_b_insert,
2844  .name = "sr-pl-rewrite-b-insert",
2845  .vector_size = sizeof (u32),
2846  .format_trace = format_sr_policy_rewrite_trace,
2847  .type = VLIB_NODE_TYPE_INTERNAL,
2848  .n_errors = SR_POLICY_REWRITE_N_ERROR,
2849  .error_strings = sr_policy_rewrite_error_strings,
2850  .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
2851  .next_nodes = {
2852 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
2853  foreach_sr_policy_rewrite_next
2854 #undef _
2855  },
2856 };
2857 /* *INDENT-ON* */
2858 
2859 /**
2860  * @brief Function BSID encapsulation
2861  */
2862 static_always_inline void
2863 end_bsid_encaps_srh_processing (vlib_node_runtime_t * node,
2864  vlib_buffer_t * b0,
2865  ip6_header_t * ip0,
2866  ip6_sr_header_t * sr0, u32 * next0)
2867 {
2868  ip6_address_t *new_dst0;
2869 
2870  if (PREDICT_FALSE (!sr0))
2871  goto error_bsid_encaps;
2872 
2873  if (PREDICT_TRUE (sr0->type == ROUTING_HEADER_TYPE_SR))
2874  {
2875  if (PREDICT_TRUE (sr0->segments_left != 0))
2876  {
2877  sr0->segments_left -= 1;
2878  new_dst0 = (ip6_address_t *) (sr0->segments);
2879  new_dst0 += sr0->segments_left;
2880  ip0->dst_address.as_u64[0] = new_dst0->as_u64[0];
2881  ip0->dst_address.as_u64[1] = new_dst0->as_u64[1];
2882  return;
2883  }
2884  }
2885 
2886 error_bsid_encaps:
2887  *next0 = SR_POLICY_REWRITE_NEXT_ERROR;
2888  b0->error = node->errors[SR_POLICY_REWRITE_ERROR_BSID_ZERO];
2889 }
2890 
2891 /**
2892  * @brief Graph node for applying an SR policy BSID - Encapsulation.
2893  */
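 /* Two steps per packet: end_bsid_encaps_srh_processing () first consumes one
    segment of the SRH that carried the traffic to the BSID (segments_left is
    decremented and the inner DA advanced), then the precomputed outer
    IPv6 + SRH encapsulation is prepended and encaps_processing_v6 () fixes up
    the outer header. */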
2894 static uword
2895 sr_policy_rewrite_b_encaps (vlib_main_t * vm, vlib_node_runtime_t * node,
2896  vlib_frame_t * from_frame)
2897 {
2898  ip6_sr_main_t *sm = &sr_main;
2899  u32 n_left_from, next_index, *from, *to_next;
2900 
2901  from = vlib_frame_vector_args (from_frame);
2902  n_left_from = from_frame->n_vectors;
2903 
2904  next_index = node->cached_next_index;
2905 
2906  int encap_pkts = 0, bsid_pkts = 0;
2907 
2908  while (n_left_from > 0)
2909  {
2910  u32 n_left_to_next;
2911 
2912  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2913 
2914  /* Quad - Loop */
2915  while (n_left_from >= 8 && n_left_to_next >= 4)
2916  {
2917  u32 bi0, bi1, bi2, bi3;
2918  vlib_buffer_t *b0, *b1, *b2, *b3;
2919  u32 next0, next1, next2, next3;
2920  next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
2921  ip6_header_t *ip0, *ip1, *ip2, *ip3;
2922  ip6_header_t *ip0_encap, *ip1_encap, *ip2_encap, *ip3_encap;
2923  ip6_sr_header_t *sr0, *sr1, *sr2, *sr3;
2924  ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
2925 
2926  /* Prefetch next iteration. */
2927  {
2928  vlib_buffer_t *p4, *p5, *p6, *p7;
2929 
2930  p4 = vlib_get_buffer (vm, from[4]);
2931  p5 = vlib_get_buffer (vm, from[5]);
2932  p6 = vlib_get_buffer (vm, from[6]);
2933  p7 = vlib_get_buffer (vm, from[7]);
2934 
2935  /* Prefetch the buffer header and packet for the N+2 loop iteration */
2936  vlib_prefetch_buffer_header (p4, LOAD);
2937  vlib_prefetch_buffer_header (p5, LOAD);
2938  vlib_prefetch_buffer_header (p6, LOAD);
2939  vlib_prefetch_buffer_header (p7, LOAD);
2940 
2941  CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
2942  CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
2943  CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
2944  CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
2945  }
2946 
2947  to_next[0] = bi0 = from[0];
2948  to_next[1] = bi1 = from[1];
2949  to_next[2] = bi2 = from[2];
2950  to_next[3] = bi3 = from[3];
2951  from += 4;
2952  to_next += 4;
2953  n_left_from -= 4;
2954  n_left_to_next -= 4;
2955 
2956  b0 = vlib_get_buffer (vm, bi0);
2957  b1 = vlib_get_buffer (vm, bi1);
2958  b2 = vlib_get_buffer (vm, bi2);
2959  b3 = vlib_get_buffer (vm, bi3);
2960 
2961  sl0 =
2962  pool_elt_at_index (sm->sid_lists,
2963  vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
2964  sl1 =
2965  pool_elt_at_index (sm->sid_lists,
2966  vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
2967  sl2 =
2968  pool_elt_at_index (sm->sid_lists,
2969  vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
2970  sl3 =
2971  pool_elt_at_index (sm->sid_lists,
2972  vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
2973  ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2974  vec_len (sl0->rewrite));
2975  ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2976  vec_len (sl1->rewrite));
2977  ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2978  vec_len (sl2->rewrite));
2979  ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2980  vec_len (sl3->rewrite));
2981 
2982  ip0_encap = vlib_buffer_get_current (b0);
2983  ip1_encap = vlib_buffer_get_current (b1);
2984  ip2_encap = vlib_buffer_get_current (b2);
2985  ip3_encap = vlib_buffer_get_current (b3);
2986 
2987  sr0 =
2988  ip6_ext_header_find (vm, b0, ip0_encap, IP_PROTOCOL_IPV6_ROUTE,
2989  NULL);
2990  sr1 =
2991  ip6_ext_header_find (vm, b1, ip1_encap, IP_PROTOCOL_IPV6_ROUTE,
2992  NULL);
2993  sr2 =
2994  ip6_ext_header_find (vm, b2, ip2_encap, IP_PROTOCOL_IPV6_ROUTE,
2995  NULL);
2996  sr3 =
2997  ip6_ext_header_find (vm, b3, ip3_encap, IP_PROTOCOL_IPV6_ROUTE,
2998  NULL);
2999 
3000  end_bsid_encaps_srh_processing (node, b0, ip0_encap, sr0, &next0);
3001  end_bsid_encaps_srh_processing (node, b1, ip1_encap, sr1, &next1);
3002  end_bsid_encaps_srh_processing (node, b2, ip2_encap, sr2, &next2);
3003  end_bsid_encaps_srh_processing (node, b3, ip3_encap, sr3, &next3);
3004 
3005  clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
3006  sl0->rewrite, vec_len (sl0->rewrite));
3007  clib_memcpy_fast (((u8 *) ip1_encap) - vec_len (sl1->rewrite),
3008  sl1->rewrite, vec_len (sl1->rewrite));
3009  clib_memcpy_fast (((u8 *) ip2_encap) - vec_len (sl2->rewrite),
3010  sl2->rewrite, vec_len (sl2->rewrite));
3011  clib_memcpy_fast (((u8 *) ip3_encap) - vec_len (sl3->rewrite),
3012  sl3->rewrite, vec_len (sl3->rewrite));
3013 
3014  vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
3015  vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite));
3016  vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite));
3017  vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite));
3018 
3019  ip0 = vlib_buffer_get_current (b0);
3020  ip1 = vlib_buffer_get_current (b1);
3021  ip2 = vlib_buffer_get_current (b2);
3022  ip3 = vlib_buffer_get_current (b3);
3023 
3024  encaps_processing_v6 (node, b0, ip0, ip0_encap);
3025  encaps_processing_v6 (node, b1, ip1, ip1_encap);
3026  encaps_processing_v6 (node, b2, ip2, ip2_encap);
3027  encaps_processing_v6 (node, b3, ip3, ip3_encap);
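	  /* encaps_processing_v6 () refreshes the outer IPv6 header after the
	     rewrite has been prepended (payload length, traffic class, hop
	     limit), following the RFC 2473 encapsulation rules. */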
3028 
3029  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
3030  {
3031  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
3032  {
3033  sr_policy_rewrite_trace_t *tr =
3034  vlib_add_trace (vm, node, b0, sizeof (*tr));
3035  clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
3036  sizeof (tr->src.as_u8));
3037  clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
3038  sizeof (tr->dst.as_u8));
3039  }
3040 
3041  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
3042  {
3043  sr_policy_rewrite_trace_t *tr =
3044  vlib_add_trace (vm, node, b1, sizeof (*tr));
3045  clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
3046  sizeof (tr->src.as_u8));
3047  clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
3048  sizeof (tr->dst.as_u8));
3049  }
3050 
3051  if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
3052  {
3053  sr_policy_rewrite_trace_t *tr =
3054  vlib_add_trace (vm, node, b2, sizeof (*tr));
3055  clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
3056  sizeof (tr->src.as_u8));
3057  clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
3058  sizeof (tr->dst.as_u8));
3059  }
3060 
3061  if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
3062  {
3063  sr_policy_rewrite_trace_t *tr =
3064  vlib_add_trace (vm, node, b3, sizeof (*tr));
3065  clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
3066  sizeof (tr->src.as_u8));
3067  clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
3068  sizeof (tr->dst.as_u8));
3069  }
3070  }
3071 
3072  encap_pkts += 4;
3073  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
3074  n_left_to_next, bi0, bi1, bi2, bi3,
3075  next0, next1, next2, next3);
3076  }
3077 
3078  /* Single loop for potentially the last three packets */
3079  while (n_left_from > 0 && n_left_to_next > 0)
3080  {
3081  u32 bi0;
3082  vlib_buffer_t *b0;
3083  ip6_header_t *ip0 = 0, *ip0_encap = 0;
3084  ip6_sr_header_t *sr0;
3085  ip6_sr_sl_t *sl0;
3086  u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
3087 
3088  bi0 = from[0];
3089  to_next[0] = bi0;
3090  from += 1;
3091  to_next += 1;
3092  n_left_from -= 1;
3093  n_left_to_next -= 1;
3094  b0 = vlib_get_buffer (vm, bi0);
3095 
3096  sl0 =
3097  pool_elt_at_index (sm->sid_lists,
3098  vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
3099  ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
3100  vec_len (sl0->rewrite));
3101 
3102  ip0_encap = vlib_buffer_get_current (b0);
3103  sr0 =
3104  ip6_ext_header_find (vm, b0, ip0_encap, IP_PROTOCOL_IPV6_ROUTE,
3105  NULL);
3106  end_bsid_encaps_srh_processing (node, b0, ip0_encap, sr0, &next0);
3107 
3108  clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
3109  sl0->rewrite, vec_len (sl0->rewrite));
3110  vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
3111 
3112  ip0 = vlib_buffer_get_current (b0);
3113 
3114  encaps_processing_v6 (node, b0, ip0, ip0_encap);
3115 
3116  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
3117  PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
3118  {
3119  sr_policy_rewrite_trace_t *tr =
3120  vlib_add_trace (vm, node, b0, sizeof (*tr));
3121  clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
3122  sizeof (tr->src.as_u8));
3123  clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
3124  sizeof (tr->dst.as_u8));
3125  }
3126 
3127  encap_pkts++;
3128  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
3129  n_left_to_next, bi0, next0);
3130  }
3131 
3132  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
3133  }
3134 
3135  /* Update counters */
3136  vlib_node_increment_counter (vm, sr_policy_rewrite_b_encaps_node.index,
3137  SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
3138  encap_pkts);
3139  vlib_node_increment_counter (vm, sr_policy_rewrite_b_encaps_node.index,
3140  SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
3141  bsid_pkts);
3142 
3143  return from_frame->n_vectors;
3144 }
3145 
3146 /* *INDENT-OFF* */
3147 VLIB_REGISTER_NODE (sr_policy_rewrite_b_encaps_node) = {
3148  .function = sr_policy_rewrite_b_encaps,
3149  .name = "sr-pl-rewrite-b-encaps",
3150  .vector_size = sizeof (u32),
3151  .format_trace = format_sr_policy_rewrite_trace,
3152  .type = VLIB_NODE_TYPE_INTERNAL,
3153  .n_errors = SR_POLICY_REWRITE_N_ERROR,
3154  .error_strings = sr_policy_rewrite_error_strings,
3155  .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
3156  .next_nodes = {
3157 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
3158  foreach_sr_policy_rewrite_next
3159 #undef _
3160  },
3161 };
3162 /* *INDENT-ON* */
3163 
3164 /*************************** SR Segment Lists DPOs ****************************/
3165 static u8 *
3166 format_sr_segment_list_dpo (u8 * s, va_list * args)
3167 {
3168  ip6_sr_main_t *sm = &sr_main;
3169  ip6_address_t *addr;
3170  ip6_sr_sl_t *sl;
3171 
3172  index_t index = va_arg (*args, index_t);
3173  CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
3174  s = format (s, "SR: Segment List index:[%d]", index);
3175  s = format (s, "\n\tSegments:");
3176 
3177  sl = pool_elt_at_index (sm->sid_lists, index);
3178 
3179  s = format (s, "< ");
3180  vec_foreach (addr, sl->segments)
3181  {
3182  s = format (s, "%U, ", format_ip6_address, addr);
3183  }
3184  s = format (s, "\b\b > - ");
3185  s = format (s, "Weight: %u", sl->weight);
3186 
3187  return s;
3188 }
3189 
3190 const static dpo_vft_t sr_policy_rewrite_vft = {
3191  .dv_lock = sr_dpo_lock,
3192  .dv_unlock = sr_dpo_unlock,
3193  .dv_format = format_sr_segment_list_dpo,
3194 };
3195 
3196 const static char *const sr_pr_encaps_ip6_nodes[] = {
3197  "sr-pl-rewrite-encaps",
3198  NULL,
3199 };
3200 
3201 const static char *const sr_pr_encaps_ip4_nodes[] = {
3202  "sr-pl-rewrite-encaps-v4",
3203  NULL,
3204 };
3205 
3206 const static char *const *const sr_pr_encaps_nodes[DPO_PROTO_NUM] = {
3207  [DPO_PROTO_IP6] = sr_pr_encaps_ip6_nodes,
3208  [DPO_PROTO_IP4] = sr_pr_encaps_ip4_nodes,
3209 };
3210 
3211 const static char *const sr_pr_insert_ip6_nodes[] = {
3212  "sr-pl-rewrite-insert",
3213  NULL,
3214 };
3215 
3216 const static char *const *const sr_pr_insert_nodes[DPO_PROTO_NUM] = {
3217  [DPO_PROTO_IP6] = sr_pr_insert_ip6_nodes,
3218 };
3219 
3220 const static char *const sr_pr_bsid_insert_ip6_nodes[] = {
3221  "sr-pl-rewrite-b-insert",
3222  NULL,
3223 };
3224 
3225 const static char *const *const sr_pr_bsid_insert_nodes[DPO_PROTO_NUM] = {
3226  [DPO_PROTO_IP6] = sr_pr_bsid_insert_ip6_nodes,
3227 };
3228 
3229 const static char *const sr_pr_bsid_encaps_ip6_nodes[] = {
3230  "sr-pl-rewrite-b-encaps",
3231  NULL,
3232 };
3233 
3234 const static char *const *const sr_pr_bsid_encaps_nodes[DPO_PROTO_NUM] = {
3235  [DPO_PROTO_IP6] = sr_pr_bsid_encaps_ip6_nodes,
3236 };
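 /* Per-protocol arrays of graph node names for the DPO types registered in
    sr_policy_rewrite_init (): when the corresponding DPO is resolved, packets
    are handed to the listed node. Only IPv6 (plus IPv4 for plain
    encapsulation) entries are populated. */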
3237 
3238 /********************* SR Policy Rewrite initialization ***********************/
3239 /**
3240  * @brief SR Policy Rewrite initialization
3241  */
3242 clib_error_t *
3243 sr_policy_rewrite_init (vlib_main_t * vm)
3244 {
3245  ip6_sr_main_t *sm = &sr_main;
3246 
3247  /* Init memory for sr policy keys (bsid <-> ip6_address_t) */
3248  mhash_init (&sm->sr_policies_index_hash, sizeof (uword),
3249  sizeof (ip6_address_t));
3250 
3251  /* Init SR policy DPO types */
3252  sr_pr_encaps_dpo_type =
3253  dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_encaps_nodes);
3254 
3255  sr_pr_insert_dpo_type =
3256  dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_insert_nodes);
3257 
3258  sr_pr_bsid_encaps_dpo_type =
3259  dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_bsid_encaps_nodes);
3260 
3261  sr_pr_bsid_insert_dpo_type =
3262  dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_bsid_insert_nodes);
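  /* Each dpo_register_new_type () call returns a fresh dpo_type_t, kept in
     the sr_pr_*_dpo_type statics above; the policy code elsewhere in this
     file uses these with dpo_set () to bind a BindingSID FIB entry to the
     matching rewrite graph node. */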
3263 
3264  /* Register the L2 encaps node used in HW redirect */
3265  sm->l2_sr_policy_rewrite_index = sr_policy_rewrite_encaps_l2_node.index;
3266 
3267  sm->fib_table_ip6 = (u32) ~ 0;
3268  sm->fib_table_ip4 = (u32) ~ 0;
3269 
3270  return 0;
3271 }
3272 
3273 VLIB_INIT_FUNCTION (sr_policy_rewrite_init);
3274 
3275 
3276 /*
3277 * fd.io coding-style-patch-verification: ON
3278 *
3279 * Local Variables:
3280 * eval: (c-set-style "gnu")
3281 * End:
3282 */