FD.io VPP v20.05.1-6-gf53edbc3b
Vector Packet Processing
ioam_cache_tunnel_select_node.c
/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ioam_cache_tunnel_select_node.c
 * This file implements anycast server selection using iOAM data
 * attached to the anycast service traffic.
 * The anycast service is reachable via multiple servers over SR tunnels.
 * Works with TCP anycast applications.
 * A cache entry is created when a TCP SYN is received for the anycast
 * destination.  Response TCP SYN-ACKs for the anycast service are compared
 * and the selected response is forwarded.
 * The functionality is introduced via graph nodes that are hooked into
 * the vnet graph via classifier configs like the ones below:
 *
 * Enable anycast service selection:
 * set ioam ip6 sr-tunnel-select oneway
 *
 * Enable the following classifier on the client-facing interface of the
 * anycast service, e.g. if the anycast service is db06::06:
 * classify session acl-hit-next ip6-node ip6-add-syn-hop-by-hop table-index 0 match l3
 * ip6 dst db06::06 ioam-encap anycast
 *
 * Enable the following classifier on the interfaces facing the servers of
 * the anycast service:
 * classify session acl-hit-next ip6-node ip6-lookup table-index 0 match l3
 * ip6 src db06::06 ioam-decap anycast
 *
 */
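/*
 * Illustrative end-to-end configuration sketch (not part of this file):
 * the classify sessions above assume an IPv6 classify table already exists
 * and is attached as an input ACL on the relevant interface.  Assuming the
 * standard classifier and input-ACL CLI (exact syntax can vary between VPP
 * releases), the client-facing side might be brought up roughly like:
 *
 *   set ioam ip6 sr-tunnel-select oneway
 *   classify table miss-next ip6-node ip6-lookup mask l3 ip6 dst
 *   classify session acl-hit-next ip6-node ip6-add-syn-hop-by-hop \
 *     table-index 0 match l3 ip6 dst db06::06 ioam-encap anycast
 *   set interface input acl intfc <client-if> ip6-table 0
 *
 * Treat this as a sketch only; consult the classifier documentation for the
 * authoritative CLI forms.
 */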
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vppinfra/error.h>
#include <vnet/ip/ip.h>
#include <vnet/srv6/sr.h>
#include <ioam/ip6/ioam_cache.h>
#include <vnet/ip/ip6_hop_by_hop.h>
#include <vnet/ip/ip6_hop_by_hop_packet.h>

typedef struct
{
  u32 next_index;
  u32 flow_label;
} cache_ts_trace_t;

/* packet trace format function */
static u8 *
format_cache_ts_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  cache_ts_trace_t *t = va_arg (*args, cache_ts_trace_t *);

  s = format (s, "CACHE: flow_label %d, next index %d",
              t->flow_label, t->next_index);
  return s;
}

#define foreach_cache_ts_error \
_(RECORDED, "ip6 iOAM headers cached")

typedef enum
{
#define _(sym,str) CACHE_TS_ERROR_##sym,
  foreach_cache_ts_error
#undef _
  CACHE_TS_N_ERROR,
} cache_ts_error_t;

static char *cache_ts_error_strings[] = {
#define _(sym,string) string,
  foreach_cache_ts_error
#undef _
};

typedef enum
{
  IOAM_CACHE_TS_NEXT_POP_HBYH,
  IOAM_CACHE_TS_ERROR_NEXT_DROP,
  IOAM_CACHE_TS_N_NEXT,
} cache_ts_next_t;

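/*
 * ip6-ioam-tunnel-select: runs on responses coming back from the anycast
 * servers (ioam-decap side).  For a TCP SYN-ACK it looks up the tunnel-select
 * cache entry created for the original SYN and compares this response's iOAM
 * hop-by-hop data against the best response cached so far
 * (ip6_ioam_analyse_compare_path_delay (), one-way or round-trip per
 * cm->criteria_oneway).  A worse response is dropped, a better one replaces
 * the cached buffer, and a TCP RST flushes any pending cached response.
 */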
static uword
ip6_ioam_cache_ts_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                           vlib_frame_t * frame)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  u32 n_left_from, *from, *to_next;
  cache_ts_next_t next_index;
  u32 recorded = 0;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      // TODO: dual loop
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *p0;
          u32 next0 = IOAM_CACHE_TS_NEXT_POP_HBYH;
          ip6_header_t *ip0;
          ip6_hop_by_hop_header_t *hbh0, *hbh_cmp;
          tcp_header_t *tcp0;
          u32 tcp_offset0;
          u32 cache_ts_index = 0;
          u8 cache_thread_id = 0;
          int result = 0;
          int skip = 0;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;

          p0 = vlib_get_buffer (vm, bi0);
          ip0 = vlib_buffer_get_current (p0);
          if (IP_PROTOCOL_TCP ==
              ip6_locate_header (p0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
            {
              tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
              if ((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
                  (tcp0->flags & TCP_FLAG_ACK) == TCP_FLAG_ACK)
                {
                  /* Look up and compare */
                  hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);

                  if (0 == ioam_cache_ts_lookup (ip0,
                                                 hbh0->protocol,
                                                 clib_net_to_host_u16
                                                 (tcp0->src_port),
                                                 clib_net_to_host_u16
                                                 (tcp0->dst_port),
                                                 clib_net_to_host_u32
                                                 (tcp0->ack_number), &hbh_cmp,
                                                 &cache_ts_index,
                                                 &cache_thread_id, 1))
                    {
                      /* response seen */
                      result = -1;
                      if (hbh_cmp)
                        result =
                          ip6_ioam_analyse_compare_path_delay (hbh0, hbh_cmp,
                                                               cm->criteria_oneway);
                      if (result >= 0)
                        {
                          /* current syn/ack is worse than the earlier: Drop */
                          next0 = IOAM_CACHE_TS_ERROR_NEXT_DROP;
                          /* Check if all responses are received or time has
                             exceeded; send cached response if yes */
                          ioam_cache_ts_check_and_send (cache_thread_id,
                                                        cache_ts_index);
                        }
                      else
                        {
                          /* Update cache with this buffer */
                          /* If successfully updated then skip sending it */
                          if (0 ==
                              (result =
                               ioam_cache_ts_update (cache_thread_id,
                                                     cache_ts_index, bi0,
                                                     hbh0)))
                            {
                              skip = 1;
                            }
                          else
                            next0 = IOAM_CACHE_TS_ERROR_NEXT_DROP;
                        }
                    }
                  else
                    {
                      next0 = IOAM_CACHE_TS_ERROR_NEXT_DROP;
                    }
                }
              else if ((tcp0->flags & TCP_FLAG_RST) == TCP_FLAG_RST)
                {
                  /* Look up and compare */
                  hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
                  if (0 == ioam_cache_ts_lookup (ip0,
                                                 hbh0->protocol,
                                                 clib_net_to_host_u16
                                                 (tcp0->src_port),
                                                 clib_net_to_host_u16
                                                 (tcp0->dst_port),
                                                 clib_net_to_host_u32
                                                 (tcp0->ack_number), &hbh_cmp,
                                                 &cache_ts_index,
                                                 &cache_thread_id, 1))
                    {
                      /* response seen */
                      if (hbh_cmp)
                        ioam_cache_ts_check_and_send (cache_thread_id,
                                                      cache_ts_index);
                    }

                }
            }
          if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
            {
              if (p0->flags & VLIB_BUFFER_IS_TRACED)
                {
                  cache_ts_trace_t *t =
                    vlib_add_trace (vm, node, p0, sizeof (*t));
                  t->flow_label =
                    clib_net_to_host_u32
                    (ip0->ip_version_traffic_class_and_flow_label);
                  t->next_index = next0;
                }
            }
          /* verify speculative enqueue, maybe switch current next frame */
          if (!skip)
            {
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               bi0, next0);
            }
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, ioam_cache_ts_node.index,
                               CACHE_TS_ERROR_RECORDED, recorded);
  return frame->n_vectors;
}

/*
 * Node for IP6 iOAM header cache
 */
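/*
 * Next-node dispositions: packets this node forwards immediately (anything
 * that is not a competing SYN-ACK, e.g. RSTs or non-TCP traffic) go to
 * ip6-pop-hop-by-hop so the iOAM hop-by-hop header is stripped first;
 * SYN-ACKs that lose the comparison are dropped, and the current best
 * SYN-ACK is held in the cache rather than being enqueued here.
 */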
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ioam_cache_ts_node) =
{
  .function = ip6_ioam_cache_ts_node_fn,
  .name = "ip6-ioam-tunnel-select",
  .vector_size = sizeof (u32),
  .format_trace = format_cache_ts_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (cache_ts_error_strings),
  .error_strings = cache_ts_error_strings,
  .n_next_nodes = IOAM_CACHE_TS_N_NEXT,
  /* edit / add dispositions here */
  .next_nodes =
  {
    [IOAM_CACHE_TS_NEXT_POP_HBYH] = "ip6-pop-hop-by-hop",
    [IOAM_CACHE_TS_ERROR_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

typedef struct
{
  u32 next_index;
} ip6_reset_ts_hbh_trace_t;

/* packet trace format function */
static u8 *
format_ip6_reset_ts_hbh_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip6_reset_ts_hbh_trace_t *t = va_arg (*args,
                                        ip6_reset_ts_hbh_trace_t *);

  s =
    format (s, "IP6_IOAM_RESET_TUNNEL_SELECT_HBH: next index %d",
            t->next_index);
  return s;
}

#define foreach_ip6_reset_ts_hbh_error \
_(PROCESSED, "iOAM Syn/Ack Pkts processed") \
_(SAVED, "iOAM Syn Pkts state saved") \
_(REMOVED, "iOAM Syn/Ack Pkts state removed")

typedef enum
{
#define _(sym,str) IP6_RESET_TS_HBH_ERROR_##sym,
  foreach_ip6_reset_ts_hbh_error
#undef _
  IP6_RESET_TS_HBH_N_ERROR,
} ip6_reset_ts_hbh_error_t;

static char *ip6_reset_ts_hbh_error_strings[] = {
#define _(sym,string) string,
  foreach_ip6_reset_ts_hbh_error
#undef _
};

#define foreach_ip6_ioam_cache_ts_input_next    \
  _(IP6_LOOKUP, "ip6-lookup")                   \
  _(DROP, "error-drop")

typedef enum
{
#define _(s,n) IP6_IOAM_CACHE_TS_INPUT_NEXT_##s,
  foreach_ip6_ioam_cache_ts_input_next
#undef _
  IP6_IOAM_CACHE_TS_INPUT_N_NEXT,
} ip6_ioam_cache_ts_input_next_t;

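/*
 * ip6-add-syn-hop-by-hop: runs on client traffic towards the anycast service
 * (ioam-encap side).  For a pure TCP SYN it optionally creates a tunnel-select
 * cache entry keyed on the connection and seq_number + 1, then inserts the
 * preformatted iOAM hop-by-hop rewrite in front of the IPv6 payload: the
 * fixed 40-byte IPv6 header is copied back by rewrite_length bytes, the
 * rewrite is laid down after it, the e2e cache option is stamped with the
 * owning thread (pool_id) and cache pool index, and the next-header chain and
 * payload length are patched accordingly.
 */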
VLIB_NODE_FN (ip6_reset_ts_hbh_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * frame)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  u32 n_left_from, *from, *to_next;
  ip_lookup_next_t next_index;
  u32 processed = 0, cache_ts_added = 0;
  u64 now;
  u8 *rewrite = cm->rewrite;
  u32 rewrite_length = vec_len (rewrite);
  ioam_e2e_cache_option_t *e2e = 0;
  u8 no_of_responses = cm->wait_for_responses;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      now = vlib_time_now (vm);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t *b0, *b1;
          u32 next0, next1;
          ip6_header_t *ip0, *ip1;
          tcp_header_t *tcp0, *tcp1;
          u32 tcp_offset0, tcp_offset1;
          ip6_hop_by_hop_header_t *hbh0, *hbh1;
          u64 *copy_src0, *copy_dst0, *copy_src1, *copy_dst1;
          u16 new_l0, new_l1;
          u32 pool_index0 = 0, pool_index1 = 0;

          next0 = next1 = IP6_IOAM_CACHE_TS_INPUT_NEXT_IP6_LOOKUP;
          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);
            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }


          /* speculatively enqueue b0 and b1 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          ip0 = vlib_buffer_get_current (b0);
          ip1 = vlib_buffer_get_current (b1);

          if (IP_PROTOCOL_TCP !=
              ip6_locate_header (b0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
            {
              goto NEXT00;
            }
          tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
          if ((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
              (tcp0->flags & TCP_FLAG_ACK) == 0)
            {
              if (no_of_responses > 0)
                {
                  /* Create TS select entry */
                  if (0 == ioam_cache_ts_add (ip0,
                                              clib_net_to_host_u16
                                              (tcp0->src_port),
                                              clib_net_to_host_u16
                                              (tcp0->dst_port),
                                              clib_net_to_host_u32
                                              (tcp0->seq_number) + 1,
                                              no_of_responses, now,
                                              vm->thread_index, &pool_index0))
                    {
                      cache_ts_added++;
                    }
                }
              copy_dst0 = (u64 *) (((u8 *) ip0) - rewrite_length);
              copy_src0 = (u64 *) ip0;

              copy_dst0[0] = copy_src0[0];
              copy_dst0[1] = copy_src0[1];
              copy_dst0[2] = copy_src0[2];
              copy_dst0[3] = copy_src0[3];
              copy_dst0[4] = copy_src0[4];

              vlib_buffer_advance (b0, -(word) rewrite_length);
              ip0 = vlib_buffer_get_current (b0);

              hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
              /* $$$ tune, rewrite_length is a multiple of 8 */
              clib_memcpy_fast (hbh0, rewrite, rewrite_length);
              e2e =
                (ioam_e2e_cache_option_t *) ((u8 *) hbh0 +
                                             cm->rewrite_pool_index_offset);
              e2e->pool_id = (u8) vm->thread_index;
              e2e->pool_index = pool_index0;
              ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *)
                                           ((u8 *) e2e +
                                            sizeof (ioam_e2e_cache_option_t)),
                                           &cm->sr_localsid_ts);
              /* Patch the protocol chain, insert the h-b-h (type 0) header */
              hbh0->protocol = ip0->protocol;
              ip0->protocol = 0;
              new_l0 =
                clib_net_to_host_u16 (ip0->payload_length) + rewrite_length;
              ip0->payload_length = clib_host_to_net_u16 (new_l0);
              processed++;
            }

        NEXT00:
          if (IP_PROTOCOL_TCP !=
              ip6_locate_header (b1, ip1, IP_PROTOCOL_TCP, &tcp_offset1))
            {
              goto TRACE00;
            }
          tcp1 = (tcp_header_t *) ((u8 *) ip1 + tcp_offset1);
          if ((tcp1->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
              (tcp1->flags & TCP_FLAG_ACK) == 0)
            {
              if (no_of_responses > 0)
                {
                  /* Create TS select entry */
                  if (0 == ioam_cache_ts_add (ip1,
                                              clib_net_to_host_u16
                                              (tcp1->src_port),
                                              clib_net_to_host_u16
                                              (tcp1->dst_port),
                                              clib_net_to_host_u32
                                              (tcp1->seq_number) + 1,
                                              no_of_responses, now,
                                              vm->thread_index, &pool_index1))
                    {
                      cache_ts_added++;
                    }
                }

              copy_dst1 = (u64 *) (((u8 *) ip1) - rewrite_length);
              copy_src1 = (u64 *) ip1;

              copy_dst1[0] = copy_src1[0];
              copy_dst1[1] = copy_src1[1];
              copy_dst1[2] = copy_src1[2];
              copy_dst1[3] = copy_src1[3];
              copy_dst1[4] = copy_src1[4];

              vlib_buffer_advance (b1, -(word) rewrite_length);
              ip1 = vlib_buffer_get_current (b1);

              hbh1 = (ip6_hop_by_hop_header_t *) (ip1 + 1);
              /* $$$ tune, rewrite_length is a multiple of 8 */
              clib_memcpy_fast (hbh1, rewrite, rewrite_length);
              e2e =
                (ioam_e2e_cache_option_t *) ((u8 *) hbh1 +
                                             cm->rewrite_pool_index_offset);
              e2e->pool_id = (u8) vm->thread_index;
              e2e->pool_index = pool_index1;
              ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *)
                                           ((u8 *) e2e +
                                            sizeof (ioam_e2e_cache_option_t)),
                                           &cm->sr_localsid_ts);
              /* Patch the protocol chain, insert the h-b-h (type 0) header */
              hbh1->protocol = ip1->protocol;
              ip1->protocol = 0;
              new_l1 =
                clib_net_to_host_u16 (ip1->payload_length) + rewrite_length;
              ip1->payload_length = clib_host_to_net_u16 (new_l1);
              processed++;
            }

        TRACE00:
          if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
            {
              if (b0->flags & VLIB_BUFFER_IS_TRACED)
                {
                  ip6_reset_ts_hbh_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  t->next_index = next0;
                }
              if (b1->flags & VLIB_BUFFER_IS_TRACED)
                {
                  ip6_reset_ts_hbh_trace_t *t =
                    vlib_add_trace (vm, node, b1, sizeof (*t));
                  t->next_index = next1;
                }

            }

          /* verify speculative enqueues, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 next0;
          ip6_header_t *ip0;
          tcp_header_t *tcp0;
          u32 tcp_offset0;
          ip6_hop_by_hop_header_t *hbh0;
          u64 *copy_src0, *copy_dst0;
          u16 new_l0;
          u32 pool_index0 = 0;

          next0 = IP6_IOAM_CACHE_TS_INPUT_NEXT_IP6_LOOKUP;
          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          ip0 = vlib_buffer_get_current (b0);
          if (IP_PROTOCOL_TCP !=
              ip6_locate_header (b0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
            {
              goto TRACE0;
            }
          tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
          if ((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
              (tcp0->flags & TCP_FLAG_ACK) == 0)
            {
              if (no_of_responses > 0)
                {
                  /* Create TS select entry */
                  if (0 == ioam_cache_ts_add (ip0,
                                              clib_net_to_host_u16
                                              (tcp0->src_port),
                                              clib_net_to_host_u16
                                              (tcp0->dst_port),
                                              clib_net_to_host_u32
                                              (tcp0->seq_number) + 1,
                                              no_of_responses, now,
                                              vm->thread_index, &pool_index0))
                    {
                      cache_ts_added++;
                    }
                }
              copy_dst0 = (u64 *) (((u8 *) ip0) - rewrite_length);
              copy_src0 = (u64 *) ip0;

              copy_dst0[0] = copy_src0[0];
              copy_dst0[1] = copy_src0[1];
              copy_dst0[2] = copy_src0[2];
              copy_dst0[3] = copy_src0[3];
              copy_dst0[4] = copy_src0[4];

              vlib_buffer_advance (b0, -(word) rewrite_length);
              ip0 = vlib_buffer_get_current (b0);

              hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
              /* $$$ tune, rewrite_length is a multiple of 8 */
              clib_memcpy_fast (hbh0, rewrite, rewrite_length);
              e2e =
                (ioam_e2e_cache_option_t *) ((u8 *) hbh0 +
                                             cm->rewrite_pool_index_offset);
              e2e->pool_id = (u8) vm->thread_index;
              e2e->pool_index = pool_index0;
              ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *)
                                           ((u8 *) e2e +
                                            sizeof (ioam_e2e_cache_option_t)),
                                           &cm->sr_localsid_ts);
              /* Patch the protocol chain, insert the h-b-h (type 0) header */
              hbh0->protocol = ip0->protocol;
              ip0->protocol = 0;
              new_l0 =
                clib_net_to_host_u16 (ip0->payload_length) + rewrite_length;
              ip0->payload_length = clib_host_to_net_u16 (new_l0);
              processed++;
            }
        TRACE0:
          if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
                             && (b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              ip6_reset_ts_hbh_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              t->next_index = next0;
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, cm->ip6_reset_ts_hbh_node_index,
                               IP6_RESET_TS_HBH_ERROR_PROCESSED, processed);
  vlib_node_increment_counter (vm, cm->ip6_reset_ts_hbh_node_index,
                               IP6_RESET_TS_HBH_ERROR_SAVED, cache_ts_added);

  return frame->n_vectors;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_reset_ts_hbh_node) =
{
  .name = "ip6-add-syn-hop-by-hop",
  .vector_size = sizeof (u32),
  .format_trace = format_ip6_reset_ts_hbh_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (ip6_reset_ts_hbh_error_strings),
  .error_strings = ip6_reset_ts_hbh_error_strings,
  /* See ip/lookup.h */
  .n_next_nodes = IP6_IOAM_CACHE_TS_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [IP6_IOAM_CACHE_TS_INPUT_NEXT_##s] = n,
    foreach_ip6_ioam_cache_ts_input_next
#undef _
  },
};

/* *INDENT-ON* */

#ifndef CLIB_MARCH_VARIANT
vlib_node_registration_t ioam_cache_ts_timer_tick_node;
#endif /* CLIB_MARCH_VARIANT */

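/*
 * Timestamp-cache expiry machinery: each worker thread owns a timer wheel
 * (cm->timer_wheels[thread]) with one timer per pending tunnel-select cache
 * entry.  The input node below advances the wheel; expired timers land in
 * expired_cache_ts_timer_callback (), which pushes out whatever best response
 * has been cached so far via ioam_cache_ts_send ().
 */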
typedef struct
{
  u32 thread_index;
} ioam_cache_ts_timer_tick_trace_t;

/* packet trace format function */
static u8 *
format_ioam_cache_ts_timer_tick_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ioam_cache_ts_timer_tick_trace_t *t =
    va_arg (*args, ioam_cache_ts_timer_tick_trace_t *);

  s = format (s, "IOAM_CACHE_TS_TIMER_TICK: thread index %d",
              t->thread_index);
  return s;
}

#define foreach_ioam_cache_ts_timer_tick_error \
  _(TIMER, "Timer events")

typedef enum
{
#define _(sym,str) IOAM_CACHE_TS_TIMER_TICK_ERROR_##sym,
  foreach_ioam_cache_ts_timer_tick_error
#undef _
  IOAM_CACHE_TS_TIMER_TICK_N_ERROR,
} ioam_cache_ts_timer_tick_error_t;

static char *ioam_cache_ts_timer_tick_error_strings[] = {
#define _(sym,string) string,
  foreach_ioam_cache_ts_timer_tick_error
#undef _
};

#ifndef CLIB_MARCH_VARIANT
void
ioam_cache_ts_timer_node_enable (vlib_main_t * vm, u8 enable)
{
  vlib_node_set_state (vm, ioam_cache_ts_timer_tick_node.index,
                       enable ==
                       0 ? VLIB_NODE_STATE_DISABLED :
                       VLIB_NODE_STATE_POLLING);
}

void
expired_cache_ts_timer_callback (u32 * expired_timers)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  int i;
  u32 pool_index;
  u32 thread_index = vlib_get_thread_index ();
  u32 count = 0;

  for (i = 0; i < vec_len (expired_timers); i++)
    {
      /* Get pool index and pool id */
      pool_index = expired_timers[i] & 0x0FFFFFFF;

      /* Handle expiration */
      ioam_cache_ts_send (thread_index, pool_index);
      count++;
    }
  vlib_node_increment_counter (cm->vlib_main,
                               ioam_cache_ts_timer_tick_node.index,
                               IOAM_CACHE_TS_TIMER_TICK_ERROR_TIMER, count);
}
#endif /* CLIB_MARCH_VARIANT */

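/*
 * Polling input node body: expire the per-thread timer wheel, then sleep for
 * IOAM_CACHE_TS_TICK milliseconds so the poll loop does not spin flat out;
 * nanosleep () is restarted with the remaining time if interrupted.
 */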
static uword
ioam_cache_ts_timer_tick_node_fn (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * f)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  u32 my_thread_index = vlib_get_thread_index ();
  struct timespec ts, tsrem;

  tw_timer_expire_timers_16t_2w_512sl (&cm->timer_wheels[my_thread_index],
                                       vlib_time_now (vm));
  ts.tv_sec = 0;
  ts.tv_nsec = 1000 * 1000 * IOAM_CACHE_TS_TICK;
  while (nanosleep (&ts, &tsrem) < 0)
    {
      ts = tsrem;
    }

  return 0;
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ioam_cache_ts_timer_tick_node) =
{
  .function = ioam_cache_ts_timer_tick_node_fn,
  .name = "ioam-cache-ts-timer-tick",
  .format_trace = format_ioam_cache_ts_timer_tick_trace,
  .type = VLIB_NODE_TYPE_INPUT,

  .n_errors = ARRAY_LEN (ioam_cache_ts_timer_tick_error_strings),
  .error_strings = ioam_cache_ts_timer_tick_error_strings,

  .n_next_nodes = 1,

  .state = VLIB_NODE_STATE_DISABLED,

  /* edit / add dispositions here */
  .next_nodes = {
    [0] = "error-drop",
  },
};
/* *INDENT-ON* */
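/*
 * The tick node is registered in VLIB_NODE_STATE_DISABLED state and is
 * switched to POLLING via ioam_cache_ts_timer_node_enable () above, which is
 * presumably called from the sr-tunnel-select configuration path elsewhere in
 * the plugin; that caller lives outside this file.
 */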

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */