/*
 * FD.io VPP v18.07-rc0-415-g6c78436 — Vector Packet Processing
 * ioam_cache_tunnel_select_node.c
 */
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /*
16  * ioam_cache_tunnel_select_node.c
17  * This file implements anycast server selection using ioam data
18  * attached to anycast service selection.
19  * Anycast service is reachable via multiple servers reachable
20  * over SR tunnels.
21  * Works with TCP Anycast application.
22  * Cache entry is created when TCP SYN is received for anycast destination.
23  * Response TCP SYN ACKs for anycast service is compared and selected
24  * response is forwarded.
25  * The functionality is introduced via graph nodes that are hooked into
26  * vnet graph via classifier configs like below:
27  *
28  * Enable anycast service selection:
29  * set ioam ip6 sr-tunnel-select oneway
30  *
31  * Enable following classifier on the anycast service client facing interface
32  * e.g. anycast service is db06::06 then:
33  * classify session acl-hit-next ip6-node ip6-add-syn-hop-by-hop table-index 0 match l3
34  * ip6 dst db06::06 ioam-encap anycast
35  *
36  * Enable following classifier on the interfaces facing the server of anycast service:
37  * classify session acl-hit-next ip6-node ip6-lookup table-index 0 match l3
38  * ip6 src db06::06 ioam-decap anycast
39  *
40  */
41 #include <vlib/vlib.h>
42 #include <vnet/vnet.h>
43 #include <vnet/pg/pg.h>
44 #include <vppinfra/error.h>
45 #include <vnet/ip/ip.h>
46 #include <vnet/srv6/sr.h>
47 #include <ioam/ip6/ioam_cache.h>
48 #include <vnet/ip/ip6_hop_by_hop.h>
50 
51 typedef struct
52 {
56 
57 /* packet trace format function */
58 static u8 *
59 format_cache_ts_trace (u8 * s, va_list * args)
60 {
61  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
62  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
63  cache_ts_trace_t *t = va_arg (*args, cache_ts_trace_t *);
64 
65  s = format (s, "CACHE: flow_label %d, next index %d",
66  t->flow_label, t->next_index);
67  return s;
68 }
69 
/* Error/counter strings for the tunnel-select node. */
#define foreach_cache_ts_error \
_(RECORDED, "ip6 iOAM headers cached")

typedef enum
{
#define _(sym,str) CACHE_TS_ERROR_##sym,
  foreach_cache_ts_error
#undef _
    CACHE_TS_N_ERROR,
} cache_ts_error_t;
81 static char *cache_ts_error_strings[] = {
82 #define _(sym,string) string,
84 #undef _
85 };
86 
/* Next-node dispositions for the tunnel-select node; values index the
 * .next_nodes table in the ioam_cache_ts_node registration below. */
typedef enum
{
  IOAM_CACHE_TS_NEXT_POP_HBYH,
  IOAM_CACHE_TS_ERROR_NEXT_DROP,
  IOAM_CACHE_TS_N_NEXT,
} cache_ts_next_t;
94 static uword
96  vlib_node_runtime_t * node, vlib_frame_t * frame)
97 {
99  u32 n_left_from, *from, *to_next;
100  cache_ts_next_t next_index;
101  u32 recorded = 0;
102 
103  from = vlib_frame_vector_args (frame);
104  n_left_from = frame->n_vectors;
105  next_index = node->cached_next_index;
106 
107  while (n_left_from > 0)
108  {
109  u32 n_left_to_next;
110 
111  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
112  // TODO: dual loop
113  while (n_left_from > 0 && n_left_to_next > 0)
114  {
115  u32 bi0;
116  vlib_buffer_t *p0;
118  ip6_header_t *ip0;
119  ip6_hop_by_hop_header_t *hbh0, *hbh_cmp;
120  tcp_header_t *tcp0;
121  u32 tcp_offset0;
122  u32 cache_ts_index = 0;
123  u8 cache_thread_id = 0;
124  int result = 0;
125  int skip = 0;
126 
127  bi0 = from[0];
128  from += 1;
129  n_left_from -= 1;
130 
131  p0 = vlib_get_buffer (vm, bi0);
132  ip0 = vlib_buffer_get_current (p0);
133  if (IP_PROTOCOL_TCP ==
134  ip6_locate_header (p0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
135  {
136  tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
137  if ((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
138  (tcp0->flags & TCP_FLAG_ACK) == TCP_FLAG_ACK)
139  {
140  /* Look up and compare */
141  hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
142 
143  if (0 == ioam_cache_ts_lookup (ip0,
144  hbh0->protocol,
145  clib_net_to_host_u16
146  (tcp0->src_port),
147  clib_net_to_host_u16
148  (tcp0->dst_port),
149  clib_net_to_host_u32
150  (tcp0->ack_number), &hbh_cmp,
151  &cache_ts_index,
152  &cache_thread_id, 1))
153  {
154  /* response seen */
155  result = -1;
156  if (hbh_cmp)
157  result =
159  cm->criteria_oneway);
160  if (result >= 0)
161  {
162  /* current syn/ack is worse than the earlier: Drop */
164  /* Check if all responses are received or time has exceeded
165  send cached response if yes */
166  ioam_cache_ts_check_and_send (cache_thread_id,
167  cache_ts_index);
168  }
169  else
170  {
171  /* Update cache with this buffer */
172  /* If successfully updated then skip sending it */
173  if (0 ==
174  (result =
175  ioam_cache_ts_update (cache_thread_id,
176  cache_ts_index, bi0,
177  hbh0)))
178  {
179  skip = 1;
180  }
181  else
183  }
184  }
185  else
186  {
188  }
189  }
190  else if ((tcp0->flags & TCP_FLAG_RST) == TCP_FLAG_RST)
191  {
192  /* Look up and compare */
193  hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
194  if (0 == ioam_cache_ts_lookup (ip0, hbh0->protocol, clib_net_to_host_u16 (tcp0->src_port), clib_net_to_host_u16 (tcp0->dst_port), clib_net_to_host_u32 (tcp0->ack_number), &hbh_cmp, &cache_ts_index, &cache_thread_id, 1)) //response seen
195  {
197  if (hbh_cmp)
198  ioam_cache_ts_check_and_send (cache_thread_id,
199  cache_ts_index);
200  }
201 
202  }
203  }
204  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
205  {
206  if (p0->flags & VLIB_BUFFER_IS_TRACED)
207  {
208  cache_ts_trace_t *t =
209  vlib_add_trace (vm, node, p0, sizeof (*t));
210  t->flow_label =
211  clib_net_to_host_u32
213  t->next_index = next0;
214  }
215  }
216  /* verify speculative enqueue, maybe switch current next frame */
217  if (!skip)
218  {
219  to_next[0] = bi0;
220  to_next += 1;
221  n_left_to_next -= 1;
222  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
223  to_next, n_left_to_next,
224  bi0, next0);
225  }
226  }
227 
228  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
229  }
231  CACHE_TS_ERROR_RECORDED, recorded);
232  return frame->n_vectors;
233 }
234 
235 /*
236  * Node for IP6 iOAM header cache
237  */
238 /* *INDENT-OFF* */
240 {
241  .function = ip6_ioam_cache_ts_node_fn,
242  .name = "ip6-ioam-tunnel-select",
243  .vector_size = sizeof (u32),
244  .format_trace = format_cache_ts_trace,
245  .type = VLIB_NODE_TYPE_INTERNAL,
246  .n_errors = ARRAY_LEN (cache_ts_error_strings),
247  .error_strings = cache_ts_error_strings,
248  .n_next_nodes = IOAM_CACHE_TS_N_NEXT,
249  /* edit / add dispositions here */
250  .next_nodes =
251  {
252  [IOAM_CACHE_TS_NEXT_POP_HBYH] = "ip6-pop-hop-by-hop",
253  [IOAM_CACHE_TS_ERROR_NEXT_DROP] = "error-drop",
254  },
255 };
256 /* *INDENT-ON* */
257 
258 typedef struct
259 {
262 
263 /* packet trace format function */
264 static u8 *
265 format_ip6_reset_ts_hbh_trace (u8 * s, va_list * args)
266 {
267  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
268  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
269  ip6_reset_ts_hbh_trace_t *t = va_arg (*args,
271 
272  s =
273  format (s, "IP6_IOAM_RESET_TUNNEL_SELECT_HBH: next index %d",
274  t->next_index);
275  return s;
276 }
277 
279 
/* Error/counter strings for the add-syn-hop-by-hop node. */
#define foreach_ip6_reset_ts_hbh_error \
_(PROCESSED, "iOAM Syn/Ack Pkts processed") \
_(SAVED, "iOAM Syn Pkts state saved") \
_(REMOVED, "iOAM Syn/Ack Pkts state removed")

typedef enum
{
#define _(sym,str) IP6_RESET_TS_HBH_ERROR_##sym,
  foreach_ip6_reset_ts_hbh_error
#undef _
    IP6_RESET_TS_HBH_N_ERROR,
} ip6_reset_ts_hbh_error_t;
294 #define _(sym,string) string,
296 #undef _
297 };
298 
/* Next-node dispositions for the add-syn-hop-by-hop node. */
#define foreach_ip6_ioam_cache_ts_input_next \
  _(IP6_LOOKUP, "ip6-lookup") \
  _(DROP, "error-drop")

typedef enum
{
#define _(s,n) IP6_IOAM_CACHE_TS_INPUT_NEXT_##s,
  foreach_ip6_ioam_cache_ts_input_next
#undef _
    IP6_IOAM_CACHE_TS_INPUT_N_NEXT,
} ip6_ioam_cache_ts_input_next_t;
311 
312 static uword
314  vlib_node_runtime_t * node, vlib_frame_t * frame)
315 {
317  u32 n_left_from, *from, *to_next;
318  ip_lookup_next_t next_index;
319  u32 processed = 0, cache_ts_added = 0;
320  u64 now;
321  u8 *rewrite = cm->rewrite;
322  u32 rewrite_length = vec_len (rewrite);
323  ioam_e2e_cache_option_t *e2e = 0;
324  u8 no_of_responses = cm->wait_for_responses;
325 
326  from = vlib_frame_vector_args (frame);
327  n_left_from = frame->n_vectors;
328  next_index = node->cached_next_index;
329 
330  while (n_left_from > 0)
331  {
332  u32 n_left_to_next;
333 
334  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
335 
336  now = vlib_time_now (vm);
337  while (n_left_from >= 4 && n_left_to_next >= 2)
338  {
339  u32 bi0, bi1;
340  vlib_buffer_t *b0, *b1;
341  u32 next0, next1;
342  ip6_header_t *ip0, *ip1;
343  tcp_header_t *tcp0, *tcp1;
344  u32 tcp_offset0, tcp_offset1;
345  ip6_hop_by_hop_header_t *hbh0, *hbh1;
346  u64 *copy_src0, *copy_dst0, *copy_src1, *copy_dst1;
347  u16 new_l0, new_l1;
348  u32 pool_index0 = 0, pool_index1 = 0;
349 
350  next0 = next1 = IP6_IOAM_CACHE_TS_INPUT_NEXT_IP6_LOOKUP;
351  /* Prefetch next iteration. */
352  {
353  vlib_buffer_t *p2, *p3;
354 
355  p2 = vlib_get_buffer (vm, from[2]);
356  p3 = vlib_get_buffer (vm, from[3]);
357 
358  vlib_prefetch_buffer_header (p2, LOAD);
359  vlib_prefetch_buffer_header (p3, LOAD);
360  CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
361  CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
362  }
363 
364 
365  /* speculatively enqueue b0 to the current next frame */
366  to_next[0] = bi0 = from[0];
367  to_next[1] = bi1 = from[1];
368  from += 2;
369  to_next += 2;
370  n_left_from -= 2;
371  n_left_to_next -= 2;
372 
373  b0 = vlib_get_buffer (vm, bi0);
374  b1 = vlib_get_buffer (vm, bi1);
375 
376  ip0 = vlib_buffer_get_current (b0);
377  ip1 = vlib_buffer_get_current (b1);
378 
379  if (IP_PROTOCOL_TCP !=
380  ip6_locate_header (b0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
381  {
382  goto NEXT00;
383  }
384  tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
385  if ((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
386  (tcp0->flags & TCP_FLAG_ACK) == 0)
387  {
388  if (no_of_responses > 0)
389  {
390  /* Create TS select entry */
391  if (0 == ioam_cache_ts_add (ip0,
392  clib_net_to_host_u16
393  (tcp0->src_port),
394  clib_net_to_host_u16
395  (tcp0->dst_port),
396  clib_net_to_host_u32
397  (tcp0->seq_number) + 1,
398  no_of_responses, now,
399  vm->thread_index, &pool_index0))
400  {
401  cache_ts_added++;
402  }
403  }
404  copy_dst0 = (u64 *) (((u8 *) ip0) - rewrite_length);
405  copy_src0 = (u64 *) ip0;
406 
407  copy_dst0[0] = copy_src0[0];
408  copy_dst0[1] = copy_src0[1];
409  copy_dst0[2] = copy_src0[2];
410  copy_dst0[3] = copy_src0[3];
411  copy_dst0[4] = copy_src0[4];
412 
413  vlib_buffer_advance (b0, -(word) rewrite_length);
414  ip0 = vlib_buffer_get_current (b0);
415 
416  hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
417  /* $$$ tune, rewrite_length is a multiple of 8 */
418  clib_memcpy (hbh0, rewrite, rewrite_length);
419  e2e =
420  (ioam_e2e_cache_option_t *) ((u8 *) hbh0 +
422  e2e->pool_id = (u8) vm->thread_index;
423  e2e->pool_index = pool_index0;
424  ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *)
425  ((u8 *) e2e +
426  sizeof (ioam_e2e_cache_option_t)),
427  &cm->sr_localsid_ts);
428  /* Patch the protocol chain, insert the h-b-h (type 0) header */
429  hbh0->protocol = ip0->protocol;
430  ip0->protocol = 0;
431  new_l0 =
432  clib_net_to_host_u16 (ip0->payload_length) + rewrite_length;
433  ip0->payload_length = clib_host_to_net_u16 (new_l0);
434  processed++;
435  }
436 
437  NEXT00:
438  if (IP_PROTOCOL_TCP !=
439  ip6_locate_header (b1, ip1, IP_PROTOCOL_TCP, &tcp_offset1))
440  {
441  goto TRACE00;
442  }
443  tcp1 = (tcp_header_t *) ((u8 *) ip1 + tcp_offset1);
444  if ((tcp1->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
445  (tcp1->flags & TCP_FLAG_ACK) == 0)
446  {
447  if (no_of_responses > 0)
448  {
449  /* Create TS select entry */
450  if (0 == ioam_cache_ts_add (ip1,
451  clib_net_to_host_u16
452  (tcp1->src_port),
453  clib_net_to_host_u16
454  (tcp1->dst_port),
455  clib_net_to_host_u32
456  (tcp1->seq_number) + 1,
457  no_of_responses, now,
458  vm->thread_index, &pool_index1))
459  {
460  cache_ts_added++;
461  }
462  }
463 
464  copy_dst1 = (u64 *) (((u8 *) ip1) - rewrite_length);
465  copy_src1 = (u64 *) ip1;
466 
467  copy_dst1[0] = copy_src1[0];
468  copy_dst1[1] = copy_src1[1];
469  copy_dst1[2] = copy_src1[2];
470  copy_dst1[3] = copy_src1[3];
471  copy_dst1[4] = copy_src1[4];
472 
473  vlib_buffer_advance (b1, -(word) rewrite_length);
474  ip1 = vlib_buffer_get_current (b1);
475 
476  hbh1 = (ip6_hop_by_hop_header_t *) (ip1 + 1);
477  /* $$$ tune, rewrite_length is a multiple of 8 */
478  clib_memcpy (hbh1, rewrite, rewrite_length);
479  e2e =
480  (ioam_e2e_cache_option_t *) ((u8 *) hbh1 +
482  e2e->pool_id = (u8) vm->thread_index;
483  e2e->pool_index = pool_index1;
484  ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *)
485  ((u8 *) e2e +
486  sizeof (ioam_e2e_cache_option_t)),
487  &cm->sr_localsid_ts);
488  /* Patch the protocol chain, insert the h-b-h (type 0) header */
489  hbh1->protocol = ip1->protocol;
490  ip1->protocol = 0;
491  new_l1 =
492  clib_net_to_host_u16 (ip1->payload_length) + rewrite_length;
493  ip1->payload_length = clib_host_to_net_u16 (new_l1);
494  processed++;
495  }
496 
497  TRACE00:
498  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
499  {
500  if (b0->flags & VLIB_BUFFER_IS_TRACED)
501  {
503  vlib_add_trace (vm, node, b0, sizeof (*t));
504  t->next_index = next0;
505  }
506  if (b1->flags & VLIB_BUFFER_IS_TRACED)
507  {
509  vlib_add_trace (vm, node, b1, sizeof (*t));
510  t->next_index = next1;
511  }
512 
513  }
514 
515  /* verify speculative enqueue, maybe switch current next frame */
516  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
517  to_next, n_left_to_next,
518  bi0, bi1, next0, next1);
519  }
520  while (n_left_from > 0 && n_left_to_next > 0)
521  {
522  u32 bi0;
523  vlib_buffer_t *b0;
524  u32 next0;
525  ip6_header_t *ip0;
526  tcp_header_t *tcp0;
527  u32 tcp_offset0;
529  u64 *copy_src0, *copy_dst0;
530  u16 new_l0;
531  u32 pool_index0 = 0;
532 
533  next0 = IP6_IOAM_CACHE_TS_INPUT_NEXT_IP6_LOOKUP;
534  /* speculatively enqueue b0 to the current next frame */
535  bi0 = from[0];
536  to_next[0] = bi0;
537  from += 1;
538  to_next += 1;
539  n_left_from -= 1;
540  n_left_to_next -= 1;
541 
542  b0 = vlib_get_buffer (vm, bi0);
543 
544  ip0 = vlib_buffer_get_current (b0);
545  if (IP_PROTOCOL_TCP !=
546  ip6_locate_header (b0, ip0, IP_PROTOCOL_TCP, &tcp_offset0))
547  {
548  goto TRACE0;
549  }
550  tcp0 = (tcp_header_t *) ((u8 *) ip0 + tcp_offset0);
551  if ((tcp0->flags & TCP_FLAG_SYN) == TCP_FLAG_SYN &&
552  (tcp0->flags & TCP_FLAG_ACK) == 0)
553  {
554  if (no_of_responses > 0)
555  {
556  /* Create TS select entry */
557  if (0 == ioam_cache_ts_add (ip0,
558  clib_net_to_host_u16
559  (tcp0->src_port),
560  clib_net_to_host_u16
561  (tcp0->dst_port),
562  clib_net_to_host_u32
563  (tcp0->seq_number) + 1,
564  no_of_responses, now,
565  vm->thread_index, &pool_index0))
566  {
567  cache_ts_added++;
568  }
569  }
570  copy_dst0 = (u64 *) (((u8 *) ip0) - rewrite_length);
571  copy_src0 = (u64 *) ip0;
572 
573  copy_dst0[0] = copy_src0[0];
574  copy_dst0[1] = copy_src0[1];
575  copy_dst0[2] = copy_src0[2];
576  copy_dst0[3] = copy_src0[3];
577  copy_dst0[4] = copy_src0[4];
578 
579  vlib_buffer_advance (b0, -(word) rewrite_length);
580  ip0 = vlib_buffer_get_current (b0);
581 
582  hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
583  /* $$$ tune, rewrite_length is a multiple of 8 */
584  clib_memcpy (hbh0, rewrite, rewrite_length);
585  e2e =
586  (ioam_e2e_cache_option_t *) ((u8 *) hbh0 +
588  e2e->pool_id = (u8) vm->thread_index;
589  e2e->pool_index = pool_index0;
590  ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *)
591  ((u8 *) e2e +
592  sizeof (ioam_e2e_cache_option_t)),
593  &cm->sr_localsid_ts);
594  /* Patch the protocol chain, insert the h-b-h (type 0) header */
595  hbh0->protocol = ip0->protocol;
596  ip0->protocol = 0;
597  new_l0 =
598  clib_net_to_host_u16 (ip0->payload_length) + rewrite_length;
599  ip0->payload_length = clib_host_to_net_u16 (new_l0);
600  processed++;
601  }
602  TRACE0:
604  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
605  {
607  vlib_add_trace (vm, node, b0, sizeof (*t));
608  t->next_index = next0;
609  }
610 
611  /* verify speculative enqueue, maybe switch current next frame */
612  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
613  to_next, n_left_to_next,
614  bi0, next0);
615  }
616 
617  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
618  }
619 
621  IP6_RESET_TS_HBH_ERROR_PROCESSED, processed);
623  IP6_RESET_TS_HBH_ERROR_SAVED, cache_ts_added);
624 
625  return frame->n_vectors;
626 }
627 
628 /* *INDENT-OFF* */
630 {
631  .function = ip6_reset_ts_hbh_node_fn,
632  .name = "ip6-add-syn-hop-by-hop",
633  .vector_size = sizeof (u32),
634  .format_trace = format_ip6_reset_ts_hbh_trace,
635  .type = VLIB_NODE_TYPE_INTERNAL,
637  .error_strings = ip6_reset_ts_hbh_error_strings,
638  /* See ip/lookup.h */
639  .n_next_nodes = IP6_IOAM_CACHE_TS_INPUT_N_NEXT,
640  .next_nodes =
641  {
642 #define _(s,n) [IP6_IOAM_CACHE_TS_INPUT_NEXT_##s] = n,
644 #undef _
645  },
646 };
647 
649 /* *INDENT-ON* */
650 
652 
653 typedef struct
654 {
657 
658 /* packet trace format function */
659 static u8 *
661 {
662  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
663  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
665  va_arg (*args, ioam_cache_ts_timer_tick_trace_t *);
666 
667  s = format (s, "IOAM_CACHE_TS_TIMER_TICK: thread index %d",
668  t->thread_index);
669  return s;
670 }
671 
/* Error/counter strings for the timer-tick node. */
#define foreach_ioam_cache_ts_timer_tick_error \
  _(TIMER, "Timer events")

typedef enum
{
#define _(sym,str) IOAM_CACHE_TS_TIMER_TICK_ERROR_##sym,
  foreach_ioam_cache_ts_timer_tick_error
#undef _
    IOAM_CACHE_TS_TIMER_TICK_N_ERROR,
} ioam_cache_ts_timer_tick_error_t;

static char *ioam_cache_ts_timer_tick_error_strings[] = {
#define _(sym,string) string,
  foreach_ioam_cache_ts_timer_tick_error
#undef _
};
688 
689 void
691 {
693  enable ==
694  0 ? VLIB_NODE_STATE_DISABLED :
695  VLIB_NODE_STATE_POLLING);
696 }
697 
698 void
700 {
702  int i;
703  u32 pool_index;
704  u32 thread_index = vlib_get_thread_index ();
705  u32 count = 0;
706 
707  for (i = 0; i < vec_len (expired_timers); i++)
708  {
709  /* Get pool index and pool id */
710  pool_index = expired_timers[i] & 0x0FFFFFFF;
711 
712  /* Handle expiration */
713  ioam_cache_ts_send (thread_index, pool_index);
714  count++;
715  }
718  IOAM_CACHE_TS_TIMER_TICK_ERROR_TIMER, count);
719 }
720 
721 static uword
723  vlib_node_runtime_t * node,
724  vlib_frame_t * f)
725 {
727  u32 my_thread_index = vlib_get_thread_index ();
728  struct timespec ts, tsrem;
729 
730  tw_timer_expire_timers_16t_2w_512sl (&cm->timer_wheels[my_thread_index],
731  vlib_time_now (vm));
732  ts.tv_sec = 0;
733  ts.tv_nsec = 1000 * 1000 * IOAM_CACHE_TS_TICK;
734  while (nanosleep (&ts, &tsrem) < 0)
735  {
736  ts = tsrem;
737  }
738 
739  return 0;
740 }
741 /* *INDENT-OFF* */
744  .name = "ioam-cache-ts-timer-tick",
746  .type = VLIB_NODE_TYPE_INPUT,
747 
750 
751  .n_next_nodes = 1,
752 
753  .state = VLIB_NODE_STATE_DISABLED,
754 
755  /* edit / add dispositions here */
756  .next_nodes = {
757  [0] = "error-drop",
758  },
759 };
760 /* *INDENT-ON* */
761 
762 /*
763  * fd.io coding-style-patch-verification: ON
764  *
765  * Local Variables:
766  * eval: (c-set-style "gnu")
767  * End:
768  */
u8 rewrite_pool_index_offset
Definition: ioam_cache.h:179
ip6_address_t sr_localsid_ts
Definition: ioam_cache.h:198
#define CLIB_UNUSED(x)
Definition: clib.h:79
#define TCP_FLAG_SYN
Definition: fa_node.h:13
unsigned long u64
Definition: types.h:89
static f64 vlib_time_now(vlib_main_t *vm)
Definition: main.h:225
static int ip6_ioam_analyse_compare_path_delay(ip6_hop_by_hop_header_t *hbh0, ip6_hop_by_hop_header_t *hbh1, bool oneway)
u32 thread_index
Definition: main.h:176
int i
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:419
static int ioam_cache_ts_update(u32 thread_id, i32 pool_index, u32 buffer_index, ip6_hop_by_hop_header_t *hbh)
Definition: ioam_cache.h:712
struct _tcp_header tcp_header_t
unsigned char u8
Definition: types.h:56
ip_lookup_next_t
An adjacency is a representation of an attached L3 peer.
Definition: adj.h:50
static char * ip6_reset_ts_hbh_error_strings[]
i64 word
Definition: types.h:111
#define foreach_ip6_ioam_cache_ts_input_next
vlib_node_registration_t ioam_cache_ts_node
(constructor) VLIB_REGISTER_NODE (ioam_cache_ts_node)
#define TCP_FLAG_ACK
Definition: fa_node.h:16
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:184
static int ioam_cache_ts_add(ip6_header_t *ip0, u16 src_port, u16 dst_port, u32 seq_no, u8 max_responses, u64 now, u32 thread_id, u32 *pool_index)
Definition: ioam_cache.h:640
#define foreach_ip6_reset_ts_hbh_error
unsigned int u32
Definition: types.h:88
tw_timer_wheel_16t_2w_512sl_t * timer_wheels
per thread single-wheel
Definition: ioam_cache.h:190
static void ioam_cache_ts_check_and_send(u32 thread_id, i32 pool_index)
Definition: ioam_cache.h:694
#define foreach_ioam_cache_ts_timer_tick_error
static void ioam_e2e_id_rewrite_handler(ioam_e2e_id_option_t *e2e_option, ip6_address_t *address)
Definition: ioam_cache.h:284
#define VLIB_NODE_FUNCTION_MULTIARCH(node, fn)
Definition: node.h:194
vlib_node_registration_t ip6_reset_ts_hbh_node
(constructor) VLIB_REGISTER_NODE (ip6_reset_ts_hbh_node)
vlib_node_registration_t ioam_cache_ts_timer_tick_node
(constructor) VLIB_REGISTER_NODE (ioam_cache_ts_timer_tick_node)
unsigned short u16
Definition: types.h:57
void ioam_cache_ts_timer_node_enable(vlib_main_t *vm, u8 enable)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:202
void expired_cache_ts_timer_callback(u32 *expired_timers)
#define PREDICT_FALSE(x)
Definition: clib.h:105
static int ioam_cache_ts_lookup(ip6_header_t *ip0, u8 protocol, u16 src_port, u16 dst_port, u32 seq_no, ip6_hop_by_hop_header_t **hbh, u32 *pool_index, u8 *thread_id, u8 response_seen)
Definition: ioam_cache.h:751
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:218
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:364
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1168
#define TCP_FLAG_RST
Definition: fa_node.h:14
static u8 * format_ioam_cache_ts_timer_tick_trace(u8 *s, va_list *args)
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:153
u16 n_vectors
Definition: node.h:380
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:221
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:77
vlib_main_t * vm
Definition: buffer.c:294
#define clib_memcpy(a, b, c)
Definition: string.h:75
static u8 * format_cache_ts_trace(u8 *s, va_list *args)
#define ARRAY_LEN(x)
Definition: clib.h:59
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:454
static uword ip6_reset_ts_hbh_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:492
static uword ioam_cache_ts_timer_tick_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:215
static void vlib_node_set_state(vlib_main_t *vm, u32 node_index, vlib_node_state_t new_state)
Set node dispatch state.
Definition: node_funcs.h:147
size_t count
Definition: vapi.c:42
static void ioam_cache_ts_send(u32 thread_id, i32 pool_index)
Definition: ioam_cache.h:680
static int ip6_locate_header(vlib_buffer_t *p0, ip6_header_t *ip0, int find_hdr_type, u32 *offset)
Definition: ip6.h:497
#define foreach_cache_ts_error
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
struct _vlib_node_registration vlib_node_registration_t
static uword ip6_ioam_cache_ts_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
u32 ip_version_traffic_class_and_flow_label
Definition: ip6_packet.h:334
u16 payload_length
Definition: ip6_packet.h:338
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
vlib_main_t * vlib_main
Definition: ioam_cache.h:201
u64 uword
Definition: types.h:112
static char * cache_ts_error_strings[]
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:267
ioam_cache_main_t ioam_cache_main
Definition: ioam_cache.c:58
static u8 * format_ip6_reset_ts_hbh_trace(u8 *s, va_list *args)
Segment Routing data structures definitions.
static char * ioam_cache_ts_timer_tick_error_strings[]
u8 data[0]
Packet data.
Definition: buffer.h:172
u16 flags
Copy of main node flags.
Definition: node.h:486
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:295
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:62
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:111
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:57
#define IOAM_CACHE_TS_TICK
Definition: ioam_cache.h:508