FD.io VPP v19.08.3-2-gbabecb413 — Vector Packet Processing
Source listing: ioam_pop.c (VXLAN-GPE iOAM pop node)
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include <vppinfra/error.h>
16 #include <vppinfra/hash.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/ip.h>
19 #include <vnet/ethernet/ethernet.h>
22 
/* Statistics (not really errors) */
#define foreach_vxlan_gpe_pop_ioam_v4_error \
_(POPPED, "good packets popped")

/* Human-readable counter strings, one per entry in the error list above. */
static char *vxlan_gpe_pop_ioam_v4_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gpe_pop_ioam_v4_error
#undef _
};
32 
33 typedef enum
34 {
35 #define _(sym,str) VXLAN_GPE_POP_IOAM_V4_ERROR_##sym,
37 #undef _
40 
41 typedef struct
42 {
45 
46 
47 u8 *
49 {
50  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
51  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
53  = va_arg (*args, vxlan_gpe_pop_ioam_v4_trace_t *);
54  ioam_trace_t *t = &(t1->fmt_trace);
55  vxlan_gpe_ioam_option_t *fmt_trace0;
56  vxlan_gpe_ioam_option_t *opt0, *limit0;
58 
59  u8 type0;
60 
61  fmt_trace0 = (vxlan_gpe_ioam_option_t *) t->option_data;
62 
63  s = format (s, "VXLAN_GPE_IOAM_POP: next_index %d len %d traced %d",
64  t->next_index, fmt_trace0->length, t->trace_len);
65 
66  opt0 = (vxlan_gpe_ioam_option_t *) (fmt_trace0 + 1);
67  limit0 = (vxlan_gpe_ioam_option_t *) ((u8 *) fmt_trace0) + t->trace_len;
68 
69  while (opt0 < limit0)
70  {
71  type0 = opt0->type;
72  switch (type0)
73  {
74  case 0: /* Pad, just stop */
75  opt0 = (vxlan_gpe_ioam_option_t *) ((u8 *) opt0) + 1;
76  break;
77 
78  default:
79  if (hm->trace[type0])
80  {
81  s = (*hm->trace[type0]) (s, opt0);
82  }
83  else
84  {
85  s =
86  format (s, "\n unrecognized option %d length %d", type0,
87  opt0->length);
88  }
89  opt0 =
90  (vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + opt0->length +
91  sizeof (vxlan_gpe_ioam_option_t));
92  break;
93  }
94  }
95 
96  return s;
97 }
98 
99 always_inline void
101  vlib_buffer_t * b0)
102 {
103  ip4_header_t *ip0;
104  udp_header_t *udp_hdr0;
105  vxlan_gpe_header_t *gpe_hdr0;
106  vxlan_gpe_ioam_hdr_t *gpe_ioam0;
107 
108  ip0 = vlib_buffer_get_current (b0);
109 
110  udp_hdr0 = (udp_header_t *) (ip0 + 1);
111  gpe_hdr0 = (vxlan_gpe_header_t *) (udp_hdr0 + 1);
112  gpe_ioam0 = (vxlan_gpe_ioam_hdr_t *) (gpe_hdr0 + 1);
113 
114  /* Pop the iOAM data */
116  (word) (sizeof (udp_header_t) +
117  sizeof (ip4_header_t) +
118  sizeof (vxlan_gpe_header_t) +
119  gpe_ioam0->length));
120 
121  return;
122 }
123 
124 
125 
126 always_inline void
128  vlib_node_runtime_t * node,
129  vxlan_gpe_main_t * ngm,
130  vlib_buffer_t * b0, u32 * next0)
131 {
132  CLIB_UNUSED (ip4_header_t * ip0);
133  CLIB_UNUSED (udp_header_t * udp_hdr0);
134  CLIB_UNUSED (vxlan_gpe_header_t * gpe_hdr0);
135  CLIB_UNUSED (vxlan_gpe_ioam_hdr_t * gpe_ioam0);
139 
140 
141  /* Pop the iOAM header */
142  ip0 = vlib_buffer_get_current (b0);
143  udp_hdr0 = (udp_header_t *) (ip0 + 1);
144  gpe_hdr0 = (vxlan_gpe_header_t *) (udp_hdr0 + 1);
145  gpe_ioam0 = (vxlan_gpe_ioam_hdr_t *) (gpe_hdr0 + 1);
146  opt0 = (vxlan_gpe_ioam_option_t *) (gpe_ioam0 + 1);
147  limit0 = (vxlan_gpe_ioam_option_t *) ((u8 *) gpe_ioam0 + gpe_ioam0->length);
148 
149  /*
150  * Basic validity checks
151  */
152  if (gpe_ioam0->length > clib_net_to_host_u16 (ip0->length))
153  {
154  *next0 = VXLAN_GPE_INPUT_NEXT_DROP;
155  goto trace00;
156  }
157 
158  /* Scan the set of h-b-h options, process ones that we understand */
159  while (opt0 < limit0)
160  {
161  u8 type0;
162  type0 = opt0->type;
163  switch (type0)
164  {
165  case 0: /* Pad1 */
166  opt0 = (vxlan_gpe_ioam_option_t *) ((u8 *) opt0) + 1;
167  continue;
168  case 1: /* PadN */
169  break;
170  default:
171  if (hm->pop_options[type0])
172  {
173  if ((*hm->pop_options[type0]) (ip0, opt0) < 0)
174  {
175  *next0 = VXLAN_GPE_INPUT_NEXT_DROP;
176  goto trace00;
177  }
178  }
179  break;
180  }
181  opt0 =
182  (vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + opt0->length +
183  sizeof (vxlan_gpe_ioam_hdr_t));
184  }
185 
186 
187  *next0 =
188  (gpe_ioam0->protocol < VXLAN_GPE_PROTOCOL_MAX) ?
189  ngm->
190  decap_next_node_list[gpe_ioam0->protocol] : VXLAN_GPE_INPUT_NEXT_DROP;
191 
192 trace00:
193  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
194  {
196  vlib_add_trace (vm, node, b0, sizeof (*t));
197  u32 trace_len = gpe_ioam0->length;
198  t->fmt_trace.next_index = *next0;
199  /* Capture the h-b-h option verbatim */
200  trace_len =
201  trace_len <
202  ARRAY_LEN (t->fmt_trace.
203  option_data) ? trace_len : ARRAY_LEN (t->fmt_trace.
204  option_data);
205  t->fmt_trace.trace_len = trace_len;
206  clib_memcpy_fast (&(t->fmt_trace.option_data), gpe_ioam0, trace_len);
207  }
208 
209  /* Remove the iOAM header inside the VxLAN-GPE header */
210  vxlan_gpe_ioam_pop_v4 (vm, node, b0);
211  return;
212 }
213 
214 always_inline void
216  vlib_node_runtime_t * node,
217  vxlan_gpe_main_t * ngm,
218  vlib_buffer_t * b0, vlib_buffer_t * b1,
219  u32 * next0, u32 * next1)
220 {
221 
222  vxlan_gpe_pop_ioam_v4_one_inline (vm, node, ngm, b0, next0);
223  vxlan_gpe_pop_ioam_v4_one_inline (vm, node, ngm, b1, next1);
224 }
225 
226 
227 
228 static uword
230  vlib_node_runtime_t * node,
231  vlib_frame_t * from_frame, u8 is_ipv6)
232 {
233  u32 n_left_from, next_index, *from, *to_next;
235 
236  from = vlib_frame_vector_args (from_frame);
237  n_left_from = from_frame->n_vectors;
238 
239  next_index = node->cached_next_index;
240 
241  while (n_left_from > 0)
242  {
243  u32 n_left_to_next;
244 
245  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
246 
247  while (n_left_from >= 4 && n_left_to_next >= 2)
248  {
249  u32 bi0, bi1;
250  vlib_buffer_t *b0, *b1;
251  u32 next0, next1;
252 
253  /* Prefetch next iteration. */
254  {
255  vlib_buffer_t *p2, *p3;
256 
257  p2 = vlib_get_buffer (vm, from[2]);
258  p3 = vlib_get_buffer (vm, from[3]);
259 
260  vlib_prefetch_buffer_header (p2, LOAD);
261  vlib_prefetch_buffer_header (p3, LOAD);
262 
263  CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
264  CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
265  }
266 
267  bi0 = from[0];
268  bi1 = from[1];
269  to_next[0] = bi0;
270  to_next[1] = bi1;
271  from += 2;
272  to_next += 2;
273  n_left_to_next -= 2;
274  n_left_from -= 2;
275 
276  b0 = vlib_get_buffer (vm, bi0);
277  b1 = vlib_get_buffer (vm, bi1);
278 
279  vxlan_gpe_pop_ioam_v4_two_inline (vm, node, ngm, b0, b1, &next0,
280  &next1);
281 
282 
283  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
284  n_left_to_next, bi0, bi1, next0,
285  next1);
286  }
287 
288  while (n_left_from > 0 && n_left_to_next > 0)
289  {
290  u32 bi0;
291  vlib_buffer_t *b0;
292  u32 next0;
293 
294  bi0 = from[0];
295  to_next[0] = bi0;
296  from += 1;
297  to_next += 1;
298  n_left_from -= 1;
299  n_left_to_next -= 1;
300 
301  b0 = vlib_get_buffer (vm, bi0);
302 
303  vxlan_gpe_pop_ioam_v4_one_inline (vm, node, ngm, b0, &next0);
304 
305 
306  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
307  n_left_to_next, bi0, next0);
308  }
309 
310  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
311  }
312 
313  return from_frame->n_vectors;
314 }
315 
316 
317 static uword
319  vlib_node_runtime_t * node, vlib_frame_t * from_frame)
320 {
321  return vxlan_gpe_pop_ioam (vm, node, from_frame, 0);
322 }
323 
324 /* *INDENT-OFF* */
326  .function = vxlan_gpe_pop_ioam_v4,
327  .name = "vxlan-gpe-pop-ioam-v4",
328  .vector_size = sizeof (u32),
329  .format_trace = format_vxlan_gpe_pop_ioam_v4_trace,
331 
333  .error_strings = vxlan_gpe_pop_ioam_v4_error_strings,
334 
335  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
336 
337  .next_nodes = {
338 #define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
340 #undef _
341  },
342 };
343 /* *INDENT-ON* */
344 
345 
346 
347 /*
348  * fd.io coding-style-patch-verification: ON
349  *
350  * Local Variables:
351  * eval: (c-set-style "gnu")
352  * End:
353  */
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
#define CLIB_UNUSED(x)
Definition: clib.h:83
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
u8 * format_vxlan_gpe_pop_ioam_v4_trace(u8 *s, va_list *args)
Definition: ioam_pop.c:48
VXLAN GPE definitions.
int(* pop_options[256])(ip4_header_t *ip, vxlan_gpe_ioam_option_t *opt)
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
unsigned char u8
Definition: types.h:56
static void vxlan_gpe_ioam_pop_v4(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b0)
Definition: ioam_pop.c:100
u8 protocol
see vxlan_gpe_protocol_t
static void vxlan_gpe_pop_ioam_v4_one_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vxlan_gpe_main_t *ngm, vlib_buffer_t *b0, u32 *next0)
Definition: ioam_pop.c:127
i64 word
Definition: types.h:111
#define always_inline
Definition: clib.h:99
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:203
vxlan_gpe_ioam_main_t vxlan_gpe_ioam_main
unsigned int u32
Definition: types.h:88
u8 option_data[256]
vl_api_fib_path_type_t type
Definition: fib_types.api:123
#define foreach_vxlan_gpe_pop_ioam_v4_error
Definition: ioam_pop.c:24
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:229
vxlan_gpe_main_t vxlan_gpe_main
Definition: vxlan_gpe.c:46
#define PREDICT_FALSE(x)
Definition: clib.h:112
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:218
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:338
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
u16 n_vectors
Definition: node.h:397
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
vlib_main_t * vm
Definition: buffer.c:323
u8 data[]
Packet data.
Definition: buffer.h:181
#define ARRAY_LEN(x)
Definition: clib.h:63
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:456
#define foreach_vxlan_gpe_input_next
next nodes for VXLAN GPE input
Definition: vxlan_gpe.h:171
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:515
Struct for VXLAN GPE node state.
Definition: vxlan_gpe.h:196
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:248
static uword vxlan_gpe_pop_ioam_v4(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
Definition: ioam_pop.c:318
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
vlib_node_registration_t vxlan_gpe_pop_ioam_v4_node
(constructor) VLIB_REGISTER_NODE (vxlan_gpe_pop_ioam_v4_node)
Definition: ioam_pop.c:325
static uword vxlan_gpe_pop_ioam(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, u8 is_ipv6)
Definition: ioam_pop.c:229
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
VXLAN GPE Extension (iOAM) Header definition.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:244
VXLAN GPE Header definition.
static char * vxlan_gpe_pop_ioam_v4_error_strings[]
Definition: ioam_pop.c:27
vxlan_gpe_pop_ioam_v4_error_t
Definition: ioam_pop.c:33
static void vxlan_gpe_pop_ioam_v4_two_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vxlan_gpe_main_t *ngm, vlib_buffer_t *b0, vlib_buffer_t *b1, u32 *next0, u32 *next1)
Definition: ioam_pop.c:215
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
u8 *(* trace[256])(u8 *s, vxlan_gpe_ioam_option_t *opt)