FD.io VPP v16.06
Vector Packet Processing
encap.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan-gpe/vxlan_gpe.h>

/* Statistics (not really errors) */
#define foreach_vxlan_gpe_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char * vxlan_gpe_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gpe_encap_error
#undef _
};

typedef enum {
#define _(sym,str) VXLAN_GPE_ENCAP_ERROR_##sym,
  foreach_vxlan_gpe_encap_error
#undef _
  VXLAN_GPE_ENCAP_N_ERROR,
} vxlan_gpe_encap_error_t;

typedef enum {
  VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP,
  VXLAN_GPE_ENCAP_NEXT_DROP,
  VXLAN_GPE_ENCAP_N_NEXT
} vxlan_gpe_encap_next_t;

typedef struct {
  u32 tunnel_index;
} vxlan_gpe_encap_trace_t;

u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gpe_encap_trace_t * t
      = va_arg (*args, vxlan_gpe_encap_trace_t *);

  s = format (s, "VXLAN-GPE-ENCAP: tunnel %d", t->tunnel_index);
  return s;
}

#define foreach_fixed_header_offset             \
_(0) _(1) _(2) _(3) _(4) _(5) _(6)
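/* The macro above drives the fixed-header copy in the loops below:
 * seven u64 copies (56 octets) plus one trailing u32 copy (4 octets),
 * 60 octets in total.  Note this is more than the 36-octet
 * ip4_vxlan_gpe_header_t asserted below; the extra offsets appear to
 * be inherited from the NSH-aware ancestor of this node. */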

static uword
vxlan_gpe_encap (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, * from, * to_next;
  vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
  vnet_main_t * vnm = ngm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 cpu_index = os_get_cpu_number();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

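  /* node->runtime_data[0] carries the sw_if_index of the last tunnel
   * whose stats were batched in a previous frame, so batching can
   * continue across frame boundaries (see the flush at the end). */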
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
          u32 next1 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          vnet_hw_interface_t * hi0, * hi1;
          ip4_header_t * ip0, * ip1;
          udp_header_t * udp0, * udp1;
          u64 * copy_src0, * copy_dst0;
          u64 * copy_src1, * copy_dst1;
          u32 * copy_src_last0, * copy_dst_last0;
          u32 * copy_src_last1, * copy_dst_last1;
          vxlan_gpe_tunnel_t * t0, * t1;
          u16 new_l0, new_l1;
          ip_csum_t sum0, sum1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

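          /* The dual loop consumes buffers 0-1 while the headers and
           * data of buffers 2-3 are prefetched, hiding memory latency
           * behind the per-packet work on the current pair. */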
          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* 1-wide cache? */
          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
          sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
          hi0 = vnet_get_sup_hw_interface
            (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
          hi1 = vnet_get_sup_hw_interface
            (vnm, vnet_buffer(b1)->sw_if_index[VLIB_TX]);

          t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
          t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance);

          ASSERT(vec_len(t0->rewrite) >= 24);
          ASSERT(vec_len(t1->rewrite) >= 24);

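          /* Each tunnel is modeled as a (virtual) hw interface whose
           * dev_instance is its index in ngm->tunnels, so the TX
           * sw_if_index on the buffer leads straight to the tunnel. */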
          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
          vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));

          ip0 = vlib_buffer_get_current(b0);
          ip1 = vlib_buffer_get_current(b1);
          /* Copy the fixed header */
          copy_dst0 = (u64 *) ip0;
          copy_src0 = (u64 *) t0->rewrite;
          copy_dst1 = (u64 *) ip1;
          copy_src1 = (u64 *) t1->rewrite;

          ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);

          /* Copy first 36 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
          foreach_fixed_header_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
          foreach_fixed_header_offset;
#undef _

          /* Last 4 octets. Hopefully gcc will be our friend */
          copy_dst_last0 = (u32 *)(&copy_dst0[7]);
          copy_src_last0 = (u32 *)(&copy_src0[7]);
          copy_dst_last1 = (u32 *)(&copy_dst1[7]);
          copy_src_last1 = (u32 *)(&copy_src1[7]);

          copy_dst_last0[0] = copy_src_last0[0];
          copy_dst_last1[0] = copy_src_last1[0];

          /* If there are TLVs to copy, do so */
          if (PREDICT_FALSE (_vec_len(t0->rewrite) > 64))
            clib_memcpy (&copy_dst0[3], t0->rewrite + 64,
                         _vec_len (t0->rewrite)-64);

          if (PREDICT_FALSE (_vec_len(t1->rewrite) > 64))
            clib_memcpy (&copy_dst1[3], t1->rewrite + 64,
                         _vec_len (t1->rewrite)-64);

          /* fix the <bleep>ing outer-IP checksum */
          sum0 = ip0->checksum;
          /* old_l0 always 0, see the rewrite setup */
          new_l0 =
            clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));

          sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                 length /* changed member */);
          ip0->checksum = ip_csum_fold (sum0);
          ip0->length = new_l0;

          sum1 = ip1->checksum;
          /* old_l1 always 0, see the rewrite setup */
          new_l1 =
            clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));

          sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
                                 length /* changed member */);
          ip1->checksum = ip_csum_fold (sum1);
          ip1->length = new_l1;

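          /* ip_csum_update() applies the RFC 1624 incremental-update
           * identity: the precomputed rewrite carries length 0 and a
           * checksum computed over that 0, so folding in the real
           * length yields a correct header checksum without summing
           * the whole header again. */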
          /* Fix UDP length */
          udp0 = (udp_header_t *)(ip0+1);
          new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                         - sizeof (*ip0));
          udp1 = (udp_header_t *)(ip1+1);
          new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                         - sizeof (*ip1));

          udp0->length = new_l0;
          udp1->length = new_l1;

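          /* The UDP checksum is left exactly as the rewrite supplied
           * it; presumably zero, which RFC 768 permits for UDP over
           * IPv4, so only the length field needs patching here. */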
          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          pkts_encapsulated += 2;

          len0 = vlib_buffer_length_in_chain(vm, b0);
          len1 = vlib_buffer_length_in_chain(vm, b1);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* Batch stats increment on the same vxlan tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE(
              (sw_if_index0 != stats_sw_if_index)
              || (sw_if_index1 != stats_sw_if_index))) {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1) {
                  if (stats_n_packets)
                      vlib_increment_combined_counter(
                          im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                          cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
              } else {
                  vlib_increment_combined_counter(
                      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                      cpu_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter(
                      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                      cpu_index, sw_if_index1, 1, len1);
              }
          }

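          /* A combined counter tracks packets and bytes together;
           * flushing once per tunnel change instead of per packet
           * keeps the counter cache line out of the hot path. */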
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - ngm->tunnels;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - ngm->tunnels;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

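      /* The enqueue macro speculates that next0/next1 match the frame
       * already open on next_index and falls back to a slow path that
       * re-files the buffers when the speculation fails. */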
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
          u32 sw_if_index0, len0;
          vnet_hw_interface_t * hi0;
          ip4_header_t * ip0;
          udp_header_t * udp0;
          u64 * copy_src0, * copy_dst0;
          u32 * copy_src_last0, * copy_dst_last0;
          vxlan_gpe_tunnel_t * t0;
          u16 new_l0;
          ip_csum_t sum0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* 1-wide cache? */
          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
          hi0 = vnet_get_sup_hw_interface
            (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);

          t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);

          ASSERT(vec_len(t0->rewrite) >= 24);

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));

          ip0 = vlib_buffer_get_current(b0);
          /* Copy the fixed header */
          copy_dst0 = (u64 *) ip0;
          copy_src0 = (u64 *) t0->rewrite;

          ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);

          /* Copy first 36 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
          foreach_fixed_header_offset;
#undef _
          /* Last 4 octets. Hopefully gcc will be our friend */
          copy_dst_last0 = (u32 *)(&copy_dst0[7]);
          copy_src_last0 = (u32 *)(&copy_src0[7]);

          copy_dst_last0[0] = copy_src_last0[0];

          /* If there are TLVs to copy, do so */
          if (PREDICT_FALSE (_vec_len(t0->rewrite) > 64))
            clib_memcpy (&copy_dst0[3], t0->rewrite + 64,
                         _vec_len (t0->rewrite)-64);

          /* fix the <bleep>ing outer-IP checksum */
          sum0 = ip0->checksum;
          /* old_l0 always 0, see the rewrite setup */
          new_l0 =
            clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));

          sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                 length /* changed member */);
          ip0->checksum = ip_csum_fold (sum0);
          ip0->length = new_l0;

          /* Fix UDP length */
          udp0 = (udp_header_t *)(ip0+1);
          new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                         - sizeof (*ip0));

          udp0->length = new_l0;

          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          pkts_encapsulated++;

          len0 = vlib_buffer_length_in_chain(vm, b0);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan tunnel so counter is not
           * incremented per packet. Note stats are still incremented for deleted
           * and admin-down tunnel where packets are dropped. It is not worthwhile
           * to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                  vlib_increment_combined_counter(
                      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                      cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - ngm->tunnels;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);
  /* Increment any remaining batch stats */
  if (stats_n_packets) {
      vlib_increment_combined_counter(
          im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index,
          stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
  }
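  /* Whatever is still batched is flushed here, and the tunnel it
   * belongs to is remembered in runtime_data[0] so the next frame can
   * keep batching against the same interface. */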

  return from_frame->n_vectors;
}

VLIB_REGISTER_NODE (vxlan_gpe_encap_node) = {
  .function = vxlan_gpe_encap,
  .name = "vxlan-gpe-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gpe_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(vxlan_gpe_encap_error_strings),
  .error_strings = vxlan_gpe_encap_error_strings,

  .n_next_nodes = VXLAN_GPE_ENCAP_N_NEXT,

  .next_nodes = {
    [VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop",
  },
};
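/* Encapsulated packets are handed to ip4-lookup, which consults the
 * FIB selected via encap_fib_index above; VXLAN_GPE_ENCAP_NEXT_DROP
 * is wired to error-drop but is not used on this fast path. */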