FD.io VPP  v19.08.3-2-gbabecb413
Vector Packet Processing
esp_encrypt.c
/*
 * esp_encrypt.c : IPSec ESP encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/udp/udp.h>

#include <vnet/crypto/crypto.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>

#define foreach_esp_encrypt_next \
_(DROP, "error-drop") \
_(IP4_LOOKUP, "ip4-lookup") \
_(IP6_LOOKUP, "ip6-lookup") \
_(INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;

#define foreach_esp_encrypt_error \
 _(RX_PKTS, "ESP pkts received") \
 _(SEQ_CYCLED, "sequence number cycled (packet dropped)") \
 _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
 _(CHAINED_BUFFER, "chained buffers (packet dropped)") \
 _(NO_TRAILER_SPACE, "no trailer space (packet dropped)")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};

typedef struct
{
  u32 sa_index;
  u32 spi;
  u32 seq;
  u32 sa_seq_hi;
  u8 udp_encap;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s =
    format (s,
            "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
            t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
            format_ipsec_crypto_alg, t->crypto_alg,
            format_ipsec_integ_alg, t->integ_alg,
            t->udp_encap ? " udp-encap-enabled" : "");
  return s;
}

/* pad packet in input buffer */
static_always_inline u8 *
esp_add_footer_and_icv (vlib_buffer_t * b, u8 block_size, u8 icv_sz,
                        u16 * next, vlib_node_runtime_t * node,
                        u16 buffer_data_size)
{
  static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
    0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00,
  };

  u16 min_length = b->current_length + sizeof (esp_footer_t);
  u16 new_length = round_pow2 (min_length, block_size);
  u8 pad_bytes = new_length - min_length;
  esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (b) +
                                      new_length - sizeof (esp_footer_t));

  if (b->current_data + new_length + icv_sz > buffer_data_size)
    {
      b->error = node->errors[ESP_ENCRYPT_ERROR_NO_TRAILER_SPACE];
      next[0] = ESP_ENCRYPT_NEXT_DROP;
      return 0;
    }

  if (pad_bytes)
    {
      ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE);
      pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes);
      clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);
    }

  f->pad_length = pad_bytes;
  b->current_length = new_length + icv_sz;
  return &f->next_header;
}

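/*
 * Worked example (illustrative numbers, not taken from the code above):
 * with block_size = 16, icv_sz = 12 and b->current_length = 100, the
 * 2-byte ESP footer gives min_length = 102, which rounds up to
 * new_length = 112, so pad_bytes = 10. The first ten bytes of pad_data
 * (0x01..0x0a) are copied in front of the footer, matching the
 * monotonically increasing padding RFC 4303 requires, and
 * current_length becomes 112 + 12 = 124.
 */
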
static_always_inline void
esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
{
  ip_csum_t sum;
  u16 old_len;

  len = clib_net_to_host_u16 (len);
  old_len = ip4->length;

  if (is_transport)
    {
      u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;

      sum = ip_csum_update (ip4->checksum, ip4->protocol,
                            prot, ip4_header_t, protocol);
      ip4->protocol = prot;

      sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
    }
  else
    sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);

  ip4->length = len;
  ip4->checksum = ip_csum_fold (sum);
}

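/*
 * A sketch of the arithmetic above: the IPv4 checksum is patched
 * incrementally (RFC 1624) instead of being recomputed. For each 16-bit
 * quantity that changes, ip_csum_update () folds the old/new difference
 * into the running sum; e.g. in transport mode, rewriting protocol 6
 * (TCP) to 50 (ESP) plus the new total length costs two updates and one
 * final ip_csum_fold (), rather than a full pass over the 20-byte header.
 */
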
static_always_inline void
esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
{
  clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
  udp->length = clib_net_to_host_u16 (len);
}

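/*
 * NAT-T encapsulation (RFC 3948): the UDP header template (typically
 * port 4500) is pre-built on the SA, so per packet only the length
 * field, covering the UDP header plus the whole ESP packet, needs to
 * be written. The template's checksum stays zero, which RFC 3948
 * permits for UDP-encapsulated ESP over IPv4.
 */
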
static_always_inline u8
ext_hdr_is_pre_esp (u8 nexthdr)
{
#ifdef CLIB_HAVE_VEC128
  static const u8x16 ext_hdr_types = {
    IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
    IP_PROTOCOL_IPV6_ROUTE,
    IP_PROTOCOL_IPV6_FRAGMENTATION,
  };

  return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
  return (!(nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) |
          !(nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) |
          !(nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION));
#endif
}

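/*
 * The vector variant splats nexthdr into all 16 lanes of a u8x16 and
 * compares against the table of pre-ESP header types: any match yields
 * an all-ones lane, so the result is non-zero exactly when nexthdr is
 * one of the three. The scalar fallback expresses the same predicate:
 * !(a ^ b) is 1 only when a == b, and OR-ing the three tests together
 * gives the identical "is one of" result.
 */
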
static_always_inline u8
esp_get_ip6_hdr_len (ip6_header_t * ip6)
{
  /* this code assumes that HbH, route and frag headers will be before
     others, if that is not the case, they will end up encrypted */

  u8 len = sizeof (ip6_header_t);
  ip6_ext_header_t *p;

  /* if next packet doesn't have ext header */
  if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
    return len;

  p = (void *) (ip6 + 1);
  len += ip6_ext_header_len (p);

  while (ext_hdr_is_pre_esp (p->next_hdr))
    {
      p = ip6_ext_next_header (p);
      len += ip6_ext_header_len (p);
    }

  return len;
}

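/*
 * Example with a hypothetical chain IPv6 (40B) -> hop-by-hop (8B) ->
 * fragment (8B) -> TCP: the walk stops when it sees TCP in the fragment
 * header's next_hdr and returns 56, so in transport mode the ESP header
 * lands after the fragment header and everything from TCP onwards is
 * encrypted.
 */
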
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
          b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
          nexts[bi] = ESP_ENCRYPT_NEXT_DROP;
          n_fail--;
        }
      op++;
    }
}

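/*
 * Error recovery depends on op->user_data having been set to the
 * packet's index within the frame when the op was queued: a failed op
 * maps straight back to its buffer and next-index slot, so only the
 * offending packets are steered to the drop node.
 */
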
typedef struct
{
  u32 salt;
  u64 iv;
} __clib_packed esp_gcm_nonce_t;

STATIC_ASSERT_SIZEOF (esp_gcm_nonce_t, 12);

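/*
 * Per RFC 4106, the AES-GCM nonce is 12 bytes: a 4-byte salt fixed at
 * SA creation followed by the 8-byte per-packet IV. Only the IV is sent
 * on the wire; the salt never leaves the box, hence the packed struct
 * and the size assertion on exactly 12 bytes.
 */
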
always_inline uword
esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                    vlib_frame_t * frame, int is_ip6, int is_tun)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  esp_gcm_nonce_t nonces[VLIB_FRAME_SIZE], *nonce = nonces;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 current_sa_index = ~0, current_sa_packets = 0;
  u32 current_sa_bytes = 0, spi = 0;
  u8 block_sz = 0, iv_sz = 0, icv_sz = 0;
  ipsec_sa_t *sa0 = 0;

  vlib_get_buffers (vm, from, b, n_left);
  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);

  while (n_left > 0)
    {
      u32 sa_index0;
      dpo_id_t *dpo;
      esp_header_t *esp;
      u8 *payload, *next_hdr_ptr;
      u16 payload_len;
      u32 hdr_len;

      if (n_left > 2)
        {
          u8 *p;
          vlib_prefetch_buffer_header (b[2], LOAD);
          p = vlib_buffer_get_current (b[1]);
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
          p -= CLIB_CACHE_LINE_BYTES;
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
        }

      if (is_tun)
        {
          /* we are on a ipsec tunnel's feature arc */
          u32 next0;
          sa_index0 = *(u32 *) vnet_feature_next_with_data (&next0, b[0],
                                                            sizeof (sa_index0));
          next[0] = next0;
        }
      else
        sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;

      if (sa_index0 != current_sa_index)
        {
          if (current_sa_packets)
            vlib_increment_combined_counter (&ipsec_sa_counters,
                                             thread_index,
                                             current_sa_index,
                                             current_sa_packets,
                                             current_sa_bytes);
          current_sa_packets = current_sa_bytes = 0;

          sa0 = pool_elt_at_index (im->sad, sa_index0);
          current_sa_index = sa_index0;
          spi = clib_net_to_host_u32 (sa0->spi);
          block_sz = sa0->crypto_block_size;
          icv_sz = sa0->integ_icv_size;
          iv_sz = sa0->crypto_iv_size;
        }

      if (vlib_buffer_chain_linearize (vm, b[0]) != 1)
        {
          b[0]->error = node->errors[ESP_ENCRYPT_ERROR_CHAINED_BUFFER];
          next[0] = ESP_ENCRYPT_NEXT_DROP;
          goto trace;
        }

      if (PREDICT_FALSE (esp_seq_advance (sa0)))
        {
          b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
          next[0] = ESP_ENCRYPT_NEXT_DROP;
          goto trace;
        }

      /* space for IV */
      hdr_len = iv_sz;

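      /*
       * Headers are written back to front, with hdr_len growing as each
       * layer is accounted for. A finished tunnel-mode packet reads:
       * [IP][optional UDP][ESP][IV][payload][pad][footer][ICV], where
       * padding and ICV space were already reserved by
       * esp_add_footer_and_icv ().
       */
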
      if (ipsec_sa_is_set_IS_TUNNEL (sa0))
        {
          payload = vlib_buffer_get_current (b[0]);
          next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz,
                                                 next, node,
                                                 buffer_data_size);
          if (!next_hdr_ptr)
            goto trace;
          payload_len = b[0]->current_length;

          /* ESP header */
          hdr_len += sizeof (*esp);
          esp = (esp_header_t *) (payload - hdr_len);

          /* optional UDP header */
          if (ipsec_sa_is_set_UDP_ENCAP (sa0))
            {
              hdr_len += sizeof (udp_header_t);
              esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
                                payload_len + hdr_len);
            }

          /* IP header */
          if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
            {
              ip6_header_t *ip6;
              u16 len = sizeof (ip6_header_t);
              hdr_len += len;
              ip6 = (ip6_header_t *) (payload - hdr_len);
              clib_memcpy_fast (ip6, &sa0->ip6_hdr, len);
              *next_hdr_ptr = (is_ip6 ?
                               IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
              len = payload_len + hdr_len - len;
              ip6->payload_length = clib_net_to_host_u16 (len);
            }
          else
            {
              ip4_header_t *ip4;
              u16 len = sizeof (ip4_header_t);
              hdr_len += len;
              ip4 = (ip4_header_t *) (payload - hdr_len);
              clib_memcpy_fast (ip4, &sa0->ip4_hdr, len);
              *next_hdr_ptr = (is_ip6 ?
                               IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
              len = payload_len + hdr_len;
              esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
            }

          dpo = &sa0->dpo;
          if (!is_tun)
            {
              next[0] = dpo->dpoi_next_node;
              vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
            }
        }
      else                      /* transport mode */
        {
          u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
          udp_header_t *udp = 0;
          u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);

          ip_len = is_ip6 ?
            esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr) :
            ip4_header_bytes ((ip4_header_t *) old_ip_hdr);

          vlib_buffer_advance (b[0], ip_len);
          payload = vlib_buffer_get_current (b[0]);
          next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz,
                                                 next, node,
                                                 buffer_data_size);
          if (!next_hdr_ptr)
            goto trace;
          payload_len = b[0]->current_length;

          /* ESP header */
          hdr_len += sizeof (*esp);
          esp = (esp_header_t *) (payload - hdr_len);

          /* optional UDP header */
          if (ipsec_sa_is_set_UDP_ENCAP (sa0))
            {
              hdr_len += sizeof (udp_header_t);
              udp = (udp_header_t *) (payload - hdr_len);
            }

          /* IP header */
          hdr_len += ip_len;
          ip_hdr = payload - hdr_len;

          /* L2 header */
          if (!is_tun)
            {
              l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
              hdr_len += l2_len;
              l2_hdr = payload - hdr_len;

              /* copy l2 and ip header */
              clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
            }
          else
            l2_len = 0;

          clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);

          if (is_ip6)
            {
              ip6_header_t *ip6 = (ip6_header_t *) (ip_hdr);
              *next_hdr_ptr = ip6->protocol;
              ip6->protocol = IP_PROTOCOL_IPSEC_ESP;
              ip6->payload_length =
                clib_host_to_net_u16 (payload_len + hdr_len - l2_len -
                                      ip_len);
            }
          else
            {
              u16 len;
              ip4_header_t *ip4 = (ip4_header_t *) (ip_hdr);
              *next_hdr_ptr = ip4->protocol;
              len = payload_len + hdr_len - l2_len;
              if (udp)
                {
                  esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1);
                  esp_fill_udp_hdr (sa0, udp, len - ip_len);
                }
              else
                esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);
            }

          if (!is_tun)
            next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
        }

      esp->spi = spi;
      esp->seq = clib_net_to_host_u32 (sa0->seq);

      if (sa0->crypto_enc_op_id)
        {
          vnet_crypto_op_t *op;
          vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
          vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
          op->src = op->dst = payload;
          op->key_index = sa0->crypto_key_index;
          op->len = payload_len - icv_sz;
          op->user_data = b - bufs;

          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              /*
               * construct the AAD in a scratch space in front
               * of the IP header.
               */
              op->aad = payload - hdr_len - sizeof (esp_aead_t);

              esp_aad_fill (op, esp, sa0);

              op->tag = payload + op->len;
              op->tag_len = 16;

              u64 *iv = (u64 *) (payload - iv_sz);
              nonce->salt = sa0->salt;
              nonce->iv = *iv = clib_host_to_net_u64 (sa0->gcm_iv_counter++);
              op->iv = (u8 *) nonce;
              nonce++;
            }
          else
            {
              op->iv = payload - iv_sz;
              op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
            }
        }

      if (sa0->integ_op_id)
        {
          vnet_crypto_op_t *op;
          vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
          vnet_crypto_op_init (op, sa0->integ_op_id);
          op->src = payload - iv_sz - sizeof (esp_header_t);
          op->digest = payload + payload_len - icv_sz;
          op->key_index = sa0->integ_key_index;
          op->digest_len = icv_sz;
          op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
          op->user_data = b - bufs;
          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
              clib_memcpy_fast (op->digest, &seq_hi, sizeof (seq_hi));
              op->len += sizeof (seq_hi);
            }
        }

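      /*
       * Extended sequence numbers (RFC 4303, Appendix A): only the low
       * 32 bits of the sequence number travel in the ESP header, but
       * the high 32 bits must still be authenticated. They are staged
       * in the ICV slot and covered by extending op->len; the engine's
       * digest output then overwrites them when it is written to
       * op->digest.
       */
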
      vlib_buffer_advance (b[0], 0LL - hdr_len);

      current_sa_packets += 1;
      current_sa_bytes += payload_len;

    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
                                                    sizeof (*tr));
          tr->sa_index = sa_index0;
          tr->spi = sa0->spi;
          tr->seq = sa0->seq;
          tr->sa_seq_hi = sa0->seq_hi;
          tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
        }
      /* next */
      n_left -= 1;
      next += 1;
      b += 1;
    }

  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                   current_sa_index, current_sa_packets,
                                   current_sa_bytes);
  esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts);
  esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts);

  vlib_node_increment_counter (vm, node->node_index,
                               ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}

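/*
 * Note the two-phase structure: the loop above only queues crypto and
 * integrity ops into the per-thread ptd vectors; esp_process_ops ()
 * then submits each whole vector to the crypto engine in a single
 * vnet_crypto_process_ops () call, amortizing engine overhead over up
 * to VLIB_FRAME_SIZE packets.
 */
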
VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_node) = {
  .name = "esp4-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
    foreach_esp_encrypt_next
#undef _
  },
};
/* *INDENT-ON* */

VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_node) = {
  .name = "esp6-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
    foreach_esp_encrypt_next
#undef _
  },
};
/* *INDENT-ON* */

VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
  .name = "esp4-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = 1,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP] = "ip4-drop",
  },
};

VNET_FEATURE_INIT (esp4_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip4-output",
  .node_name = "esp4-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};

VNET_FEATURE_INIT (esp6o4_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip6-output",
  .node_name = "esp4-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};

VNET_FEATURE_INIT (esp4_ethernet_encrypt_tun_feat_node, static) =
{
  .arc_name = "ethernet-output",
  .node_name = "esp4-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx", "adj-midchain-tx-no-count"),
};
/* *INDENT-ON* */

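/*
 * The tun node is attached to the output feature arcs so that, for
 * tunnelled traffic, encryption runs before adj-midchain-tx hands the
 * packet to the tunnel adjacency. Note esp6o4 above: IPv6 payloads
 * destined for an IPv4 tunnel are enabled on the ip6-output arc while
 * still targeting esp4-encrypt-tun.
 */
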
VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
  .name = "esp6-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = 1,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP] = "ip6-drop",
  },
};

VNET_FEATURE_INIT (esp6_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip6-output",
  .node_name = "esp6-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};

VNET_FEATURE_INIT (esp4o6_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip4-output",
  .node_name = "esp6-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};

/* *INDENT-ON* */

typedef struct
{
  u32 sa_index;
} esp_no_crypto_trace_t;

static u8 *
format_esp_no_crypto_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *);

  s = format (s, "esp-no-crypto: sa-index %u", t->sa_index);

  return s;
}

enum
{
  ESP_NO_CRYPTO_NEXT_DROP,
  ESP_NO_CRYPTO_N_NEXT,
};

enum
{
  ESP_NO_CRYPTO_ERROR_RX_PKTS,
};

static char *esp_no_crypto_error_strings[] = {
  "Outbound ESP packets received",
};

always_inline uword
esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                      vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);

  while (n_left > 0)
    {
      u32 next0;
      u32 sa_index0;

      /* packets are always going to be dropped, but get the sa_index */
      sa_index0 = *(u32 *) vnet_feature_next_with_data (&next0, b[0],
                                                        sizeof (sa_index0));

      next[0] = ESP_NO_CRYPTO_NEXT_DROP;

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0],
                                                      sizeof (*tr));
          tr->sa_index = sa_index0;
        }

      n_left -= 1;
      next += 1;
      b += 1;
    }

  vlib_node_increment_counter (vm, node->node_index,
                               ESP_NO_CRYPTO_ERROR_RX_PKTS,
                               frame->n_vectors);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  return frame->n_vectors;
}

VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm,
                                        vlib_node_runtime_t * node,
                                        vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) =
{
  .name = "esp4-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop",
  },
};

VNET_FEATURE_INIT (esp4_no_crypto_tun_feat_node, static) =
{
  .arc_name = "ip4-output",
  .node_name = "esp4-no-crypto",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};

VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm,
                                        vlib_node_runtime_t * node,
                                        vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
{
  .name = "esp6-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop",
  },
};

VNET_FEATURE_INIT (esp6_no_crypto_tun_feat_node, static) =
{
  .arc_name = "ip6-output",
  .node_name = "esp6-no-crypto",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */