FD.io VPP  v21.01.1
Vector Packet Processing
esp_encrypt.c
1 /*
2  * esp_encrypt.c : IPSec ESP encrypt node
3  *
4  * Copyright (c) 2015 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
21 
22 #include <vnet/crypto/crypto.h>
23 
24 #include <vnet/ipsec/ipsec.h>
25 #include <vnet/ipsec/ipsec_tun.h>
26 #include <vnet/ipsec/esp.h>
27 #include <vnet/tunnel/tunnel_dp.h>
28 
29 #define foreach_esp_encrypt_next \
30 _(DROP4, "ip4-drop") \
31 _(DROP6, "ip6-drop") \
32 _(HANDOFF4, "handoff4") \
33 _(HANDOFF6, "handoff6") \
34 _(INTERFACE_OUTPUT, "interface-output")
35 
36 #define _(v, s) ESP_ENCRYPT_NEXT_##v,
37 typedef enum
38 {
39   foreach_esp_encrypt_next
40 #undef _
41     ESP_ENCRYPT_N_NEXT,
42 } esp_encrypt_next_t;
43 
44 #define foreach_esp_encrypt_error \
45  _(RX_PKTS, "ESP pkts received") \
46  _(POST_RX_PKTS, "ESP-post pkts received") \
47  _(SEQ_CYCLED, "sequence number cycled (packet dropped)") \
48  _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
49  _(CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)") \
50  _(NO_BUFFERS, "no buffers (packet dropped)") \
51 
52 typedef enum
53 {
54 #define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
55   foreach_esp_encrypt_error
56 #undef _
57     ESP_ENCRYPT_N_ERROR,
58 } esp_encrypt_error_t;
59 
60 static char *esp_encrypt_error_strings[] = {
61 #define _(sym,string) string,
62   foreach_esp_encrypt_error
63 #undef _
64 };
65 
66 typedef struct
67 {
68   u32 sa_index;
69   u32 spi;
70   u32 seq;
71   u32 sa_seq_hi;
72   u8 udp_encap;
73   ipsec_crypto_alg_t crypto_alg;
74   ipsec_integ_alg_t integ_alg;
75 } esp_encrypt_trace_t;
76 
77 typedef struct
78 {
79   u32 next_index;
80 } esp_encrypt_post_trace_t;
81 
82 /* packet trace format function */
83 static u8 *
84 format_esp_encrypt_trace (u8 * s, va_list * args)
85 {
86  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
87  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
88  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
89 
90  s =
91  format (s,
92  "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
93  t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
94  format_ipsec_crypto_alg,
95  t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
96  t->udp_encap ? " udp-encap-enabled" : "");
97  return s;
98 }
99 
100 static u8 *
101 format_esp_post_encrypt_trace (u8 * s, va_list * args)
102 {
103  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
104  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
105  esp_encrypt_post_trace_t *t = va_arg (*args, esp_encrypt_post_trace_t *);
106 
107  s = format (s, "esp-post: next node index %u", t->next_index);
108  return s;
109 }
110 
111 /* pad packet in input buffer */
112 static_always_inline u8 *
113 esp_add_footer_and_icv (vlib_main_t * vm, vlib_buffer_t ** last,
114  u8 esp_align, u8 icv_sz,
115  u16 * next, vlib_node_runtime_t * node,
116  u16 buffer_data_size, uword total_len)
117 {
118  static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
119  0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
120  0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00,
121  };
122 
123  u16 min_length = total_len + sizeof (esp_footer_t);
124  u16 new_length = round_pow2 (min_length, esp_align);
125  u8 pad_bytes = new_length - min_length;
126  esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (last[0]) +
127  last[0]->current_length + pad_bytes);
128  u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;
129 
130  if (last[0]->current_length + tail_sz > buffer_data_size)
131  {
132  u32 tmp_bi = 0;
133  if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
134  return 0;
135 
136  vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
137  last[0]->next_buffer = tmp_bi;
138  last[0]->flags |= VLIB_BUFFER_NEXT_PRESENT;
139  f = (esp_footer_t *) (vlib_buffer_get_current (tmp) + pad_bytes);
140  tmp->current_length += tail_sz;
141  last[0] = tmp;
142  }
143  else
144  last[0]->current_length += tail_sz;
145 
146  f->pad_length = pad_bytes;
147  if (pad_bytes)
148  {
149  ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE);
150  pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes);
151  clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);
152  }
153 
154  return &f->next_header;
155 }
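/* Editorial note, not part of the original source: a worked example of the
 * padding arithmetic above, assuming AES-CBC with esp_align = 16, a 16-byte
 * ICV and a 100-byte payload:
 *
 *   min_length = 100 + sizeof (esp_footer_t)       = 102
 *   new_length = round_pow2 (102, 16)              = 112
 *   pad_bytes  = 112 - 102                         = 10
 *   tail_sz    = 2 (footer) + 10 (pad) + 16 (ICV)  = 28
 *
 * The pad bytes are filled from the monotonic pad_data pattern required by
 * RFC 4303, and the footer records pad_length plus the next-header value
 * returned to the caller. */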
156 
157 static_always_inline void
158 esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
159 {
160  ip_csum_t sum;
161  u16 old_len;
162 
163  len = clib_net_to_host_u16 (len);
164  old_len = ip4->length;
165 
166  if (is_transport)
167  {
168  u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
169 
170  sum = ip_csum_update (ip4->checksum, ip4->protocol,
171  prot, ip4_header_t, protocol);
172  ip4->protocol = prot;
173 
174  sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
175  }
176  else
177  sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);
178 
179  ip4->length = len;
180  ip4->checksum = ip_csum_fold (sum);
181 }
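/* Editorial note, not part of the original source: esp_update_ip4_hdr() above
 * avoids recomputing the full IPv4 header checksum. It folds only the changed
 * fields (protocol in transport mode, and total length) into the existing
 * checksum with ip_csum_update()/ip_csum_fold(), i.e. an RFC 1624 style
 * incremental update, which is cheaper on the per-packet fast path. */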
182 
183 static_always_inline void
184 esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
185 {
186  clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
187  udp->length = clib_net_to_host_u16 (len);
188 }
189 
190 static_always_inline u8
191 ext_hdr_is_pre_esp (u8 nexthdr)
192 {
193 #ifdef CLIB_HAVE_VEC128
194  static const u8x16 ext_hdr_types = {
195  IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
196  IP_PROTOCOL_IPV6_ROUTE,
197  IP_PROTOCOL_IPV6_FRAGMENTATION,
198  };
199 
200  return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
201 #else
202  return ((nexthdr == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) ||
203  (nexthdr == IP_PROTOCOL_IPV6_ROUTE) ||
204  (nexthdr == IP_PROTOCOL_IPV6_FRAGMENTATION));
205 #endif
206 }
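/* Editorial note, not part of the original source: the CLIB_HAVE_VEC128
 * branch above splats nexthdr across a 16-byte vector and compares it against
 * the three pre-ESP extension header types in a single operation; any
 * non-zero lane means a match. The scalar fallback expresses the same
 * predicate with plain comparisons. */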
207 
208 static_always_inline u8
209 esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
210 {
211  /* this code assumes that HbH, route and frag headers will appear before
212  other headers; if that is not the case, they will end up encrypted */
213  u8 len = sizeof (ip6_header_t);
214  ip6_ext_header_t *p;
215 
216  /* if the packet doesn't have an ext header */
217  if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
218  {
219  *ext_hdr = NULL;
220  return len;
221  }
222 
223  p = (void *) (ip6 + 1);
224  len += ip6_ext_header_len (p);
225 
226  while (ext_hdr_is_pre_esp (p->next_hdr))
227  {
228  len += ip6_ext_header_len (p);
229  p = ip6_ext_next_header (p);
230  }
231 
232  *ext_hdr = p;
233  return len;
234 }
235 
236 static_always_inline void
237 esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
238  vnet_crypto_op_t * ops, vlib_buffer_t * b[],
239  u16 * nexts, vnet_crypto_op_chunk_t * chunks,
240  u16 drop_next)
241 {
242  u32 n_fail, n_ops = vec_len (ops);
243  vnet_crypto_op_t *op = ops;
244 
245  if (n_ops == 0)
246  return;
247 
248  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
249 
250  while (n_fail)
251  {
252  ASSERT (op - ops < n_ops);
253 
254  if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
255  {
256  u32 bi = op->user_data;
257  b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
258  nexts[bi] = drop_next;
259  n_fail--;
260  }
261  op++;
262  }
263 }
264 
265 static_always_inline void
266 esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
267  vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
268  u16 drop_next)
269 {
270  u32 n_fail, n_ops = vec_len (ops);
271  vnet_crypto_op_t *op = ops;
272 
273  if (n_ops == 0)
274  return;
275 
276  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
277 
278  while (n_fail)
279  {
280  ASSERT (op - ops < n_ops);
281 
282  if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
283  {
284  u32 bi = op->user_data;
285  b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
286  nexts[bi] = drop_next;
287  n_fail--;
288  }
289  op++;
290  }
291 }
292 
293 typedef struct
294 {
295  u32 salt;
296  u64 iv;
297 } __clib_packed esp_gcm_nonce_t;
298 
299 STATIC_ASSERT_SIZEOF (esp_gcm_nonce_t, 12);
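/* Editorial note, not part of the original source: RFC 4106 defines the
 * AES-GCM nonce for ESP as exactly 12 bytes, the 4-byte salt from the SA
 * followed by the 8-byte per-packet IV. The static assert above guards that
 * layout because the packed struct is handed to the crypto engine as the IV
 * buffer. */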
300 
301 static_always_inline u32
302 esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
303  ipsec_sa_t * sa0, vlib_buffer_t * b,
304  vlib_buffer_t * lb, u8 icv_sz, u8 * start,
305  u32 start_len, u16 * n_ch)
306 {
307  vnet_crypto_op_chunk_t *ch;
308  vlib_buffer_t *cb = b;
309  u32 n_chunks = 1;
310  u32 total_len;
311  vec_add2 (ptd->chunks, ch, 1);
312  total_len = ch->len = start_len;
313  ch->src = ch->dst = start;
314  cb = vlib_get_buffer (vm, cb->next_buffer);
315 
316  while (1)
317  {
318  vec_add2 (ptd->chunks, ch, 1);
319  n_chunks += 1;
320  if (lb == cb)
321  total_len += ch->len = cb->current_length - icv_sz;
322  else
323  total_len += ch->len = cb->current_length;
324  ch->src = ch->dst = vlib_buffer_get_current (cb);
325 
326  if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
327  break;
328 
329  cb = vlib_get_buffer (vm, cb->next_buffer);
330  }
331 
332  if (n_ch)
333  *n_ch = n_chunks;
334 
335  return total_len;
336 }
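/* Editorial note, not part of the original source: for chained buffers the
 * crypto op cannot use a single contiguous src/dst, so the helper above
 * appends one vnet_crypto_op_chunk_t per buffer to ptd->chunks. The chunk for
 * the last buffer is shortened by icv_sz so the ICV slot at the tail is left
 * out of the encrypted range. */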
337 
338 static_always_inline u32
339 esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
340  ipsec_sa_t * sa0, vlib_buffer_t * b,
341  vlib_buffer_t * lb, u8 icv_sz, u8 * start,
342  u32 start_len, u8 * digest, u16 * n_ch)
343 {
344  vnet_crypto_op_chunk_t *ch;
345  vlib_buffer_t *cb = b;
346  u32 n_chunks = 1;
347  u32 total_len;
348  vec_add2 (ptd->chunks, ch, 1);
349  total_len = ch->len = start_len;
350  ch->src = start;
351  cb = vlib_get_buffer (vm, cb->next_buffer);
352 
353  while (1)
354  {
355  vec_add2 (ptd->chunks, ch, 1);
356  n_chunks += 1;
357  if (lb == cb)
358  {
359  total_len += ch->len = cb->current_length - icv_sz;
360  if (ipsec_sa_is_set_USE_ESN (sa0))
361  {
362  u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
363  clib_memcpy_fast (digest, &seq_hi, sizeof (seq_hi));
364  ch->len += sizeof (seq_hi);
365  total_len += sizeof (seq_hi);
366  }
367  }
368  else
369  total_len += ch->len = cb->current_length;
370  ch->src = vlib_buffer_get_current (cb);
371 
372  if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
373  break;
374 
375  cb = vlib_get_buffer (vm, cb->next_buffer);
376  }
377 
378  if (n_ch)
379  *n_ch = n_chunks;
380 
381  return total_len;
382 }
383 
384 always_inline void
385 esp_prepare_sync_op (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
386  vnet_crypto_op_t ** crypto_ops,
387  vnet_crypto_op_t ** integ_ops, ipsec_sa_t * sa0,
388  u8 * payload, u16 payload_len, u8 iv_sz, u8 icv_sz,
389  vlib_buffer_t ** bufs, vlib_buffer_t ** b,
390  vlib_buffer_t * lb, u32 hdr_len, esp_header_t * esp,
391  esp_gcm_nonce_t * nonce)
392 {
393  if (sa0->crypto_enc_op_id)
394  {
395  vnet_crypto_op_t *op;
396  vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
397  vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
398 
399  op->src = op->dst = payload;
400  op->key_index = sa0->crypto_key_index;
401  op->len = payload_len - icv_sz;
402  op->user_data = b - bufs;
403 
404  if (ipsec_sa_is_set_IS_AEAD (sa0))
405  {
406  /*
407  * construct the AAD in a scratch space in front
408  * of the IP header.
409  */
410  op->aad = payload - hdr_len - sizeof (esp_aead_t);
411  op->aad_len = esp_aad_fill (op->aad, esp, sa0);
412 
413  op->tag = payload + op->len;
414  op->tag_len = 16;
415 
416  u64 *iv = (u64 *) (payload - iv_sz);
417  nonce->salt = sa0->salt;
418  nonce->iv = *iv = clib_host_to_net_u64 (sa0->gcm_iv_counter++);
419  op->iv = (u8 *) nonce;
420  }
421  else
422  {
423  op->iv = payload - iv_sz;
424  op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
425  }
426 
427  if (lb != b[0])
428  {
429  /* is chained */
430  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
431  op->chunk_index = vec_len (ptd->chunks);
432  op->tag = vlib_buffer_get_tail (lb) - icv_sz;
433  esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz, payload,
434  payload_len, &op->n_chunks);
435  }
436  }
437 
438  if (sa0->integ_op_id)
439  {
440  vnet_crypto_op_t *op;
441  vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
442  vnet_crypto_op_init (op, sa0->integ_op_id);
443  op->src = payload - iv_sz - sizeof (esp_header_t);
444  op->digest = payload + payload_len - icv_sz;
445  op->key_index = sa0->integ_key_index;
446  op->digest_len = icv_sz;
447  op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
448  op->user_data = b - bufs;
449 
450  if (lb != b[0])
451  {
452  /* is chained */
453  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
454  op->chunk_index = vec_len (ptd->chunks);
455  op->digest = vlib_buffer_get_tail (lb) - icv_sz;
456 
457  esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
458  payload - iv_sz - sizeof (esp_header_t),
459  payload_len + iv_sz +
460  sizeof (esp_header_t), op->digest,
461  &op->n_chunks);
462  }
463  else if (ipsec_sa_is_set_USE_ESN (sa0))
464  {
465  u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
466  clib_memcpy_fast (op->digest, &seq_hi, sizeof (seq_hi));
467  op->len += sizeof (seq_hi);
468  }
469  }
470 }
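/* Editorial note, not part of the original source: a sketch of the layout
 * esp_prepare_sync_op() relies on, built by the caller in front of the
 * payload (tunnel mode with UDP encapsulation shown):
 *
 *   [ IP ][ UDP ][ ESP ][ IV ][ payload + padding + footer + ICV ]
 *   |<-------- hdr_len ------->|<--------- payload_len ---------->|
 *
 * The cipher op covers payload_len - icv_sz bytes starting at 'payload'; the
 * integrity op additionally covers the ESP header and IV, and for ESN SAs the
 * high 32 bits of the sequence number are staged in the ICV slot and counted
 * in the authenticated length, so they are authenticated without being
 * transmitted. */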
471 
472 static_always_inline int
473 esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
474  vnet_crypto_async_frame_t ** async_frame,
475  ipsec_sa_t * sa, vlib_buffer_t * b,
476  esp_header_t * esp, u8 * payload, u32 payload_len,
477  u8 iv_sz, u8 icv_sz, u32 bi, u16 next, u32 hdr_len,
478  u16 async_next, vlib_buffer_t * lb)
479 {
480  esp_post_data_t *post = esp_post_data (b);
481  u8 *tag, *iv, *aad = 0;
482  u8 flag = 0;
483  u32 key_index;
484  i16 crypto_start_offset, integ_start_offset = 0;
485  u16 crypto_total_len, integ_total_len;
486 
487  post->next_index = next;
488 
489  /* crypto */
490  crypto_start_offset = payload - b->data;
491  crypto_total_len = integ_total_len = payload_len - icv_sz;
492  tag = payload + crypto_total_len;
493 
494  /* aead */
495  if (ipsec_sa_is_set_IS_AEAD (sa))
496  {
497  esp_gcm_nonce_t *nonce;
498  u64 *pkt_iv = (u64 *) (payload - iv_sz);
499 
500  aad = payload - hdr_len - sizeof (esp_aead_t);
501  esp_aad_fill (aad, esp, sa);
502  nonce = (esp_gcm_nonce_t *) (aad - sizeof (*nonce));
503  nonce->salt = sa->salt;
504  nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->gcm_iv_counter++);
505  iv = (u8 *) nonce;
506  key_index = sa->crypto_key_index;
507 
508  if (lb != b)
509  {
510  /* chain */
511  flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
512  tag = vlib_buffer_get_tail (lb) - icv_sz;
513  crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb,
514  icv_sz, payload,
515  payload_len, 0);
516  }
517  goto out;
518  }
519 
520  /* cipher then hash */
521  iv = payload - iv_sz;
522  integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
523  integ_total_len += iv_sz + sizeof (esp_header_t);
524  flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
525  key_index = sa->linked_key_index;
526 
527  if (b != lb)
528  {
529  flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
530  crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb,
531  icv_sz, payload,
532  payload_len, 0);
533  tag = vlib_buffer_get_tail (lb) - icv_sz;
534  integ_total_len = esp_encrypt_chain_integ (vm, ptd, sa, b, lb, icv_sz,
535  payload - iv_sz -
536  sizeof (esp_header_t),
537  payload_len + iv_sz +
538  sizeof (esp_header_t),
539  tag, 0);
540  }
541  else if (ipsec_sa_is_set_USE_ESN (sa) && !ipsec_sa_is_set_IS_AEAD (sa))
542  {
543  u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
544  clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
545  integ_total_len += sizeof (seq_hi);
546  }
547 
548 out:
549  return vnet_crypto_async_add_to_frame (vm, async_frame, key_index,
550  crypto_total_len,
551  integ_total_len - crypto_total_len,
552  crypto_start_offset,
553  integ_start_offset, bi, async_next,
554  iv, tag, aad, flag);
555 }
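/* Editorial note, not part of the original source: esp_prepare_async_frame()
 * mirrors esp_prepare_sync_op() but, instead of queueing vnet_crypto_op_t
 * vectors, it packs the same offsets, lengths, IV/tag/AAD pointers and the
 * INIT_IV / CHAINED_BUFFERS flags into one element of a
 * vnet_crypto_async_frame_t. For non-AEAD SAs it uses sa->linked_key_index so
 * the engine runs cipher and integrity from the single frame element. */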
556 
557 always_inline uword
558 esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
559  vlib_frame_t * frame, int is_ip6, int is_tun,
560  u16 async_next)
561 {
562  ipsec_main_t *im = &ipsec_main;
563  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
564  u32 *from = vlib_frame_vector_args (frame);
565  u32 n_left = frame->n_vectors;
566  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
567  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
568  esp_gcm_nonce_t nonces[VLIB_FRAME_SIZE], *nonce = nonces;
569  u32 thread_index = vm->thread_index;
570  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
571  u32 current_sa_index = ~0, current_sa_packets = 0;
572  u32 current_sa_bytes = 0, spi = 0;
573  u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
574  ipsec_sa_t *sa0 = 0;
575  vlib_buffer_t *lb;
576  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
577  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
578  vnet_crypto_async_frame_t *async_frame = 0;
579  int is_async = im->async_mode;
580  vnet_crypto_async_op_id_t last_async_op = ~0;
581  u16 drop_next = (is_ip6 ? ESP_ENCRYPT_NEXT_DROP6 : ESP_ENCRYPT_NEXT_DROP4);
582  u16 n_async_drop = 0;
583 
584  vlib_get_buffers (vm, from, b, n_left);
585  if (!is_async)
586  {
587  vec_reset_length (ptd->crypto_ops);
588  vec_reset_length (ptd->integ_ops);
589  vec_reset_length (ptd->chained_crypto_ops);
590  vec_reset_length (ptd->chained_integ_ops);
591  }
592  vec_reset_length (ptd->chunks);
593 
594  while (n_left > 0)
595  {
596  u32 sa_index0;
597  dpo_id_t *dpo;
598  esp_header_t *esp;
599  u8 *payload, *next_hdr_ptr;
600  u16 payload_len, payload_len_total, n_bufs;
601  u32 hdr_len;
602 
603  if (n_left > 2)
604  {
605  u8 *p;
606  vlib_prefetch_buffer_header (b[2], LOAD);
607  p = vlib_buffer_get_current (b[1]);
608  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
609  p -= CLIB_CACHE_LINE_BYTES;
610  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
611  /* speculate that the trailer goes in the first buffer */
612  CLIB_PREFETCH (vlib_buffer_get_tail (b[1]),
613  CLIB_CACHE_LINE_BYTES, LOAD);
614  }
615 
616  if (is_tun)
617  {
618  /* we are on a ipsec tunnel's feature arc */
619  vnet_buffer (b[0])->ipsec.sad_index =
620  sa_index0 = ipsec_tun_protect_get_sa_out
621  (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
622  }
623  else
624  sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;
625 
626  if (sa_index0 != current_sa_index)
627  {
628  if (current_sa_packets)
629  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
630  current_sa_index,
631  current_sa_packets,
632  current_sa_bytes);
633  current_sa_packets = current_sa_bytes = 0;
634 
635  sa0 = pool_elt_at_index (im->sad, sa_index0);
636 
637  /* fetch the second cacheline ASAP */
638  CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD);
639 
640  current_sa_index = sa_index0;
641  spi = clib_net_to_host_u32 (sa0->spi);
642  esp_align = sa0->esp_block_align;
643  icv_sz = sa0->integ_icv_size;
644  iv_sz = sa0->crypto_iv_size;
645 
646  /* submit frame when op_id is different than the old one */
647  if (is_async && sa0->crypto_async_enc_op_id != last_async_op)
648  {
649  if (async_frame && async_frame->n_elts)
650  {
651  if (vnet_crypto_async_submit_open_frame (vm, async_frame))
652  esp_async_recycle_failed_submit (async_frame, b, from,
653  nexts, &n_async_drop,
654  drop_next,
655  ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
656  }
657  async_frame =
658  vnet_crypto_async_get_frame (vm, sa0->crypto_async_enc_op_id);
659  last_async_op = sa0->crypto_async_enc_op_id;
660  }
661  }
662 
663  if (PREDICT_FALSE (~0 == sa0->encrypt_thread_index))
664  {
665  /* this is the first packet to use this SA, claim the SA
666  * for this thread. this could happen simultaneously on
667  * another thread */
668  clib_atomic_cmp_and_swap (&sa0->encrypt_thread_index, ~0,
669  ipsec_sa_assign_thread (thread_index));
670  }
671 
672  if (PREDICT_FALSE (thread_index != sa0->encrypt_thread_index))
673  {
674  esp_set_next_index (is_async, from, nexts, from[b - bufs],
675  &n_async_drop,
676  (is_ip6 ? ESP_ENCRYPT_NEXT_HANDOFF6 :
677  ESP_ENCRYPT_NEXT_HANDOFF4), next);
678  goto trace;
679  }
680 
681  lb = b[0];
682  n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
683  if (n_bufs == 0)
684  {
685  b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
686  esp_set_next_index (is_async, from, nexts, from[b - bufs],
687  &n_async_drop, drop_next, next);
688  goto trace;
689  }
690 
691  if (n_bufs > 1)
692  {
693  /* find last buffer in the chain */
694  while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
695  lb = vlib_get_buffer (vm, lb->next_buffer);
696  }
697 
698  if (PREDICT_FALSE (esp_seq_advance (sa0)))
699  {
700  b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
701  esp_set_next_index (is_async, from, nexts, from[b - bufs],
702  &n_async_drop, drop_next, next);
703  goto trace;
704  }
705 
706  /* space for IV */
707  hdr_len = iv_sz;
708 
709  if (ipsec_sa_is_set_IS_TUNNEL (sa0))
710  {
711  payload = vlib_buffer_get_current (b[0]);
712  next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
713  next, node,
714  buffer_data_size,
715  vlib_buffer_length_in_chain
716  (vm, b[0]));
717  if (!next_hdr_ptr)
718  {
719  b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
720  esp_set_next_index (is_async, from, nexts, from[b - bufs],
721  &n_async_drop, drop_next, next);
722  goto trace;
723  }
724  b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
725  payload_len = b[0]->current_length;
726  payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);
727 
728  /* ESP header */
729  hdr_len += sizeof (*esp);
730  esp = (esp_header_t *) (payload - hdr_len);
731 
732  /* optional UDP header */
733  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
734  {
735  hdr_len += sizeof (udp_header_t);
736  esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
737  payload_len_total + hdr_len);
738  }
739 
740  /* IP header */
741  if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
742  {
743  ip6_header_t *ip6;
744  u16 len = sizeof (ip6_header_t);
745  hdr_len += len;
746  ip6 = (ip6_header_t *) (payload - hdr_len);
747  clib_memcpy_fast (ip6, &sa0->ip6_hdr, sizeof (ip6_header_t));
748 
749  if (is_ip6)
750  {
751  *next_hdr_ptr = IP_PROTOCOL_IPV6;
752  tunnel_encap_fixup_6o6 (sa0->tunnel_flags,
753  (const ip6_header_t *) payload,
754  ip6);
755  }
756  else
757  {
758  *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
759  tunnel_encap_fixup_4o6 (sa0->tunnel_flags,
760  (const ip4_header_t *) payload,
761  ip6);
762  }
763  len = payload_len_total + hdr_len - len;
764  ip6->payload_length = clib_net_to_host_u16 (len);
765  }
766  else
767  {
768  ip4_header_t *ip4;
769  u16 len = sizeof (ip4_header_t);
770  hdr_len += len;
771  ip4 = (ip4_header_t *) (payload - hdr_len);
772  clib_memcpy_fast (ip4, &sa0->ip4_hdr, sizeof (ip4_header_t));
773 
774  if (is_ip6)
775  {
776  *next_hdr_ptr = IP_PROTOCOL_IPV6;
777  tunnel_encap_fixup_6o4_w_chksum (sa0->tunnel_flags,
778  (const ip6_header_t *)
779  payload, ip4);
780  }
781  else
782  {
783  *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
784  tunnel_encap_fixup_4o4_w_chksum (sa0->tunnel_flags,
785  (const ip4_header_t *)
786  payload, ip4);
787  }
788  len = payload_len_total + hdr_len;
789  esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
790  }
791 
792  dpo = &sa0->dpo;
793  if (!is_tun)
794  {
795  next[0] = dpo->dpoi_next_node;
796  vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
797  }
798  else
799  next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
800  }
801  else /* transport mode */
802  {
803  u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
804  ip6_ext_header_t *ext_hdr;
805  udp_header_t *udp = 0;
806  u16 udp_len = 0;
807  u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);
808 
809  ip_len = is_ip6 ?
810  esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
811  ip4_header_bytes ((ip4_header_t *) old_ip_hdr);
812 
813  vlib_buffer_advance (b[0], ip_len);
814  payload = vlib_buffer_get_current (b[0]);
815  next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
816  next, node,
817  buffer_data_size,
818  vlib_buffer_length_in_chain
819  (vm, b[0]));
820  if (!next_hdr_ptr)
821  {
822  esp_set_next_index (is_async, from, nexts, from[b - bufs],
823  &n_async_drop, drop_next, next);
824  goto trace;
825  }
826 
827  b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
828  payload_len = b[0]->current_length;
829  payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);
830 
831  /* ESP header */
832  hdr_len += sizeof (*esp);
833  esp = (esp_header_t *) (payload - hdr_len);
834 
835  /* optional UDP header */
836  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
837  {
838  hdr_len += sizeof (udp_header_t);
839  udp = (udp_header_t *) (payload - hdr_len);
840  }
841 
842  /* IP header */
843  hdr_len += ip_len;
844  ip_hdr = payload - hdr_len;
845 
846  /* L2 header */
847  if (!is_tun)
848  {
849  l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
850  hdr_len += l2_len;
851  l2_hdr = payload - hdr_len;
852 
853  /* copy l2 and ip header */
854  clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
855  }
856  else
857  l2_len = 0;
858 
859  if (is_ip6)
860  {
861  ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
862  if (PREDICT_TRUE (NULL == ext_hdr))
863  {
864  *next_hdr_ptr = ip6->protocol;
865  ip6->protocol = IP_PROTOCOL_IPSEC_ESP;
866  }
867  else
868  {
869  *next_hdr_ptr = ext_hdr->next_hdr;
870  ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP;
871  }
872  ip6->payload_length =
873  clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len -
874  sizeof (ip6_header_t));
875  }
876  else
877  {
878  u16 len;
879  ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
880  *next_hdr_ptr = ip4->protocol;
881  len = payload_len_total + hdr_len - l2_len;
882  if (udp)
883  {
884  esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1);
885  udp_len = len - ip_len;
886  }
887  else
888  esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);
889  }
890 
891  clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);
892 
893  if (udp)
894  {
895  esp_fill_udp_hdr (sa0, udp, udp_len);
896  }
897 
898  next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
899  }
900 
901  if (lb != b[0])
902  {
903  crypto_ops = &ptd->chained_crypto_ops;
904  integ_ops = &ptd->chained_integ_ops;
905  }
906  else
907  {
908  crypto_ops = &ptd->crypto_ops;
909  integ_ops = &ptd->integ_ops;
910  }
911 
912  esp->spi = spi;
913  esp->seq = clib_net_to_host_u32 (sa0->seq);
914 
915  if (is_async)
916  {
917  if (PREDICT_FALSE (sa0->crypto_async_enc_op_id == 0))
918  {
919  esp_set_next_index (is_async, from, nexts, from[b - bufs],
920  &n_async_drop, drop_next, next);
921  goto trace;
922  }
923 
924  if (esp_prepare_async_frame (vm, ptd, &async_frame, sa0, b[0], esp,
925  payload, payload_len, iv_sz,
926  icv_sz, from[b - bufs], next[0],
927  hdr_len, async_next, lb))
928  {
929  /* The failure can only be caused by submission; free the whole frame. */
930  if (async_frame->n_elts)
931  esp_async_recycle_failed_submit (async_frame, b, from, nexts,
932  &n_async_drop, drop_next,
933  ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
934  b[0]->error = ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR;
935  esp_set_next_index (1, from, nexts, from[b - bufs],
936  &n_async_drop, drop_next, next);
937  goto trace;
938  }
939  }
940  else
941  {
942  esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
943  payload_len, iv_sz, icv_sz, bufs, b, lb,
944  hdr_len, esp, nonce++);
945  }
946 
947  vlib_buffer_advance (b[0], 0LL - hdr_len);
948 
949  current_sa_packets += 1;
950  current_sa_bytes += payload_len_total;
951 
952  trace:
953  if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
954  {
955  esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
956  sizeof (*tr));
957  tr->sa_index = sa_index0;
958  tr->spi = sa0->spi;
959  tr->seq = sa0->seq;
960  tr->sa_seq_hi = sa0->seq_hi;
961  tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
962  tr->crypto_alg = sa0->crypto_alg;
963  tr->integ_alg = sa0->integ_alg;
964  }
965  /* next */
966  n_left -= 1;
967  next += 1;
968  b += 1;
969  }
970 
971  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
972  current_sa_index, current_sa_packets,
973  current_sa_bytes);
974  if (!is_async)
975  {
976  esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts, drop_next);
977  esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
978  ptd->chunks, drop_next);
979 
980  esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts, drop_next);
981  esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
982  ptd->chunks, drop_next);
983  }
984  else
985  {
986  if (async_frame && async_frame->n_elts)
987  {
988  if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
989  esp_async_recycle_failed_submit (async_frame, b, from, nexts,
990  &n_async_drop, drop_next,
991  ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
992  }
993  vlib_node_increment_counter (vm, node->node_index,
994  ESP_ENCRYPT_ERROR_RX_PKTS,
995  frame->n_vectors);
996  if (n_async_drop)
997  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
998 
999  return frame->n_vectors;
1000  }
1001 
1002  vlib_node_increment_counter (vm, node->node_index,
1003  ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);
1004 
1005  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
1006  return frame->n_vectors;
1007 }
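/* Editorial note, not part of the original source: esp_encrypt_inline() is a
 * two-pass design. The per-packet loop only rewrites headers and prepares
 * crypto work; the actual encryption happens after the loop, either by
 * processing the accumulated op vectors with esp_process_ops() and
 * esp_process_chained_ops(), or by submitting the open async frame, whose
 * completions re-enter the graph at the matching esp*-encrypt-post node. */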
1008 
1009 always_inline uword
1010 esp_encrypt_post_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
1011  vlib_frame_t * frame)
1012 {
1013  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
1014  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
1015  u32 *from = vlib_frame_vector_args (frame);
1016  u32 n_left = frame->n_vectors;
1017 
1018  vlib_get_buffers (vm, from, b, n_left);
1019 
1020  if (n_left >= 4)
1021  {
1022  vlib_prefetch_buffer_header (b[0], LOAD);
1023  vlib_prefetch_buffer_header (b[1], LOAD);
1024  vlib_prefetch_buffer_header (b[2], LOAD);
1025  vlib_prefetch_buffer_header (b[3], LOAD);
1026  }
1027 
1028  while (n_left > 8)
1029  {
1030  vlib_prefetch_buffer_header (b[4], LOAD);
1031  vlib_prefetch_buffer_header (b[5], LOAD);
1032  vlib_prefetch_buffer_header (b[6], LOAD);
1033  vlib_prefetch_buffer_header (b[7], LOAD);
1034 
1035  next[0] = (esp_post_data (b[0]))->next_index;
1036  next[1] = (esp_post_data (b[1]))->next_index;
1037  next[2] = (esp_post_data (b[2]))->next_index;
1038  next[3] = (esp_post_data (b[3]))->next_index;
1039 
1040  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
1041  {
1042  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
1043  {
1044  esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
1045  sizeof (*tr));
1046  tr->next_index = next[0];
1047  }
1048  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
1049  {
1050  esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[1],
1051  sizeof (*tr));
1052  tr->next_index = next[1];
1053  }
1054  if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
1055  {
1056  esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[2],
1057  sizeof (*tr));
1058  tr->next_index = next[2];
1059  }
1060  if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
1061  {
1062  esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[3],
1063  sizeof (*tr));
1064  tr->next_index = next[3];
1065  }
1066  }
1067 
1068  b += 4;
1069  next += 4;
1070  n_left -= 4;
1071  }
1072 
1073  while (n_left > 0)
1074  {
1075  next[0] = (esp_post_data (b[0]))->next_index;
1076  if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
1077  {
1078  esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
1079  sizeof (*tr));
1080  tr->next_index = next[0];
1081  }
1082 
1083  b += 1;
1084  next += 1;
1085  n_left -= 1;
1086  }
1087 
1088  vlib_node_increment_counter (vm, node->node_index,
1089  ESP_ENCRYPT_ERROR_POST_RX_PKTS,
1090  frame->n_vectors);
1091  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
1092  return frame->n_vectors;
1093 }
1094 
1095 VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
1096  vlib_node_runtime_t * node,
1097  vlib_frame_t * from_frame)
1098 {
1099  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 0,
1100  esp_encrypt_async_next.esp4_post_next);
1101 }
1102 
1103 /* *INDENT-OFF* */
1105  .name = "esp4-encrypt",
1106  .vector_size = sizeof (u32),
1107  .format_trace = format_esp_encrypt_trace,
1108  .type = VLIB_NODE_TYPE_INTERNAL,
1109 
1110  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
1111  .error_strings = esp_encrypt_error_strings,
1112 
1113  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
1114  .next_nodes = {
1115  [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
1116  [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
1117  [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
1118  [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
1119  [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output"
1120  },
1121 };
1122 /* *INDENT-ON* */
1123 
1124 VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm,
1125  vlib_node_runtime_t * node,
1126  vlib_frame_t * from_frame)
1127 {
1128  return esp_encrypt_post_inline (vm, node, from_frame);
1129 }
1130 
1131 /* *INDENT-OFF* */
1133  .name = "esp4-encrypt-post",
1134  .vector_size = sizeof (u32),
1135  .format_trace = format_esp_post_encrypt_trace,
1137  .sibling_of = "esp4-encrypt",
1138 
1139  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
1140  .error_strings = esp_encrypt_error_strings,
1141 };
1142 /* *INDENT-ON* */
1143 
1144 VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
1145  vlib_node_runtime_t * node,
1146  vlib_frame_t * from_frame)
1147 {
1148  return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 0,
1149  esp_encrypt_async_next.esp6_post_next);
1150 }
1151 
1152 /* *INDENT-OFF* */
1154  .name = "esp6-encrypt",
1155  .vector_size = sizeof (u32),
1156  .format_trace = format_esp_encrypt_trace,
1158  .sibling_of = "esp4-encrypt",
1159 
1160  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
1161  .error_strings = esp_encrypt_error_strings,
1162 };
1163 /* *INDENT-ON* */
1164 
1165 VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm,
1166  vlib_node_runtime_t * node,
1167  vlib_frame_t * from_frame)
1168 {
1169  return esp_encrypt_post_inline (vm, node, from_frame);
1170 }
1171 
1172 /* *INDENT-OFF* */
1174  .name = "esp6-encrypt-post",
1175  .vector_size = sizeof (u32),
1176  .format_trace = format_esp_post_encrypt_trace,
1178  .sibling_of = "esp4-encrypt",
1179 
1180  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
1181  .error_strings = esp_encrypt_error_strings,
1182 };
1183 /* *INDENT-ON* */
1184 
1185 VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
1186  vlib_node_runtime_t * node,
1187  vlib_frame_t * from_frame)
1188 {
1189  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 1,
1190  esp_encrypt_async_next.esp4_tun_post_next);
1191 }
1192 
1193 /* *INDENT-OFF* */
1195  .name = "esp4-encrypt-tun",
1196  .vector_size = sizeof (u32),
1197  .format_trace = format_esp_encrypt_trace,
1198  .type = VLIB_NODE_TYPE_INTERNAL,
1199 
1200  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
1201  .error_strings = esp_encrypt_error_strings,
1202 
1203  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
1204  .next_nodes = {
1205  [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
1206  [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
1207  [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
1208  [ESP_ENCRYPT_NEXT_HANDOFF6] = "error-drop",
1209  [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
1210  },
1211 };
1212 
1213 VLIB_NODE_FN (esp4_encrypt_tun_post_node) (vlib_main_t * vm,
1214  vlib_node_runtime_t * node,
1215  vlib_frame_t * from_frame)
1216 {
1217  return esp_encrypt_post_inline (vm, node, from_frame);
1218 }
1219 
1220 /* *INDENT-OFF* */
1222  .name = "esp4-encrypt-tun-post",
1223  .vector_size = sizeof (u32),
1224  .format_trace = format_esp_post_encrypt_trace,
1226  .sibling_of = "esp4-encrypt-tun",
1227 
1228  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
1229  .error_strings = esp_encrypt_error_strings,
1230 };
1231 /* *INDENT-ON* */
1232 
1233 VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
1234  vlib_node_runtime_t * node,
1235  vlib_frame_t * from_frame)
1236 {
1237  return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 1,
1238  esp_encrypt_async_next.esp6_tun_post_next);
1239 }
1240 
1241 /* *INDENT-OFF* */
1243  .name = "esp6-encrypt-tun",
1244  .vector_size = sizeof (u32),
1245  .format_trace = format_esp_encrypt_trace,
1246  .type = VLIB_NODE_TYPE_INTERNAL,
1247 
1248  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
1249  .error_strings = esp_encrypt_error_strings,
1250 
1251  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
1252  .next_nodes = {
1253  [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
1254  [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
1255  [ESP_ENCRYPT_NEXT_HANDOFF4] = "error-drop",
1256  [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
1257  [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
1258  },
1259 };
1260 
1261 /* *INDENT-ON* */
1262 
1263 VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm,
1264  vlib_node_runtime_t * node,
1265  vlib_frame_t * from_frame)
1266 {
1267  return esp_encrypt_post_inline (vm, node, from_frame);
1268 }
1269 
1270 /* *INDENT-OFF* */
1272  .name = "esp6-encrypt-tun-post",
1273  .vector_size = sizeof (u32),
1274  .format_trace = format_esp_post_encrypt_trace,
1276  .sibling_of = "esp6-encrypt-tun",
1277 
1278  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
1279  .error_strings = esp_encrypt_error_strings,
1280 };
1281 /* *INDENT-ON* */
1282 
1283 typedef struct
1284 {
1285   u32 sa_index;
1286 } esp_no_crypto_trace_t;
1287 
1288 static u8 *
1289 format_esp_no_crypto_trace (u8 * s, va_list * args)
1290 {
1291  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1292  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1293  esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *);
1294 
1295  s = format (s, "esp-no-crypto: sa-index %u", t->sa_index);
1296 
1297  return s;
1298 }
1299 
1300 enum
1301 {
1302   ESP_NO_CRYPTO_NEXT_DROP,
1303   ESP_NO_CRYPTO_N_NEXT,
1304 };
1305 
1306 enum
1307 {
1308   ESP_NO_CRYPTO_ERROR_RX_PKTS,
1309 };
1310 
1312  "Outbound ESP packets received",
1313 };
1314 
1315 static uword
1316 esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
1317  vlib_frame_t * frame)
1318 {
1319  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
1320  u32 *from = vlib_frame_vector_args (frame);
1321  u32 n_left = frame->n_vectors;
1322 
1323  vlib_get_buffers (vm, from, b, n_left);
1324 
1325  while (n_left > 0)
1326  {
1327  u32 sa_index0;
1328 
1329  /* packets are always going to be dropped, but get the sa_index */
1330  sa_index0 = ipsec_tun_protect_get_sa_out
1331  (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
1332 
1333  if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
1334  {
1335  esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0],
1336  sizeof (*tr));
1337  tr->sa_index = sa_index0;
1338  }
1339 
1340  n_left -= 1;
1341  b += 1;
1342  }
1343 
1344  vlib_node_increment_counter (vm, node->node_index,
1345  ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors);
1346 
1347  vlib_buffer_enqueue_to_single_next (vm, node, from,
1348  ESP_NO_CRYPTO_NEXT_DROP,
1349  frame->n_vectors);
1350 
1351  return frame->n_vectors;
1352 }
1353 
1354 VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm,
1355  vlib_node_runtime_t * node,
1356  vlib_frame_t * from_frame)
1357 {
1358  return esp_no_crypto_inline (vm, node, from_frame);
1359 }
1360 
1361 /* *INDENT-OFF* */
1362 VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) =
1363 {
1364  .name = "esp4-no-crypto",
1365  .vector_size = sizeof (u32),
1366  .format_trace = format_esp_no_crypto_trace,
1367  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
1368  .error_strings = esp_no_crypto_error_strings,
1369  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
1370  .next_nodes = {
1371  [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop",
1372  },
1373 };
1374 
1375 VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm,
1376  vlib_node_runtime_t * node,
1377  vlib_frame_t * from_frame)
1378 {
1379  return esp_no_crypto_inline (vm, node, from_frame);
1380 }
1381 
1382 /* *INDENT-OFF* */
1383 VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
1384 {
1385  .name = "esp6-no-crypto",
1386  .vector_size = sizeof (u32),
1387  .format_trace = format_esp_no_crypto_trace,
1388  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
1389  .error_strings = esp_no_crypto_error_strings,
1390  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
1391  .next_nodes = {
1392  [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop",
1393  },
1394 };
1395 /* *INDENT-ON* */
1396 
1397 /*
1398  * fd.io coding-style-patch-verification: ON
1399  *
1400  * Local Variables:
1401  * eval: (c-set-style "gnu")
1402  * End:
1403  */