/*
 * esp_decrypt.c : IPSec ESP decrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/l2/l2_input.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
#include <vnet/ipsec/ipsec_tun.h>

#include <vnet/gre/packet.h>
#define foreach_esp_decrypt_next                                              \
  _ (DROP, "error-drop")                                                      \
  _ (IP4_INPUT, "ip4-input-no-checksum")                                      \
  _ (IP6_INPUT, "ip6-input")                                                  \
  _ (L2_INPUT, "l2-input")                                                    \
  _ (MPLS_INPUT, "mpls-input")                                                \
  _ (HANDOFF, "handoff")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;

#define foreach_esp_decrypt_post_next                                         \
  _ (DROP, "error-drop")                                                      \
  _ (IP4_INPUT, "ip4-input-no-checksum")                                      \
  _ (IP6_INPUT, "ip6-input")                                                  \
  _ (MPLS_INPUT, "mpls-input")                                                \
  _ (L2_INPUT, "l2-input")

#define _(v, s) ESP_DECRYPT_POST_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_post_next
#undef _
    ESP_DECRYPT_POST_N_NEXT,
} esp_decrypt_post_next_t;

#define foreach_esp_decrypt_error                                             \
  _ (RX_PKTS, "ESP pkts received")                                            \
  _ (RX_POST_PKTS, "ESP-POST pkts received")                                  \
  _ (HANDOFF, "hand-off")                                                     \
  _ (DECRYPTION_FAILED, "ESP decryption failed")                              \
  _ (INTEG_ERROR, "Integrity check failed")                                   \
  _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")             \
  _ (REPLAY, "SA replayed packet")                                            \
  _ (RUNT, "undersized packet")                                               \
  _ (NO_BUFFERS, "no buffers (packet dropped)")                               \
  _ (OVERSIZED_HEADER, "buffer with oversized header (dropped)")              \
  _ (NO_TAIL_SPACE, "not enough buffer tail space (dropped)")                 \
  _ (TUN_NO_PROTO, "no tunnel protocol")                                      \
  _ (UNSUP_PAYLOAD, "unsupported payload")

typedef enum
{
#define _(sym, str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
    ESP_DECRYPT_N_ERROR,
} esp_decrypt_error_t;

static char *esp_decrypt_error_strings[] = {
#define _(sym, string) string,
  foreach_esp_decrypt_error
#undef _
};

typedef struct
{
  u32 seq;
  u32 sa_seq;
  u32 sa_seq_hi;
  u32 pkt_seq_hi;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;

/* The number of bytes in the high (ESN) sequence number */
#define N_HI_ESN_BYTES 4

/* packet trace format function */
static u8 *
format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);

  s = format (s,
              "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u "
              "pkt-seq-hi %u",
              format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
              t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi, t->pkt_seq_hi);
  return s;
}

#define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2)
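
/* Run a vector of synchronous crypto ops and walk the failures: a bad-HMAC
 * status maps to the caller-supplied error 'e', anything else to a generic
 * crypto-engine error, and the affected packet is sent to drop. */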
120 
123  vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
124  int e)
125 {
126  vnet_crypto_op_t *op = ops;
127  u32 n_fail, n_ops = vec_len (ops);
128 
129  if (n_ops == 0)
130  return;
131 
132  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
133 
134  while (n_fail)
135  {
136  ASSERT (op - ops < n_ops);
137  if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
138  {
139  u32 err, bi = op->user_data;
140  if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
141  err = e;
142  else
143  err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
144  b[bi]->error = node->errors[err];
145  nexts[bi] = ESP_DECRYPT_NEXT_DROP;
146  n_fail--;
147  }
148  op++;
149  }
150 }
151 
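/* Same failure sweep as esp_process_ops, but for chained-buffer ops whose
 * data lives in a separate chunk vector. */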
static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vnet_crypto_op_t * ops, vlib_buffer_t * b[],
                         u16 * nexts, vnet_crypto_op_chunk_t * chunks, int e)
{

  vnet_crypto_op_t *op = ops;
  u32 n_fail, n_ops = vec_len (ops);

  if (PREDICT_TRUE (n_ops == 0))
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 err, bi = op->user_data;
          if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
            err = e;
          else
            err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
          b[bi]->error = node->errors[err];
          nexts[bi] = ESP_DECRYPT_NEXT_DROP;
          n_fail--;
        }
      op++;
    }
}

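/* Trim 'tail' bytes (padding, footer and ICV) from the end of a possibly
 * chained buffer, freeing the last buffer when the tail spans two buffers. */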
always_inline void
esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
                 u16 tail)
{
  vlib_buffer_t *before_last = b;

  if (last->current_length > tail)
    {
      last->current_length -= tail;
      return;
    }
  ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      before_last = b;
      b = vlib_get_buffer (vm, b->next_buffer);
    }
  before_last->current_length -= tail - last->current_length;
  vlib_buffer_free_one (vm, before_last->next_buffer);
  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
}

/* ICV is split across the last two buffers, so move it to the last buffer
   and return a pointer to it */
static_always_inline u8 *
esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
              esp_decrypt_packet_data_t * pd,
              esp_decrypt_packet_data2_t * pd2, u16 icv_sz, u16 * dif)
{
  vlib_buffer_t *before_last, *bp;
  u16 last_sz = pd2->lb->current_length;
  u16 first_sz = icv_sz - last_sz;

  bp = before_last = first;
  while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      before_last = bp;
      bp = vlib_get_buffer (vm, bp->next_buffer);
    }

  u8 *lb_curr = vlib_buffer_get_current (pd2->lb);
  memmove (lb_curr + first_sz, lb_curr, last_sz);
  clib_memcpy_fast (lb_curr, vlib_buffer_get_tail (before_last) - first_sz,
                    first_sz);
  before_last->current_length -= first_sz;
  if (before_last == first)
    pd->current_length -= first_sz;
  clib_memset (vlib_buffer_get_tail (before_last), 0, first_sz);
  if (dif)
    dif[0] = first_sz;
  pd2->lb = before_last;
  pd2->icv_removed = 1;
  pd2->free_buffer_index = before_last->next_buffer;
  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  return lb_curr;
}
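
/* For ESN SAs, splice the high 32 bits of the sequence number in after the
 * ciphertext so the ICV check covers them; returns 0 for non-ESN SAs and
 * N_HI_ESN_BYTES otherwise. */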
static_always_inline u16
esp_insert_esn (vlib_main_t *vm, ipsec_sa_t *sa, esp_decrypt_packet_data_t *pd,
                esp_decrypt_packet_data2_t *pd2, u32 *data_len, u8 **digest,
                u16 *len, vlib_buffer_t *b, u8 *payload)
{
  if (!ipsec_sa_is_set_USE_ESN (sa))
    return 0;
  /* shift ICV by 4 bytes to insert ESN */
  u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
  u8 tmp[ESP_MAX_ICV_SIZE];

  if (pd2->icv_removed)
    {
      u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
      if (space_left >= N_HI_ESN_BYTES)
        {
          clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi,
                            N_HI_ESN_BYTES);
          *data_len += N_HI_ESN_BYTES;
        }
      else
        return N_HI_ESN_BYTES;

      len[0] = b->current_length;
    }
  else
    {
      clib_memcpy_fast (tmp, payload + len[0], ESP_MAX_ICV_SIZE);
      clib_memcpy_fast (payload + len[0], &seq_hi, N_HI_ESN_BYTES);
      clib_memcpy_fast (payload + len[0] + N_HI_ESN_BYTES, tmp,
                        ESP_MAX_ICV_SIZE);
      *data_len += N_HI_ESN_BYTES;
      *digest += N_HI_ESN_BYTES;
    }
  return N_HI_ESN_BYTES;
}

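/* Combine esp_move_icv with ESN handling: reassemble a split ICV into the
 * last buffer and append the ESN high bits, flagging (via extra_esn) the
 * case where they had to go into the unlinked buffer instead. */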
static_always_inline u8 *
esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
                  esp_decrypt_packet_data_t * pd,
                  esp_decrypt_packet_data2_t * pd2, u16 icv_sz,
                  ipsec_sa_t * sa, u8 * extra_esn, u32 * len)
{
  u16 dif = 0;
  u8 *digest = esp_move_icv (vm, first, pd, pd2, icv_sz, &dif);
  if (dif)
    *len -= dif;

  if (ipsec_sa_is_set_USE_ESN (sa))
    {
      u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
      u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);

      if (space_left >= N_HI_ESN_BYTES)
        {
          clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi,
                            N_HI_ESN_BYTES);
          *len += N_HI_ESN_BYTES;
        }
      else
        {
          /* no space for ESN at the tail, use the next buffer
           * (with ICV data) */
          ASSERT (pd2->icv_removed);
          vlib_buffer_t *tmp = vlib_get_buffer (vm, pd2->free_buffer_index);
          clib_memcpy_fast (vlib_buffer_get_tail (tmp) - N_HI_ESN_BYTES,
                            &seq_hi, N_HI_ESN_BYTES);
          extra_esn[0] = 1;
        }
    }
  return digest;
}

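/* Build the chunk vector for an integrity op over a chained buffer,
 * folding in the ESN high bits where required; returns -1 if a buffer
 * allocation fails. */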
static_always_inline int
esp_decrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                         const esp_decrypt_packet_data_t *pd,
                         esp_decrypt_packet_data2_t *pd2, ipsec_sa_t *sa0,
                         vlib_buffer_t *b, u8 icv_sz, u8 *start_src,
                         u32 start_len, u8 **digest, u16 *n_ch,
                         u32 *integ_total_len)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = vlib_get_buffer (vm, b->next_buffer);
  u16 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = start_src;

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      ch->src = vlib_buffer_get_current (cb);
      if (pd2->lb == cb)
        {
          if (pd2->icv_removed)
            ch->len = cb->current_length;
          else
            ch->len = cb->current_length - icv_sz;
          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
              u8 tmp[ESP_MAX_ICV_SIZE];
              u8 *esn;
              vlib_buffer_t *tmp_b;
              u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
              if (space_left < N_HI_ESN_BYTES)
                {
                  if (pd2->icv_removed)
                    {
                      /* use pre-data area from the last buffer
                         that was removed from the chain */
                      tmp_b = vlib_get_buffer (vm, pd2->free_buffer_index);
                      esn = tmp_b->data - N_HI_ESN_BYTES;
                    }
                  else
                    {
                      /* no space, need to allocate new buffer */
                      u32 tmp_bi = 0;
                      if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
                        return -1;
                      tmp_b = vlib_get_buffer (vm, tmp_bi);
                      esn = tmp_b->data;
                      pd2->free_buffer_index = tmp_bi;
                    }
                  clib_memcpy_fast (esn, &seq_hi, N_HI_ESN_BYTES);

                  vec_add2 (ptd->chunks, ch, 1);
                  n_chunks += 1;
                  ch->src = esn;
                  ch->len = N_HI_ESN_BYTES;
                }
              else
                {
                  if (pd2->icv_removed)
                    {
                      clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb),
                                        &seq_hi, N_HI_ESN_BYTES);
                    }
                  else
                    {
                      clib_memcpy_fast (tmp, *digest, ESP_MAX_ICV_SIZE);
                      clib_memcpy_fast (*digest, &seq_hi, N_HI_ESN_BYTES);
                      clib_memcpy_fast (*digest + N_HI_ESN_BYTES, tmp,
                                        ESP_MAX_ICV_SIZE);
                      *digest += N_HI_ESN_BYTES;
                    }
                  ch->len += N_HI_ESN_BYTES;
                }
            }
          total_len += ch->len;
          break;
        }
      else
        total_len += ch->len = cb->current_length;

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;
  if (integ_total_len)
    *integ_total_len = total_len;

  return 0;
}

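/* Build the (in-place) chunk vector for a crypto op over a chained buffer;
 * returns the total number of ciphertext bytes covered by the chunks. */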
static_always_inline u32
esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                          esp_decrypt_packet_data_t * pd,
                          esp_decrypt_packet_data2_t * pd2,
                          ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
                          u8 * start, u32 start_len, u8 ** tag, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u16 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = ch->dst = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);
  n_chunks = 1;

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      ch->src = ch->dst = vlib_buffer_get_current (cb);
      if (pd2->lb == cb)
        {
          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              if (pd2->lb->current_length < icv_sz)
                {
                  u16 dif = 0;
                  *tag = esp_move_icv (vm, b, pd, pd2, icv_sz, &dif);

                  /* this chunk does not contain crypto data */
                  n_chunks -= 1;
                  /* and fix previous chunk's length as it might have
                     been changed */
                  ASSERT (n_chunks > 0);
                  if (pd2->lb == b)
                    {
                      total_len -= dif;
                      ch[-1].len -= dif;
                    }
                  else
                    {
                      total_len = total_len + pd2->lb->current_length -
                        ch[-1].len;
                      ch[-1].len = pd2->lb->current_length;
                    }
                  break;
                }
              else
                *tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
            }

          if (pd2->icv_removed)
            total_len += ch->len = cb->current_length;
          else
            total_len += ch->len = cb->current_length - icv_sz;
        }
      else
        total_len += ch->len = cb->current_length;

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}

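/* Fill in the synchronous integrity and decrypt ops for one packet,
 * handling chained buffers, split ICVs, ESN insertion and CTR/AEAD nonce
 * construction. */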
static_always_inline void
esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
                             ipsec_per_thread_data_t * ptd,
                             vnet_crypto_op_t *** crypto_ops,
                             vnet_crypto_op_t *** integ_ops,
                             vnet_crypto_op_t * op,
                             ipsec_sa_t * sa0, u8 * payload,
                             u16 len, u8 icv_sz, u8 iv_sz,
                             esp_decrypt_packet_data_t * pd,
                             esp_decrypt_packet_data2_t * pd2,
                             vlib_buffer_t * b, u16 * next, u32 index)
{
  const u8 esp_sz = sizeof (esp_header_t);

  if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
    {
      vnet_crypto_op_init (op, sa0->integ_op_id);
      op->key_index = sa0->integ_key_index;
      op->src = payload;
      op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
      op->user_data = index;
      op->digest = payload + len;
      op->digest_len = icv_sz;
      op->len = len;

      if (pd->is_chain)
        {
          /* buffer is chained */
          op->len = pd->current_length;

          /* special case when ICV is split and needs to be reassembled
           * first -> move it to the last buffer. Also take into account
           * that ESN needs to be added after encrypted data and may or
           * may not fit in the tail. */
          if (pd2->lb->current_length < icv_sz)
            {
              u8 extra_esn = 0;
              op->digest =
                esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
                                  &extra_esn, &op->len);

              if (extra_esn)
                {
                  /* esn is in the last buffer, that was unlinked from
                   * the chain */
                  op->len = b->current_length;
                }
              else
                {
                  if (pd2->lb == b)
                    {
                      /* we now have a single buffer of crypto data, adjust
                       * the length (second buffer contains only ICV) */
                      *integ_ops = &ptd->integ_ops;
                      *crypto_ops = &ptd->crypto_ops;
                      len = b->current_length;
                      goto out;
                    }
                }
            }
          else
            op->digest = vlib_buffer_get_tail (pd2->lb) - icv_sz;

          op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          op->chunk_index = vec_len (ptd->chunks);
          if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                       payload, pd->current_length,
                                       &op->digest, &op->n_chunks, 0) < 0)
            {
              b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
              next[0] = ESP_DECRYPT_NEXT_DROP;
              return;
            }
        }
      else
        esp_insert_esn (vm, sa0, pd, pd2, &op->len, &op->digest, &len, b,
                        payload);
    out:
      vec_add_aligned (*(integ_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
    }

  payload += esp_sz;
  len -= esp_sz;

  if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
    {
      vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
      op->key_index = sa0->crypto_key_index;
      op->iv = payload;

      if (ipsec_sa_is_set_IS_CTR (sa0))
        {
          /* construct nonce in a scratch space in front of the IP header */
          esp_ctr_nonce_t *nonce =
            (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz -
                                 sizeof (*nonce));
          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              /* construct aad in a scratch space in front of the nonce */
              esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
              op->aad = (u8 *) nonce - sizeof (esp_aead_t);
              op->aad_len = esp_aad_fill (op->aad, esp0, sa0, pd->seq_hi);
              op->tag = payload + len;
              op->tag_len = 16;
            }
          else
            {
              nonce->ctr = clib_host_to_net_u32 (1);
            }
          nonce->salt = sa0->salt;
          ASSERT (sizeof (u64) == iv_sz);
          nonce->iv = *(u64 *) op->iv;
          op->iv = (u8 *) nonce;
        }
      op->src = op->dst = payload += iv_sz;
      op->len = len - iv_sz;
      op->user_data = index;

      if (pd->is_chain && (pd2->lb != b))
        {
          /* buffer is chained */
          op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          op->chunk_index = vec_len (ptd->chunks);
          esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                    payload, len - pd->iv_sz + pd->icv_sz,
                                    &op->tag, &op->n_chunks);
        }

      vec_add_aligned (*(crypto_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
    }
}

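/* Async counterpart of the sync-op setup: computes offsets and lengths and
 * enqueues the packet onto an async crypto frame instead of building
 * vnet_crypto_op_t ops. */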
static_always_inline esp_decrypt_error_t
esp_decrypt_prepare_async_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
                                 ipsec_per_thread_data_t *ptd,
                                 vnet_crypto_async_frame_t *f, ipsec_sa_t *sa0,
                                 u8 *payload, u16 len, u8 icv_sz, u8 iv_sz,
                                 esp_decrypt_packet_data_t *pd,
                                 esp_decrypt_packet_data2_t *pd2, u32 bi,
                                 vlib_buffer_t *b, u16 *next, u16 async_next)
{
  const u8 esp_sz = sizeof (esp_header_t);
  esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b))->decrypt_data;
  esp_decrypt_packet_data2_t *async_pd2 = esp_post_data2 (b);
  u8 *tag = payload + len, *iv = payload + esp_sz, *aad = 0;
  u32 key_index;
  u32 crypto_len, integ_len = 0;
  i16 crypto_start_offset, integ_start_offset = 0;
  u8 flags = 0;

  if (!ipsec_sa_is_set_IS_AEAD (sa0))
    {
      /* linked algs */
      key_index = sa0->linked_key_index;
      integ_start_offset = payload - b->data;
      integ_len = len;
      if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
        flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;

      if (pd->is_chain)
        {
          /* buffer is chained */
          integ_len = pd->current_length;

          /* special case when ICV is split and needs to be reassembled
           * first -> move it to the last buffer. Also take into account
           * that ESN needs to be added after encrypted data and may or
           * may not fit in the tail. */
          if (pd2->lb->current_length < icv_sz)
            {
              u8 extra_esn = 0;
              tag = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
                                      &extra_esn, &integ_len);

              if (extra_esn)
                {
                  /* esn is in the last buffer, that was unlinked from
                   * the chain */
                  integ_len = b->current_length;
                }
              else
                {
                  if (pd2->lb == b)
                    {
                      /* we now have a single buffer of crypto data, adjust
                       * the length (second buffer contains only ICV) */
                      len = b->current_length;
                      goto out;
                    }
                }
            }
          else
            tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;

          flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                       payload, pd->current_length, &tag, 0,
                                       &integ_len) < 0)
            {
              /* allocate buffer failed, will not add to frame and drop */
              return (ESP_DECRYPT_ERROR_NO_BUFFERS);
            }
        }
      else
        esp_insert_esn (vm, sa0, pd, pd2, &integ_len, &tag, &len, b, payload);
    }
  else
    key_index = sa0->crypto_key_index;

out:
  /* crypto */
  payload += esp_sz;
  len -= esp_sz;
  iv = payload;

  if (ipsec_sa_is_set_IS_CTR (sa0))
    {
      /* construct nonce in a scratch space in front of the IP header */
      esp_ctr_nonce_t *nonce =
        (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz - sizeof (*nonce));
      if (ipsec_sa_is_set_IS_AEAD (sa0))
        {
          /* construct aad in a scratch space in front of the nonce */
          esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
          aad = (u8 *) nonce - sizeof (esp_aead_t);
          esp_aad_fill (aad, esp0, sa0, pd->seq_hi);
          tag = payload + len;
        }
      else
        {
          nonce->ctr = clib_host_to_net_u32 (1);
        }
      nonce->salt = sa0->salt;
      ASSERT (sizeof (u64) == iv_sz);
      nonce->iv = *(u64 *) iv;
      iv = (u8 *) nonce;
    }

  crypto_start_offset = (payload += iv_sz) - b->data;
  crypto_len = len - iv_sz;

  if (pd->is_chain && (pd2->lb != b))
    {
      /* buffer is chained */
      flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;

      crypto_len = esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                             payload,
                                             len - pd->iv_sz + pd->icv_sz,
                                             &tag, 0);
    }

  *async_pd = *pd;
  *async_pd2 = *pd2;

  /* for AEAD integ_len - crypto_len will be negative, it is ok since it
   * is ignored by the engine. */
  vnet_crypto_async_add_to_frame (
    vm, f, key_index, crypto_len, integ_len - crypto_len, crypto_start_offset,
    integ_start_offset, bi, async_next, iv, tag, aad, flags);

  return (ESP_DECRYPT_ERROR_RX_PKTS);
}

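/* After decryption: redo the anti-replay check, advance the window, strip
 * the ESP header/footer, restore the inner IP/GRE/L2 header and select the
 * next node. */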
static_always_inline void
esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
                         esp_decrypt_packet_data_t * pd,
                         esp_decrypt_packet_data2_t * pd2, vlib_buffer_t * b,
                         u16 * next, int is_ip6, int is_tun, int is_async)
{
  ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
  vlib_buffer_t *lb = b;
  const u8 esp_sz = sizeof (esp_header_t);
  const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
  u8 pad_length = 0, next_header = 0;
  u16 icv_sz;

  /*
   * redo the anti-replay check.
   * in this frame say we have sequence numbers, s, s+1, s+1, s+1
   * and s and s+1 are in the window. When we did the anti-replay
   * check above we did so against the state of the window (W),
   * after packet s-1. So each of the packets in the sequence will be
   * accepted.
   * This time s will be checked against Ws-1, s+1 checked against Ws
   * (i.e. the window state is updated/advanced)
   * so this time the successive s+1 packet will be dropped.
   * This is a consequence of batching the decrypts. If the
   * check-decrypt-advance process was done for each packet it would
   * be fine. But we batch the decrypts because it's much more efficient
   * to do so in SW and if we offload to HW and the process is async.
   *
   * You're probably thinking, but this means an attacker can send the
   * above sequence and cause VPP to perform decrypts that will fail,
   * and that's true. But if the attacker can determine s (a valid
   * sequence number in the window) which is non-trivial, it can generate
   * a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
   * implementation, sequential or batching, from decrypting these.
   */
  if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
                                           NULL))
    {
      b->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
      next[0] = ESP_DECRYPT_NEXT_DROP;
      return;
    }

  ipsec_sa_anti_replay_advance (sa0, pd->seq, pd->seq_hi);

  if (pd->is_chain)
    {
      lb = pd2->lb;
      icv_sz = pd2->icv_removed ? 0 : pd->icv_sz;
      if (pd2->free_buffer_index)
        {
          vlib_buffer_free_one (vm, pd2->free_buffer_index);
          lb->next_buffer = 0;
        }
      if (lb->current_length < sizeof (esp_footer_t) + icv_sz)
        {
          /* esp footer is either split in two buffers or in the before
           * last buffer */

          vlib_buffer_t *before_last = b, *bp = b;
          while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              before_last = bp;
              bp = vlib_get_buffer (vm, bp->next_buffer);
            }
          u8 *bt = vlib_buffer_get_tail (before_last);

          if (lb->current_length == icv_sz)
            {
              esp_footer_t *f = (esp_footer_t *) (bt - sizeof (*f));
              pad_length = f->pad_length;
              next_header = f->next_header;
            }
          else
            {
              pad_length = (bt - 1)[0];
              next_header = ((u8 *) vlib_buffer_get_current (lb))[0];
            }
        }
      else
        {
          esp_footer_t *f =
            (esp_footer_t *) (lb->data + lb->current_data +
                              lb->current_length - sizeof (esp_footer_t) -
                              icv_sz);
          pad_length = f->pad_length;
          next_header = f->next_header;
        }
    }
  else
    {
      icv_sz = pd->icv_sz;
      esp_footer_t *f =
        (esp_footer_t *) (lb->data + lb->current_data + lb->current_length -
                          sizeof (esp_footer_t) - icv_sz);
      pad_length = f->pad_length;
      next_header = f->next_header;
    }

  u16 adv = pd->iv_sz + esp_sz;
  u16 tail = sizeof (esp_footer_t) + pad_length + icv_sz;
  u16 tail_orig = sizeof (esp_footer_t) + pad_length + pd->icv_sz;
  b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;

  if ((pd->flags & tun_flags) == 0 && !is_tun)  /* transport mode */
    {
      u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
        sizeof (udp_header_t) : 0;
      u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
      u8 *old_ip = b->data + pd->current_data - ip_hdr_sz - udp_sz;
      u8 *ip = old_ip + adv + udp_sz;

      if (is_ip6 && ip_hdr_sz > 64)
        memmove (ip, old_ip, ip_hdr_sz);
      else
        clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);

      b->current_data = pd->current_data + adv - ip_hdr_sz;
      b->current_length += ip_hdr_sz - adv;
      esp_remove_tail (vm, b, lb, tail);

      if (is_ip6)
        {
          ip6_header_t *ip6 = (ip6_header_t *) ip;
          u16 len = clib_net_to_host_u16 (ip6->payload_length);
          len -= adv + tail_orig;
          ip6->payload_length = clib_host_to_net_u16 (len);
          ip6->protocol = next_header;
          next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
        }
      else
        {
          ip4_header_t *ip4 = (ip4_header_t *) ip;
          ip_csum_t sum = ip4->checksum;
          u16 len = clib_net_to_host_u16 (ip4->length);
          len = clib_host_to_net_u16 (len - adv - tail_orig - udp_sz);
          sum = ip_csum_update (sum, ip4->protocol, next_header,
                                ip4_header_t, protocol);
          sum = ip_csum_update (sum, ip4->length, len, ip4_header_t, length);
          ip4->checksum = ip_csum_fold (sum);
          ip4->protocol = next_header;
          ip4->length = len;
          next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
        }
    }
  else
    {
      if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
        {
          next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
          esp_remove_tail (vm, b, lb, tail);
        }
      else if (next_header == IP_PROTOCOL_IPV6)
        {
          next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
          esp_remove_tail (vm, b, lb, tail);
        }
      else if (next_header == IP_PROTOCOL_MPLS_IN_IP)
        {
          next[0] = ESP_DECRYPT_NEXT_MPLS_INPUT;
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
          esp_remove_tail (vm, b, lb, tail);
        }
      else
        {
          if (is_tun && next_header == IP_PROTOCOL_GRE)
            {
              gre_header_t *gre;

              b->current_data = pd->current_data + adv;
              b->current_length = pd->current_length - adv - tail;

              gre = vlib_buffer_get_current (b);

              vlib_buffer_advance (b, sizeof (*gre));

              switch (clib_net_to_host_u16 (gre->protocol))
                {
                case GRE_PROTOCOL_teb:
                  vnet_update_l2_len (b);
                  next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
                  break;
                case GRE_PROTOCOL_ip4:
                  next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
                  break;
                case GRE_PROTOCOL_ip6:
                  next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
                  break;
                default:
                  b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
                  next[0] = ESP_DECRYPT_NEXT_DROP;
                  break;
                }
            }
          else
            {
              next[0] = ESP_DECRYPT_NEXT_DROP;
              b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
              return;
            }
        }
      if (is_tun)
        {
          if (ipsec_sa_is_set_IS_PROTECT (sa0))
            {
              /*
               * There are two encap possibilities
               * 1) the tunnel and the SA are providing encap, i.e. it's
               *   MAC | SA-IP | TUN-IP | ESP | PAYLOAD
               * implying the SA is in tunnel mode (on a tunnel interface)
               * 2) only the tunnel provides encap
               *   MAC | TUN-IP | ESP | PAYLOAD
               * implying the SA is in transport mode.
               *
               * For 2) we need only strip the tunnel encap and we're good,
               * since the tunnel and crypto encap (in the tun-protect
               * object) are the same and we verified above that these match.
               * For 1) we need to strip the SA-IP outer headers, to
               * reveal the tunnel IP and then check that this matches
               * the configured tunnel.
               */
              const ipsec_tun_protect_t *itp;

              itp =
                ipsec_tun_protect_get (vnet_buffer (b)->ipsec.protect_index);

              if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
                {
                  const ip4_header_t *ip4;

                  ip4 = vlib_buffer_get_current (b);

                  if (!ip46_address_is_equal_v4 (&itp->itp_tun.src,
                                                 &ip4->dst_address) ||
                      !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
                                                 &ip4->src_address))
                    {
                      next[0] = ESP_DECRYPT_NEXT_DROP;
                      b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
                    }
                }
              else if (next_header == IP_PROTOCOL_IPV6)
                {
                  const ip6_header_t *ip6;

                  ip6 = vlib_buffer_get_current (b);

                  if (!ip46_address_is_equal_v6 (&itp->itp_tun.src,
                                                 &ip6->dst_address) ||
                      !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
                                                 &ip6->src_address))
                    {
                      next[0] = ESP_DECRYPT_NEXT_DROP;
                      b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
                    }
                }
            }
        }
    }
}

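/* Main decrypt loop: per packet, validate, run the anti-replay check and
 * hand off to the sync or async preparation path; then process the sync
 * crypto ops and run the post-crypto pass. */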
static uword
esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                    vlib_frame_t *from_frame, int is_ip6, int is_tun,
                    u16 async_next_node)
{
  ipsec_main_t *im = &ipsec_main;
  u32 thread_index = vm->thread_index;
  u16 len;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
  u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
  u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts;
  u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
  u32 sync_bi[VLIB_FRAME_SIZE];
  u32 noop_bi[VLIB_FRAME_SIZE];
  esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  esp_decrypt_packet_data2_t pkt_data2[VLIB_FRAME_SIZE], *pd2 = pkt_data2;
  esp_decrypt_packet_data_t cpd = { };
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
  const u8 esp_sz = sizeof (esp_header_t);
  ipsec_sa_t *sa0 = 0;
  vnet_crypto_op_t _op, *op = &_op;
  vnet_crypto_op_t **crypto_ops;
  vnet_crypto_op_t **integ_ops;
  int is_async = im->async_mode;
  vnet_crypto_async_op_id_t async_op = ~0;
  vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
  esp_decrypt_error_t err;

  vlib_get_buffers (vm, from, b, n_left);
  if (!is_async)
    {
      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->integ_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chained_integ_ops);
    }
  vec_reset_length (ptd->async_frames);
  vec_reset_length (ptd->chunks);
  clib_memset (sync_nexts, -1, sizeof (sync_nexts));
  clib_memset (async_frames, 0, sizeof (async_frames));

  while (n_left > 0)
    {
      u8 *payload;

      err = ESP_DECRYPT_ERROR_RX_PKTS;
      if (n_left > 2)
        {
          u8 *p;
          vlib_prefetch_buffer_header (b[2], LOAD);
          p = vlib_buffer_get_current (b[1]);
          clib_prefetch_load (p);
          p -= CLIB_CACHE_LINE_BYTES;
          clib_prefetch_load (p);
        }

      u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (n_bufs == 0)
        {
          err = ESP_DECRYPT_ERROR_NO_BUFFERS;
          esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
                              ESP_DECRYPT_NEXT_DROP);
          goto next;
        }

      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
        {
          if (current_sa_pkts)
            vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                             current_sa_index,
                                             current_sa_pkts,
                                             current_sa_bytes);
          current_sa_bytes = current_sa_pkts = 0;

          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
          sa0 = ipsec_sa_get (current_sa_index);

          /* fetch the second cacheline ASAP */
          clib_prefetch_load (sa0->cacheline1);
          cpd.icv_sz = sa0->integ_icv_size;
          cpd.iv_sz = sa0->crypto_iv_size;
          cpd.flags = sa0->flags;
          cpd.sa_index = current_sa_index;
          is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
        }

      if (PREDICT_FALSE (~0 == sa0->thread_index))
        {
          /* this is the first packet to use this SA, claim the SA
           * for this thread. this could happen simultaneously on
           * another thread */
          clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
                                    ipsec_sa_assign_thread (thread_index));
        }

      if (PREDICT_FALSE (thread_index != sa0->thread_index))
        {
          vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
          err = ESP_DECRYPT_ERROR_HANDOFF;
          esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
                              ESP_DECRYPT_NEXT_HANDOFF);
          goto next;
        }

      /* store packet data for next round for easier prefetch */
      pd->sa_data = cpd.sa_data;
      pd->current_data = b[0]->current_data;
      pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
      payload = b[0]->data + pd->current_data;
      pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
      pd->is_chain = 0;
      pd2->lb = b[0];
      pd2->free_buffer_index = 0;
      pd2->icv_removed = 0;

      if (n_bufs > 1)
        {
          pd->is_chain = 1;
          /* find last buffer in the chain */
          while (pd2->lb->flags & VLIB_BUFFER_NEXT_PRESENT)
            pd2->lb = vlib_get_buffer (vm, pd2->lb->next_buffer);

          crypto_ops = &ptd->chained_crypto_ops;
          integ_ops = &ptd->chained_integ_ops;
        }
      else
        {
          crypto_ops = &ptd->crypto_ops;
          integ_ops = &ptd->integ_ops;
        }

      pd->current_length = b[0]->current_length;

      /* anti-replay check */
      if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, ~0, false,
                                               &pd->seq_hi))
        {
          err = ESP_DECRYPT_ERROR_REPLAY;
          esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
                              ESP_DECRYPT_NEXT_DROP);
          goto next;
        }

      if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
        {
          err = ESP_DECRYPT_ERROR_RUNT;
          esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
                              ESP_DECRYPT_NEXT_DROP);
          goto next;
        }

      len = pd->current_length - cpd.icv_sz;
      current_sa_pkts += 1;
      current_sa_bytes += vlib_buffer_length_in_chain (vm, b[0]);

      if (is_async)
        {
          async_op = sa0->crypto_async_dec_op_id;

          /* get a frame for this op if we don't yet have one or it's full
           */
          if (NULL == async_frames[async_op] ||
              vnet_crypto_async_frame_is_full (async_frames[async_op]))
            {
              async_frames[async_op] =
                vnet_crypto_async_get_frame (vm, async_op);
              /* Save the frame to the list we'll submit at the end */
              vec_add1 (ptd->async_frames, async_frames[async_op]);
            }

          err = esp_decrypt_prepare_async_frame (
            vm, node, ptd, async_frames[async_op], sa0, payload, len,
            cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next,
            async_next_node);
          if (ESP_DECRYPT_ERROR_RX_PKTS != err)
            {
              esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
                                  ESP_DECRYPT_NEXT_DROP);
            }
        }
      else
        esp_decrypt_prepare_sync_op (
          vm, node, ptd, &crypto_ops, &integ_ops, op, sa0, payload, len,
          cpd.icv_sz, cpd.iv_sz, pd, pd2, b[0], sync_next, b - bufs);
      /* next */
    next:
      if (ESP_DECRYPT_ERROR_RX_PKTS != err)
        {
          noop_bi[n_noop] = from[b - bufs];
          n_noop++;
          noop_next++;
        }
      else if (!is_async)
        {
          sync_bi[n_sync] = from[b - bufs];
          sync_bufs[n_sync] = b[0];
          n_sync++;
          sync_next++;
          pd += 1;
          pd2 += 1;
        }
      else
        async_next++;

      n_left -= 1;
      b += 1;
    }

  if (PREDICT_TRUE (~0 != current_sa_index))
    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                     current_sa_index, current_sa_pkts,
                                     current_sa_bytes);

  /* submit or free all of the open frames */
  vnet_crypto_async_frame_t **async_frame;

  vec_foreach (async_frame, ptd->async_frames)
    {
      /* free frame and move on if no ops were successfully added */
      if (PREDICT_FALSE (!(*async_frame)->n_elts))
        {
          vnet_crypto_async_free_frame (vm, *async_frame);
          continue;
        }
      if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
        {
          n_noop += esp_async_recycle_failed_submit (
            vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
            n_sync, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
          vnet_crypto_async_reset_frame (*async_frame);
          vnet_crypto_async_free_frame (vm, *async_frame);
        }
    }

  if (n_sync)
    {
      esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
                       ESP_DECRYPT_ERROR_INTEG_ERROR);
      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
                               sync_nexts, ptd->chunks,
                               ESP_DECRYPT_ERROR_INTEG_ERROR);

      esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
                       ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
                               sync_nexts, ptd->chunks,
                               ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
    }

  /* Post decryption round - adjust packet data start and length and next
     node */

  n_left = n_sync;
  sync_next = sync_nexts;
  pd = pkt_data;
  pd2 = pkt_data2;
  b = sync_bufs;

  while (n_left)
    {
      if (n_left >= 2)
        {
          void *data = b[1]->data + pd[1].current_data;

          /* buffer metadata */
          vlib_prefetch_buffer_header (b[1], LOAD);

          /* esp_footer_t */
          CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
                         CLIB_CACHE_LINE_BYTES, LOAD);

          /* packet headers */
          CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
                         CLIB_CACHE_LINE_BYTES * 2, LOAD);
        }

      /* save the sa_index as GRE_teb post_crypto changes L2 opaque */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;

      if (sync_next[0] >= ESP_DECRYPT_N_NEXT)
        esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], sync_next, is_ip6,
                                 is_tun, 0);

      /* trace: */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_decrypt_trace_t *tr;
          tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
          sa0 = ipsec_sa_get (current_sa_index);
          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
          tr->seq = pd->seq;
          tr->sa_seq = sa0->seq;
          tr->sa_seq_hi = sa0->seq_hi;
          tr->pkt_seq_hi = pd->seq_hi;
        }

      /* next */
      n_left -= 1;
      sync_next += 1;
      pd += 1;
      pd2 += 1;
      b += 1;
    }

  vlib_node_increment_counter (vm, node->node_index, ESP_DECRYPT_ERROR_RX_PKTS,
                               from_frame->n_vectors);

  if (n_sync)
    vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);

  if (n_noop)
    vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);

  return (from_frame->n_vectors);
}

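/* Post node used by the async path: the crypto engine has already run, so
 * only the post-crypto fix-ups and tracing remain. */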
static uword
esp_decrypt_post_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vlib_frame_t * from_frame, int is_ip6, int is_tun)
{
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  vlib_get_buffers (vm, from, b, n_left);

  while (n_left > 0)
    {
      esp_decrypt_packet_data_t *pd = &(esp_post_data (b[0]))->decrypt_data;

      if (n_left > 2)
        {
          vlib_prefetch_buffer_header (b[2], LOAD);
          vlib_prefetch_buffer_header (b[1], LOAD);
        }

      if (!pd->is_chain)
        esp_decrypt_post_crypto (vm, node, pd, 0, b[0], next, is_ip6, is_tun,
                                 1);
      else
        {
          esp_decrypt_packet_data2_t *pd2 = esp_post_data2 (b[0]);
          esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
                                   is_tun, 1);
        }

      /*trace: */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
          esp_decrypt_trace_t *tr;
          esp_decrypt_packet_data_t *async_pd =
            &(esp_post_data (b[0]))->decrypt_data;
          tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
          sa0 = ipsec_sa_get (async_pd->sa_index);

          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
          tr->seq = pd->seq;
          tr->sa_seq = sa0->seq;
          tr->sa_seq_hi = sa0->seq_hi;
        }

      n_left--;
      next++;
      b++;
    }

  n_left = from_frame->n_vectors;
  vlib_node_increment_counter (vm, node->node_index,
                               ESP_DECRYPT_ERROR_RX_POST_PKTS, n_left);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  return n_left;
}

VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 0,
                             esp_decrypt_async_next.esp4_post_next);
}

VLIB_NODE_FN (esp4_decrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 0, 0);
}

VLIB_NODE_FN (esp4_decrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 1,
                             esp_decrypt_async_next.esp4_tun_post_next);
}

VLIB_NODE_FN (esp4_decrypt_tun_post_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 0, 1);
}

VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 0,
                             esp_decrypt_async_next.esp6_post_next);
}

VLIB_NODE_FN (esp6_decrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 1, 0);
}

VLIB_NODE_FN (esp6_decrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 1,
                             esp_decrypt_async_next.esp6_tun_post_next);
}

VLIB_NODE_FN (esp6_decrypt_tun_post_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 1, 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_decrypt_node) = {
  .name = "esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-drop",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-handoff",
  },
};

VLIB_REGISTER_NODE (esp4_decrypt_post_node) = {
  .name = "esp4-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .sibling_of = "esp4-decrypt",
};

VLIB_REGISTER_NODE (esp6_decrypt_node) = {
  .name = "esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-drop",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF]= "esp6-decrypt-handoff",
  },
};

VLIB_REGISTER_NODE (esp6_decrypt_post_node) = {
  .name = "esp6-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .sibling_of = "esp6-decrypt",
};

VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
  .name = "esp4-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-tun-handoff",
  },
};

VLIB_REGISTER_NODE (esp4_decrypt_tun_post_node) = {
  .name = "esp4-decrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .sibling_of = "esp4-decrypt-tun",
};

VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
  .name = "esp6-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF]= "esp6-decrypt-tun-handoff",
  },
};

VLIB_REGISTER_NODE (esp6_decrypt_tun_post_node) = {
  .name = "esp6-decrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .sibling_of = "esp6-decrypt-tun",
};
/* *INDENT-ON* */

#ifndef CLIB_MARCH_VARIANT

static clib_error_t *
esp_decrypt_init (vlib_main_t *vm)
{
  ipsec_main_t *im = &ipsec_main;

  im->esp4_dec_fq_index =
    vlib_frame_queue_main_init (esp4_decrypt_node.index, 0);
  im->esp6_dec_fq_index =
    vlib_frame_queue_main_init (esp6_decrypt_node.index, 0);
  im->esp4_dec_tun_fq_index =
    vlib_frame_queue_main_init (esp4_decrypt_tun_node.index, 0);
  im->esp6_dec_tun_fq_index =
    vlib_frame_queue_main_init (esp6_decrypt_tun_node.index, 0);

  return 0;
}

VLIB_INIT_FUNCTION (esp_decrypt_init);

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
esp_post_data
#define esp_post_data(b)
Definition: esp.h:228
vec_reset_length
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
Definition: vec_bootstrap.h:194
ipsec.h
foreach_esp_decrypt_error
#define foreach_esp_decrypt_error
Definition: esp_decrypt.c:61
ipsec_per_thread_data_t::integ_ops
vnet_crypto_op_t * integ_ops
Definition: ipsec.h:101
vnet_crypto_op_t::digest
u8 * digest
Definition: crypto.h:299
tmp
u32 * tmp
Definition: interface_output.c:1096
vlib_buffer_t::next_buffer
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:149
vnet_crypto_async_free_frame
static_always_inline void vnet_crypto_async_free_frame(vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
Definition: crypto.h:580
vnet_crypto_op_init
static_always_inline void vnet_crypto_op_init(vnet_crypto_op_t *op, vnet_crypto_op_id_t type)
Definition: crypto.h:528
im
vnet_interface_main_t * im
Definition: interface_output.c:415
vnet_crypto_op_t::digest_len
u8 digest_len
Definition: crypto.h:268
esp_decrypt_trace_t::integ_alg
ipsec_integ_alg_t integ_alg
Definition: esp_decrypt.c:97
ipsec_tun.h
esp_decrypt_chain_crypto
static_always_inline u32 esp_decrypt_chain_crypto(vlib_main_t *vm, ipsec_per_thread_data_t *ptd, esp_decrypt_packet_data_t *pd, esp_decrypt_packet_data2_t *pd2, ipsec_sa_t *sa0, vlib_buffer_t *b, u8 icv_sz, u8 *start, u32 start_len, u8 **tag, u16 *n_ch)
Definition: esp_decrypt.c:414
ipsec_sa_t::integ_key_index
vnet_crypto_key_index_t integ_key_index
Definition: ipsec_sa.h:139
thread_index
u32 thread_index
Definition: nat44_ei_hairpinning.c:495
bufs
vlib_buffer_t * bufs[VLIB_FRAME_SIZE]
Definition: nat44_ei_out2in.c:717
ipsec_sa_anti_replay_and_sn_advance
static int ipsec_sa_anti_replay_and_sn_advance(const ipsec_sa_t *sa, u32 seq, u32 hi_seq_used, bool post_decrypt, u32 *hi_seq_req)
Definition: ipsec_sa.h:338
vlib_prefetch_buffer_header
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:231
vnet_crypto_op_t::tag
u8 * tag
Definition: crypto.h:298
ipsec_tun_protect_t_::itp_tun
ipsec_ep_t itp_tun
Definition: ipsec_tun.h:124
esp_remove_tail
static void esp_remove_tail(vlib_main_t *vm, vlib_buffer_t *b, vlib_buffer_t *last, u16 tail)
Definition: esp_decrypt.c:185
ESP_DECRYPT_POST_N_NEXT
@ ESP_DECRYPT_POST_N_NEXT
Definition: esp_decrypt.c:58
esp_decrypt_packet_data_t::is_chain
u16 is_chain
Definition: esp.h:202
format_ipsec_integ_alg
u8 * format_ipsec_integ_alg(u8 *s, va_list *args)
Definition: ipsec_format.c:111
esp6_decrypt_tun_node
vlib_node_registration_t esp6_decrypt_tun_node
(constructor) VLIB_REGISTER_NODE (esp6_decrypt_tun_node)
Definition: esp_decrypt.c:1555
ip4
vl_api_ip4_address_t ip4
Definition: one.api:376
VNET_CRYPTO_ASYNC_OP_N_IDS
@ VNET_CRYPTO_ASYNC_OP_N_IDS
Definition: crypto.h:195
ipsec_sa_t::thread_index
u32 thread_index
Definition: ipsec_sa.h:129
esn
vl_api_ikev2_sa_transform_t esn
Definition: ikev2_types.api:128
vlib_get_buffer
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:111
format_esp_decrypt_trace
static u8 * format_esp_decrypt_trace(u8 *s, va_list *args)
Definition: esp_decrypt.c:105
f
vlib_frame_t * f
Definition: interface_output.c:1098
ipsec_per_thread_data_t::chunks
vnet_crypto_op_chunk_t * chunks
Definition: ipsec.h:104
vnet_crypto_op_t::status
vnet_crypto_op_status_t status
Definition: crypto.h:260
esp_decrypt_packet_data_t::iv_sz
u8 iv_sz
Definition: esp.h:191
vlib_get_buffers
vlib_get_buffers(vm, from, b, n_left_from)
next
u16 * next
Definition: nat44_ei_out2in.c:718
VLIB_NODE_TYPE_INTERNAL
@ VLIB_NODE_TYPE_INTERNAL
Definition: node.h:72
VLIB_FRAME_SIZE
#define VLIB_FRAME_SIZE
Definition: node.h:368
node
vlib_main_t vlib_node_runtime_t * node
Definition: nat44_ei.c:3047
vnet_crypto_op_t::src
u8 * src
Definition: crypto.h:277
u16
unsigned short u16
Definition: types.h:57
vnet_crypto_op_t::dst
u8 * dst
Definition: crypto.h:278
ESP_MAX_ICV_SIZE
#define ESP_MAX_ICV_SIZE
Definition: esp.h:90
first
static heap_elt_t * first(heap_header_t *h)
Definition: heap.c:59
vnet_crypto_async_frame_t
Definition: crypto.h:358
esp_set_next_index
static void esp_set_next_index(vlib_buffer_t *b, vlib_node_runtime_t *node, u32 err, u16 index, u16 *nexts, u16 drop_next)
Definition: esp.h:150
esp_decrypt_prepare_sync_op
static_always_inline void esp_decrypt_prepare_sync_op(vlib_main_t *vm, vlib_node_runtime_t *node, ipsec_per_thread_data_t *ptd, vnet_crypto_op_t ***crypto_ops, vnet_crypto_op_t ***integ_ops, vnet_crypto_op_t *op, ipsec_sa_t *sa0, u8 *payload, u16 len, u8 icv_sz, u8 iv_sz, esp_decrypt_packet_data_t *pd, esp_decrypt_packet_data2_t *pd2, vlib_buffer_t *b, u16 *next, u32 index)
Definition: esp_decrypt.c:487
vnet_crypto_async_add_to_frame
static_always_inline void vnet_crypto_async_add_to_frame(vlib_main_t *vm, vnet_crypto_async_frame_t *f, u32 key_index, u32 crypto_len, i16 integ_len_adj, i16 crypto_start_offset, u16 integ_start_offset, u32 buffer_index, u16 next_node, u8 *iv, u8 *tag, u8 *aad, u8 flags)
Definition: crypto.h:621
vnet_crypto_op_t::user_data
uword user_data
Definition: crypto.h:258
esp_decrypt_trace_t::crypto_alg
ipsec_crypto_alg_t crypto_alg
Definition: esp_decrypt.c:96
vm
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
Definition: nat44_ei.c:3047
esp_decrypt_packet_data_t::current_length
i16 current_length
Definition: esp.h:200
esp_aead_t_
AES GCM Additional Authentication data.
Definition: esp.h:76
from_frame
vlib_main_t vlib_node_runtime_t vlib_frame_t * from_frame
Definition: esp_encrypt.c:1328
vlib_buffer_enqueue_to_next
vlib_buffer_enqueue_to_next(vm, node, from,(u16 *) nexts, frame->n_vectors)
ipsec_sa_t::integ_icv_size
u8 integ_icv_size
Definition: ipsec_sa.h:125
vnet_crypto_op_t::iv
u8 * iv
Definition: crypto.h:293
esp4_decrypt_tun_post_node
vlib_node_registration_t esp4_decrypt_tun_post_node
(constructor) VLIB_REGISTER_NODE (esp4_decrypt_tun_post_node)
Definition: esp_decrypt.c:1543
ipsec_sa_t::crypto_dec_op_id
vnet_crypto_op_id_t crypto_dec_op_id
Definition: ipsec_sa.h:148
vnet_crypto_op_t::len
u32 len
Definition: crypto.h:287
vlib_frame_t
Definition: node.h:372
esp_decrypt_init
static clib_error_t * esp_decrypt_init(vlib_main_t *vm)
Definition: esp_decrypt.c:1589
vlib_buffer_length_in_chain
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
Definition: buffer_funcs.h:433
clib_memcpy_fast
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
Definition: string.h:92
vnet_crypto_async_submit_open_frame
static_always_inline int vnet_crypto_async_submit_open_frame(vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
Definition: crypto.h:589
vec_add_aligned
#define vec_add_aligned(V, E, N, A)
Add N elements to end of vector V (no header, specified alignment)
Definition: vec.h:698
esp_decrypt_post_inline
static uword esp_decrypt_post_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, int is_ip6, int is_tun)
Definition: esp_decrypt.c:1339
udp_header_t
Definition: udp_packet.h:45
ESP_DECRYPT_N_ERROR
@ ESP_DECRYPT_N_ERROR
Definition: esp_decrypt.c:81
ip4_header_t
Definition: ip4_packet.h:87
vnet_crypto_op_t::aad_len
u16 aad_len
Definition: crypto.h:271
vnet_crypto_op_t
Definition: crypto.h:255
vnet_crypto_op_t::flags
u8 flags
Definition: crypto.h:261
vlib_frame_queue_main_init
u32 vlib_frame_queue_main_init(u32 node_index, u32 frame_queue_nelts)
Definition: threads.c:1561
esp_process_chained_ops
static_always_inline void esp_process_chained_ops(vlib_main_t *vm, vlib_node_runtime_t *node, vnet_crypto_op_t *ops, vlib_buffer_t *b[], u16 *nexts, vnet_crypto_op_chunk_t *chunks, int e)
Definition: esp_decrypt.c:153
vnet_crypto_op_t::key_index
u32 key_index
Definition: crypto.h:292
esp.h
vnet_crypto_op_t::chunk_index
u32 chunk_index
Definition: crypto.h:289
CLIB_PREFETCH
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:76
esp_move_icv
static_always_inline u8 * esp_move_icv(vlib_main_t *vm, vlib_buffer_t *first, esp_decrypt_packet_data_t *pd, esp_decrypt_packet_data2_t *pd2, u16 icv_sz, u16 *dif)
Definition: esp_decrypt.c:210
VNET_CRYPTO_OP_FLAG_HMAC_CHECK
#define VNET_CRYPTO_OP_FLAG_HMAC_CHECK
Definition: crypto.h:263
i16
signed short i16
Definition: types.h:46
vlib_buffer_t::current_data
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:119
vlib_buffer_advance
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:276
esp4_decrypt_node
vlib_node_registration_t esp4_decrypt_node
(constructor) VLIB_REGISTER_NODE (esp4_decrypt_node)
Definition: esp_decrypt.c:1461
vnet_crypto_process_ops
u32 vnet_crypto_process_ops(vlib_main_t *vm, vnet_crypto_op_t ops[], u32 n_ops)
Definition: crypto.c:99
ipsec_sa_t::crypto_async_dec_op_id
vnet_crypto_async_op_id_t crypto_async_dec_op_id
Definition: ipsec_sa.h:155
vec_len
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
Definition: vec_bootstrap.h:142
vlib_buffer_t::error
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:145
len
u8 len
Definition: ip_types.api:103
ip46_address_is_equal_v6
static_always_inline int ip46_address_is_equal_v6(const ip46_address_t *ip46, const ip6_address_t *ip6)
Definition: ip46_address.h:115
VLIB_NODE_FN
#define VLIB_NODE_FN(node)
Definition: node.h:202
vec_add2
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:644
vec_add1
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:606
N_HI_ESN_BYTES
#define N_HI_ESN_BYTES
Definition: esp_decrypt.c:101
ipsec_tun_protect_t_
Definition: ipsec_tun.h:107
format_ipsec_crypto_alg
u8 * format_ipsec_crypto_alg(u8 *s, va_list *args)
Definition: ipsec_format.c:79
vlib_buffer_alloc
static __clib_warn_unused_result u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
Definition: buffer_funcs.h:702
ipsec_sa_get
static ipsec_sa_t * ipsec_sa_get(u32 sa_index)
Definition: ipsec_sa.h:605
CLIB_UNUSED
#define CLIB_UNUSED(x)
Definition: clib.h:90
vnet_buffer
#define vnet_buffer(b)
Definition: buffer.h:441
VNET_CRYPTO_OP_NONE
@ VNET_CRYPTO_OP_NONE
Definition: crypto.h:221
vec_elt_at_index
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
Definition: vec_bootstrap.h:203
foreach_esp_decrypt_post_next
#define foreach_esp_decrypt_post_next
Definition: esp_decrypt.c:46
ipsec_per_thread_data_t
Definition: ipsec.h:97
vnet_crypto_process_chained_ops
u32 vnet_crypto_process_chained_ops(vlib_main_t *vm, vnet_crypto_op_t ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)
Definition: crypto.c:105
ipsec_sa_counters
vlib_combined_counter_main_t ipsec_sa_counters
SA packet & bytes counters.
Definition: ipsec_sa.c:27
PREDICT_FALSE
#define PREDICT_FALSE(x)
Definition: clib.h:124
esp_decrypt_packet_data2_t
Definition: esp.h:211
clib_memcpy_le64
static_always_inline void clib_memcpy_le64(u8 *dst, u8 *src, u8 len)
Definition: string.h:300
ipsec_per_thread_data_t::async_frames
vnet_crypto_async_frame_t ** async_frames
Definition: ipsec.h:105
ARRAY_LEN
#define ARRAY_LEN(x)
Definition: clib.h:70
vlib_frame_vector_args
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:301
ipsec_main_t
Definition: ipsec.h:108
ipsec_sa_t::crypto_iv_size
u8 crypto_iv_size
Definition: ipsec_sa.h:123
esp_async_post_next_t::esp4_tun_post_next
u32 esp4_tun_post_next
Definition: esp.h:245
vnet_crypto_op_chunk_t::src
u8 * src
Definition: crypto.h:250
VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS
#define VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS
Definition: crypto.h:264
static_always_inline
#define static_always_inline
Definition: clib.h:112
ipsec_sa_t::seq
u32 seq
Definition: ipsec_sa.h:132
esp_async_post_next_t::esp6_tun_post_next
u32 esp6_tun_post_next
Definition: esp.h:246
packet.h
uword
u64 uword
Definition: types.h:112
last
static heap_elt_t * last(heap_header_t *h)
Definition: heap.c:53
if
if(node->flags &VLIB_NODE_FLAG_TRACE) vnet_interface_output_trace(vm
vnet_crypto_async_reset_frame
static_always_inline void vnet_crypto_async_reset_frame(vnet_crypto_async_frame_t *f)
Definition: crypto.h:650
ipsec_main
ipsec_main_t ipsec_main
Definition: ipsec.c:29
esp_decrypt_next_t
esp_decrypt_next_t
Definition: esp_decrypt.c:39
vlib_main_t::thread_index
u32 thread_index
Definition: main.h:215
vlib_node_increment_counter
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1244
esp_decrypt_packet_data_t::flags
ipsec_sa_flags_t flags
Definition: esp.h:192
esp_header_t
Definition: esp.h:22
foreach_esp_decrypt_next
#define foreach_esp_decrypt_next
Definition: esp_decrypt.c:30
ipsec_ep_t_::dst
ip46_address_t dst
Definition: ipsec_tun.h:102
esp_decrypt_packet_data_t::seq_hi
u32 seq_hi
Definition: esp.h:203
vnet_crypto_op_chunk_t
Definition: crypto.h:248
vnet_crypto_async_frame_is_full
static_always_inline u8 vnet_crypto_async_frame_is_full(const vnet_crypto_async_frame_t *f)
Definition: crypto.h:665
esp_decrypt_trace_t::seq
u32 seq
Definition: esp_decrypt.c:92
esp6_decrypt_tun_post_node
vlib_node_registration_t esp6_decrypt_tun_post_node
(constructor) VLIB_REGISTER_NODE (esp6_decrypt_tun_post_node)
Definition: esp_decrypt.c:1573
esp_decrypt_packet_data2_t::free_buffer_index
u32 free_buffer_index
Definition: esp.h:214
vlib_buffer_chain_linearize
static u32 vlib_buffer_chain_linearize(vlib_main_t *vm, vlib_buffer_t *b)
Definition: buffer_funcs.h:1471
ipsec_sa_assign_thread
static u32 ipsec_sa_assign_thread(u32 thread_id)
Definition: ipsec_sa.h:598
CLIB_CACHE_LINE_BYTES
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:58
ipsec_ep_t_::src
ip46_address_t src
Definition: ipsec_tun.h:101
vlib_node_registration_t
struct _vlib_node_registration vlib_node_registration_t
ESP_DECRYPT_N_NEXT
@ ESP_DECRYPT_N_NEXT
Definition: esp_decrypt.c:43
esp_decrypt_post_next_t
esp_decrypt_post_next_t
Definition: esp_decrypt.c:54
esp_decrypt_packet_data_t::hdr_sz
u16 hdr_sz
Definition: esp.h:201
ipsec_sa_t::crypto_alg
ipsec_crypto_alg_t crypto_alg
Definition: ipsec_sa.h:215
gre_header_t::protocol
u16 protocol
Definition: packet.h:55
vlib_buffer_t::current_length
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:122
vlib_buffer_space_left_at_end
static u32 vlib_buffer_space_left_at_end(vlib_main_t *vm, vlib_buffer_t *b)
Definition: buffer_funcs.h:1462
l2_input.h
esp_decrypt_chain_integ
static_always_inline int esp_decrypt_chain_integ(vlib_main_t *vm, ipsec_per_thread_data_t *ptd, const esp_decrypt_packet_data_t *pd, esp_decrypt_packet_data2_t *pd2, ipsec_sa_t *sa0, vlib_buffer_t *b, u8 icv_sz, u8 *start_src, u32 start_len, u8 **digest, u16 *n_ch, u32 *integ_total_len)
Definition: esp_decrypt.c:316
vnet_crypto_op_t::n_chunks
u16 n_chunks
Definition: crypto.h:282
data
u8 data[128]
Definition: ipsec_types.api:95
esp_process_ops
static_always_inline void esp_process_ops(vlib_main_t *vm, vlib_node_runtime_t *node, vnet_crypto_op_t *ops, vlib_buffer_t *b[], u16 *nexts, int e)
Definition: esp_decrypt.c:122
ipsec_sa_t
Definition: ipsec_sa.h:116
is_ip6
bool is_ip6
Definition: ip.api:43
esp_move_icv_esn
static_always_inline u8 * esp_move_icv_esn(vlib_main_t *vm, vlib_buffer_t *first, esp_decrypt_packet_data_t *pd, esp_decrypt_packet_data2_t *pd2, u16 icv_sz, ipsec_sa_t *sa, u8 *extra_esn, u32 *len)
Definition: esp_decrypt.c:280
esp_decrypt_post_crypto
static_always_inline void esp_decrypt_post_crypto(vlib_main_t *vm, vlib_node_runtime_t *node, esp_decrypt_packet_data_t *pd, esp_decrypt_packet_data2_t *pd2, vlib_buffer_t *b, u16 *next, int is_ip6, int is_tun, int is_async)
Definition: esp_decrypt.c:751
esp_decrypt_error_t
esp_decrypt_error_t
Definition: esp_decrypt.c:76
esp6_decrypt_post_node
vlib_node_registration_t esp6_decrypt_post_node
(constructor) VLIB_REGISTER_NODE (esp6_decrypt_post_node)
Definition: esp_decrypt.c:1513
vnet_crypto_op_chunk_t::dst
u8 * dst
Definition: crypto.h:251
index
u32 index
Definition: flow_types.api:221
esp_decrypt_error_strings
static char * esp_decrypt_error_strings[]
Definition: esp_decrypt.c:84
always_inline
#define always_inline
Definition: rdma_mlx5dv.h:23
vnet_update_l2_len
static u16 vnet_update_l2_len(vlib_buffer_t *b)
Definition: l2_input.h:300
vnet_crypto_op_t::tag_len
u8 tag_len
Definition: crypto.h:269
esp_decrypt_packet_data2_t::lb
vlib_buffer_t * lb
Definition: esp.h:213
esp6_decrypt_node
vlib_node_registration_t esp6_decrypt_node
(constructor) VLIB_REGISTER_NODE (esp6_decrypt_node)
Definition: esp_decrypt.c:1493
u64
unsigned long u64
Definition: types.h:89
vnet_crypto_op_t::aad
u8 * aad
Definition: crypto.h:294
ASSERT
#define ASSERT(truth)
Definition: error_bootstrap.h:69
data_len
u8 data_len
Definition: ikev2_types.api:24
vlib_buffer_get_tail
static u8 * vlib_buffer_get_tail(vlib_buffer_t *b)
Get pointer to the end of buffer's data.
Definition: buffer.h:338
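A sketch of locating the ICV, which ESP places at the very end of the payload; lb and icv_sz are illustrative names for the last buffer in the chain and the configured ICV size:

  u8 *tail = vlib_buffer_get_tail (lb);
  u8 *icv = tail - icv_sz; /* ICV occupies the final icv_sz bytes */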
esp_decrypt_trace_t::sa_seq_hi
u32 sa_seq_hi
Definition: esp_decrypt.c:94
ipsec_per_thread_data_t::chained_crypto_ops
vnet_crypto_op_t * chained_crypto_ops
Definition: ipsec.h:102
ip.h
u32
unsigned int u32
Definition: types.h:88
VLIB_INIT_FUNCTION
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:172
esp_decrypt_trace_t::sa_seq
u32 sa_seq
Definition: esp_decrypt.c:93
clib_atomic_cmp_and_swap
#define clib_atomic_cmp_and_swap(addr, old, new)
Definition: atomics.h:37
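A hedged sketch of a CAS retry loop, assuming the macro returns the value previously stored at addr (the usual __sync_val_compare_and_swap semantics):

  static u32 counter; /* shared between threads; illustrative */
  u32 old_val, new_val;
  do
    {
      old_val = counter;
      new_val = old_val + 1;
    }
  while (clib_atomic_cmp_and_swap (&counter, old_val, new_val) != old_val);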
ip6
vl_api_ip6_address_t ip6
Definition: one.api:424
protocol
vl_api_ip_proto_t protocol
Definition: lb_types.api:72
esp_decrypt_packet_data_t::sa_index
u32 sa_index
Definition: esp.h:193
clib_prefetch_load
static_always_inline void clib_prefetch_load(void *p)
Definition: cache.h:92
esp_aad_fill
static u16 esp_aad_fill(u8 *data, const esp_header_t *esp, const ipsec_sa_t *sa, u32 seq_hi)
Definition: esp.h:121
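A sketch of building the additional authenticated data before an AEAD (e.g. GCM) decrypt; the 12-byte bound assumes an ESN layout of SPI plus 64-bit sequence, which is an assumption here:

  u8 aad[12]; /* assumed: SPI (4) + seq-hi (4) + seq (4) when ESN is on */
  u16 aad_len = esp_aad_fill (aad, esp0, sa0, pd->seq_hi);
  /* esp0, sa0, pd: the ESP header, SA and per-packet data in scope */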
vnet_crypto_async_get_frame
static_always_inline vnet_crypto_async_frame_t * vnet_crypto_async_get_frame(vlib_main_t *vm, vnet_crypto_async_op_id_t opt)
async crypto inline functions
Definition: crypto.h:563
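A hedged sketch of the frame-at-a-time async pattern this file follows; opt names a vnet_crypto_async_op_id_t, and the submit step is elided since its exact call is not shown here:

  vnet_crypto_async_frame_t *f = vnet_crypto_async_get_frame (vm, opt);
  /* ... append per-packet elements to f ... */
  if (vnet_crypto_async_frame_is_full (f))
    {
      /* submit f, then grab a fresh frame before continuing */
    }
  /* after a failed submit, vnet_crypto_async_reset_frame (f) allows reuse */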
vec_foreach
#define vec_foreach(var, vec)
Vector iterator.
Definition: vec_bootstrap.h:213
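A sketch of iterating a VPP vector; ptd (the per-thread data) is illustrative:

  vnet_crypto_op_t *op;
  vec_foreach (op, ptd->chained_crypto_ops)
    {
      /* op points at each element of the vector in turn */
    }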
ipsec_sa_t::integ_op_id
vnet_crypto_op_id_t integ_op_id
Definition: ipsec_sa.h:149
n_left
u32 n_left
Definition: interface_output.c:1096
esp_decrypt_trace_t::pkt_seq_hi
u32 pkt_seq_hi
Definition: esp_decrypt.c:95
iv
static u8 iv[]
Definition: aes_cbc.c:24
ipsec_sa_t::salt
u32 salt
Definition: ipsec_sa.h:172
ipsec_sa_t::seq_hi
u32 seq_hi
Definition: ipsec_sa.h:133
esp_decrypt_prepare_async_frame
static_always_inline esp_decrypt_error_t esp_decrypt_prepare_async_frame(vlib_main_t *vm, vlib_node_runtime_t *node, ipsec_per_thread_data_t *ptd, vnet_crypto_async_frame_t *f, ipsec_sa_t *sa0, u8 *payload, u16 len, u8 icv_sz, u8 iv_sz, esp_decrypt_packet_data_t *pd, esp_decrypt_packet_data2_t *pd2, u32 bi, vlib_buffer_t *b, u16 *next, u16 async_next)
Definition: esp_decrypt.c:619
ip6_header_t
Definition: ip6_packet.h:294
ipsec_sa_t::linked_key_index
vnet_crypto_key_index_t linked_key_index
Definition: ipsec_sa.h:156
esp4_decrypt_post_node
vlib_node_registration_t esp4_decrypt_post_node
(constructor) VLIB_REGISTER_NODE (esp4_decrypt_post_node)
Definition: esp_decrypt.c:1481
ipsec_per_thread_data_t::chained_integ_ops
vnet_crypto_op_t * chained_integ_ops
Definition: ipsec.h:103
esp_post_data2
#define esp_post_data2(b)
Definition: esp.h:236
clib_memset
clib_memset(h->entries, 0, sizeof(h->entries[0]) * entries)
vlib_main_t
Definition: main.h:102
ip_csum_update
#define ip_csum_update(sum, old, new, type, field)
Definition: ip_packet.h:295
vlib_node_t
Definition: node.h:247
gre_header_t
Definition: packet.h:37
vlib_add_trace
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:628
b
vlib_buffer_t ** b
Definition: nat44_ei_out2in.c:717
esp_async_post_next_t::esp4_post_next
u32 esp4_post_next
Definition: esp.h:243
u8
unsigned char u8
Definition: types.h:56
clib_error_t
Definition: clib_error.h:21
vnet_crypto_async_op_id_t
vnet_crypto_async_op_id_t
Definition: crypto.h:182
vlib_buffer_get_current
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:257
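A sketch: when esp-decrypt runs, the ESP header sits at the buffer's current data offset, so parsing starts here; b0 is illustrative:

  esp_header_t *esp0 = vlib_buffer_get_current (b0);
  u32 seq = clib_net_to_host_u32 (esp0->seq); /* sequence number, host order */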
ip
vl_api_address_t ip
Definition: l2.api:558
vlib_init_function_t
typedef clib_error_t *(vlib_init_function_t) (struct vlib_main_t *vm)
Definition: init.h:51
ip_csum_t
uword ip_csum_t
Definition: ip_packet.h:245
esp_decrypt_packet_data2_t::icv_removed
u8 icv_removed
Definition: esp.h:215
vlib_buffer_t::data
u8 data[]
Packet data.
Definition: buffer.h:204
esp_decrypt_packet_data_t::current_data
i16 current_data
Definition: esp.h:199
ipsec_sa_t::integ_alg
ipsec_integ_alg_t integ_alg
Definition: ipsec_sa.h:216
esp_decrypt_packet_data_t::icv_sz
u8 icv_sz
Definition: esp.h:190
esp4_decrypt_tun_node
vlib_node_registration_t esp4_decrypt_tun_node
(constructor) VLIB_REGISTER_NODE (esp4_decrypt_tun_node)
Definition: esp_decrypt.c:1525
esp_decrypt_trace_t
Definition: esp_decrypt.c:90
nexts
u16 nexts[VLIB_FRAME_SIZE]
Definition: nat44_ei_out2in.c:718
vlib_buffer_free_one
static void vlib_buffer_free_one(vlib_main_t *vm, u32 buffer_index)
Free one buffer. Shorthand to free a single buffer chain.
Definition: buffer_funcs.h:1012
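A minimal alloc/free round trip as a usage sketch:

  u32 bi;
  if (vlib_buffer_alloc (vm, &bi, 1) == 1) /* returns the number allocated */
    {
      /* ... fill and use the buffer ... */
      vlib_buffer_free_one (vm, bi); /* frees bi and any chained buffers */
    }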
esp_decrypt_packet_data_t::sa_data
u64 sa_data
Definition: esp.h:195
ipsec_sa_t::flags
ipsec_sa_flags_t flags
Definition: ipsec_sa.h:121
vnet.h
api_errno.h
vlib_node_runtime_t
Definition: node.h:454
esp_decrypt_packet_data_t::seq
u32 seq
Definition: esp.h:198
ipsec_tun_protect_get
static ipsec_tun_protect_t * ipsec_tun_protect_get(u32 index)
Definition: ipsec_tun.h:175
esp_async_recycle_failed_submit
static u32 esp_async_recycle_failed_submit(vlib_main_t *vm, vnet_crypto_async_frame_t *f, vlib_node_runtime_t *node, u32 err, u16 index, u32 *from, u16 *nexts, u16 drop_next_index)
Definition: esp.h:159
PREDICT_TRUE
#define PREDICT_TRUE(x)
Definition: clib.h:125
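A sketch; the macro wraps __builtin_expect so the compiler lays the unlikely branch out of the hot path:

  if (PREDICT_TRUE (n_left >= 2))
    {
      /* common case: more packets to process this iteration */
    }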
vnet_crypto_op_chunk_t::len
u32 len
Definition: crypto.h:252
ip_csum_fold
static u16 ip_csum_fold(ip_csum_t c)
Definition: ip_packet.h:301
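A sketch pairing ip_csum_update with ip_csum_fold to patch an IPv4 checksum incrementally after a TTL rewrite; ip4 is an illustrative ip4_header_t pointer:

  ip_csum_t sum = ip4->checksum;
  u8 old_ttl = ip4->ttl, new_ttl = old_ttl - 1;
  sum = ip_csum_update (sum, old_ttl, new_ttl, ip4_header_t, ttl);
  ip4->ttl = new_ttl;
  ip4->checksum = ip_csum_fold (sum); /* fold carries back into 16 bits */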
esp_decrypt_packet_data_t
The post data structure for esp_encrypt/decrypt_inline to write to the vlib_buffer_t opaque unused field.
Definition: esp.h:184
ipsec_io.h
esp_decrypt_async_next
esp_async_post_next_t esp_decrypt_async_next
Definition: ipsec.c:31
esp_async_post_next_t::esp6_post_next
u32 esp6_post_next
Definition: esp.h:244
type
vl_api_fib_path_type_t type
Definition: fib_types.api:123
vlib_increment_combined_counter
vlib_increment_combined_counter(ccm, ti, sw_if_index, n_buffers, n_bytes)
esp_decrypt_inline
static uword esp_decrypt_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, int is_ip6, int is_tun, u16 async_next_node)
Definition: esp_decrypt.c:1017
vlib_buffer_t::flags
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,...
Definition: buffer.h:133
ipsec_sa_t::crypto_key_index
vnet_crypto_key_index_t crypto_key_index
Definition: ipsec_sa.h:138
esp_insert_esn
static_always_inline u16 esp_insert_esn(vlib_main_t *vm, ipsec_sa_t *sa, esp_decrypt_packet_data_t *pd, esp_decrypt_packet_data2_t *pd2, u32 *data_len, u8 **digest, u16 *len, vlib_buffer_t *b, u8 *payload)
Definition: esp_decrypt.c:243
ipsec_per_thread_data_t::crypto_ops
vnet_crypto_op_t * crypto_ops
Definition: ipsec.h:100
vlib_buffer_t
VLIB buffer representation.
Definition: buffer.h:111
VLIB_REGISTER_NODE
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
ipsec_sa_anti_replay_advance
static void ipsec_sa_anti_replay_advance(ipsec_sa_t *sa, u32 seq, u32 hi_seq)
Definition: ipsec_sa.h:535
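A one-line sketch of the post-verification step: only after the integrity check passes may the replay window slide forward, using the packet's sequence numbers captured in the per-packet data; sa0 and pd are assumed in scope:

  ipsec_sa_anti_replay_advance (sa0, pd->seq, pd->seq_hi);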
ip46_address_is_equal_v4
static_always_inline int ip46_address_is_equal_v4(const ip46_address_t *ip46, const ip4_address_t *ip4)
Definition: ip46_address.h:108
flags
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:105