FD.io VPP v21.01.1 (Vector Packet Processing)
esp_decrypt.c
1 /*
2  * esp_decrypt.c : IPSec ESP decrypt node
3  *
4  * Copyright (c) 2015 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
21 #include <vnet/l2/l2_input.h>
22 
23 #include <vnet/ipsec/ipsec.h>
24 #include <vnet/ipsec/esp.h>
25 #include <vnet/ipsec/ipsec_io.h>
26 #include <vnet/ipsec/ipsec_tun.h>
27 
28 #include <vnet/gre/packet.h>
29 
30 #define foreach_esp_decrypt_next \
31 _(DROP, "error-drop") \
32 _(IP4_INPUT, "ip4-input-no-checksum") \
33 _(IP6_INPUT, "ip6-input") \
34 _(L2_INPUT, "l2-input") \
35 _(HANDOFF, "handoff")
36 
37 #define _(v, s) ESP_DECRYPT_NEXT_##v,
38 typedef enum
39 {
40  foreach_esp_decrypt_next
41 #undef _
42  ESP_DECRYPT_N_NEXT,
43 } esp_decrypt_next_t;
44 
45 #define foreach_esp_decrypt_post_next \
46 _(DROP, "error-drop") \
47 _(IP4_INPUT, "ip4-input-no-checksum") \
48 _(IP6_INPUT, "ip6-input") \
49 _(L2_INPUT, "l2-input")
50 
51 #define _(v, s) ESP_DECRYPT_POST_NEXT_##v,
52 typedef enum
53 {
54  foreach_esp_decrypt_post_next
55 #undef _
56  ESP_DECRYPT_POST_N_NEXT,
57 } esp_decrypt_post_next_t;
58 
59 #define foreach_esp_decrypt_error \
60  _(RX_PKTS, "ESP pkts received") \
61  _(RX_POST_PKTS, "ESP-POST pkts received") \
62  _(DECRYPTION_FAILED, "ESP decryption failed") \
63  _(INTEG_ERROR, "Integrity check failed") \
64  _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
65  _(REPLAY, "SA replayed packet") \
66  _(RUNT, "undersized packet") \
67  _(NO_BUFFERS, "no buffers (packet dropped)") \
68  _(OVERSIZED_HEADER, "buffer with oversized header (dropped)") \
 69  _(NO_TAIL_SPACE, "not enough buffer tail space (dropped)") \
70  _(TUN_NO_PROTO, "no tunnel protocol") \
71  _(UNSUP_PAYLOAD, "unsupported payload") \
72 
73 
74 typedef enum
75 {
76 #define _(sym,str) ESP_DECRYPT_ERROR_##sym,
77  foreach_esp_decrypt_error
78 #undef _
79  ESP_DECRYPT_N_ERROR,
80 } esp_decrypt_error_t;
81 
82 static char *esp_decrypt_error_strings[] = {
83 #define _(sym,string) string,
84  foreach_esp_decrypt_error
85 #undef _
86 };
87 
88 typedef struct
89 {
 90  u32 seq;
 91  u32 sa_seq;
 92  u32 sa_seq_hi;
 93  ipsec_crypto_alg_t crypto_alg;
 94  ipsec_integ_alg_t integ_alg;
 95 } esp_decrypt_trace_t;
 96 
97 /* packet trace format function */
98 static u8 *
99 format_esp_decrypt_trace (u8 * s, va_list * args)
100 {
101  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
102  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
103  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);
104 
105  s =
106  format (s,
107  "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u",
 108  format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
 109  t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi);
110  return s;
111 }
112 
113 #define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2)
114 
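/* run the queued synchronous crypto operations; any buffer whose op did not
 * complete gets the matching error counter and is redirected to error-drop */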
 115 static_always_inline void
 116 esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
 117  vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
118  int e)
119 {
120  vnet_crypto_op_t *op = ops;
121  u32 n_fail, n_ops = vec_len (ops);
122 
123  if (n_ops == 0)
124  return;
125 
126  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
127 
128  while (n_fail)
129  {
130  ASSERT (op - ops < n_ops);
131  if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
132  {
133  u32 err, bi = op->user_data;
134  if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
135  err = e;
136  else
137  err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
138  b[bi]->error = node->errors[err];
139  nexts[bi] = ESP_DECRYPT_NEXT_DROP;
140  n_fail--;
141  }
142  op++;
143  }
144 }
145 
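/* same failure handling as esp_process_ops, but for chained-buffer
 * (multi-chunk) crypto operations */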
 146 static_always_inline void
 147 esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
 148  vnet_crypto_op_t * ops, vlib_buffer_t * b[],
149  u16 * nexts, vnet_crypto_op_chunk_t * chunks, int e)
150 {
151 
152  vnet_crypto_op_t *op = ops;
153  u32 n_fail, n_ops = vec_len (ops);
154 
155  if (n_ops == 0)
156  return;
157 
158  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
159 
160  while (n_fail)
161  {
162  ASSERT (op - ops < n_ops);
163  if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
164  {
165  u32 err, bi = op->user_data;
166  if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
167  err = e;
168  else
169  err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
170  b[bi]->error = node->errors[err];
171  nexts[bi] = ESP_DECRYPT_NEXT_DROP;
172  n_fail--;
173  }
174  op++;
175  }
176 }
177 
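/* trim 'tail' bytes (padding, ESP footer and ICV) from the end of a possibly
 * chained buffer, freeing the last buffer of the chain when it is fully
 * consumed */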
178 always_inline void
 179 esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
 180  u16 tail)
181 {
182  vlib_buffer_t *before_last = b;
183 
184  if (last->current_length > tail)
185  {
186  last->current_length -= tail;
187  return;
188  }
189  ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);
190 
191  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
192  {
193  before_last = b;
194  b = vlib_get_buffer (vm, b->next_buffer);
195  }
196  before_last->current_length -= tail - last->current_length;
197  vlib_buffer_free_one (vm, before_last->next_buffer);
198  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
199 }
200 
 201 /* ICV is split across the last two buffers, so move it to the last buffer
 202  and return pointer to it */
 203 static_always_inline u8 *
 204 esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
 205  esp_decrypt_packet_data2_t * pd2, u16 icv_sz, u16 * dif)
206 {
207  vlib_buffer_t *before_last, *bp;
208  u16 last_sz = pd2->lb->current_length;
209  u16 first_sz = icv_sz - last_sz;
210 
211  bp = before_last = first;
212  while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
213  {
214  before_last = bp;
215  bp = vlib_get_buffer (vm, bp->next_buffer);
216  }
217 
218  u8 *lb_curr = vlib_buffer_get_current (pd2->lb);
219  memmove (lb_curr + first_sz, lb_curr, last_sz);
220  clib_memcpy_fast (lb_curr, vlib_buffer_get_tail (before_last) - first_sz,
221  first_sz);
222  before_last->current_length -= first_sz;
223  clib_memset (vlib_buffer_get_tail (before_last), 0, first_sz);
224  if (dif)
225  dif[0] = first_sz;
226  pd2->lb = before_last;
227  pd2->icv_removed = 1;
228  pd2->free_buffer_index = before_last->next_buffer;
229  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
230  return lb_curr;
231 }
232 
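/* for SAs using extended sequence numbers (ESN), append the high 32 bits of
 * the sequence number after the integrity-covered data so the ICV can be
 * checked; returns the number of bytes inserted */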
 233 static_always_inline i16
 234 esp_insert_esn (vlib_main_t * vm, ipsec_sa_t * sa,
 235  esp_decrypt_packet_data2_t * pd2, u32 * data_len,
 236  u8 ** digest, u16 * len, vlib_buffer_t * b, u8 * payload)
237 {
238  if (!ipsec_sa_is_set_USE_ESN (sa))
239  return 0;
240 
241  /* shift ICV by 4 bytes to insert ESN */
242  u32 seq_hi = clib_host_to_net_u32 (sa->seq_hi);
243  u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa->seq_hi);
244 
245  if (pd2->icv_removed)
246  {
247  u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
248  if (space_left >= sz)
249  {
250  clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi, sz);
251  *data_len += sz;
252  }
253  else
254  return sz;
255 
256  len[0] = b->current_length;
257  }
258  else
259  {
260  clib_memcpy_fast (tmp, payload + len[0], ESP_MAX_ICV_SIZE);
261  clib_memcpy_fast (payload + len[0], &seq_hi, sz);
262  clib_memcpy_fast (payload + len[0] + sz, tmp, ESP_MAX_ICV_SIZE);
263  *data_len += sz;
264  *digest += sz;
265  }
266  return sz;
267 }
268 
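/* move a split ICV to the last buffer and, when ESN is in use, append the
 * high sequence number bits; extra_esn is set when they had to be stored in
 * the unlinked ICV buffer */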
 269 static_always_inline u8 *
 270 esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
 271  esp_decrypt_packet_data2_t * pd2, u16 icv_sz,
272  ipsec_sa_t * sa, u8 * extra_esn, u32 * len)
273 {
274  u16 dif = 0;
275  u8 *digest = esp_move_icv (vm, first, pd2, icv_sz, &dif);
276  if (dif)
277  *len -= dif;
278 
279  if (ipsec_sa_is_set_USE_ESN (sa))
280  {
281  u8 sz = sizeof (sa->seq_hi);
282  u32 seq_hi = clib_host_to_net_u32 (sa->seq_hi);
283  u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
284 
285  if (space_left >= sz)
286  {
287  clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi, sz);
288  *len += sz;
289  }
290  else
291  {
292  /* no space for ESN at the tail, use the next buffer
293  * (with ICV data) */
294  ASSERT (pd2->icv_removed);
 295  vlib_buffer_t *tmp = vlib_get_buffer (vm, pd2->free_buffer_index);
 296  clib_memcpy_fast (vlib_buffer_get_current (tmp) - sz, &seq_hi, sz);
297  extra_esn[0] = 1;
298  }
299  }
300  return digest;
301 }
302 
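/* build the chunk list describing the integrity-covered data of a chained
 * buffer, including ESN handling; returns -1 if a buffer allocation fails */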
 303 static_always_inline int
 304 esp_decrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
 305  esp_decrypt_packet_data2_t * pd2,
 306  ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
 307  u8 * start_src, u32 start_len,
 308  u8 ** digest, u16 * n_ch, u32 * integ_total_len)
 309 {
 310  vnet_crypto_op_chunk_t *ch;
 311  vlib_buffer_t *cb = vlib_get_buffer (vm, b->next_buffer);
 312  u16 n_chunks = 1;
313  u32 total_len;
314  vec_add2 (ptd->chunks, ch, 1);
315  total_len = ch->len = start_len;
316  ch->src = start_src;
317 
318  while (1)
319  {
320  vec_add2 (ptd->chunks, ch, 1);
321  n_chunks += 1;
322  ch->src = vlib_buffer_get_current (cb);
323  if (pd2->lb == cb)
324  {
325  if (pd2->icv_removed)
326  ch->len = cb->current_length;
327  else
328  ch->len = cb->current_length - icv_sz;
329  if (ipsec_sa_is_set_USE_ESN (sa0))
330  {
331  u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
332  u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa0->seq_hi);
333  u8 *esn;
334  vlib_buffer_t *tmp_b;
335  u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
336  if (space_left < sz)
337  {
338  if (pd2->icv_removed)
339  {
 340  /* use pre-data area from the last buffer
341  that was removed from the chain */
342  tmp_b = vlib_get_buffer (vm, pd2->free_buffer_index);
343  esn = tmp_b->data - sz;
344  }
345  else
346  {
347  /* no space, need to allocate new buffer */
348  u32 tmp_bi = 0;
349  if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
350  return -1;
351  tmp_b = vlib_get_buffer (vm, tmp_bi);
352  esn = tmp_b->data;
353  pd2->free_buffer_index = tmp_bi;
354  }
355  clib_memcpy_fast (esn, &seq_hi, sz);
356 
357  vec_add2 (ptd->chunks, ch, 1);
358  n_chunks += 1;
359  ch->src = esn;
360  ch->len = sz;
361  }
362  else
363  {
364  if (pd2->icv_removed)
365  {
 366  clib_memcpy_fast (vlib_buffer_get_tail
 367  (pd2->lb), &seq_hi, sz);
368  }
369  else
370  {
371  clib_memcpy_fast (tmp, *digest, ESP_MAX_ICV_SIZE);
372  clib_memcpy_fast (*digest, &seq_hi, sz);
373  clib_memcpy_fast (*digest + sz, tmp, ESP_MAX_ICV_SIZE);
374  *digest += sz;
375  }
376  ch->len += sz;
377  }
378  }
379  total_len += ch->len;
380  break;
381  }
382  else
383  total_len += ch->len = cb->current_length;
384 
385  if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
386  break;
387 
388  cb = vlib_get_buffer (vm, cb->next_buffer);
389  }
390 
391  if (n_ch)
392  *n_ch = n_chunks;
393  if (integ_total_len)
394  *integ_total_len = total_len;
395 
396  return 0;
397 }
398 
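/* build the chunk list describing the ciphertext of a chained buffer and
 * locate the ICV/tag; returns the total length to be decrypted */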
 399 static_always_inline u32
 400 esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
 401  esp_decrypt_packet_data2_t * pd2,
 402  ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
 403  u8 * start, u32 start_len, u8 ** tag, u16 * n_ch)
 404 {
 405  vnet_crypto_op_chunk_t *ch;
 406  vlib_buffer_t *cb = b;
407  u16 n_chunks = 1;
408  u32 total_len;
409  vec_add2 (ptd->chunks, ch, 1);
410  total_len = ch->len = start_len;
411  ch->src = ch->dst = start;
412  cb = vlib_get_buffer (vm, cb->next_buffer);
413  n_chunks = 1;
414 
415  while (1)
416  {
417  vec_add2 (ptd->chunks, ch, 1);
418  n_chunks += 1;
419  ch->src = ch->dst = vlib_buffer_get_current (cb);
420  if (pd2->lb == cb)
421  {
422  if (ipsec_sa_is_set_IS_AEAD (sa0))
423  {
424  if (pd2->lb->current_length < icv_sz)
425  {
426  u16 dif = 0;
427  *tag = esp_move_icv (vm, b, pd2, icv_sz, &dif);
428 
429  /* this chunk does not contain crypto data */
430  n_chunks -= 1;
431  /* and fix previous chunk's length as it might have
432  been changed */
433  ASSERT (n_chunks > 0);
434  if (pd2->lb == b)
435  {
436  total_len -= dif;
437  ch[-1].len -= dif;
438  }
439  else
440  {
441  total_len = total_len + pd2->lb->current_length -
442  ch[-1].len;
443  ch[-1].len = pd2->lb->current_length;
444  }
445  break;
446  }
447  else
448  *tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
449  }
450 
451  if (pd2->icv_removed)
452  total_len += ch->len = cb->current_length;
453  else
454  total_len += ch->len = cb->current_length - icv_sz;
455  }
456  else
457  total_len += ch->len = cb->current_length;
458 
459  if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
460  break;
461 
462  cb = vlib_get_buffer (vm, cb->next_buffer);
463  }
464 
465  if (n_ch)
466  *n_ch = n_chunks;
467 
468  return total_len;
469 }
470 
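/* prepare the synchronous integrity-check and decrypt operations for one
 * packet, covering the AEAD (AAD/nonce), chained-buffer and ESN cases, and
 * append them to the per-thread op vectors */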
 471 static_always_inline void
 472 esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
 473  ipsec_per_thread_data_t * ptd,
 474  vnet_crypto_op_t *** crypto_ops,
 475  vnet_crypto_op_t *** integ_ops,
 476  vnet_crypto_op_t * op,
 477  ipsec_sa_t * sa0, u8 * payload,
 478  u16 len, u8 icv_sz, u8 iv_sz,
 479  esp_decrypt_packet_data_t * pd,
 480  esp_decrypt_packet_data2_t * pd2,
 481  vlib_buffer_t * b, u16 * next, u32 index)
482 {
483  const u8 esp_sz = sizeof (esp_header_t);
484 
 485  if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
 486  {
487  vnet_crypto_op_init (op, sa0->integ_op_id);
488  op->key_index = sa0->integ_key_index;
489  op->src = payload;
 490  op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
 491  op->user_data = index;
492  op->digest = payload + len;
493  op->digest_len = icv_sz;
494  op->len = len;
495 
496  if (pd->is_chain)
497  {
498  /* buffer is chained */
499  op->len = pd->current_length;
500 
 501  /* special case when ICV is split and needs to be reassembled
502  * first -> move it to the last buffer. Also take into account
503  * that ESN needs to be added after encrypted data and may or
504  * may not fit in the tail.*/
505  if (pd2->lb->current_length < icv_sz)
506  {
507  u8 extra_esn = 0;
508  op->digest =
509  esp_move_icv_esn (vm, b, pd2, icv_sz, sa0,
510  &extra_esn, &op->len);
511 
512  if (extra_esn)
513  {
514  /* esn is in the last buffer, that was unlinked from
515  * the chain */
516  op->len = b->current_length;
517  }
518  else
519  {
520  if (pd2->lb == b)
521  {
522  /* we now have a single buffer of crypto data, adjust
523  * the length (second buffer contains only ICV) */
524  *integ_ops = &ptd->integ_ops;
525  *crypto_ops = &ptd->crypto_ops;
526  len = b->current_length;
527  goto out;
528  }
529  }
530  }
531  else
532  op->digest = vlib_buffer_get_tail (pd2->lb) - icv_sz;
533 
 534  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
 535  op->chunk_index = vec_len (ptd->chunks);
536  if (esp_decrypt_chain_integ (vm, ptd, pd2, sa0, b, icv_sz,
537  payload, pd->current_length,
538  &op->digest, &op->n_chunks, 0) < 0)
539  {
540  b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
541  next[0] = ESP_DECRYPT_NEXT_DROP;
542  return;
543  }
544  }
545  else
546  esp_insert_esn (vm, sa0, pd2, &op->len, &op->digest, &len, b,
547  payload);
548  out:
549  vec_add_aligned (*(integ_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
550  }
551 
552  payload += esp_sz;
553  len -= esp_sz;
554 
 555  if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
 556  {
 557  vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
 558  op->key_index = sa0->crypto_key_index;
559  op->iv = payload;
560 
561  if (ipsec_sa_is_set_IS_AEAD (sa0))
562  {
563  esp_header_t *esp0;
564  esp_aead_t *aad;
565  u8 *scratch;
566 
567  /*
568  * construct the AAD and the nonce (Salt || IV) in a scratch
569  * space in front of the IP header.
570  */
571  scratch = payload - esp_sz;
572  esp0 = (esp_header_t *) (scratch);
573 
574  scratch -= (sizeof (*aad) + pd->hdr_sz);
575  op->aad = scratch;
576 
577  op->aad_len = esp_aad_fill (op->aad, esp0, sa0);
578 
579  /*
580  * we don't need to refer to the ESP header anymore so we
581  * can overwrite it with the salt and use the IV where it is
582  * to form the nonce = (Salt + IV)
583  */
584  op->iv -= sizeof (sa0->salt);
585  clib_memcpy_fast (op->iv, &sa0->salt, sizeof (sa0->salt));
586 
587  op->tag = payload + len;
588  op->tag_len = 16;
589  }
590  op->src = op->dst = payload += iv_sz;
591  op->len = len - iv_sz;
592  op->user_data = index;
593 
594  if (pd->is_chain && (pd2->lb != b))
595  {
596  /* buffer is chained */
 597  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
 598  op->chunk_index = vec_len (ptd->chunks);
599  esp_decrypt_chain_crypto (vm, ptd, pd2, sa0, b, icv_sz,
600  payload, len - pd->iv_sz + pd->icv_sz,
601  &op->tag, &op->n_chunks);
602  }
603 
604  vec_add_aligned (*(crypto_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
605  }
606 }
607 
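/* prepare one packet for asynchronous decryption: compute crypto/integrity
 * offsets and lengths, save the packet data for the post node and add the
 * packet to the open async crypto frame; returns < 0 on failure */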
 608 static_always_inline int
 609 esp_decrypt_prepare_async_frame (vlib_main_t * vm,
 610  vlib_node_runtime_t * node,
 611  ipsec_per_thread_data_t * ptd,
 612  vnet_crypto_async_frame_t ** f,
 613  ipsec_sa_t * sa0, u8 * payload, u16 len,
 614  u8 icv_sz, u8 iv_sz,
 615  esp_decrypt_packet_data_t * pd,
 616  esp_decrypt_packet_data2_t * pd2, u32 bi,
 617  vlib_buffer_t * b, u16 * next,
 618  u16 async_next)
619 {
620  const u8 esp_sz = sizeof (esp_header_t);
621  u32 current_protect_index = vnet_buffer (b)->ipsec.protect_index;
622  esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b))->decrypt_data;
 623  esp_decrypt_packet_data2_t *async_pd2 = esp_post_data2 (b);
 624  u8 *tag = payload + len, *iv = payload + esp_sz, *aad = 0;
625  u32 key_index;
626  u32 crypto_len, integ_len = 0;
627  i16 crypto_start_offset, integ_start_offset = 0;
628  u8 flags = 0;
629 
630  if (!ipsec_sa_is_set_IS_AEAD (sa0))
631  {
632  /* linked algs */
633  key_index = sa0->linked_key_index;
634  integ_start_offset = payload - b->data;
635  integ_len = len;
636 
637  if (pd->is_chain)
638  {
639  /* buffer is chained */
640  integ_len = pd->current_length;
641 
 642  /* special case when ICV is split and needs to be reassembled
643  * first -> move it to the last buffer. Also take into account
644  * that ESN needs to be added after encrypted data and may or
645  * may not fit in the tail.*/
646  if (pd2->lb->current_length < icv_sz)
647  {
648  u8 extra_esn = 0;
649  tag = esp_move_icv_esn (vm, b, pd2, icv_sz, sa0,
650  &extra_esn, &integ_len);
651 
652  if (extra_esn)
653  {
654  /* esn is in the last buffer, that was unlinked from
655  * the chain */
656  integ_len = b->current_length;
657  }
658  else
659  {
660  if (pd2->lb == b)
661  {
662  /* we now have a single buffer of crypto data, adjust
663  * the length (second buffer contains only ICV) */
664  len = b->current_length;
665  goto out;
666  }
667  }
668  }
669  else
670  tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
671 
 672  flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
 673  if (esp_decrypt_chain_integ (vm, ptd, pd2, sa0, b, icv_sz, payload,
674  pd->current_length, &tag,
675  0, &integ_len) < 0)
676  {
677  /* allocate buffer failed, will not add to frame and drop */
678  b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
679  next[0] = ESP_DECRYPT_NEXT_DROP;
680  return -1;
681  }
682  }
683  else
684  esp_insert_esn (vm, sa0, pd2, &integ_len, &tag, &len, b, payload);
685  }
686  else
687  key_index = sa0->crypto_key_index;
688 
689 out:
690  /* crypto */
691  payload += esp_sz;
692  len -= esp_sz;
693  iv = payload;
694 
695  if (ipsec_sa_is_set_IS_AEAD (sa0))
696  {
697  esp_header_t *esp0;
698  u8 *scratch;
699 
700  /*
701  * construct the AAD and the nonce (Salt || IV) in a scratch
702  * space in front of the IP header.
703  */
704  scratch = payload - esp_sz;
705  esp0 = (esp_header_t *) (scratch);
706 
707  scratch -= (sizeof (esp_aead_t) + pd->hdr_sz);
708  aad = scratch;
709 
710  esp_aad_fill (aad, esp0, sa0);
711 
712  /*
713  * we don't need to refer to the ESP header anymore so we
714  * can overwrite it with the salt and use the IV where it is
715  * to form the nonce = (Salt + IV)
716  */
717  iv -= sizeof (sa0->salt);
718  clib_memcpy_fast (iv, &sa0->salt, sizeof (sa0->salt));
719 
720  tag = payload + len;
721  }
722 
723  crypto_start_offset = (payload += iv_sz) - b->data;
724  crypto_len = len - iv_sz;
725 
726  if (pd->is_chain && (pd2->lb != b))
727  {
728  /* buffer is chained */
 729  flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
 730 
731  crypto_len = esp_decrypt_chain_crypto (vm, ptd, pd2, sa0, b, icv_sz,
732  payload,
733  len - pd->iv_sz + pd->icv_sz,
734  &tag, 0);
735  }
736 
737  *async_pd = *pd;
738  *async_pd2 = *pd2;
739  pd->protect_index = current_protect_index;
740 
741  /* for AEAD integ_len - crypto_len will be negative, it is ok since it
742  * is ignored by the engine. */
743  return vnet_crypto_async_add_to_frame (vm, f, key_index, crypto_len,
744  integ_len - crypto_len,
745  crypto_start_offset,
746  integ_start_offset,
747  bi, async_next, iv, tag, aad, flags);
748 }
749 
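/* post-decryption processing: redo the anti-replay check, locate the ESP
 * footer (also when it spans chained buffers), strip the ESP header, padding
 * and ICV, and pick the next node from the inner protocol; for protected
 * tunnels also verify the inner IP addresses against the tunnel endpoints */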
 750 static_always_inline void
 751 esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
 752  esp_decrypt_packet_data_t * pd,
 753  esp_decrypt_packet_data2_t * pd2, vlib_buffer_t * b,
 754  u16 * next, int is_ip6, int is_tun, int is_async)
755 {
756  ipsec_main_t *im = &ipsec_main;
757  ipsec_sa_t *sa0 = vec_elt_at_index (im->sad, pd->sa_index);
758  vlib_buffer_t *lb = b;
759  const u8 esp_sz = sizeof (esp_header_t);
760  const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
761  u8 pad_length = 0, next_header = 0;
762  u16 icv_sz;
763 
764  /*
 765  * redo the anti-replay check
766  * in this frame say we have sequence numbers, s, s+1, s+1, s+1
767  * and s and s+1 are in the window. When we did the anti-replay
768  * check above we did so against the state of the window (W),
769  * after packet s-1. So each of the packets in the sequence will be
770  * accepted.
 771  * This time s will be checked against Ws-1, s+1 checked against Ws
 772  * (i.e. the window state is updated/advanced)
 773  * so this time the successive s+1 packet will be dropped.
 774  * This is a consequence of batching the decrypts. If the
 775  * check-decrypt-advance process was done for each packet it would
776  * be fine. But we batch the decrypts because it's much more efficient
777  * to do so in SW and if we offload to HW and the process is async.
778  *
779  * You're probably thinking, but this means an attacker can send the
 780  * above sequence and cause VPP to perform decrypts that will fail,
781  * and that's true. But if the attacker can determine s (a valid
782  * sequence number in the window) which is non-trivial, it can generate
783  * a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
784  * implementation, sequential or batching, from decrypting these.
785  */
786  if (ipsec_sa_anti_replay_check (sa0, pd->seq))
787  {
788  b->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
789  next[0] = ESP_DECRYPT_NEXT_DROP;
790  return;
791  }
792 
 793  ipsec_sa_anti_replay_advance (sa0, pd->seq);
 794 
795  if (pd->is_chain)
796  {
797  lb = pd2->lb;
798  icv_sz = pd2->icv_removed ? 0 : pd->icv_sz;
799  if (pd2->free_buffer_index)
800  {
 801  vlib_buffer_free_one (vm, pd2->free_buffer_index);
 802  lb->next_buffer = 0;
803  }
804  if (lb->current_length < sizeof (esp_footer_t) + icv_sz)
805  {
 806  /* esp footer is either split across two buffers or is in the before-
 807  * last buffer */
808 
809  vlib_buffer_t *before_last = b, *bp = b;
810  while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
811  {
812  before_last = bp;
813  bp = vlib_get_buffer (vm, bp->next_buffer);
814  }
815  u8 *bt = vlib_buffer_get_tail (before_last);
816 
817  if (lb->current_length == icv_sz)
818  {
819  esp_footer_t *f = (esp_footer_t *) (bt - sizeof (*f));
820  pad_length = f->pad_length;
821  next_header = f->next_header;
822  }
823  else
824  {
825  pad_length = (bt - 1)[0];
826  next_header = ((u8 *) vlib_buffer_get_current (lb))[0];
827  }
828  }
829  else
830  {
831  esp_footer_t *f =
832  (esp_footer_t *) (lb->data + lb->current_data +
833  lb->current_length - sizeof (esp_footer_t) -
834  icv_sz);
835  pad_length = f->pad_length;
836  next_header = f->next_header;
837  }
838  }
839  else
840  {
841  icv_sz = pd->icv_sz;
842  esp_footer_t *f =
843  (esp_footer_t *) (lb->data + lb->current_data + lb->current_length -
844  sizeof (esp_footer_t) - icv_sz);
845  pad_length = f->pad_length;
846  next_header = f->next_header;
847  }
848 
849  u16 adv = pd->iv_sz + esp_sz;
850  u16 tail = sizeof (esp_footer_t) + pad_length + icv_sz;
851  u16 tail_orig = sizeof (esp_footer_t) + pad_length + pd->icv_sz;
852  b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
853 
854  if ((pd->flags & tun_flags) == 0 && !is_tun) /* transport mode */
855  {
856  u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
857  sizeof (udp_header_t) : 0;
858  u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
859  u8 *old_ip = b->data + pd->current_data - ip_hdr_sz - udp_sz;
860  u8 *ip = old_ip + adv + udp_sz;
861 
862  if (is_ip6 && ip_hdr_sz > 64)
863  memmove (ip, old_ip, ip_hdr_sz);
864  else
865  clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);
866 
867  b->current_data = pd->current_data + adv - ip_hdr_sz;
868  b->current_length += ip_hdr_sz - adv;
869  esp_remove_tail (vm, b, lb, tail);
870 
871  if (is_ip6)
872  {
873  ip6_header_t *ip6 = (ip6_header_t *) ip;
874  u16 len = clib_net_to_host_u16 (ip6->payload_length);
875  len -= adv + tail_orig;
876  ip6->payload_length = clib_host_to_net_u16 (len);
877  ip6->protocol = next_header;
878  next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
879  }
880  else
881  {
882  ip4_header_t *ip4 = (ip4_header_t *) ip;
883  ip_csum_t sum = ip4->checksum;
884  u16 len = clib_net_to_host_u16 (ip4->length);
885  len = clib_host_to_net_u16 (len - adv - tail_orig - udp_sz);
 886  sum = ip_csum_update (sum, ip4->protocol, next_header,
 887  ip4_header_t, protocol);
888  sum = ip_csum_update (sum, ip4->length, len, ip4_header_t, length);
889  ip4->checksum = ip_csum_fold (sum);
890  ip4->protocol = next_header;
891  ip4->length = len;
892  next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
893  }
894  }
895  else
896  {
897  if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
898  {
899  next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
900  b->current_data = pd->current_data + adv;
901  b->current_length = pd->current_length - adv;
902  esp_remove_tail (vm, b, lb, tail);
903  }
904  else if (next_header == IP_PROTOCOL_IPV6)
905  {
906  next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
907  b->current_data = pd->current_data + adv;
908  b->current_length = pd->current_length - adv;
909  esp_remove_tail (vm, b, lb, tail);
910  }
911  else
912  {
913  if (is_tun && next_header == IP_PROTOCOL_GRE)
914  {
915  gre_header_t *gre;
916 
917  b->current_data = pd->current_data + adv;
918  b->current_length = pd->current_length - adv - tail;
919 
920  gre = vlib_buffer_get_current (b);
921 
922  vlib_buffer_advance (b, sizeof (*gre));
923 
924  switch (clib_net_to_host_u16 (gre->protocol))
925  {
926  case GRE_PROTOCOL_teb:
927  vnet_update_l2_len (b);
928  next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
929  break;
930  case GRE_PROTOCOL_ip4:
931  next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
932  break;
933  case GRE_PROTOCOL_ip6:
934  next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
935  break;
936  default:
937  b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
938  next[0] = ESP_DECRYPT_NEXT_DROP;
939  break;
940  }
941  }
942  else
943  {
944  next[0] = ESP_DECRYPT_NEXT_DROP;
945  b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
946  return;
947  }
948  }
949  if (is_tun)
950  {
951  if (ipsec_sa_is_set_IS_PROTECT (sa0))
952  {
953  /*
954  * There are two encap possibilities
 955  * 1) the tunnel and the SA are providing encap, i.e. it's
956  * MAC | SA-IP | TUN-IP | ESP | PAYLOAD
957  * implying the SA is in tunnel mode (on a tunnel interface)
958  * 2) only the tunnel provides encap
959  * MAC | TUN-IP | ESP | PAYLOAD
960  * implying the SA is in transport mode.
961  *
962  * For 2) we need only strip the tunnel encap and we're good.
 963  * since the tunnel and crypto encap (in the tun-protect
964  * object) are the same and we verified above that these match
965  * for 1) we need to strip the SA-IP outer headers, to
966  * reveal the tunnel IP and then check that this matches
967  * the configured tunnel.
968  */
969  const ipsec_tun_protect_t *itp;
970 
 971  if (is_async)
 972  itp = ipsec_tun_protect_get (pd->protect_index);
 973  else
 974  itp =
 975  ipsec_tun_protect_get (vnet_buffer (b)->
 976  ipsec.protect_index);
977 
978  if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
979  {
980  const ip4_header_t *ip4;
981 
982  ip4 = vlib_buffer_get_current (b);
983 
985  &ip4->dst_address) ||
987  &ip4->src_address))
988  {
989  next[0] = ESP_DECRYPT_NEXT_DROP;
990  b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
991  }
992  }
993  else if (next_header == IP_PROTOCOL_IPV6)
994  {
995  const ip6_header_t *ip6;
996 
997  ip6 = vlib_buffer_get_current (b);
998 
 999  if (!ip46_address_is_equal_v6 (&itp->itp_tun.src,
 1000  &ip6->dst_address) ||
 1001  !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
 1002  &ip6->src_address))
1003  {
1004  next[0] = ESP_DECRYPT_NEXT_DROP;
1005  b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
1006  }
1007  }
1008  }
1009  }
1010  }
1011 }
1012 
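/* main decrypt loop: for each packet in the frame, look up the SA, run the
 * anti-replay and length checks, then prepare either synchronous crypto ops
 * or an async crypto frame; sync results are post-processed in the same
 * dispatch, async packets come back through the post nodes */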
 1013 always_inline uword
 1014 esp_decrypt_inline (vlib_main_t * vm,
 1015  vlib_node_runtime_t * node, vlib_frame_t * from_frame,
1016  int is_ip6, int is_tun, u16 async_next)
1017 {
1018  ipsec_main_t *im = &ipsec_main;
1019  u32 thread_index = vm->thread_index;
1020  u16 len;
1021  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
1022  u32 *from = vlib_frame_vector_args (from_frame);
1023  u32 n_left = from_frame->n_vectors;
1024  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
1025  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
1026  esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
1027  esp_decrypt_packet_data2_t pkt_data2[VLIB_FRAME_SIZE], *pd2 = pkt_data2;
1028  esp_decrypt_packet_data_t cpd = { };
1029  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
1030  const u8 esp_sz = sizeof (esp_header_t);
1031  ipsec_sa_t *sa0 = 0;
1032  vnet_crypto_op_t _op, *op = &_op;
1033  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
1034  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
1035  vnet_crypto_async_frame_t *async_frame = 0;
1036  int is_async = im->async_mode;
1037  vnet_crypto_async_op_id_t last_async_op = ~0;
1038  u16 n_async_drop = 0;
1039 
1040  vlib_get_buffers (vm, from, b, n_left);
1041  if (!is_async)
1042  {
 1043  vec_reset_length (ptd->crypto_ops);
 1044  vec_reset_length (ptd->integ_ops);
 1045  vec_reset_length (ptd->chained_crypto_ops);
 1046  vec_reset_length (ptd->chained_integ_ops);
1047  }
1048  vec_reset_length (ptd->chunks);
1049  clib_memset_u16 (nexts, -1, n_left);
1050 
1051  while (n_left > 0)
1052  {
1053  u8 *payload;
1054 
1055  if (n_left > 2)
1056  {
1057  u8 *p;
1058  vlib_prefetch_buffer_header (b[2], LOAD);
1059  p = vlib_buffer_get_current (b[1]);
 1060  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
 1061  p -= CLIB_CACHE_LINE_BYTES;
 1062  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
1063  }
1064 
1065  u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
1066  if (n_bufs == 0)
1067  {
1068  b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
1069  esp_set_next_index (is_async, from, nexts, from[b - bufs],
1070  &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
1071  next[0] = ESP_DECRYPT_NEXT_DROP;
1072  goto next;
1073  }
1074 
1075  if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
1076  {
1077  if (current_sa_pkts)
 1078  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
 1079  current_sa_index,
1080  current_sa_pkts,
1081  current_sa_bytes);
1082  current_sa_bytes = current_sa_pkts = 0;
1083 
1084  current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
1085  sa0 = pool_elt_at_index (im->sad, current_sa_index);
1086 
1087  /* fetch the second cacheline ASAP */
1088  CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD);
1089  cpd.icv_sz = sa0->integ_icv_size;
1090  cpd.iv_sz = sa0->crypto_iv_size;
1091  cpd.flags = sa0->flags;
1092  cpd.sa_index = current_sa_index;
1093 
1094  /* submit frame when op_id is different then the old one */
1095  if (is_async && last_async_op != sa0->crypto_async_dec_op_id)
1096  {
1097  if (async_frame && async_frame->n_elts)
1098  {
1099  if (vnet_crypto_async_submit_open_frame (vm, async_frame))
1100  esp_async_recycle_failed_submit (async_frame, b, from,
1101  nexts, &n_async_drop,
1102  ESP_DECRYPT_NEXT_DROP,
1103  ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
1104  }
1105  async_frame =
 1106  vnet_crypto_async_get_frame (vm, sa0->crypto_async_dec_op_id);
 1107  last_async_op = sa0->crypto_async_dec_op_id;
1108  }
1109  }
1110 
1111  if (PREDICT_FALSE (~0 == sa0->decrypt_thread_index))
1112  {
1113  /* this is the first packet to use this SA, claim the SA
1114  * for this thread. this could happen simultaneously on
1115  * another thread */
 1116  clib_atomic_cmp_and_swap (&sa0->decrypt_thread_index, ~0,
 1117  ipsec_sa_assign_thread (thread_index));
1118  }
1119 
1120  if (PREDICT_FALSE (thread_index != sa0->decrypt_thread_index))
1121  {
1122  esp_set_next_index (is_async, from, nexts, from[b - bufs],
1123  &n_async_drop, ESP_DECRYPT_NEXT_HANDOFF, next);
1124  next[0] = ESP_DECRYPT_NEXT_HANDOFF;
1125  goto next;
1126  }
1127 
1128  /* store packet data for next round for easier prefetch */
1129  pd->sa_data = cpd.sa_data;
1130  pd->current_data = b[0]->current_data;
1131  pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
1132  payload = b[0]->data + pd->current_data;
1133  pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
1134  pd->is_chain = 0;
1135  pd2->lb = b[0];
1136  pd2->free_buffer_index = 0;
1137  pd2->icv_removed = 0;
1138 
1139  if (n_bufs > 1)
1140  {
1141  pd->is_chain = 1;
1142  /* find last buffer in the chain */
1143  while (pd2->lb->flags & VLIB_BUFFER_NEXT_PRESENT)
1144  pd2->lb = vlib_get_buffer (vm, pd2->lb->next_buffer);
1145 
1146  crypto_ops = &ptd->chained_crypto_ops;
1147  integ_ops = &ptd->chained_integ_ops;
1148  }
1149 
1150  pd->current_length = b[0]->current_length;
1151 
 1152  /* anti-replay check */
1153  if (ipsec_sa_anti_replay_check (sa0, pd->seq))
1154  {
1155  b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
1156  esp_set_next_index (is_async, from, nexts, from[b - bufs],
1157  &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
1158  goto next;
1159  }
1160 
1161  if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
1162  {
1163  b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
1164  esp_set_next_index (is_async, from, nexts, from[b - bufs],
1165  &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
1166  goto next;
1167  }
1168 
1169  len = pd->current_length - cpd.icv_sz;
1170  current_sa_pkts += 1;
1171  current_sa_bytes += vlib_buffer_length_in_chain (vm, b[0]);
1172 
1173  if (is_async)
1174  {
1175  int ret = esp_decrypt_prepare_async_frame (vm, node, ptd,
1176  &async_frame,
1177  sa0, payload, len,
1178  cpd.icv_sz,
1179  cpd.iv_sz,
1180  pd, pd2,
1181  from[b - bufs],
1182  b[0], next, async_next);
1183  if (PREDICT_FALSE (ret < 0))
1184  {
1185  b[0]->error = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
1186  esp_set_next_index (1, from, nexts, from[b - bufs],
1187  &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
1188  /* when next[0] is ESP_DECRYPT_NEXT_DROP we only have to drop
 1189  * the current packet. Otherwise it is a frame submission error,
 1190  * so we have to drop the whole frame.
1191  */
1192  if (next[0] != ESP_DECRYPT_NEXT_DROP && async_frame->n_elts)
1193  esp_async_recycle_failed_submit (async_frame, b, from,
1194  nexts, &n_async_drop,
1195  ESP_DECRYPT_NEXT_DROP,
1196  ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
1197  goto next;
1198  }
1199  }
1200  else
1201  esp_decrypt_prepare_sync_op (vm, node, ptd, &crypto_ops, &integ_ops,
1202  op, sa0, payload, len, cpd.icv_sz,
1203  cpd.iv_sz, pd, pd2, b[0], next,
1204  b - bufs);
1205  /* next */
1206  next:
1207  n_left -= 1;
1208  next += 1;
1209  pd += 1;
1210  pd2 += 1;
1211  b += 1;
1212  }
1213 
1214  if (PREDICT_TRUE (~0 != current_sa_index))
 1215  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
 1216  current_sa_index, current_sa_pkts,
1217  current_sa_bytes);
1218 
1219  if (is_async)
1220  {
1221  if (async_frame && async_frame->n_elts)
1222  {
1223  if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
1224  esp_async_recycle_failed_submit (async_frame, b, from, nexts,
1225  &n_async_drop,
1226  ESP_DECRYPT_NEXT_DROP,
1227  ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
1228  }
1229 
1230  /* no post process in async */
 1231  vlib_node_increment_counter (vm, node->node_index,
 1232  ESP_DECRYPT_ERROR_RX_PKTS, n_left);
1233  if (n_async_drop)
1234  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
1235 
1236  return n_left;
1237  }
1238  else
1239  {
1240  esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts,
1241  ESP_DECRYPT_ERROR_INTEG_ERROR);
1242  esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
1243  ptd->chunks, ESP_DECRYPT_ERROR_INTEG_ERROR);
1244 
1245  esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts,
1246  ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
1247  esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
1248  ptd->chunks,
1249  ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
1250  }
1251 
 1252  /* Post decryption round - adjust packet data start and length and next
1253  node */
1254 
1255  n_left = from_frame->n_vectors;
1256  next = nexts;
1257  pd = pkt_data;
1258  pd2 = pkt_data2;
1259  b = bufs;
1260 
1261  while (n_left)
1262  {
1263  if (n_left >= 2)
1264  {
1265  void *data = b[1]->data + pd[1].current_data;
1266 
1267  /* buffer metadata */
1268  vlib_prefetch_buffer_header (b[1], LOAD);
1269 
1270  /* esp_footer_t */
1271  CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
1272  CLIB_CACHE_LINE_BYTES, LOAD);
1273 
1274  /* packet headers */
 1275  CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
 1276  CLIB_CACHE_LINE_BYTES * 2, LOAD);
1277  }
1278 
1279  /* save the sa_index as GRE_teb post_crypto changes L2 opaque */
1280  if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
1281  current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
1282 
1283  if (next[0] >= ESP_DECRYPT_N_NEXT)
1284  esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
1285  is_tun, 0);
1286 
1287  /* trace: */
1288  if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
1289  {
1290  esp_decrypt_trace_t *tr;
1291  tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
1292  sa0 = pool_elt_at_index (im->sad, current_sa_index);
1293  tr->crypto_alg = sa0->crypto_alg;
1294  tr->integ_alg = sa0->integ_alg;
1295  tr->seq = pd->seq;
1296  tr->sa_seq = sa0->last_seq;
1297  tr->sa_seq_hi = sa0->seq_hi;
1298  }
1299 
1300  /* next */
1301  n_left -= 1;
1302  next += 1;
1303  pd += 1;
1304  pd2 += 1;
1305  b += 1;
1306  }
1307 
1308  n_left = from_frame->n_vectors;
 1309  vlib_node_increment_counter (vm, node->node_index,
 1310  ESP_DECRYPT_ERROR_RX_PKTS, n_left);
1311 
1312  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);
1313 
1314  return n_left;
1315 }
1316 
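/* post node for asynchronously decrypted packets: run the post-crypto
 * fix-ups and tracing once the crypto engine has completed the frame */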
 1317 always_inline uword
 1318 esp_decrypt_post_inline (vlib_main_t * vm,
 1319  vlib_node_runtime_t * node,
 1320  vlib_frame_t * from_frame, int is_ip6, int is_tun)
1321 {
1322  ipsec_main_t *im = &ipsec_main;
1323  u32 *from = vlib_frame_vector_args (from_frame);
1324  u32 n_left = from_frame->n_vectors;
1325  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
1326  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
1327  vlib_get_buffers (vm, from, b, n_left);
1328 
1329  while (n_left > 0)
1330  {
1331  esp_decrypt_packet_data_t *pd = &(esp_post_data (b[0]))->decrypt_data;
1332 
1333  if (n_left > 2)
1334  {
1335  vlib_prefetch_buffer_header (b[2], LOAD);
1336  vlib_prefetch_buffer_header (b[1], LOAD);
1337  }
1338 
1339  if (!pd->is_chain)
1340  esp_decrypt_post_crypto (vm, node, pd, 0, b[0], next, is_ip6, is_tun,
1341  1);
1342  else
1343  {
 1344  esp_decrypt_packet_data2_t *pd2 = esp_post_data2 (b[0]);
 1345  esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
1346  is_tun, 1);
1347  }
1348 
1349  /*trace: */
1350  if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
1351  {
1352  ipsec_sa_t *sa0 = pool_elt_at_index (im->sad, pd->sa_index);
1353  esp_decrypt_trace_t *tr;
1354  esp_decrypt_packet_data_t *async_pd =
1355  &(esp_post_data (b[0]))->decrypt_data;
1356  tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
1357  sa0 = pool_elt_at_index (im->sad, async_pd->sa_index);
1358 
1359  tr->crypto_alg = sa0->crypto_alg;
1360  tr->integ_alg = sa0->integ_alg;
1361  tr->seq = pd->seq;
1362  tr->sa_seq = sa0->last_seq;
1363  tr->sa_seq_hi = sa0->seq_hi;
1364  }
1365 
1366  n_left--;
1367  next++;
1368  b++;
1369  }
1370 
1371  n_left = from_frame->n_vectors;
 1372  vlib_node_increment_counter (vm, node->node_index,
 1373  ESP_DECRYPT_ERROR_RX_POST_PKTS, n_left);
1374 
1375  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);
1376 
1377  return n_left;
1378 }
1379 
 1380 VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
 1381  vlib_node_runtime_t * node,
 1382  vlib_frame_t * from_frame)
 1383 {
 1384  return esp_decrypt_inline (vm, node, from_frame, 0, 0,
 1385  esp_decrypt_async_next.esp4_post_next);
 1386 }
1387 
 1388 VLIB_NODE_FN (esp4_decrypt_post_node) (vlib_main_t * vm,
 1389  vlib_node_runtime_t * node,
 1390  vlib_frame_t * from_frame)
1391 {
1392  return esp_decrypt_post_inline (vm, node, from_frame, 0, 0);
1393 }
1394 
 1395 VLIB_NODE_FN (esp4_decrypt_tun_node) (vlib_main_t * vm,
 1396  vlib_node_runtime_t * node,
 1397  vlib_frame_t * from_frame)
 1398 {
 1399  return esp_decrypt_inline (vm, node, from_frame, 0, 1,
 1400  esp_decrypt_async_next.esp4_tun_post_next);
 1401 }
1402 
 1403 VLIB_NODE_FN (esp4_decrypt_tun_post_node) (vlib_main_t * vm,
 1404  vlib_node_runtime_t * node,
 1405  vlib_frame_t * from_frame)
1406 {
1407  return esp_decrypt_post_inline (vm, node, from_frame, 0, 1);
1408 }
1409 
 1410 VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
 1411  vlib_node_runtime_t * node,
 1412  vlib_frame_t * from_frame)
 1413 {
 1414  return esp_decrypt_inline (vm, node, from_frame, 1, 0,
 1415  esp_decrypt_async_next.esp6_post_next);
 1416 }
1417 
 1418 VLIB_NODE_FN (esp6_decrypt_post_node) (vlib_main_t * vm,
 1419  vlib_node_runtime_t * node,
 1420  vlib_frame_t * from_frame)
1421 {
1422  return esp_decrypt_post_inline (vm, node, from_frame, 1, 0);
1423 }
1424 
 1425 VLIB_NODE_FN (esp6_decrypt_tun_node) (vlib_main_t * vm,
 1426  vlib_node_runtime_t * node,
 1427  vlib_frame_t * from_frame)
 1428 {
 1429  return esp_decrypt_inline (vm, node, from_frame, 1, 1,
 1430  esp_decrypt_async_next.esp6_tun_post_next);
 1431 }
1432 
 1433 VLIB_NODE_FN (esp6_decrypt_tun_post_node) (vlib_main_t * vm,
 1434  vlib_node_runtime_t * node,
 1435  vlib_frame_t * from_frame)
1436 {
1437  return esp_decrypt_post_inline (vm, node, from_frame, 1, 1);
1438 }
1439 
1440 /* *INDENT-OFF* */
1442  .name = "esp4-decrypt",
1443  .vector_size = sizeof (u32),
1444  .format_trace = format_esp_decrypt_trace,
1446 
1447  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
1448  .error_strings = esp_decrypt_error_strings,
1449 
1450  .n_next_nodes = ESP_DECRYPT_N_NEXT,
1451  .next_nodes = {
1452  [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
1453  [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
1454  [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
1455  [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
1456  [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-handoff",
1457  },
1458 };
1459 
1461  .name = "esp4-decrypt-post",
1462  .vector_size = sizeof (u32),
1463  .format_trace = format_esp_decrypt_trace,
1465 
1466  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
1467  .error_strings = esp_decrypt_error_strings,
1468 
1469  .sibling_of = "esp4-decrypt",
1470 };
1471 
1473  .name = "esp6-decrypt",
1474  .vector_size = sizeof (u32),
1475  .format_trace = format_esp_decrypt_trace,
1477 
1478  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
1479  .error_strings = esp_decrypt_error_strings,
1480 
1481  .n_next_nodes = ESP_DECRYPT_N_NEXT,
1482  .next_nodes = {
1483  [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
1484  [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
1485  [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
1486  [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
1487  [ESP_DECRYPT_NEXT_HANDOFF]= "esp6-decrypt-handoff",
1488  },
1489 };
1490 
1492  .name = "esp6-decrypt-post",
1493  .vector_size = sizeof (u32),
1494  .format_trace = format_esp_decrypt_trace,
1496 
1497  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
1498  .error_strings = esp_decrypt_error_strings,
1499 
1500  .sibling_of = "esp6-decrypt",
1501 };
1502 
1504  .name = "esp4-decrypt-tun",
1505  .vector_size = sizeof (u32),
1506  .format_trace = format_esp_decrypt_trace,
1508  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
1509  .error_strings = esp_decrypt_error_strings,
1510  .n_next_nodes = ESP_DECRYPT_N_NEXT,
1511  .next_nodes = {
1512  [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
1513  [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
1514  [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
1515  [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
1516  [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-tun-handoff",
1517  },
1518 };
1519 
1521  .name = "esp4-decrypt-tun-post",
1522  .vector_size = sizeof (u32),
1523  .format_trace = format_esp_decrypt_trace,
1525 
1526  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
1527  .error_strings = esp_decrypt_error_strings,
1528 
1529  .sibling_of = "esp4-decrypt-tun",
1530 };
1531 
1533  .name = "esp6-decrypt-tun",
1534  .vector_size = sizeof (u32),
1535  .format_trace = format_esp_decrypt_trace,
1537  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
1538  .error_strings = esp_decrypt_error_strings,
1539  .n_next_nodes = ESP_DECRYPT_N_NEXT,
1540  .next_nodes = {
1541  [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
1542  [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
1543  [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
1544  [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
1545  [ESP_DECRYPT_NEXT_HANDOFF]= "esp6-decrypt-tun-handoff",
1546  },
1547 };
1548 
1550  .name = "esp6-decrypt-tun-post",
1551  .vector_size = sizeof (u32),
1552  .format_trace = format_esp_decrypt_trace,
1554 
1555  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
1556  .error_strings = esp_decrypt_error_strings,
1557 
1558  .sibling_of = "esp6-decrypt-tun",
1559 };
1560 /* *INDENT-ON* */
1561 
1562 /*
1563  * fd.io coding-style-patch-verification: ON
1564  *
1565  * Local Variables:
1566  * eval: (c-set-style "gnu")
1567  * End:
1568  */