FD.io VPP v21.10.1-2-g0a485f517 (Vector Packet Processing)
esp_encrypt.c
/*
 * esp_encrypt.c : IPSec ESP encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/crypto/crypto.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_tun.h>
#include <vnet/ipsec/esp.h>
#include <vnet/tunnel/tunnel_dp.h>

#define foreach_esp_encrypt_next                                              \
  _ (DROP4, "ip4-drop")                                                       \
  _ (DROP6, "ip6-drop")                                                       \
  _ (DROP_MPLS, "mpls-drop")                                                  \
  _ (HANDOFF4, "handoff4")                                                    \
  _ (HANDOFF6, "handoff6")                                                    \
  _ (HANDOFF_MPLS, "handoff-mpls")                                            \
  _ (INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;

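/* The _() X-macro expands foreach_esp_encrypt_next in place, so the enum
 * above is equivalent to:
 *
 *   ESP_ENCRYPT_NEXT_DROP4, ESP_ENCRYPT_NEXT_DROP6, ...,
 *   ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT, ESP_ENCRYPT_N_NEXT
 *
 * keeping the next-node symbols and the node-name strings used by the
 * VLIB_REGISTER_NODE blocks below in a single list. */
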
#define foreach_esp_encrypt_error                                             \
  _ (RX_PKTS, "ESP pkts received")                                            \
  _ (POST_RX_PKTS, "ESP-post pkts received")                                  \
  _ (HANDOFF, "Hand-off")                                                     \
  _ (SEQ_CYCLED, "sequence number cycled (packet dropped)")                   \
  _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")             \
  _ (CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)")                 \
  _ (NO_BUFFERS, "no buffers (packet dropped)")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};

typedef struct
{
  u32 sa_index;
  u32 spi;
  u32 seq;
  u32 sa_seq_hi;
  u8 udp_encap;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

typedef struct
{
  u32 next_index;
} esp_encrypt_post_trace_t;

/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s =
    format (s,
	    "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
	    t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
	    format_ipsec_crypto_alg, t->crypto_alg,
	    format_ipsec_integ_alg, t->integ_alg,
	    t->udp_encap ? " udp-encap-enabled" : "");
  return s;
}

static u8 *
format_esp_post_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_post_trace_t *t = va_arg (*args, esp_encrypt_post_trace_t *);

  s = format (s, "esp-post: next node index %u", t->next_index);
  return s;
}

/* pad packet in input buffer */
static_always_inline u8 *
esp_add_footer_and_icv (vlib_main_t *vm, vlib_buffer_t **last, u8 esp_align,
			u8 icv_sz, vlib_node_runtime_t *node,
			u16 buffer_data_size, uword total_len)
{
  static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
    0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00,
  };

  u16 min_length = total_len + sizeof (esp_footer_t);
  u16 new_length = round_pow2 (min_length, esp_align);
  u8 pad_bytes = new_length - min_length;
  esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (last[0]) +
				      last[0]->current_length + pad_bytes);
  u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;

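  /* Worked example: with esp_align = 16 and total_len = 60, min_length =
   * 60 + 2 = 62, new_length = round_pow2 (62, 16) = 64, so pad_bytes = 2
   * and the first two monotonic pad_data bytes (0x01, 0x02) are placed in
   * front of the footer, as RFC 4303 requires. */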
  if (last[0]->current_data + last[0]->current_length + tail_sz >
      buffer_data_size)
    {
      u32 tmp_bi = 0;
      if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
	return 0;

      vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
      last[0]->next_buffer = tmp_bi;
      last[0]->flags |= VLIB_BUFFER_NEXT_PRESENT;
      f = (esp_footer_t *) (vlib_buffer_get_current (tmp) + pad_bytes);
      tmp->current_length += tail_sz;
      last[0] = tmp;
    }
  else
    last[0]->current_length += tail_sz;

  f->pad_length = pad_bytes;
  if (pad_bytes)
    {
      ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE);
      pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes);
      clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);
    }

  return &f->next_header;
}

static_always_inline void
esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
{
  ip_csum_t sum;
  u16 old_len;

  len = clib_net_to_host_u16 (len);
  old_len = ip4->length;

  if (is_transport)
    {
      u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;

      sum = ip_csum_update (ip4->checksum, ip4->protocol,
			    prot, ip4_header_t, protocol);
      ip4->protocol = prot;

      sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
    }
  else
    sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);

  ip4->length = len;
  ip4->checksum = ip_csum_fold (sum);
}
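
/* ip_csum_update () applies the RFC 1624 incremental-update rule
 * (HC' = ~(~HC + ~m + m')), so only the protocol and length fields
 * rewritten above are folded into the existing header checksum instead of
 * recomputing it over the whole header. */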

static_always_inline void
esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
{
  clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
  udp->length = clib_net_to_host_u16 (len);
}

static_always_inline u8
ext_hdr_is_pre_esp (u8 nexthdr)
{
#ifdef CLIB_HAVE_VEC128
  static const u8x16 ext_hdr_types = {
    IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
    IP_PROTOCOL_IPV6_ROUTE,
    IP_PROTOCOL_IPV6_FRAGMENTATION,
  };

  return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
  return ((nexthdr == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) ||
	  (nexthdr == IP_PROTOCOL_IPV6_ROUTE) ||
	  (nexthdr == IP_PROTOCOL_IPV6_FRAGMENTATION));
#endif
}
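
/* In the vectorized path a single 16-byte compare tests nexthdr against
 * all three extension-header types at once: u8x16_splat () broadcasts
 * nexthdr into every lane, the lane-wise == yields 0xff in any lane that
 * matches ext_hdr_types, and u8x16_is_all_zero () then reports whether no
 * lane matched. The scalar fallback performs the same three-way match. */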

static_always_inline u8
esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
{
  /* this code assumes that HbH, route and frag headers will be before
     others, if that is not the case, they will end up encrypted */
  u8 len = sizeof (ip6_header_t);
  ip6_ext_header_t *p;

  /* if next packet doesn't have ext header */
  if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
    {
      *ext_hdr = NULL;
      return len;
    }

  p = (void *) (ip6 + 1);
  len += ip6_ext_header_len (p);

  while (ext_hdr_is_pre_esp (p->next_hdr))
    {
      len += ip6_ext_header_len (p);
      p = ip6_ext_next_header (p);
    }

  *ext_hdr = p;
  return len;
}
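
/* RFC 4303 transport mode places ESP after any hop-by-hop, routing and
 * fragment extension headers: those must stay outside the encrypted
 * payload so intermediate nodes can still process them, which is why only
 * the header types matched by ext_hdr_is_pre_esp () are skipped here. */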

static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vnet_crypto_op_t * ops, vlib_buffer_t * b[],
			 u16 * nexts, vnet_crypto_op_chunk_t * chunks,
			 u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 bi = op->user_data;
	  b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
	  nexts[bi] = drop_next;
	  n_fail--;
	}
      op++;
    }
}

static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
		 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
		 u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 bi = op->user_data;
	  b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
	  nexts[bi] = drop_next;
	  n_fail--;
	}
      op++;
    }
}
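
/* Both op-processing helpers rely on op->user_data having been set to the
 * packet's index in the sync buffer arrays, so a failed op can be steered
 * to the drop next-node directly instead of searching for its buffer. */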

static_always_inline u32
esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			  ipsec_sa_t * sa0, vlib_buffer_t * b,
			  vlib_buffer_t * lb, u8 icv_sz, u8 * start,
			  u32 start_len, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u32 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = ch->dst = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
	total_len += ch->len = cb->current_length - icv_sz;
      else
	total_len += ch->len = cb->current_length;
      ch->src = ch->dst = vlib_buffer_get_current (cb);

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}

static_always_inline u32
esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			 ipsec_sa_t * sa0, vlib_buffer_t * b,
			 vlib_buffer_t * lb, u8 icv_sz, u8 * start,
			 u32 start_len, u8 * digest, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u32 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
	{
	  total_len += ch->len = cb->current_length - icv_sz;
	  if (ipsec_sa_is_set_USE_ESN (sa0))
	    {
	      u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
	      clib_memcpy_fast (digest, &seq_hi, sizeof (seq_hi));
	      ch->len += sizeof (seq_hi);
	      total_len += sizeof (seq_hi);
	    }
	}
      else
	total_len += ch->len = cb->current_length;
      ch->src = vlib_buffer_get_current (cb);

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
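
/* With ESN enabled the high 32 bits of the sequence number are never
 * transmitted; they are written into the scratch space where the ICV will
 * land and the last chunk is extended over them, so the digest covers
 * seq_hi exactly as RFC 4303 Appendix A prescribes. */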

always_inline void
esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
		     vnet_crypto_op_t **crypto_ops,
		     vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0, u32 seq_hi,
		     u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
		     vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
		     esp_header_t *esp)
{
  if (sa0->crypto_enc_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->crypto_enc_op_id);

      op->src = op->dst = payload;
      op->key_index = sa0->crypto_key_index;
      op->len = payload_len - icv_sz;
      op->user_data = bi;

      if (ipsec_sa_is_set_IS_CTR (sa0))
	{
	  ASSERT (sizeof (u64) == iv_sz);
	  /* construct nonce in a scratch space in front of the IP header */
	  esp_ctr_nonce_t *nonce =
	    (esp_ctr_nonce_t *) (payload - sizeof (u64) - hdr_len -
				 sizeof (*nonce));
	  u64 *pkt_iv = (u64 *) (payload - sizeof (u64));

	  if (ipsec_sa_is_set_IS_AEAD (sa0))
	    {
	      /* construct aad in a scratch space in front of the nonce */
	      op->aad = (u8 *) nonce - sizeof (esp_aead_t);
	      op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
	      op->tag = payload + op->len;
	      op->tag_len = 16;
	    }
	  else
	    {
	      nonce->ctr = clib_host_to_net_u32 (1);
	    }

	  nonce->salt = sa0->salt;
	  nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa0->ctr_iv_counter++);
	  op->iv = (u8 *) nonce;
	}
      else
	{
	  op->iv = payload - iv_sz;
	  op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
	}

      if (lb != b[0])
	{
	  /* is chained */
	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  op->chunk_index = vec_len (ptd->chunks);
	  op->tag = vlib_buffer_get_tail (lb) - icv_sz;
	  esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz, payload,
				    payload_len, &op->n_chunks);
	}
    }

  if (sa0->integ_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->integ_op_id);
      op->src = payload - iv_sz - sizeof (esp_header_t);
      op->digest = payload + payload_len - icv_sz;
      op->key_index = sa0->integ_key_index;
      op->digest_len = icv_sz;
      op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
      op->user_data = bi;

      if (lb != b[0])
	{
	  /* is chained */
	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  op->chunk_index = vec_len (ptd->chunks);
	  op->digest = vlib_buffer_get_tail (lb) - icv_sz;

	  esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
				   payload - iv_sz - sizeof (esp_header_t),
				   payload_len + iv_sz +
				   sizeof (esp_header_t), op->digest,
				   &op->n_chunks);
	}
      else if (ipsec_sa_is_set_USE_ESN (sa0))
	{
	  u32 tmp = clib_net_to_host_u32 (seq_hi);
	  clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
	  op->len += sizeof (seq_hi);
	}
    }
}
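
/* Only the 8-byte per-packet IV written at payload - iv_sz goes on the
 * wire; the nonce (salt + IV [+ block counter]) and, for AEAD, the aad are
 * assembled in headroom scratch space in front of the rebuilt headers and
 * consumed solely as crypto-op inputs. */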

static_always_inline void
esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
			 vnet_crypto_async_frame_t *async_frame,
			 ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
			 u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
			 u32 bi, u16 next, u32 hdr_len, u16 async_next,
			 vlib_buffer_t *lb)
{
  esp_post_data_t *post = esp_post_data (b);
  u8 *tag, *iv, *aad = 0;
  u8 flag = 0;
  u32 key_index;
  i16 crypto_start_offset, integ_start_offset = 0;
  u16 crypto_total_len, integ_total_len;

  post->next_index = next;

  /* crypto */
  crypto_start_offset = payload - b->data;
  crypto_total_len = integ_total_len = payload_len - icv_sz;
  tag = payload + crypto_total_len;

  key_index = sa->linked_key_index;

  if (ipsec_sa_is_set_IS_CTR (sa))
    {
      ASSERT (sizeof (u64) == iv_sz);
      /* construct nonce in a scratch space in front of the IP header */
      esp_ctr_nonce_t *nonce = (esp_ctr_nonce_t *) (payload - sizeof (u64) -
						    hdr_len - sizeof (*nonce));
      u64 *pkt_iv = (u64 *) (payload - sizeof (u64));

      if (ipsec_sa_is_set_IS_AEAD (sa))
	{
	  /* construct aad in a scratch space in front of the nonce */
	  aad = (u8 *) nonce - sizeof (esp_aead_t);
	  esp_aad_fill (aad, esp, sa, sa->seq_hi);
	  key_index = sa->crypto_key_index;
	}
      else
	{
	  nonce->ctr = clib_host_to_net_u32 (1);
	}

      nonce->salt = sa->salt;
      nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->ctr_iv_counter++);
      iv = (u8 *) nonce;
    }
  else
    {
      iv = payload - iv_sz;
      flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
    }

  if (lb != b)
    {
      /* chain */
      flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
      tag = vlib_buffer_get_tail (lb) - icv_sz;
      crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, icv_sz,
						   payload, payload_len, 0);
    }

  if (sa->integ_op_id)
    {
      integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
      integ_total_len += iv_sz + sizeof (esp_header_t);

      if (b != lb)
	{
	  integ_total_len = esp_encrypt_chain_integ (
	    vm, ptd, sa, b, lb, icv_sz,
	    payload - iv_sz - sizeof (esp_header_t),
	    payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
	}
      else if (ipsec_sa_is_set_USE_ESN (sa))
	{
	  u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
	  clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
	  integ_total_len += sizeof (seq_hi);
	}
    }

  /* this always succeeds because we know the frame is not full */
  vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len,
				  integ_total_len - crypto_total_len,
				  crypto_start_offset, integ_start_offset, bi,
				  async_next, iv, tag, aad, flag);
}
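
/* Unlike the sync path, a single async frame element carries both cipher
 * and integrity work: the integ_len_adj argument (integ_total_len -
 * crypto_total_len) extends the span backwards over the ESP header and
 * IV, and key_index selects the SA's linked crypto+integ key unless the
 * algorithm is AEAD. */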

always_inline uword
esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
		    vlib_frame_t *frame, vnet_link_t lt, int is_tun,
		    u16 async_next_node)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 current_sa_index = ~0, current_sa_packets = 0;
  u32 current_sa_bytes = 0, spi = 0;
  u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
  ipsec_sa_t *sa0 = 0;
  vlib_buffer_t *lb;
  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
  vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
  int is_async = im->async_mode;
  vnet_crypto_async_op_id_t async_op = ~0;
  u16 drop_next =
    (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
     (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
      ESP_ENCRYPT_NEXT_DROP_MPLS));
  u16 handoff_next = (lt == VNET_LINK_IP6 ?
		      ESP_ENCRYPT_NEXT_HANDOFF6 :
		      (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
		       ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
  vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
  u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
  u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0;
  u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
  u32 sync_bi[VLIB_FRAME_SIZE];
  u32 noop_bi[VLIB_FRAME_SIZE];
  esp_encrypt_error_t err;

  vlib_get_buffers (vm, from, b, n_left);

  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->async_frames);
  vec_reset_length (ptd->chunks);
  clib_memset (async_frames, 0, sizeof (async_frames));

  while (n_left > 0)
    {
      u32 sa_index0;
      dpo_id_t *dpo;
      esp_header_t *esp;
      u8 *payload, *next_hdr_ptr;
      u16 payload_len, payload_len_total, n_bufs;
      u32 hdr_len;

      err = ESP_ENCRYPT_ERROR_RX_PKTS;

      if (n_left > 2)
	{
	  u8 *p;
	  vlib_prefetch_buffer_header (b[2], LOAD);
	  p = vlib_buffer_get_current (b[1]);
	  clib_prefetch_load (p);
	  p -= CLIB_CACHE_LINE_BYTES;
	  clib_prefetch_load (p);
	  /* speculate that the trailer goes in the first buffer */
	  CLIB_PREFETCH (vlib_buffer_get_tail (b[0]),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	}

      if (is_tun)
	{
	  /* we are on a ipsec tunnel's feature arc */
	  vnet_buffer (b[0])->ipsec.sad_index =
	    sa_index0 = ipsec_tun_protect_get_sa_out
	    (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
	}
      else
	sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;

      if (sa_index0 != current_sa_index)
	{
	  if (current_sa_packets)
	    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
					     current_sa_index,
					     current_sa_packets,
					     current_sa_bytes);
	  current_sa_packets = current_sa_bytes = 0;

	  sa0 = ipsec_sa_get (sa_index0);

	  /* fetch the second cacheline ASAP */
	  clib_prefetch_load (sa0->cacheline1);

	  current_sa_index = sa_index0;
	  spi = clib_net_to_host_u32 (sa0->spi);
	  esp_align = sa0->esp_block_align;
	  icv_sz = sa0->integ_icv_size;
	  iv_sz = sa0->crypto_iv_size;
	  is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
	}

      if (PREDICT_FALSE (~0 == sa0->thread_index))
	{
	  /* this is the first packet to use this SA, claim the SA
	   * for this thread. this could happen simultaneously on
	   * another thread */
	  clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
				    ipsec_sa_assign_thread (thread_index));
	}

      if (PREDICT_FALSE (thread_index != sa0->thread_index))
	{
	  vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
	  err = ESP_ENCRYPT_ERROR_HANDOFF;
	  esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
			      handoff_next);
	  goto trace;
	}

      lb = b[0];
      n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (n_bufs == 0)
	{
	  err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
	  esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
	  goto trace;
	}

      if (n_bufs > 1)
	{
	  /* find last buffer in the chain */
	  while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
	    lb = vlib_get_buffer (vm, lb->next_buffer);
	}

      if (PREDICT_FALSE (esp_seq_advance (sa0)))
	{
	  err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
	  esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
	  goto trace;
	}
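
      /* esp_seq_advance () increments the (possibly extended) sequence
       * number and returns non-zero on wrap-around; reusing a sequence
       * number under the same key would break anti-replay and, for
       * CTR/GCM, confidentiality, so the packet is dropped and the SA
       * must be re-keyed. */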

      /* space for IV */
      hdr_len = iv_sz;

      if (ipsec_sa_is_set_IS_TUNNEL (sa0))
	{
	  payload = vlib_buffer_get_current (b[0]);
	  next_hdr_ptr = esp_add_footer_and_icv (
	    vm, &lb, esp_align, icv_sz, node, buffer_data_size,
	    vlib_buffer_length_in_chain (vm, b[0]));
	  if (!next_hdr_ptr)
	    {
	      err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
	      esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
				  drop_next);
	      goto trace;
	    }
	  b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  payload_len = b[0]->current_length;
	  payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

	  /* ESP header */
	  hdr_len += sizeof (*esp);
	  esp = (esp_header_t *) (payload - hdr_len);

	  /* optional UDP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
	    {
	      hdr_len += sizeof (udp_header_t);
	      esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
				payload_len_total + hdr_len);
	    }

	  /* IP header */
	  if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
	    {
	      ip6_header_t *ip6;
	      u16 len = sizeof (ip6_header_t);
	      hdr_len += len;
	      ip6 = (ip6_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip6, &sa0->ip6_hdr, sizeof (ip6_header_t));

	      if (VNET_LINK_IP6 == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_IPV6;
		  tunnel_encap_fixup_6o6 (sa0->tunnel_flags,
					  (const ip6_header_t *) payload,
					  ip6);
		}
	      else if (VNET_LINK_IP4 == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
		  tunnel_encap_fixup_4o6 (sa0->tunnel_flags, b[0],
					  (const ip4_header_t *) payload, ip6);
		}
	      else if (VNET_LINK_MPLS == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
		  tunnel_encap_fixup_mplso6 (
		    sa0->tunnel_flags, b[0],
		    (const mpls_unicast_header_t *) payload, ip6);
		}
	      else
		ASSERT (0);

	      len = payload_len_total + hdr_len - len;
	      ip6->payload_length = clib_net_to_host_u16 (len);
	      b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
	    }
	  else
	    {
	      ip4_header_t *ip4;
	      u16 len = sizeof (ip4_header_t);
	      hdr_len += len;
	      ip4 = (ip4_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip4, &sa0->ip4_hdr, sizeof (ip4_header_t));

	      if (VNET_LINK_IP6 == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_IPV6;
		  tunnel_encap_fixup_6o4_w_chksum (sa0->tunnel_flags,
						   (const ip6_header_t *)
						   payload, ip4);
		}
	      else if (VNET_LINK_IP4 == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
		  tunnel_encap_fixup_4o4_w_chksum (sa0->tunnel_flags,
						   (const ip4_header_t *)
						   payload, ip4);
		}
	      else if (VNET_LINK_MPLS == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
		  tunnel_encap_fixup_mplso4_w_chksum (
		    sa0->tunnel_flags, (const mpls_unicast_header_t *) payload,
		    ip4);
		}
	      else
		ASSERT (0);

	      len = payload_len_total + hdr_len;
	      esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
	    }

	  dpo = &sa0->dpo;
	  if (!is_tun)
	    {
	      sync_next[0] = dpo->dpoi_next_node;
	      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
	    }
	  else
	    sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
	  b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
	}
      else			/* transport mode */
	{
	  u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
	  ip6_ext_header_t *ext_hdr;
	  udp_header_t *udp = 0;
	  u16 udp_len = 0;
	  u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);

	  ip_len =
	    (VNET_LINK_IP6 == lt ?
	     esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
	     ip4_header_bytes ((ip4_header_t *) old_ip_hdr));

	  vlib_buffer_advance (b[0], ip_len);
	  payload = vlib_buffer_get_current (b[0]);
	  next_hdr_ptr = esp_add_footer_and_icv (
	    vm, &lb, esp_align, icv_sz, node, buffer_data_size,
	    vlib_buffer_length_in_chain (vm, b[0]));
	  if (!next_hdr_ptr)
	    {
	      err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
	      esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
				  drop_next);
	      goto trace;
	    }

	  b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  payload_len = b[0]->current_length;
	  payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

	  /* ESP header */
	  hdr_len += sizeof (*esp);
	  esp = (esp_header_t *) (payload - hdr_len);

	  /* optional UDP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
	    {
	      hdr_len += sizeof (udp_header_t);
	      udp = (udp_header_t *) (payload - hdr_len);
	    }

	  /* IP header */
	  hdr_len += ip_len;
	  ip_hdr = payload - hdr_len;

	  /* L2 header */
	  if (!is_tun)
	    {
	      l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
	      hdr_len += l2_len;
	      l2_hdr = payload - hdr_len;

	      /* copy l2 and ip header */
	      clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
	    }
	  else
	    l2_len = 0;

	  if (VNET_LINK_IP6 == lt)
	    {
	      ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
	      if (PREDICT_TRUE (NULL == ext_hdr))
		{
		  *next_hdr_ptr = ip6->protocol;
		  ip6->protocol = IP_PROTOCOL_IPSEC_ESP;
		}
	      else
		{
		  *next_hdr_ptr = ext_hdr->next_hdr;
		  ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP;
		}
	      ip6->payload_length =
		clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len -
				      sizeof (ip6_header_t));
	    }
	  else if (VNET_LINK_IP4 == lt)
	    {
	      u16 len;
	      ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
	      *next_hdr_ptr = ip4->protocol;
	      len = payload_len_total + hdr_len - l2_len;
	      if (udp)
		{
		  esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1);
		  udp_len = len - ip_len;
		}
	      else
		esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);
	    }

	  clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);

	  if (udp)
	    {
	      esp_fill_udp_hdr (sa0, udp, udp_len);
	    }

	  sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
	}
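
      /* Transport mode keeps the original outer headers: the L2 and
       * IP/extension headers were copied forward in the buffer to open a
       * gap for the ESP header and IV, and only the next-header/protocol
       * and length fields (plus checksums) are rewritten. */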

      if (lb != b[0])
	{
	  crypto_ops = &ptd->chained_crypto_ops;
	  integ_ops = &ptd->chained_integ_ops;
	}
      else
	{
	  crypto_ops = &ptd->crypto_ops;
	  integ_ops = &ptd->integ_ops;
	}

      esp->spi = spi;
      esp->seq = clib_net_to_host_u32 (sa0->seq);

      if (is_async)
	{
	  async_op = sa0->crypto_async_enc_op_id;

	  /* get a frame for this op if we don't yet have one or it's full
	   */
	  if (NULL == async_frames[async_op] ||
	      vnet_crypto_async_frame_is_full (async_frames[async_op]))
	    {
	      async_frames[async_op] =
		vnet_crypto_async_get_frame (vm, async_op);
	      /* Save the frame to the list we'll submit at the end */
	      vec_add1 (ptd->async_frames, async_frames[async_op]);
	    }

	  esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
				   esp, payload, payload_len, iv_sz, icv_sz,
				   from[b - bufs], sync_next[0], hdr_len,
				   async_next_node, lb);
	}
      else
	esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, sa0->seq_hi,
			     payload, payload_len, iv_sz, icv_sz, n_sync, b,
			     lb, hdr_len, esp);

      vlib_buffer_advance (b[0], 0LL - hdr_len);

      current_sa_packets += 1;
      current_sa_bytes += payload_len_total;

    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
						    sizeof (*tr));
	  tr->sa_index = sa_index0;
	  tr->spi = sa0->spi;
	  tr->seq = sa0->seq;
	  tr->sa_seq_hi = sa0->seq_hi;
	  tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
	  tr->crypto_alg = sa0->crypto_alg;
	  tr->integ_alg = sa0->integ_alg;
	}

      /* next */
      if (ESP_ENCRYPT_ERROR_RX_PKTS != err)
	{
	  noop_bi[n_noop] = from[b - bufs];
	  n_noop++;
	  noop_next++;
	}
      else if (!is_async)
	{
	  sync_bi[n_sync] = from[b - bufs];
	  sync_bufs[n_sync] = b[0];
	  n_sync++;
	  sync_next++;
	}
      else
	{
	  n_async++;
	  async_next++;
	}
      n_left -= 1;
      b += 1;
    }
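
  /* At this point every packet has been placed in one of three classes:
   * noop (errors and handoffs, enqueued directly below), sync (crypto ops
   * processed inline below) and async (owned by the crypto frames
   * submitted below). */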

  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
				   current_sa_index, current_sa_packets,
				   current_sa_bytes);
  if (n_sync)
    {
      esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
		       drop_next);
      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
			       sync_nexts, ptd->chunks, drop_next);

      esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
		       drop_next);
      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
			       sync_nexts, ptd->chunks, drop_next);

      vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
    }
  if (n_async)
    {
      /* submit all of the open frames */
      vnet_crypto_async_frame_t **async_frame;

      vec_foreach (async_frame, ptd->async_frames)
	{
	  if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
	    {
	      n_noop += esp_async_recycle_failed_submit (
		vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
		n_sync, noop_bi, noop_nexts, drop_next);
	      vnet_crypto_async_reset_frame (*async_frame);
	      vnet_crypto_async_free_frame (vm, *async_frame);
	    }
	}
    }
  if (n_noop)
    vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);

  vlib_node_increment_counter (vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS,
			       frame->n_vectors);

  return frame->n_vectors;
}

always_inline uword
esp_encrypt_post_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);

  if (n_left >= 4)
    {
      vlib_prefetch_buffer_header (b[0], LOAD);
      vlib_prefetch_buffer_header (b[1], LOAD);
      vlib_prefetch_buffer_header (b[2], LOAD);
      vlib_prefetch_buffer_header (b[3], LOAD);
    }

  while (n_left > 8)
    {
      vlib_prefetch_buffer_header (b[4], LOAD);
      vlib_prefetch_buffer_header (b[5], LOAD);
      vlib_prefetch_buffer_header (b[6], LOAD);
      vlib_prefetch_buffer_header (b[7], LOAD);

      next[0] = (esp_post_data (b[0]))->next_index;
      next[1] = (esp_post_data (b[1]))->next_index;
      next[2] = (esp_post_data (b[2]))->next_index;
      next[3] = (esp_post_data (b[3]))->next_index;

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
							     sizeof (*tr));
	      tr->next_index = next[0];
	    }
	  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[1],
							     sizeof (*tr));
	      tr->next_index = next[1];
	    }
	  if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[2],
							     sizeof (*tr));
	      tr->next_index = next[2];
	    }
	  if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[3],
							     sizeof (*tr));
	      tr->next_index = next[3];
	    }
	}

      b += 4;
      next += 4;
      n_left -= 4;
    }

  while (n_left > 0)
    {
      next[0] = (esp_post_data (b[0]))->next_index;
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
							 sizeof (*tr));
	  tr->next_index = next[0];
	}

      b += 1;
      next += 1;
      n_left -= 1;
    }

  vlib_node_increment_counter (vm, node->node_index,
			       ESP_ENCRYPT_ERROR_POST_RX_PKTS,
			       frame->n_vectors);
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
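
/* The *-post nodes run once asynchronous crypto has completed; they simply
 * restore the next node that esp_prepare_async_frame () stashed in the
 * buffer's esp_post_data () metadata before submission. */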

VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 0,
			     esp_encrypt_async_next.esp4_post_next);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_node) = {
  .name = "esp4-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = { [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
		  [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
		  [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
		  [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
		  [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
		  [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "error-drop",
		  [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output" },
};
/* *INDENT-ON* */

VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
  .name = "esp4-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
/* *INDENT-ON* */

VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 0,
			     esp_encrypt_async_next.esp6_post_next);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_node) = {
  .name = "esp6-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
/* *INDENT-ON* */

VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
  .name = "esp6-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
/* *INDENT-ON* */

VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 1,
			     esp_encrypt_async_next.esp4_tun_post_next);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
  .name = "esp4-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};

VLIB_NODE_FN (esp4_encrypt_tun_post_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
  .name = "esp4-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt-tun",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
/* *INDENT-ON* */

VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 1,
			     esp_encrypt_async_next.esp6_tun_post_next);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
  .name = "esp6-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};

/* *INDENT-ON* */

VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
  .name = "esp6-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp-mpls-encrypt-tun",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
/* *INDENT-ON* */

VLIB_NODE_FN (esp_mpls_encrypt_tun_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_MPLS, 1,
			     esp_encrypt_async_next.esp_mpls_tun_post_next);
}

VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_node) = {
  .name = "esp-mpls-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};

VLIB_NODE_FN (esp_mpls_encrypt_tun_post_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_post_node) = {
  .name = "esp-mpls-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp-mpls-encrypt-tun",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};

typedef struct
{
  u32 sa_index;
} esp_no_crypto_trace_t;

static u8 *
format_esp_no_crypto_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *);

  s = format (s, "esp-no-crypto: sa-index %u", t->sa_index);

  return s;
}

enum
{
  ESP_NO_CRYPTO_NEXT_DROP,
  ESP_NO_CRYPTO_N_NEXT,
};

enum
{
  ESP_NO_CRYPTO_ERROR_RX_PKTS,
};

static char *esp_no_crypto_error_strings[] = {
  "Outbound ESP packets received",
};

always_inline uword
esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);

  while (n_left > 0)
    {
      u32 sa_index0;

      /* packets are always going to be dropped, but get the sa_index */
      sa_index0 = ipsec_tun_protect_get_sa_out
	(vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0],
						      sizeof (*tr));
	  tr->sa_index = sa_index0;
	}

      n_left -= 1;
      b += 1;
    }

  vlib_node_increment_counter (vm, node->node_index,
			       ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors);

  vlib_buffer_enqueue_to_single_next (vm, node, from, ESP_NO_CRYPTO_NEXT_DROP,
				      frame->n_vectors);

  return frame->n_vectors;
}

VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) =
{
  .name = "esp4-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop",
  },
};

VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
{
  .name = "esp6-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop",
  },
};
/* *INDENT-ON* */

#ifndef CLIB_MARCH_VARIANT

static clib_error_t *
esp_encrypt_init (vlib_main_t *vm)
{
  ipsec_main_t *im = &ipsec_main;

  im->esp4_enc_fq_index =
    vlib_frame_queue_main_init (esp4_encrypt_node.index, 0);
  im->esp6_enc_fq_index =
    vlib_frame_queue_main_init (esp6_encrypt_node.index, 0);
  im->esp4_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp4_encrypt_tun_node.index, 0);
  im->esp6_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp6_encrypt_tun_node.index, 0);
  im->esp_mpls_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp_mpls_encrypt_tun_node.index, 0);

  return 0;
}
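
/* The frame queues created here back the handoff next-nodes: when a
 * packet arrives on a worker that does not own its SA, it is queued to
 * the owning thread through the matching frame queue rather than being
 * processed locally. */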

VLIB_INIT_FUNCTION (esp_encrypt_init);

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */