FD.io VPP  v19.08.3-2-gbabecb413
Vector Packet Processing
ah_encrypt.c
/*
 * ah_encrypt.c : IPSec AH encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ah.h>

#define foreach_ah_encrypt_next                \
  _ (DROP, "error-drop")                       \
  _ (IP4_LOOKUP, "ip4-lookup")                 \
  _ (IP6_LOOKUP, "ip6-lookup")                 \
  _ (INTERFACE_OUTPUT, "interface-output")


#define _(v, s) AH_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_ah_encrypt_next
#undef _
    AH_ENCRYPT_N_NEXT,
} ah_encrypt_next_t;

#define foreach_ah_encrypt_error                                  \
  _ (RX_PKTS, "AH pkts received")                                 \
  _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
  _ (SEQ_CYCLED, "sequence number cycled")


typedef enum
{
#define _(sym,str) AH_ENCRYPT_ERROR_##sym,
  foreach_ah_encrypt_error
#undef _
    AH_ENCRYPT_N_ERROR,
} ah_encrypt_error_t;

static char *ah_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_ah_encrypt_error
#undef _
};

typedef struct
{
  u32 sa_index;
  u32 spi;
  u32 seq_lo;
  u32 seq_hi;
  ipsec_integ_alg_t integ_alg;
} ah_encrypt_trace_t;

/* packet trace format function */
static u8 *
format_ah_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ah_encrypt_trace_t *t = va_arg (*args, ah_encrypt_trace_t *);

  s = format (s, "ah: sa-index %d spi %u (0x%08x) seq %u:%u integrity %U",
              t->sa_index, t->spi, t->spi, t->seq_hi, t->seq_lo,
              format_ipsec_integ_alg, t->integ_alg);
  return s;
}

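/*
 * Run the queued integrity ops through the crypto engine; any op that did
 * not complete marks its buffer with a crypto-engine error and reroutes it
 * to the drop next node.
 */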
static_always_inline void
ah_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
          b[bi]->error = node->errors[AH_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
          nexts[bi] = AH_ENCRYPT_NEXT_DROP;
          n_fail--;
        }
      op++;
    }
}

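/*
 * Per-packet state carried between the two passes of the node: the mutable
 * IP header fields to restore, the buffer offset of the rewritten header,
 * the SA index and a skip flag for packets dropped in the first pass.
 */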
typedef struct
{
  union
  {
    struct
    {
      u8 hop_limit;
      u32 ip_version_traffic_class_and_flow_label;
    };

    struct
    {
      u8 ttl;
      u8 tos;
    };
  };
  i16 current_data;
  u8 skip;
  u32 sa_index;
} ah_encrypt_packet_data_t;

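/*
 * Common worker for the ah4-encrypt and ah6-encrypt nodes; is_ip6 selects
 * between the IPv4 and IPv6 header handling.
 */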
always_inline uword
ah_encrypt_inline (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * frame,
                   int is_ip6)
{
  u32 n_left, *from, thread_index;
  int icv_size = 0;
  from = vlib_frame_vector_args (frame);
  n_left = frame->n_vectors;
  ipsec_main_t *im = &ipsec_main;
  ah_encrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  thread_index = vm->thread_index;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  ipsec_sa_t *sa0 = 0;
  ip4_and_ah_header_t *ih0, *oh0 = 0;
  ip6_and_ah_header_t *ih6_0, *oh6_0 = 0;
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
  const static ip4_header_t ip4_hdr_template = {
    .ip_version_and_header_length = 0x45,
    .protocol = IP_PROTOCOL_IPSEC_AH,
  };
  const static ip6_header_t ip6_hdr_template = {
    .ip_version_traffic_class_and_flow_label = 0x0000060,
    .protocol = IP_PROTOCOL_IPSEC_AH,
  };

  clib_memset (pkt_data, 0, VLIB_FRAME_SIZE * sizeof (pkt_data[0]));
  vlib_get_buffers (vm, from, b, n_left);
  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);

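  /*
   * Pass 1: open up room in front of each packet, build the IP and AH
   * headers and queue one integrity op per packet.  Mutable IP fields
   * (TTL/TOS or hop limit) are saved in pkt_data and cleared so the ICV is
   * computed over a predictable header; they are restored in pass 2.
   */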
  while (n_left > 0)
    {
      u8 ip_hdr_size;
      u8 next_hdr_type;

      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
        {
          if (current_sa_index != ~0)
            vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                             current_sa_index,
                                             current_sa_pkts,
                                             current_sa_bytes);
          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, current_sa_index);

          current_sa_bytes = current_sa_pkts = 0;
        }

      pd->sa_index = current_sa_index;
      next[0] = AH_ENCRYPT_NEXT_DROP;

      if (PREDICT_FALSE (esp_seq_advance (sa0)))
        {
          b[0]->error = node->errors[AH_ENCRYPT_ERROR_SEQ_CYCLED];
          pd->skip = 1;
          goto next;
        }

      current_sa_pkts += 1;
      current_sa_bytes += b[0]->current_length;

      ssize_t adv;
      ih0 = vlib_buffer_get_current (b[0]);
      pd->ttl = ih0->ip4.ttl;
      pd->tos = ih0->ip4.tos;

      if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
        {
          if (is_ip6)
            adv = -sizeof (ip6_and_ah_header_t);
          else
            adv = -sizeof (ip4_and_ah_header_t);
        }
      else
        {
          adv = -sizeof (ah_header_t);
        }

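      /* make room for the ICV plus the padding that keeps the AH header a
         multiple of 4 bytes (8 for IPv6) */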
      icv_size = sa0->integ_icv_size;
      const u8 padding_len = ah_calc_icv_padding_len (icv_size, is_ip6);
      adv -= padding_len;
      /* in transport mode, save the ethernet header before it is overwritten */
      if (PREDICT_FALSE (!ipsec_sa_is_set_IS_TUNNEL (sa0)))
        {
          const u32 l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
          u8 *l2_hdr_in = (u8 *) vlib_buffer_get_current (b[0]) - l2_len;

          u8 *l2_hdr_out = l2_hdr_in + adv - icv_size;

          clib_memcpy_le32 (l2_hdr_out, l2_hdr_in, l2_len);
        }

      vlib_buffer_advance (b[0], adv - icv_size);

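      /* write the IP and AH headers at the new start of the buffer */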
      if (is_ip6)
        {
          ih6_0 = (ip6_and_ah_header_t *) ih0;
          ip_hdr_size = sizeof (ip6_header_t);
          oh6_0 = vlib_buffer_get_current (b[0]);
          pd->current_data = b[0]->current_data;

          pd->hop_limit = ih6_0->ip6.hop_limit;
          pd->ip_version_traffic_class_and_flow_label =
            ih6_0->ip6.ip_version_traffic_class_and_flow_label;
          if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
            {
              next_hdr_type = IP_PROTOCOL_IPV6;
            }
          else
            {
              next_hdr_type = ih6_0->ip6.protocol;
              memmove (oh6_0, ih6_0, sizeof (ip6_header_t));
            }

          clib_memcpy_fast (&oh6_0->ip6, &ip6_hdr_template, 8);
          oh6_0->ah.reserved = 0;
          oh6_0->ah.nexthdr = next_hdr_type;
          oh6_0->ah.spi = clib_net_to_host_u32 (sa0->spi);
          oh6_0->ah.seq_no = clib_net_to_host_u32 (sa0->seq);
          oh6_0->ip6.payload_length =
            clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
                                  sizeof (ip6_header_t));
          oh6_0->ah.hdrlen =
            (sizeof (ah_header_t) + icv_size + padding_len) / 4 - 2;
        }
      else
        {
          ip_hdr_size = sizeof (ip4_header_t);
          oh0 = vlib_buffer_get_current (b[0]);
          clib_memset (oh0, 0, sizeof (ip4_and_ah_header_t));
          pd->current_data = b[0]->current_data;

          if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
            {
              next_hdr_type = IP_PROTOCOL_IP_IN_IP;
            }
          else
            {
              next_hdr_type = ih0->ip4.protocol;
              memmove (oh0, ih0, sizeof (ip4_header_t));
            }

          clib_memcpy_fast (&oh0->ip4, &ip4_hdr_template,
                            sizeof (ip4_header_t) -
                            sizeof (ip4_address_pair_t));

          oh0->ip4.length =
            clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
          oh0->ah.spi = clib_net_to_host_u32 (sa0->spi);
          oh0->ah.seq_no = clib_net_to_host_u32 (sa0->seq);
          oh0->ah.nexthdr = next_hdr_type;
          oh0->ah.hdrlen =
            (sizeof (ah_header_t) + icv_size + padding_len) / 4 - 2;
        }

      if (PREDICT_TRUE (!is_ip6 && ipsec_sa_is_set_IS_TUNNEL (sa0) &&
                        !ipsec_sa_is_set_IS_TUNNEL_V6 (sa0)))
        {
          clib_memcpy_fast (&oh0->ip4.address_pair,
                            &sa0->ip4_hdr.address_pair,
                            sizeof (ip4_address_t));

          next[0] = sa0->dpo.dpoi_next_node;
          vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = sa0->dpo.dpoi_index;
        }
      else if (is_ip6 && ipsec_sa_is_set_IS_TUNNEL (sa0) &&
               ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
        {
          clib_memcpy_fast (&oh6_0->ip6.src_address,
                            &sa0->ip6_hdr.src_address,
                            sizeof (ip6_address_t) * 2);
          next[0] = sa0->dpo.dpoi_next_node;
          vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = sa0->dpo.dpoi_index;
        }

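      /* queue an integrity op over the rewritten header and payload; the
         ICV field inside the AH header is zeroed before hashing and, with
         ESN, the high sequence number bytes are appended to the hashed data */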
      if (PREDICT_TRUE (sa0->integ_op_id))
        {
          vnet_crypto_op_t *op;
          vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
          vnet_crypto_op_init (op, sa0->integ_op_id);
          op->src = vlib_buffer_get_current (b[0]);
          op->len = b[0]->current_length;
          op->digest = vlib_buffer_get_current (b[0]) + ip_hdr_size +
            sizeof (ah_header_t);
          clib_memset (op->digest, 0, icv_size);
          op->digest_len = icv_size;
          op->key_index = sa0->integ_key_index;
          op->user_data = b - bufs;
          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);

              op->len += sizeof (seq_hi);
              clib_memcpy (op->src + b[0]->current_length, &seq_hi,
                           sizeof (seq_hi));
            }
        }

      if (!ipsec_sa_is_set_IS_TUNNEL (sa0))
        {
          next[0] = AH_ENCRYPT_NEXT_INTERFACE_OUTPUT;
          vlib_buffer_advance (b[0], -sizeof (ethernet_header_t));
        }

    next:
      n_left -= 1;
      next += 1;
      pd += 1;
      b += 1;
    }

  n_left = frame->n_vectors;
  next = nexts;
  pd = pkt_data;
  b = bufs;

  vlib_node_increment_counter (vm, node->node_index,
                               AH_ENCRYPT_ERROR_RX_PKTS, n_left);
  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                   current_sa_index, current_sa_pkts,
                                   current_sa_bytes);

  ah_process_ops (vm, node, ptd->integ_ops, bufs, nexts);

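  /*
   * Pass 2: with the ICVs computed, restore the mutable IP fields saved in
   * pass 1 and recompute the IPv4 header checksum.
   */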
  while (n_left)
    {
      if (pd->skip)
        goto trace;

      if (is_ip6)
        {
          oh6_0 = (ip6_and_ah_header_t *) (b[0]->data + pd->current_data);
          oh6_0->ip6.hop_limit = pd->hop_limit;
          oh6_0->ip6.ip_version_traffic_class_and_flow_label =
            pd->ip_version_traffic_class_and_flow_label;
        }
      else
        {
          oh0 = (ip4_and_ah_header_t *) (b[0]->data + pd->current_data);
          oh0->ip4.ttl = pd->ttl;
          oh0->ip4.tos = pd->tos;
          oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
        }

    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          sa0 = vec_elt_at_index (im->sad, pd->sa_index);
          ah_encrypt_trace_t *tr =
            vlib_add_trace (vm, node, b[0], sizeof (*tr));
          tr->spi = sa0->spi;
          tr->seq_lo = sa0->seq;
          tr->seq_hi = sa0->seq_hi;
          tr->integ_alg = sa0->integ_alg;
          tr->sa_index = pd->sa_index;
        }

      n_left -= 1;
      next += 1;
      pd += 1;
      b += 1;
    }

  n_left = frame->n_vectors;
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  return n_left;
}

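/*
 * Node entry points: the inline above is instantiated once for IPv4 and
 * once for IPv6.
 */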
VLIB_NODE_FN (ah4_encrypt_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return ah_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ah4_encrypt_node) = {
  .name = "ah4-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_ah_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ah_encrypt_error_strings),
  .error_strings = ah_encrypt_error_strings,

  .n_next_nodes = AH_ENCRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [AH_ENCRYPT_NEXT_##s] = n,
    foreach_ah_encrypt_next
#undef _
  },
};
/* *INDENT-ON* */

VLIB_NODE_FN (ah6_encrypt_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return ah_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ah6_encrypt_node) = {
  .name = "ah6-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_ah_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ah_encrypt_error_strings),
  .error_strings = ah_encrypt_error_strings,

  .n_next_nodes = AH_ENCRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [AH_ENCRYPT_NEXT_##s] = n,
    foreach_ah_encrypt_next
#undef _
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */