FD.io VPP v18.10-34-gcce845e (Vector Packet Processing)
esp.h
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __ESP_H__
#define __ESP_H__

#include <vnet/ip/ip.h>
#include <vnet/ipsec/ipsec.h>

#include <openssl/hmac.h>
#include <openssl/rand.h>
#include <openssl/evp.h>

typedef struct
{
  u32 spi;
  u32 seq;
  u8 data[0];
} esp_header_t;

typedef struct
{
  u8 pad_length;
  u8 next_header;
} esp_footer_t;

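/* Illustrative sketch (not part of the original header): data[0] is a
 * zero-length trailing member, so code that has located an ESP header in a
 * packet can address the bytes that follow the fixed SPI and sequence-number
 * words (e.g. the IV) without extra pointer arithmetic.  The helper name
 * below is hypothetical.
 */
always_inline u8 *
example_esp_payload (esp_header_t * esp)
{
  /* first byte after the 8-byte SPI + sequence-number prefix */
  return esp->data;
}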
/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  esp_header_t esp;
}) ip4_and_esp_header_t;
/* *INDENT-ON* */

/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  udp_header_t udp;
  esp_header_t esp;
}) ip4_and_udp_and_esp_header_t;
/* *INDENT-ON* */

/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip6_header_t ip6;
  esp_header_t esp;
}) ip6_and_esp_header_t;
/* *INDENT-ON* */

typedef struct
{
  const EVP_CIPHER *type;
  u8 iv_size;
  u8 block_size;
} ipsec_proto_main_crypto_alg_t;

typedef struct
{
  const EVP_MD *md;
  u8 trunc_size;
} ipsec_proto_main_integ_alg_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
  EVP_CIPHER_CTX *encrypt_ctx;
#else
  EVP_CIPHER_CTX encrypt_ctx;
#endif
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
  EVP_CIPHER_CTX *decrypt_ctx;
#else
  EVP_CIPHER_CTX decrypt_ctx;
#endif
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
  HMAC_CTX *hmac_ctx;
#else
  HMAC_CTX hmac_ctx;
#endif
  ipsec_crypto_alg_t last_encrypt_alg;
  ipsec_crypto_alg_t last_decrypt_alg;
  ipsec_integ_alg_t last_integ_alg;
} ipsec_proto_main_per_thread_data_t;

typedef struct
{
  ipsec_proto_main_crypto_alg_t *ipsec_proto_main_crypto_algs;
  ipsec_proto_main_integ_alg_t *ipsec_proto_main_integ_algs;
  ipsec_proto_main_per_thread_data_t *per_thread_data;
} ipsec_proto_main_t;

extern ipsec_proto_main_t ipsec_proto_main;

#define ESP_WINDOW_SIZE (64)
#define ESP_SEQ_MAX (4294967295UL)

u8 *format_esp_header (u8 * s, va_list * args);

always_inline int
esp_replay_check (ipsec_sa_t * sa, u32 seq)
{
  u32 diff;

  if (PREDICT_TRUE (seq > sa->last_seq))
    return 0;

  diff = sa->last_seq - seq;

  if (ESP_WINDOW_SIZE > diff)
    return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
  else
    return 1;
}
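/* Illustrative sketch (not part of the original header): bit N of
 * sa->replay_window records whether sequence number (sa->last_seq - N) was
 * already accepted, which is why ESP_WINDOW_SIZE = 64 matches the u64
 * window.  A hypothetical decrypt-path caller:
 */
always_inline int
example_rx_replay_filter (ipsec_sa_t * sa, u32 seq)
{
  if (esp_replay_check (sa, seq))
    return -1;	/* duplicate, or older than the 64-packet window: drop */
  return 0;	/* fresh: verify the ICV, then call esp_replay_advance() below */
}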

always_inline int
esp_replay_check_esn (ipsec_sa_t * sa, u32 seq)
{
  u32 tl = sa->last_seq;
  u32 th = sa->last_seq_hi;
  u32 diff = tl - seq;

  if (PREDICT_TRUE (tl >= (ESP_WINDOW_SIZE - 1)))
    {
      if (seq >= (tl - ESP_WINDOW_SIZE + 1))
	{
	  sa->seq_hi = th;
	  if (seq <= tl)
	    return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
	  else
	    return 0;
	}
      else
	{
	  sa->seq_hi = th + 1;
	  return 0;
	}
    }
  else
    {
      if (seq >= (tl - ESP_WINDOW_SIZE + 1))
	{
	  sa->seq_hi = th - 1;
	  return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
	}
      else
	{
	  sa->seq_hi = th;
	  if (seq <= tl)
	    return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
	  else
	    return 0;
	}
    }
}
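/* Illustrative note (not part of the original header): the ESN variant
 * follows the high-order-bit recovery of RFC 4303 Appendix A.  Given only
 * the low 32 bits of a received sequence number, it picks the seq_hi half
 * the sender most plausibly used: the current half when seq lands inside or
 * above the window, half + 1 when seq appears to have wrapped past 2^32,
 * and half - 1 when the window itself straddles the wrap.  The guess is
 * stored in sa->seq_hi so that hmac_calc() below can append it to the
 * authenticated data.
 */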

/* TODO: the seq increment should be atomic so it can be accessed by
 * multiple workers */
always_inline void
esp_replay_advance (ipsec_sa_t * sa, u32 seq)
{
  u32 pos;

  if (seq > sa->last_seq)
    {
      pos = seq - sa->last_seq;
      if (pos < ESP_WINDOW_SIZE)
	sa->replay_window = ((sa->replay_window) << pos) | 1;
      else
	sa->replay_window = 1;
      sa->last_seq = seq;
    }
  else
    {
      pos = sa->last_seq - seq;
      sa->replay_window |= (1ULL << pos);
    }
}
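/* Illustrative example (not part of the original header): suppose
 * sa->last_seq = 100.  Accepting seq = 103 takes the first branch: pos = 3,
 * the window shifts left three bits (forgetting its oldest three entries),
 * bit 0 is set for 103, and last_seq becomes 103.  A late but fresh
 * seq = 101 afterwards takes the second branch and only sets bit
 * (103 - 101) = 2, without moving the window.
 */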

always_inline void
esp_replay_advance_esn (ipsec_sa_t * sa, u32 seq)
{
  int wrap = sa->seq_hi - sa->last_seq_hi;
  u32 pos;

  if (wrap == 0 && seq > sa->last_seq)
    {
      pos = seq - sa->last_seq;
      if (pos < ESP_WINDOW_SIZE)
	sa->replay_window = ((sa->replay_window) << pos) | 1;
      else
	sa->replay_window = 1;
      sa->last_seq = seq;
    }
  else if (wrap > 0)
    {
      pos = ~seq + sa->last_seq + 1;
      if (pos < ESP_WINDOW_SIZE)
	sa->replay_window = ((sa->replay_window) << pos) | 1;
      else
	sa->replay_window = 1;
      sa->last_seq = seq;
      sa->last_seq_hi = sa->seq_hi;
    }
  else if (wrap < 0)
    {
      pos = ~seq + sa->last_seq + 1;
      sa->replay_window |= (1ULL << pos);
    }
  else
    {
      pos = sa->last_seq - seq;
      sa->replay_window |= (1ULL << pos);
    }
}
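/* Illustrative note (not part of the original header): in the wrap
 * branches, pos = ~seq + sa->last_seq + 1 is two's-complement shorthand for
 * (sa->last_seq - seq) modulo 2^32.  Across a wrap of the low 32 bits this
 * value is typically at least ESP_WINDOW_SIZE, so the wrap > 0 branch
 * resets the window to 1 and remembers only the newly accepted packet, a
 * conservative restart of the replay history in the new sequence subspace.
 */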

always_inline int
esp_seq_advance (ipsec_sa_t * sa)
{
  if (PREDICT_TRUE (sa->use_esn))
    {
      if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
	{
	  if (PREDICT_FALSE
	      (sa->use_anti_replay && sa->seq_hi == ESP_SEQ_MAX))
	    return 1;
	  sa->seq_hi++;
	}
      sa->seq++;
    }
  else
    {
      if (PREDICT_FALSE (sa->use_anti_replay && sa->seq == ESP_SEQ_MAX))
	return 1;
      sa->seq++;
    }

  return 0;
}
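/* Illustrative sketch (not part of the original header): a hypothetical
 * encrypt-path caller.  A non-zero return means the sequence space is
 * exhausted (2^32 packets, or 2^64 with ESN; only detected when
 * anti-replay is enabled) and the SA must be rekeyed before sending.
 */
always_inline int
example_tx_assign_seq (ipsec_sa_t * sa)
{
  if (PREDICT_FALSE (esp_seq_advance (sa)))
    return -1;	/* sequence number would repeat: rekey the SA */
  /* sa->seq (plus sa->seq_hi when use_esn is set) now names this packet */
  return 0;
}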

always_inline void
ipsec_proto_init ()
{
  ipsec_proto_main_t *em = &ipsec_proto_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  memset (em, 0, sizeof (em[0]));

  vec_validate (em->ipsec_proto_main_crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128].type =
    EVP_aes_128_cbc ();
  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192].type =
    EVP_aes_192_cbc ();
  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256].type =
    EVP_aes_256_cbc ();
  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128].iv_size = 16;
  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192].iv_size = 16;
  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256].iv_size = 16;
  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128].block_size =
    16;
  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192].block_size =
    16;
  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256].block_size =
    16;
  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_DES_CBC].type =
    EVP_des_cbc ();
  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_3DES_CBC].type =
    EVP_des_ede3_cbc ();
  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_DES_CBC].block_size = 8;
  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_3DES_CBC].block_size = 8;
  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_DES_CBC].iv_size = 8;
  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_3DES_CBC].iv_size = 8;

  vec_validate (em->ipsec_proto_main_integ_algs, IPSEC_INTEG_N_ALG - 1);
  ipsec_proto_main_integ_alg_t *i;

  i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA1_96];
  i->md = EVP_sha1 ();
  i->trunc_size = 12;

  i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_256_96];
  i->md = EVP_sha256 ();
  i->trunc_size = 12;

  i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_256_128];
  i->md = EVP_sha256 ();
  i->trunc_size = 16;

  i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
  i->md = EVP_sha384 ();
  i->trunc_size = 24;

  i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
  i->md = EVP_sha512 ();
  i->trunc_size = 32;

  vec_validate_aligned (em->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
  int thread_id;

  for (thread_id = 0; thread_id < tm->n_vlib_mains; thread_id++)
    {
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
      em->per_thread_data[thread_id].encrypt_ctx = EVP_CIPHER_CTX_new ();
      em->per_thread_data[thread_id].decrypt_ctx = EVP_CIPHER_CTX_new ();
      em->per_thread_data[thread_id].hmac_ctx = HMAC_CTX_new ();
#else
      EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].encrypt_ctx));
      EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].decrypt_ctx));
      HMAC_CTX_init (&(em->per_thread_data[thread_id].hmac_ctx));
#endif
    }
}
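/* Illustrative note (not part of the original header): this initializer is
 * expected to run once on the main thread before any worker enters the ESP
 * data path.  Afterwards each worker indexes em->per_thread_data[] by
 * vlib_get_thread_index() and so owns its OpenSSL cipher and HMAC contexts
 * without locking; the CLIB_CACHE_LINE_ALIGN_MARK members keep each context
 * on its own cache line.
 */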

always_inline unsigned int
hmac_calc (ipsec_integ_alg_t alg,
	   u8 * key,
	   int key_len,
	   u8 * data, int data_len, u8 * signature, u8 use_esn, u32 seq_hi)
{
  ipsec_proto_main_t *em = &ipsec_proto_main;
  u32 thread_index = vlib_get_thread_index ();
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
  HMAC_CTX *ctx = em->per_thread_data[thread_index].hmac_ctx;
#else
  HMAC_CTX *ctx = &(em->per_thread_data[thread_index].hmac_ctx);
#endif
  const EVP_MD *md = NULL;
  unsigned int len;

  ASSERT (alg < IPSEC_INTEG_N_ALG);

  if (PREDICT_FALSE (em->ipsec_proto_main_integ_algs[alg].md == 0))
    return 0;

  if (PREDICT_FALSE (alg != em->per_thread_data[thread_index].last_integ_alg))
    {
      md = em->ipsec_proto_main_integ_algs[alg].md;
      em->per_thread_data[thread_index].last_integ_alg = alg;
    }

  HMAC_Init_ex (ctx, key, key_len, md, NULL);

  HMAC_Update (ctx, data, data_len);

  if (PREDICT_TRUE (use_esn))
    HMAC_Update (ctx, (u8 *) & seq_hi, sizeof (seq_hi));
  HMAC_Final (ctx, signature, &len);

  return em->ipsec_proto_main_integ_algs[alg].trunc_size;
}
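/* Illustrative sketch (not part of the original header): a hypothetical
 * caller computing the ICV for an outbound packet.  HMAC_Final() writes the
 * full digest, so the scratch buffer must hold EVP_MAX_MD_SIZE bytes; only
 * the leading trunc_size bytes (the return value) go on the wire.  The
 * integ_alg / integ_key / integ_key_len field names are assumed from the
 * ipsec_sa_t of this release.
 */
always_inline u32
example_append_icv (ipsec_sa_t * sa, u8 * data, int data_len, u8 * icv_dst)
{
  u8 sig[EVP_MAX_MD_SIZE];
  unsigned int trunc_size;

  trunc_size = hmac_calc (sa->integ_alg, sa->integ_key, sa->integ_key_len,
			  data, data_len, sig, sa->use_esn, sa->seq_hi);
  clib_memcpy (icv_dst, sig, trunc_size);
  return trunc_size;
}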

#endif /* __ESP_H__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */