FD.io VPP v17.04.2-2-ga8f93f8
Vector Packet Processing
esp.h
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __ESP_H__
#define __ESP_H__

#include <openssl/hmac.h>
#include <openssl/rand.h>
#include <openssl/evp.h>

/* RFC 4303 ESP header: SPI and sequence number, followed by the
 * variable-length payload (IV and ciphertext). */
typedef struct
{
  u32 spi;
  u32 seq;
  u8 data[0];
} esp_header_t;

/* RFC 4303 ESP trailer fields that precede the ICV. */
typedef struct
{
  u8 pad_length;
  u8 next_header;
} esp_footer_t;

/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  esp_header_t esp;
}) ip4_and_esp_header_t;
/* *INDENT-ON* */

/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip6_header_t ip6;
  esp_header_t esp;
}) ip6_and_esp_header_t;
/* *INDENT-ON* */

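/*
 * Illustrative sketch added by the editor (not part of the original
 * header): because the typedefs above are packed, the SPI and sequence
 * number can be read straight out of a contiguous IPv4+ESP packet.
 * Both fields are carried in network byte order, so they must be
 * byte-swapped before use.  The helper name is hypothetical.
 */
always_inline u32
esp_example_get_seq (ip4_and_esp_header_t * pkt)
{
  return clib_net_to_host_u32 (pkt->esp.seq);
}
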
typedef struct
{
  const EVP_CIPHER *type;
} esp_crypto_alg_t;

typedef struct
{
  const EVP_MD *md;
  u8 trunc_size;
} esp_integ_alg_t;

/* Per-worker OpenSSL state; each context sits on its own cache line
 * to avoid false sharing between workers. */
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  EVP_CIPHER_CTX encrypt_ctx;
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
  EVP_CIPHER_CTX decrypt_ctx;
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
  HMAC_CTX hmac_ctx;
  ipsec_crypto_alg_t last_encrypt_alg;
  ipsec_crypto_alg_t last_decrypt_alg;
  ipsec_integ_alg_t last_integ_alg;
} esp_main_per_thread_data_t;

typedef struct
{
  esp_crypto_alg_t *esp_crypto_algs;
  esp_integ_alg_t *esp_integ_algs;
  esp_main_per_thread_data_t *per_thread_data;
} esp_main_t;

esp_main_t esp_main;

#define ESP_WINDOW_SIZE (64)	/* one bit per packet in the replay window */
#define ESP_SEQ_MAX (4294967295UL)	/* 2^32 - 1, last 32-bit sequence number */

/* Return 1 if the packet with (low 32-bit) sequence number 'seq' is a
 * replay or falls outside the window, 0 if it should be accepted. */
always_inline int
esp_replay_check (ipsec_sa_t * sa, u32 seq)
{
  u32 diff;

  if (PREDICT_TRUE (seq > sa->last_seq))
    return 0;

  diff = sa->last_seq - seq;

  if (ESP_WINDOW_SIZE > diff)
    return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
  else
    return 1;

  return 0;			/* not reached */
}
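
/*
 * Worked example added by the editor (illustration only; the function
 * name is hypothetical).  With last_seq = 100 and a 64-packet window,
 * anything newer passes, anything inside the window is checked against
 * its bit, and anything 64 or more behind is rejected outright.
 */
always_inline void
esp_replay_check_example (void)
{
  ipsec_sa_t sa = { 0 };
  sa.last_seq = 100;
  sa.replay_window = ~0ULL;	/* pretend every windowed packet was seen */

  ASSERT (esp_replay_check (&sa, 101) == 0);	/* newer: accept */
  ASSERT (esp_replay_check (&sa, 90) == 1);	/* bit 10 set: replay */
  ASSERT (esp_replay_check (&sa, 30) == 1);	/* 70 behind: too old */
}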

/* Extended-sequence-number variant of the check above.  The sender only
 * transmits the low 32 bits, so this also decides which 'seq_hi' epoch
 * the packet most plausibly belongs to and records it in sa->seq_hi. */
always_inline int
esp_replay_check_esn (ipsec_sa_t * sa, u32 seq)
{
  u32 tl = sa->last_seq;
  u32 th = sa->last_seq_hi;
  u32 diff = tl - seq;

  if (PREDICT_TRUE (tl >= (ESP_WINDOW_SIZE - 1)))
    {
      /* The window does not straddle a 32-bit wrap. */
      if (seq >= (tl - ESP_WINDOW_SIZE + 1))
        {
          sa->seq_hi = th;
          if (seq <= tl)
            return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
          else
            return 0;
        }
      else
        {
          /* Far below the window: assume the low 32 bits wrapped. */
          sa->seq_hi = th + 1;
          return 0;
        }
    }
  else
    {
      /* The window straddles a 32-bit wrap. */
      if (seq >= (tl - ESP_WINDOW_SIZE + 1))
        {
          /* High tail of the previous epoch. */
          sa->seq_hi = th - 1;
          return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
        }
      else
        {
          sa->seq_hi = th;
          if (seq <= tl)
            return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
          else
            return 0;
        }
    }

  return 0;			/* not reached */
}
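
/*
 * Worked ESN example added by the editor (illustration only; the
 * function name is hypothetical).  Just after the low 32 bits wrap,
 * a late packet from the previous epoch is still accepted and mapped
 * back to the old seq_hi, while a fresh packet stays in the new epoch.
 */
always_inline void
esp_replay_check_esn_example (void)
{
  ipsec_sa_t sa = { 0 };
  sa.last_seq = 5;		/* low 32 bits just wrapped ... */
  sa.last_seq_hi = 1;		/* ... into the second epoch */
  sa.replay_window = 0;		/* nothing marked as seen yet */

  /* 0xfffffff0 is 21 behind last_seq, inside the 64-wide window, so it
   * is attributed to the previous epoch and passes. */
  ASSERT (esp_replay_check_esn (&sa, 0xfffffff0) == 0);
  ASSERT (sa.seq_hi == 0);

  /* 6 is ahead of last_seq: passes in the current epoch. */
  ASSERT (esp_replay_check_esn (&sa, 6) == 0);
  ASSERT (sa.seq_hi == 1);
}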

/* TODO: the sequence increment should be atomic so it can be accessed
 * by multiple workers. */
always_inline void
esp_replay_advance (ipsec_sa_t * sa, u32 seq)
{
  u32 pos;

  if (seq > sa->last_seq)
    {
      /* Slide the window forward and mark 'seq' (bit 0) as seen. */
      pos = seq - sa->last_seq;
      if (pos < ESP_WINDOW_SIZE)
        sa->replay_window = ((sa->replay_window) << pos) | 1;
      else
        sa->replay_window = 1;
      sa->last_seq = seq;
    }
  else
    {
      /* Packet is inside the window: just mark its bit. */
      pos = sa->last_seq - seq;
      sa->replay_window |= (1ULL << pos);
    }
}
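
/*
 * Pairing sketch added by the editor (illustration only; the function
 * name is hypothetical).  On the inbound path the check runs first
 * and, only once the packet authenticates, the window is advanced so
 * a duplicate of the same sequence number is caught next time.
 */
always_inline int
esp_replay_example_accept (ipsec_sa_t * sa, u32 seq)
{
  if (esp_replay_check (sa, seq))
    return 0;			/* replayed or too old: drop */
  /* ... verify the ICV here before touching the window ... */
  esp_replay_advance (sa, seq);
  return 1;
}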

always_inline void
esp_replay_advance_esn (ipsec_sa_t * sa, u32 seq)
{
  int wrap = sa->seq_hi - sa->last_seq_hi;
  u32 pos;

  if (wrap == 0 && seq > sa->last_seq)
    {
      /* Same epoch, newer packet: slide the window forward. */
      pos = seq - sa->last_seq;
      if (pos < ESP_WINDOW_SIZE)
        sa->replay_window = ((sa->replay_window) << pos) | 1;
      else
        sa->replay_window = 1;
      sa->last_seq = seq;
    }
  else if (wrap > 0)
    {
      /* The check attributed the packet to a new epoch. */
      pos = ~seq + sa->last_seq + 1;
      if (pos < ESP_WINDOW_SIZE)
        sa->replay_window = ((sa->replay_window) << pos) | 1;
      else
        sa->replay_window = 1;
      sa->last_seq = seq;
      sa->last_seq_hi = sa->seq_hi;
    }
  else if (wrap < 0)
    {
      /* Straggler from the previous epoch: mark its bit. */
      pos = ~seq + sa->last_seq + 1;
      sa->replay_window |= (1ULL << pos);
    }
  else
    {
      /* Same epoch, older packet: mark its bit. */
      pos = sa->last_seq - seq;
      sa->replay_window |= (1ULL << pos);
    }
}

/* Advance the outbound sequence number.  Returns 1 when the sequence
 * space is exhausted and the SA must not send any more packets. */
always_inline int
esp_seq_advance (ipsec_sa_t * sa)
{
  if (PREDICT_TRUE (sa->use_esn))
    {
      if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
        {
          if (PREDICT_FALSE
              (sa->use_anti_replay && sa->seq_hi == ESP_SEQ_MAX))
            return 1;
          sa->seq_hi++;
        }
      sa->seq++;
    }
  else
    {
      if (PREDICT_FALSE (sa->use_anti_replay && sa->seq == ESP_SEQ_MAX))
        return 1;
      sa->seq++;
    }

  return 0;
}
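
/*
 * Outbound usage sketch added by the editor (illustration only; the
 * function name is hypothetical).  A non-zero return means the 32-bit
 * (or 64-bit extended) sequence space is exhausted, so the SA should
 * be re-keyed rather than allowed to wrap.
 */
always_inline int
esp_seq_advance_example (ipsec_sa_t * sa)
{
  if (PREDICT_FALSE (esp_seq_advance (sa)))
    return -1;			/* exhausted: drop and trigger re-key */
  return 0;			/* sa->seq is the low 32 bits for the wire */
}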

always_inline void
esp_init ()
{
  esp_main_t *em = &esp_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  memset (em, 0, sizeof (em[0]));

  vec_validate (em->esp_crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128].type = EVP_aes_128_cbc ();
  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192].type = EVP_aes_192_cbc ();
  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256].type = EVP_aes_256_cbc ();

  vec_validate (em->esp_integ_algs, IPSEC_INTEG_N_ALG - 1);
  esp_integ_alg_t *i;

  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA1_96];
  i->md = EVP_sha1 ();
  i->trunc_size = 12;

  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_96];
  i->md = EVP_sha256 ();
  i->trunc_size = 12;

  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_128];
  i->md = EVP_sha256 ();
  i->trunc_size = 16;

  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
  i->md = EVP_sha384 ();
  i->trunc_size = 24;

  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
  i->md = EVP_sha512 ();
  i->trunc_size = 32;

  vec_validate_aligned (em->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);
  int thread_id;

  for (thread_id = 0; thread_id < tm->n_vlib_mains - 1; thread_id++)
    {
      EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].encrypt_ctx));
      EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].decrypt_ctx));
      HMAC_CTX_init (&(em->per_thread_data[thread_id].hmac_ctx));
    }
}

/* Compute the HMAC over 'data' (and, for ESN, the high sequence bits)
 * into 'signature'; returns the number of bytes to use as the ICV. */
always_inline unsigned int
hmac_calc (ipsec_integ_alg_t alg,
	   u8 * key,
	   int key_len,
	   u8 * data, int data_len, u8 * signature, u8 use_esn, u32 seq_hi)
{
  esp_main_t *em = &esp_main;
  u32 cpu_index = os_get_cpu_number ();
  HMAC_CTX *ctx = &(em->per_thread_data[cpu_index].hmac_ctx);
  const EVP_MD *md = NULL;
  unsigned int len;

  ASSERT (alg < IPSEC_INTEG_N_ALG);

  if (PREDICT_FALSE (em->esp_integ_algs[alg].md == 0))
    return 0;

  /* Passing a NULL md to HMAC_Init reuses the previously configured
   * digest, so it is only looked up when the algorithm changes. */
  if (PREDICT_FALSE (alg != em->per_thread_data[cpu_index].last_integ_alg))
    {
      md = em->esp_integ_algs[alg].md;
      em->per_thread_data[cpu_index].last_integ_alg = alg;
    }

  HMAC_Init (ctx, key, key_len, md);

  HMAC_Update (ctx, data, data_len);

  if (PREDICT_TRUE (use_esn))
    HMAC_Update (ctx, (u8 *) & seq_hi, sizeof (seq_hi));
  HMAC_Final (ctx, signature, &len);

  return em->esp_integ_algs[alg].trunc_size;
}
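
/*
 * Usage sketch added by the editor (illustration only).  The function
 * name is hypothetical, the SA key fields are assumed to match the
 * ipsec_sa_t of this release, and EVP_MAX_MD_SIZE is assumed large
 * enough for every digest configured in esp_init (it covers SHA-512).
 * Per RFC 4303 only the first trunc_size bytes of the HMAC output are
 * carried in the packet as the ICV.
 */
always_inline void
hmac_calc_example (ipsec_sa_t * sa, u8 * data, int data_len, u8 * icv_out)
{
  u8 sig[EVP_MAX_MD_SIZE];
  unsigned int trunc_size;

  trunc_size = hmac_calc (sa->integ_alg, sa->integ_key, sa->integ_key_len,
			  data, data_len, sig, sa->use_esn, sa->seq_hi);
  clib_memcpy (icv_out, sig, trunc_size);
}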

#endif /* __ESP_H__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */