author		Steffen Klassert <steffen.klassert@secunet.com>	2011-03-07 19:07:14 -0500
committer	David S. Miller <davem@davemloft.net>	2011-03-13 23:22:29 -0400
commit		0dc49e9b28a7253ff05be2794d747f8ea5f1f423 (patch)
tree		9cb4574b77949df1cf2f9cc0015e0751b59ad0aa /net/ipv4/esp4.c
parent		1ce3644ade9c865c755bf0f6a4e109b7bb6eb60f (diff)
esp4: Add support for IPsec extended sequence numbers
This patch adds IPsec extended sequence number (ESN) support to esp4.
We use the authencesn crypto algorithm to handle ESP with separate
encryption and authentication algorithms.
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
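
For readers new to extended sequence numbers: RFC 4303 defines a 64-bit
sequence counter of which only the low-order 32 bits are carried in the
ESP header; both peers track the high-order bits implicitly and mix them
into the ICV computation. A minimal sketch of that split, with
illustrative helper names that are not part of this patch:

    /* Hedged sketch: only esn_low() appears on the wire; esn_high()
     * is reconstructed by the receiver and only authenticated. */
    static inline u32 esn_low(u64 seq)  { return (u32)seq; }
    static inline u32 esn_high(u64 seq) { return (u32)(seq >> 32); }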
Diffstat (limited to 'net/ipv4/esp4.c')
-rw-r--r--	net/ipv4/esp4.c	100
1 file changed, 82 insertions(+), 18 deletions(-)
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 882dbbb7d799..03f994bcf7de 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -33,11 +33,14 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
  *
  * TODO: Use spare space in skb for this where possible.
  */
-static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
 {
 	unsigned int len;
 
-	len = crypto_aead_ivsize(aead);
+	len = seqhilen;
+
+	len += crypto_aead_ivsize(aead);
+
 	if (len) {
 		len += crypto_aead_alignmask(aead) &
 		       ~(crypto_tfm_ctx_alignment() - 1);
@@ -52,10 +55,15 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
 	return kmalloc(len, GFP_ATOMIC);
 }
 
-static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
+static inline __be32 *esp_tmp_seqhi(void *tmp)
+{
+	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
+}
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
 {
 	return crypto_aead_ivsize(aead) ?
-		PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
+	       PTR_ALIGN((u8 *)tmp + seqhilen,
+			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
 }
 
 static inline struct aead_givcrypt_request *esp_tmp_givreq(
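
The scratch buffer returned by esp_alloc_tmp() now reserves seqhilen
bytes at the very front for the high sequence-number bits;
esp_tmp_seqhi() hands back that slot, and esp_tmp_iv() skips past it
before applying the cipher's alignment. A rough picture of the layout
these helpers carve out (offsets vary with the algorithm's alignmask,
so this is a sketch, not an exact map):

    /*
     * tmp -> +--------+--------+--------------+------------------+
     *        | seqhi  |   IV   | aead request | asg[] then sg[]  |
     *        | 4 B    | ivsize | (+ context)  | scatterlists     |
     *        +--------+--------+--------------+------------------+
     *
     * seqhi = esp_tmp_seqhi(tmp);
     * iv    = esp_tmp_iv(aead, tmp, seqhilen);
     */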
@@ -122,6 +130,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	int plen;
 	int tfclen;
 	int nfrags;
+	int assoclen;
+	int sglists;
+	int seqhilen;
+	__be32 *seqhi;
 
 	/* skb is pure payload to encrypt */
 
@@ -151,14 +163,25 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 		goto error;
 	nfrags = err;
 
-	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	assoclen = sizeof(*esph);
+	sglists = 1;
+	seqhilen = 0;
+
+	if (x->props.flags & XFRM_STATE_ESN) {
+		sglists += 2;
+		seqhilen += sizeof(__be32);
+		assoclen += seqhilen;
+	}
+
+	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
 	if (!tmp)
 		goto error;
 
-	iv = esp_tmp_iv(aead, tmp);
+	seqhi = esp_tmp_seqhi(tmp);
+	iv = esp_tmp_iv(aead, tmp, seqhilen);
 	req = esp_tmp_givreq(aead, iv);
 	asg = esp_givreq_sg(aead, req);
-	sg = asg + 1;
+	sg = asg + sglists;
 
 	/* Fill padding... */
 	tail = skb_tail_pointer(trailer);
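
Worked through for both cases (the numbers follow directly from the
code above, given that sizeof(struct ip_esp_hdr) is 8 bytes):

    /* ESN off: seqhilen = 0, sglists = 1, assoclen = 8
     *          (one scatterlist entry covering SPI + seq_no)
     * ESN on:  seqhilen = 4, sglists = 3, assoclen = 8 + 4 = 12
     *          (separate entries for SPI, seq-hi and seq-lo)
     */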
@@ -221,11 +244,19 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	skb_to_sgvec(skb, sg,
 		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
 		     clen + alen);
-	sg_init_one(asg, esph, sizeof(*esph));
+
+	if ((x->props.flags & XFRM_STATE_ESN)) {
+		sg_init_table(asg, 3);
+		sg_set_buf(asg, &esph->spi, sizeof(__be32));
+		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+		sg_set_buf(asg + 1, seqhi, seqhilen);
+		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
+	} else
+		sg_init_one(asg, esph, sizeof(*esph));
 
 	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
 	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
-	aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
+	aead_givcrypt_set_assoc(req, asg, assoclen);
 	aead_givcrypt_set_giv(req, esph->enc_data,
 			      XFRM_SKB_CB(skb)->seq.output.low);
 
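
The three-entry scatterlist reconstructs the associated data RFC 4303
prescribes for ESN: the ICV covers SPI || seq-hi || seq-lo even though
seq-hi is never transmitted. SPI and seq-lo sit next to each other in
the ESP header while seq-hi lives in the scratch buffer, so the AAD has
to be gathered from three places. Sketched as if it were one contiguous
struct (hypothetical, for illustration only; the code deliberately
avoids such a copy):

    struct esp_esn_aad {
    	__be32 spi;	/* from the ESP header            */
    	__be32 seq_hi;	/* implicit, from the scratch buf */
    	__be32 seq_no;	/* from the ESP header            */
    };			/* 12 bytes == assoclen above     */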
@@ -346,6 +377,10 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 	struct sk_buff *trailer;
 	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
 	int nfrags;
+	int assoclen;
+	int sglists;
+	int seqhilen;
+	__be32 *seqhi;
 	void *tmp;
 	u8 *iv;
 	struct scatterlist *sg;
@@ -362,16 +397,27 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 		goto out;
 	nfrags = err;
 
+	assoclen = sizeof(*esph);
+	sglists = 1;
+	seqhilen = 0;
+
+	if (x->props.flags & XFRM_STATE_ESN) {
+		sglists += 2;
+		seqhilen += sizeof(__be32);
+		assoclen += seqhilen;
+	}
+
 	err = -ENOMEM;
-	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
 	if (!tmp)
 		goto out;
 
 	ESP_SKB_CB(skb)->tmp = tmp;
-	iv = esp_tmp_iv(aead, tmp);
+	seqhi = esp_tmp_seqhi(tmp);
+	iv = esp_tmp_iv(aead, tmp, seqhilen);
 	req = esp_tmp_req(aead, iv);
 	asg = esp_req_sg(aead, req);
-	sg = asg + 1;
+	sg = asg + sglists;
 
 	skb->ip_summed = CHECKSUM_NONE;
 
@@ -382,11 +428,19 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	sg_init_table(sg, nfrags);
 	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
-	sg_init_one(asg, esph, sizeof(*esph));
+
+	if ((x->props.flags & XFRM_STATE_ESN)) {
+		sg_init_table(asg, 3);
+		sg_set_buf(asg, &esph->spi, sizeof(__be32));
+		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
+		sg_set_buf(asg + 1, seqhi, seqhilen);
+		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
+	} else
+		sg_init_one(asg, esph, sizeof(*esph));
 
 	aead_request_set_callback(req, 0, esp_input_done, skb);
 	aead_request_set_crypt(req, sg, sg, elen, iv);
-	aead_request_set_assoc(req, asg, sizeof(*esph));
+	aead_request_set_assoc(req, asg, assoclen);
 
 	err = crypto_aead_decrypt(req);
 	if (err == -EINPROGRESS)
@@ -500,10 +554,20 @@ static int esp_init_authenc(struct xfrm_state *x)
 		goto error;
 
 	err = -ENAMETOOLONG;
-	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
-		     x->aalg ? x->aalg->alg_name : "digest_null",
-		     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
-		goto error;
+
+	if ((x->props.flags & XFRM_STATE_ESN)) {
+		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+			     "authencesn(%s,%s)",
+			     x->aalg ? x->aalg->alg_name : "digest_null",
+			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+			goto error;
+	} else {
+		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+			     "authenc(%s,%s)",
+			     x->aalg ? x->aalg->alg_name : "digest_null",
+			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+			goto error;
+	}
 
 	aead = crypto_alloc_aead(authenc_name, 0, 0);
 	err = PTR_ERR(aead);
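
As a concrete example, an SA configured with HMAC-SHA1 and AES-CBC
would resolve to the following template names (the algorithm pair is
hypothetical; any authenc-capable combination behaves the same way):

    /* ESN off: "authenc(hmac(sha1),cbc(aes))"    */
    /* ESN on:  "authencesn(hmac(sha1),cbc(aes))" */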