Diffstat (limited to 'net/ipv4/esp4.c')
 net/ipv4/esp4.c | 143
 1 file changed, 112 insertions(+), 31 deletions(-)
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 14ca1f1c3fb0..a5b413416da3 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -23,6 +23,8 @@ struct esp_skb_cb {
 
 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
 
+static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
+
 /*
  * Allocate an AEAD request structure with extra space for SG and IV.
  *
@@ -31,11 +33,14 @@ struct esp_skb_cb {
  *
  * TODO: Use spare space in skb for this where possible.
  */
-static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
 {
 	unsigned int len;
 
-	len = crypto_aead_ivsize(aead);
+	len = seqhilen;
+
+	len += crypto_aead_ivsize(aead);
+
 	if (len) {
 		len += crypto_aead_alignmask(aead) &
 		       ~(crypto_tfm_ctx_alignment() - 1);
@@ -50,10 +55,15 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
 	return kmalloc(len, GFP_ATOMIC);
 }
 
-static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
+static inline __be32 *esp_tmp_seqhi(void *tmp)
+{
+	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
+}
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
 {
 	return crypto_aead_ivsize(aead) ?
-		PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
+		PTR_ALIGN((u8 *)tmp + seqhilen,
+			  crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
 }
 
 static inline struct aead_givcrypt_request *esp_tmp_givreq(
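
Note on the hunk above: the scratch buffer returned by esp_alloc_tmp() is now carved up as [ESN high bits][IV][AEAD request][scatterlist entries], with esp_tmp_seqhi() and esp_tmp_iv() returning the first two regions. A minimal userspace sketch of the pointer arithmetic follows; ALIGNMASK is a hypothetical stand-in for the crypto_aead_alignmask() value of the negotiated transform.

#include <stdint.h>
#include <stdio.h>

#define ALIGNMASK 3	/* hypothetical crypto_aead_alignmask(aead) */
#define PTR_ALIGN_UP(p, a) \
	((void *)(((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1)))

int main(void)
{
	unsigned char tmp[256];
	int seqhilen = sizeof(uint32_t);	/* 0 when ESN is disabled */

	/* esp_tmp_seqhi(): ESN high bits live at the start of tmp */
	void *seqhi = PTR_ALIGN_UP(tmp, __alignof__(uint32_t));
	/* esp_tmp_iv(): the IV follows the (possibly empty) seqhi region */
	void *iv = PTR_ALIGN_UP(tmp + seqhilen, ALIGNMASK + 1);

	printf("seqhi offset %zu, iv offset %zu\n",
	       (size_t)((unsigned char *)seqhi - tmp),
	       (size_t)((unsigned char *)iv - tmp));
	return 0;
}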
@@ -117,46 +127,75 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	int blksize;
 	int clen;
 	int alen;
+	int plen;
+	int tfclen;
 	int nfrags;
+	int assoclen;
+	int sglists;
+	int seqhilen;
+	__be32 *seqhi;
 
 	/* skb is pure payload to encrypt */
 
 	err = -ENOMEM;
 
-	/* Round to block size */
-	clen = skb->len;
-
 	esp = x->data;
 	aead = esp->aead;
 	alen = crypto_aead_authsize(aead);
 
+	tfclen = 0;
+	if (x->tfcpad) {
+		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+		u32 padto;
+
+		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
+		if (skb->len < padto)
+			tfclen = padto - skb->len;
+	}
 	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
-	clen = ALIGN(clen + 2, blksize);
+	clen = ALIGN(skb->len + 2 + tfclen, blksize);
 	if (esp->padlen)
 		clen = ALIGN(clen, esp->padlen);
+	plen = clen - skb->len - tfclen;
 
-	if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
+	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
+	if (err < 0)
 		goto error;
 	nfrags = err;
 
-	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	assoclen = sizeof(*esph);
+	sglists = 1;
+	seqhilen = 0;
+
+	if (x->props.flags & XFRM_STATE_ESN) {
+		sglists += 2;
+		seqhilen += sizeof(__be32);
+		assoclen += seqhilen;
+	}
+
+	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
 	if (!tmp)
 		goto error;
 
-	iv = esp_tmp_iv(aead, tmp);
+	seqhi = esp_tmp_seqhi(tmp);
+	iv = esp_tmp_iv(aead, tmp, seqhilen);
 	req = esp_tmp_givreq(aead, iv);
 	asg = esp_givreq_sg(aead, req);
-	sg = asg + 1;
+	sg = asg + sglists;
 
 	/* Fill padding... */
 	tail = skb_tail_pointer(trailer);
+	if (tfclen) {
+		memset(tail, 0, tfclen);
+		tail += tfclen;
+	}
 	do {
 		int i;
-		for (i=0; i<clen-skb->len - 2; i++)
+		for (i = 0; i < plen - 2; i++)
 			tail[i] = i + 1;
 	} while (0);
-	tail[clen - skb->len - 2] = (clen - skb->len) - 2;
-	tail[clen - skb->len - 1] = *skb_mac_header(skb);
+	tail[plen - 2] = plen - 2;
+	tail[plen - 1] = *skb_mac_header(skb);
 	pskb_put(skb, trailer, clen - skb->len + alen);
 
 	skb_push(skb, -skb_network_offset(skb));
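
As a side note on the arithmetic introduced above: tfclen is the dummy TFC padding needed to grow the payload to x->tfcpad (capped at the path MTU), clen is the encrypted length rounded up to the cipher block size, and plen = clen - skb->len - tfclen is the classic self-describing ESP trailer padding. A small standalone sketch with hypothetical input values:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int skb_len = 100;	/* hypothetical payload length */
	int padto   = 128;	/* min(x->tfcpad, esp4_get_mtu(...)) */
	int blksize = 16;	/* ALIGN(crypto_aead_blocksize(aead), 4) */

	int tfclen = skb_len < padto ? padto - skb_len : 0;
	/* +2 covers the pad length and next header bytes of the trailer */
	int clen   = ALIGN_UP(skb_len + 2 + tfclen, blksize);
	int plen   = clen - skb_len - tfclen;

	printf("tfclen=%d clen=%d plen=%d\n", tfclen, clen, plen);
	return 0;
}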
@@ -199,19 +238,27 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	}
 
 	esph->spi = x->id.spi;
-	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
+	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
 
 	sg_init_table(sg, nfrags);
 	skb_to_sgvec(skb, sg,
 		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
 		     clen + alen);
-	sg_init_one(asg, esph, sizeof(*esph));
+
+	if ((x->props.flags & XFRM_STATE_ESN)) {
+		sg_init_table(asg, 3);
+		sg_set_buf(asg, &esph->spi, sizeof(__be32));
+		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+		sg_set_buf(asg + 1, seqhi, seqhilen);
+		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
+	} else
+		sg_init_one(asg, esph, sizeof(*esph));
 
 	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
 	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
-	aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
+	aead_givcrypt_set_assoc(req, asg, assoclen);
 	aead_givcrypt_set_giv(req, esph->enc_data,
-			      XFRM_SKB_CB(skb)->seq.output);
+			      XFRM_SKB_CB(skb)->seq.output.low);
 
 	ESP_SKB_CB(skb)->tmp = tmp;
 	err = crypto_aead_givencrypt(req);
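
With XFRM_STATE_ESN set, the associated data fed to the AEAD is no longer the contiguous ESP header: it becomes three scatterlist entries covering the SPI, the upper 32 bits of the 64-bit sequence number (kept only in the temporary buffer, never sent on the wire), and the low 32 bits from the header, which is why assoclen grows by seqhilen. A rough flattened view of what those three entries cover (hypothetical userspace struct, not the kernel's representation):

#include <arpa/inet.h>
#include <stdint.h>

/* Hypothetical flat equivalent of the three asg entries built above. */
struct esn_assoc {
	uint32_t spi;		/* asg[0]: esph->spi, already big endian  */
	uint32_t seq_hi;	/* asg[1]: *seqhi = htonl(seq.output.hi)  */
	uint32_t seq_lo;	/* asg[2]: esph->seq_no (on the wire)     */
};

void build_esn_assoc(struct esn_assoc *a, uint32_t spi_be, uint64_t seq)
{
	a->spi    = spi_be;
	a->seq_hi = htonl((uint32_t)(seq >> 32));
	a->seq_lo = htonl((uint32_t)seq);
}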
@@ -229,7 +276,7 @@ error:
 
 static int esp_input_done2(struct sk_buff *skb, int err)
 {
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	struct xfrm_state *x = xfrm_input_state(skb);
 	struct esp_data *esp = x->data;
 	struct crypto_aead *aead = esp->aead;
@@ -330,6 +377,10 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 	struct sk_buff *trailer;
 	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
 	int nfrags;
+	int assoclen;
+	int sglists;
+	int seqhilen;
+	__be32 *seqhi;
 	void *tmp;
 	u8 *iv;
 	struct scatterlist *sg;
@@ -346,16 +397,27 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 		goto out;
 	nfrags = err;
 
+	assoclen = sizeof(*esph);
+	sglists = 1;
+	seqhilen = 0;
+
+	if (x->props.flags & XFRM_STATE_ESN) {
+		sglists += 2;
+		seqhilen += sizeof(__be32);
+		assoclen += seqhilen;
+	}
+
 	err = -ENOMEM;
-	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
 	if (!tmp)
 		goto out;
 
 	ESP_SKB_CB(skb)->tmp = tmp;
-	iv = esp_tmp_iv(aead, tmp);
+	seqhi = esp_tmp_seqhi(tmp);
+	iv = esp_tmp_iv(aead, tmp, seqhilen);
 	req = esp_tmp_req(aead, iv);
 	asg = esp_req_sg(aead, req);
-	sg = asg + 1;
+	sg = asg + sglists;
 
 	skb->ip_summed = CHECKSUM_NONE;
 
@@ -366,11 +428,19 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	sg_init_table(sg, nfrags);
 	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
-	sg_init_one(asg, esph, sizeof(*esph));
+
+	if ((x->props.flags & XFRM_STATE_ESN)) {
+		sg_init_table(asg, 3);
+		sg_set_buf(asg, &esph->spi, sizeof(__be32));
+		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
+		sg_set_buf(asg + 1, seqhi, seqhilen);
+		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
+	} else
+		sg_init_one(asg, esph, sizeof(*esph));
 
 	aead_request_set_callback(req, 0, esp_input_done, skb);
 	aead_request_set_crypt(req, sg, sg, elen, iv);
-	aead_request_set_assoc(req, asg, sizeof(*esph));
+	aead_request_set_assoc(req, asg, assoclen);
 
 	err = crypto_aead_decrypt(req);
 	if (err == -EINPROGRESS)
@@ -414,7 +484,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
 static void esp4_err(struct sk_buff *skb, u32 info)
 {
 	struct net *net = dev_net(skb->dev);
-	struct iphdr *iph = (struct iphdr *)skb->data;
+	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
 	struct xfrm_state *x;
 
@@ -422,7 +492,8 @@ static void esp4_err(struct sk_buff *skb, u32 info)
 	    icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
 		return;
 
-	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
+	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+			      esph->spi, IPPROTO_ESP, AF_INET);
 	if (!x)
 		return;
 	NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
@@ -484,10 +555,20 @@ static int esp_init_authenc(struct xfrm_state *x)
 		goto error;
 
 	err = -ENAMETOOLONG;
-	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
-		     x->aalg ? x->aalg->alg_name : "digest_null",
-		     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
-		goto error;
+
+	if ((x->props.flags & XFRM_STATE_ESN)) {
+		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+			     "authencesn(%s,%s)",
+			     x->aalg ? x->aalg->alg_name : "digest_null",
+			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+			goto error;
+	} else {
+		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+			     "authenc(%s,%s)",
+			     x->aalg ? x->aalg->alg_name : "digest_null",
+			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+			goto error;
+	}
 
 	aead = crypto_alloc_aead(authenc_name, 0, 0);
 	err = PTR_ERR(aead);
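
The last hunk switches the AEAD template name from authenc() to authencesn() when the state uses extended sequence numbers, so the allocated transform expects the split associated data built in the earlier hunks. A compact illustration of the name construction (hypothetical algorithm names, userspace):

#include <stdio.h>

#define CRYPTO_MAX_ALG_NAME 64

int main(void)
{
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	const char *aalg = "hmac(sha1)";	/* stand-in for x->aalg->alg_name */
	const char *ealg = "cbc(aes)";		/* stand-in for x->ealg->alg_name */
	int esn = 1;				/* x->props.flags & XFRM_STATE_ESN */

	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "%s(%s,%s)",
		     esn ? "authencesn" : "authenc", aalg, ealg)
	    >= CRYPTO_MAX_ALG_NAME)
		return 1;	/* the kernel returns -ENAMETOOLONG here */

	printf("%s\n", authenc_name);	/* authencesn(hmac(sha1),cbc(aes)) */
	return 0;
}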