author	Herbert Xu <herbert@gondor.apana.org.au>	2008-01-28 22:35:05 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-31 22:27:02 -0500
commit	38320c70d282be1997a5204c7c7fe14c3aa6bfaa (patch)
tree	aa86d8c3e26a3cc6d283a31b21116a35c7659f8f /net/ipv4
parent	bf164cc054d568fa7889ffab41d3b091f5758c75 (diff)
[IPSEC]: Use crypto_aead and authenc in ESP
This patch converts ESP to use the crypto_aead interface and in
particular the authenc algorithm.  This lays the foundations for future
support of combined mode algorithms.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
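For reference, the key blob that crypto_aead_setkey() consumes for an
authenc transform is an rtattr-framed parameter block (carrying the
encryption key length) followed by the raw authentication key and then
the raw encryption key, exactly as esp_init_state() packs it in the diff
below.  The following is a minimal sketch of that convention, using a
hypothetical helper name and the example algorithm
"authenc(hmac(sha1),cbc(aes))"; neither is mandated by this patch, and
it assumes the same headers the patch adds (crypto/aead.h,
crypto/authenc.h, linux/rtnetlink.h):

	/* Hypothetical illustration, not part of the patch. */
	static struct crypto_aead *example_authenc_tfm(const u8 *auth_key,
						       unsigned int auth_len,
						       const u8 *enc_key,
						       unsigned int enc_len,
						       unsigned int icv_len)
	{
		struct crypto_authenc_key_param *param;
		struct crypto_aead *aead;
		struct rtattr *rta;
		unsigned int keylen;
		char *key, *p;
		int err;

		aead = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
		if (IS_ERR(aead))
			return aead;

		/* Key blob layout: [rtattr header][param][auth key][enc key] */
		keylen = RTA_SPACE(sizeof(*param)) + auth_len + enc_len;
		err = -ENOMEM;
		key = kmalloc(keylen, GFP_KERNEL);
		if (!key)
			goto free_aead;

		p = key;
		rta = (void *)p;
		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
		rta->rta_len = RTA_LENGTH(sizeof(*param));
		param = RTA_DATA(rta);
		param->enckeylen = cpu_to_be32(enc_len);
		p += RTA_SPACE(sizeof(*param));

		memcpy(p, auth_key, auth_len);
		memcpy(p + auth_len, enc_key, enc_len);

		err = crypto_aead_setkey(aead, key, keylen);
		kfree(key);
		if (err)
			goto free_aead;

		/* ESP truncates the ICV, e.g. to 12 bytes for hmac(sha1). */
		err = crypto_aead_setauthsize(aead, icv_len);
		if (err)
			goto free_aead;

		return aead;

	free_aead:
		crypto_free_aead(aead);
		return ERR_PTR(err);
	}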
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/Kconfig	1
-rw-r--r--	net/ipv4/esp4.c	497
2 files changed, 303 insertions, 195 deletions
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 24e2b7294bf8..19880b086e71 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -343,6 +343,7 @@ config INET_ESP
 	tristate "IP: ESP transformation"
 	select XFRM
 	select CRYPTO
+	select CRYPTO_AEAD
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
 	select CRYPTO_CBC
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 28ea5c77ca23..c4047223bfbe 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -1,27 +1,118 @@
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/esp.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/pfkeyv2.h>
-#include <linux/random.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/in6.h>
 #include <net/icmp.h>
 #include <net/protocol.h>
 #include <net/udp.h>
 
+struct esp_skb_cb {
+	struct xfrm_skb_cb xfrm;
+	void *tmp;
+};
+
+#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
+
+/*
+ * Allocate an AEAD request structure with extra space for SG and IV.
+ *
+ * For alignment considerations the IV is placed at the front, followed
+ * by the request and finally the SG list.
+ *
+ * TODO: Use spare space in skb for this where possible.
+ */
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
+{
+	unsigned int len;
+
+	len = crypto_aead_ivsize(aead);
+	if (len) {
+		len += crypto_aead_alignmask(aead) &
+		       ~(crypto_tfm_ctx_alignment() - 1);
+		len = ALIGN(len, crypto_tfm_ctx_alignment());
+	}
+
+	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
+	len = ALIGN(len, __alignof__(struct scatterlist));
+
+	len += sizeof(struct scatterlist) * nfrags;
+
+	return kmalloc(len, GFP_ATOMIC);
+}
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
+{
+	return crypto_aead_ivsize(aead) ?
+	       PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
+}
+
+static inline struct aead_givcrypt_request *esp_tmp_givreq(
+	struct crypto_aead *aead, u8 *iv)
+{
+	struct aead_givcrypt_request *req;
+
+	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
+				crypto_tfm_ctx_alignment());
+	aead_givcrypt_set_tfm(req, aead);
+	return req;
+}
+
+static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
+{
+	struct aead_request *req;
+
+	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
+				crypto_tfm_ctx_alignment());
+	aead_request_set_tfm(req, aead);
+	return req;
+}
+
+static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
+					     struct aead_request *req)
+{
+	return (void *)ALIGN((unsigned long)(req + 1) +
+			     crypto_aead_reqsize(aead),
+			     __alignof__(struct scatterlist));
+}
+
+static inline struct scatterlist *esp_givreq_sg(
+	struct crypto_aead *aead, struct aead_givcrypt_request *req)
+{
+	return (void *)ALIGN((unsigned long)(req + 1) +
+			     crypto_aead_reqsize(aead),
+			     __alignof__(struct scatterlist));
+}
+
+static void esp_output_done(struct crypto_async_request *base, int err)
+{
+	struct sk_buff *skb = base->data;
+
+	kfree(ESP_SKB_CB(skb)->tmp);
+	xfrm_output_resume(skb, err);
+}
+
 static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err;
 	struct ip_esp_hdr *esph;
-	struct crypto_blkcipher *tfm;
-	struct blkcipher_desc desc;
+	struct crypto_aead *aead;
+	struct aead_givcrypt_request *req;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
 	struct esp_data *esp;
 	struct sk_buff *trailer;
+	void *tmp;
+	u8 *iv;
 	u8 *tail;
 	int blksize;
 	int clen;
@@ -36,18 +127,27 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	clen = skb->len;
 
 	esp = x->data;
-	alen = esp->auth.icv_trunc_len;
-	tfm = esp->conf.tfm;
-	desc.tfm = tfm;
-	desc.flags = 0;
-	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
+	aead = esp->aead;
+	alen = crypto_aead_authsize(aead);
+
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
 	clen = ALIGN(clen + 2, blksize);
-	if (esp->conf.padlen)
-		clen = ALIGN(clen, esp->conf.padlen);
+	if (esp->padlen)
+		clen = ALIGN(clen, esp->padlen);
+
+	if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
+		goto error;
+	nfrags = err;
 
-	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0)
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
 		goto error;
 
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_givreq(aead, iv);
+	asg = esp_givreq_sg(aead, req);
+	sg = asg + 1;
+
 	/* Fill padding... */
 	tail = skb_tail_pointer(trailer);
 	do {
@@ -56,28 +156,34 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 			tail[i] = i + 1;
 	} while (0);
 	tail[clen - skb->len - 2] = (clen - skb->len) - 2;
-	pskb_put(skb, trailer, clen - skb->len);
+	tail[clen - skb->len - 1] = *skb_mac_header(skb);
+	pskb_put(skb, trailer, clen - skb->len + alen);
 
 	skb_push(skb, -skb_network_offset(skb));
 	esph = ip_esp_hdr(skb);
-	*(skb_tail_pointer(trailer) - 1) = *skb_mac_header(skb);
 	*skb_mac_header(skb) = IPPROTO_ESP;
 
-	spin_lock_bh(&x->lock);
-
 	/* this is non-NULL only with UDP Encapsulation */
 	if (x->encap) {
 		struct xfrm_encap_tmpl *encap = x->encap;
 		struct udphdr *uh;
 		__be32 *udpdata32;
+		unsigned int sport, dport;
+		int encap_type;
+
+		spin_lock_bh(&x->lock);
+		sport = encap->encap_sport;
+		dport = encap->encap_dport;
+		encap_type = encap->encap_type;
+		spin_unlock_bh(&x->lock);
 
 		uh = (struct udphdr *)esph;
-		uh->source = encap->encap_sport;
-		uh->dest = encap->encap_dport;
-		uh->len = htons(skb->len + alen - skb_transport_offset(skb));
+		uh->source = sport;
+		uh->dest = dport;
+		uh->len = htons(skb->len - skb_transport_offset(skb));
 		uh->check = 0;
 
-		switch (encap->encap_type) {
+		switch (encap_type) {
 		default:
 		case UDP_ENCAP_ESPINUDP:
 			esph = (struct ip_esp_hdr *)(uh + 1);
@@ -95,131 +201,45 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	esph->spi = x->id.spi;
 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
 
-	if (esp->conf.ivlen) {
-		if (unlikely(!esp->conf.ivinitted)) {
-			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
-			esp->conf.ivinitted = 1;
-		}
-		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
-	}
-
-	do {
-		struct scatterlist *sg = &esp->sgbuf[0];
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg,
+		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
+		     clen + alen);
+	sg_init_one(asg, esph, sizeof(*esph));
+
+	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
+	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
+	aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
+	aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq);
 
-		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-			if (!sg)
-				goto unlock;
-		}
-		sg_init_table(sg, nfrags);
-		skb_to_sgvec(skb, sg,
-			     esph->enc_data +
-			     esp->conf.ivlen -
-			     skb->data, clen);
-		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
-		if (unlikely(sg != &esp->sgbuf[0]))
-			kfree(sg);
-	} while (0);
-
-	if (unlikely(err))
-		goto unlock;
-
-	if (esp->conf.ivlen) {
-		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
-		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
-	}
+	ESP_SKB_CB(skb)->tmp = tmp;
+	err = crypto_aead_givencrypt(req);
+	if (err == -EINPROGRESS)
+		goto error;
 
-	if (esp->auth.icv_full_len) {
-		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
-				     sizeof(*esph) + esp->conf.ivlen + clen);
-		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
-	}
+	if (err == -EBUSY)
+		err = NET_XMIT_DROP;
 
-unlock:
-	spin_unlock_bh(&x->lock);
+	kfree(tmp);
 
 error:
 	return err;
 }
 
-/*
- * Note: detecting truncated vs. non-truncated authentication data is very
- * expensive, so we only support truncated data, which is the recommended
- * and common case.
- */
-static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
+static int esp_input_done2(struct sk_buff *skb, int err)
 {
 	struct iphdr *iph;
-	struct ip_esp_hdr *esph;
+	struct xfrm_state *x = xfrm_input_state(skb);
 	struct esp_data *esp = x->data;
-	struct crypto_blkcipher *tfm = esp->conf.tfm;
-	struct blkcipher_desc desc = { .tfm = tfm };
-	struct sk_buff *trailer;
-	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
-	int alen = esp->auth.icv_trunc_len;
-	int elen = skb->len - sizeof(*esph) - esp->conf.ivlen - alen;
-	int nfrags;
+	struct crypto_aead *aead = esp->aead;
+	int alen = crypto_aead_authsize(aead);
+	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
+	int elen = skb->len - hlen;
 	int ihl;
 	u8 nexthdr[2];
-	struct scatterlist *sg;
 	int padlen;
-	int err = -EINVAL;
 
-	if (!pskb_may_pull(skb, sizeof(*esph)))
-		goto out;
-
-	if (elen <= 0 || (elen & (blksize-1)))
-		goto out;
-
-	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
-		goto out;
-	nfrags = err;
-
-	skb->ip_summed = CHECKSUM_NONE;
-
-	spin_lock(&x->lock);
-
-	/* If integrity check is required, do this. */
-	if (esp->auth.icv_full_len) {
-		u8 sum[alen];
-
-		err = esp_mac_digest(esp, skb, 0, skb->len - alen);
-		if (err)
-			goto unlock;
-
-		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
-			BUG();
-
-		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
-			err = -EBADMSG;
-			goto unlock;
-		}
-	}
-
-	esph = (struct ip_esp_hdr *)skb->data;
-
-	/* Get ivec. This can be wrong, check against another impls. */
-	if (esp->conf.ivlen)
-		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
-
-	sg = &esp->sgbuf[0];
-
-	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-		err = -ENOMEM;
-		sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-		if (!sg)
-			goto unlock;
-	}
-	sg_init_table(sg, nfrags);
-	skb_to_sgvec(skb, sg,
-		     sizeof(*esph) + esp->conf.ivlen,
-		     elen);
-	err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
-	if (unlikely(sg != &esp->sgbuf[0]))
-		kfree(sg);
-
-unlock:
-	spin_unlock(&x->lock);
+	kfree(ESP_SKB_CB(skb)->tmp);
 
 	if (unlikely(err))
 		goto out;
@@ -229,15 +249,11 @@ unlock:
 
 	err = -EINVAL;
 	padlen = nexthdr[0];
-	if (padlen+2 >= elen)
+	if (padlen + 2 + alen >= elen)
 		goto out;
 
 	/* ... check padding bits here. Silly. :-) */
 
-	/* RFC4303: Drop dummy packets without any error */
-	if (nexthdr[1] == IPPROTO_NONE)
-		goto out;
-
 	iph = ip_hdr(skb);
 	ihl = iph->ihl * 4;
 
@@ -279,10 +295,87 @@ unlock:
 	}
 
 	pskb_trim(skb, skb->len - alen - padlen - 2);
-	__skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
+	__skb_pull(skb, hlen);
 	skb_set_transport_header(skb, -ihl);
 
-	return nexthdr[1];
+	err = nexthdr[1];
+
+	/* RFC4303: Drop dummy packets without any error */
+	if (err == IPPROTO_NONE)
+		err = -EINVAL;
+
+out:
+	return err;
+}
+
+static void esp_input_done(struct crypto_async_request *base, int err)
+{
+	struct sk_buff *skb = base->data;
+
+	xfrm_input_resume(skb, esp_input_done2(skb, err));
+}
+
+/*
+ * Note: detecting truncated vs. non-truncated authentication data is very
+ * expensive, so we only support truncated data, which is the recommended
+ * and common case.
+ */
+static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
+{
+	struct ip_esp_hdr *esph;
+	struct esp_data *esp = x->data;
+	struct crypto_aead *aead = esp->aead;
+	struct aead_request *req;
+	struct sk_buff *trailer;
+	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
+	int nfrags;
+	void *tmp;
+	u8 *iv;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
+	int err = -EINVAL;
+
+	if (!pskb_may_pull(skb, sizeof(*esph)))
+		goto out;
+
+	if (elen <= 0)
+		goto out;
+
+	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
+		goto out;
+	nfrags = err;
+
+	err = -ENOMEM;
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
+		goto out;
+
+	ESP_SKB_CB(skb)->tmp = tmp;
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_req(aead, iv);
+	asg = esp_req_sg(aead, req);
+	sg = asg + 1;
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	esph = (struct ip_esp_hdr *)skb->data;
+
+	/* Get ivec. This can be wrong, check against another impls. */
+	iv = esph->enc_data;
+
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
+	sg_init_one(asg, esph, sizeof(*esph));
+
+	aead_request_set_callback(req, 0, esp_input_done, skb);
+	aead_request_set_crypt(req, sg, sg, elen, iv);
+	aead_request_set_assoc(req, asg, sizeof(*esph));
+
+	err = crypto_aead_decrypt(req);
+	if (err == -EINPROGRESS)
+		goto out;
+
+	err = esp_input_done2(skb, err);
 
 out:
 	return err;
@@ -291,11 +384,11 @@ out:
 static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
 {
 	struct esp_data *esp = x->data;
-	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
-	u32 align = max_t(u32, blksize, esp->conf.padlen);
+	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
+	u32 align = max_t(u32, blksize, esp->padlen);
 	u32 rem;
 
-	mtu -= x->props.header_len + esp->auth.icv_trunc_len;
+	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
 	rem = mtu & (align - 1);
 	mtu &= ~(align - 1);
 
@@ -342,80 +435,98 @@ static void esp_destroy(struct xfrm_state *x)
 	if (!esp)
 		return;
 
-	crypto_free_blkcipher(esp->conf.tfm);
-	esp->conf.tfm = NULL;
-	kfree(esp->conf.ivec);
-	esp->conf.ivec = NULL;
-	crypto_free_hash(esp->auth.tfm);
-	esp->auth.tfm = NULL;
-	kfree(esp->auth.work_icv);
-	esp->auth.work_icv = NULL;
+	crypto_free_aead(esp->aead);
 	kfree(esp);
 }
 
 static int esp_init_state(struct xfrm_state *x)
 {
 	struct esp_data *esp = NULL;
-	struct crypto_blkcipher *tfm;
+	struct crypto_aead *aead;
+	struct crypto_authenc_key_param *param;
+	struct rtattr *rta;
+	char *key;
+	char *p;
+	char authenc_name[CRYPTO_MAX_ALG_NAME];
 	u32 align;
+	unsigned int keylen;
+	int err;
 
 	if (x->ealg == NULL)
-		goto error;
+		return -EINVAL;
+
+	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
+		     x->aalg ? x->aalg->alg_name : "digest_null",
+		     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return -ENAMETOOLONG;
 
 	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
 	if (esp == NULL)
 		return -ENOMEM;
 
+	x->data = esp;
+
+	aead = crypto_alloc_aead(authenc_name, 0, 0);
+	err = PTR_ERR(aead);
+	if (IS_ERR(aead))
+		goto error;
+
+	esp->aead = aead;
+
+	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
+		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
+	err = -ENOMEM;
+	key = kmalloc(keylen, GFP_KERNEL);
+	if (!key)
+		goto error;
+
+	p = key;
+	rta = (void *)p;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	p += RTA_SPACE(sizeof(*param));
+
 	if (x->aalg) {
 		struct xfrm_algo_desc *aalg_desc;
-		struct crypto_hash *hash;
-
-		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
-					 CRYPTO_ALG_ASYNC);
-		if (IS_ERR(hash))
-			goto error;
 
-		esp->auth.tfm = hash;
-		if (crypto_hash_setkey(hash, x->aalg->alg_key,
-				       (x->aalg->alg_key_len + 7) / 8))
-			goto error;
+		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
+		p += (x->aalg->alg_key_len + 7) / 8;
 
 		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
 		BUG_ON(!aalg_desc);
 
+		err = -EINVAL;
 		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-		    crypto_hash_digestsize(hash)) {
+		    crypto_aead_authsize(aead)) {
 			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
 				 x->aalg->alg_name,
-				 crypto_hash_digestsize(hash),
+				 crypto_aead_authsize(aead),
 				 aalg_desc->uinfo.auth.icv_fullbits/8);
-			goto error;
+			goto free_key;
 		}
 
-		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
-		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
-
-		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
-		if (!esp->auth.work_icv)
-			goto error;
+		err = crypto_aead_setauthsize(
+			aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
+		if (err)
+			goto free_key;
 	}
 
-	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(tfm))
-		goto error;
-	esp->conf.tfm = tfm;
-	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
-	esp->conf.padlen = 0;
-	if (esp->conf.ivlen) {
-		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
-		if (unlikely(esp->conf.ivec == NULL))
-			goto error;
-		esp->conf.ivinitted = 0;
-	}
-	if (crypto_blkcipher_setkey(tfm, x->ealg->alg_key,
-				    (x->ealg->alg_key_len + 7) / 8))
+	esp->padlen = 0;
+
+	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
+	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
+
+	err = crypto_aead_setkey(aead, key, keylen);
+
+free_key:
+	kfree(key);
+
+	if (err)
 		goto error;
-	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
+
+	x->props.header_len = sizeof(struct ip_esp_hdr) +
+			      crypto_aead_ivsize(aead);
 	if (x->props.mode == XFRM_MODE_TUNNEL)
 		x->props.header_len += sizeof(struct iphdr);
 	else if (x->props.mode == XFRM_MODE_BEET)
@@ -434,18 +545,14 @@ static int esp_init_state(struct xfrm_state *x)
 			break;
 		}
 	}
-	x->data = esp;
-	align = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
-	if (esp->conf.padlen)
-		align = max_t(u32, align, esp->conf.padlen);
-	x->props.trailer_len = align + 1 + esp->auth.icv_trunc_len;
-	return 0;
+
+	align = ALIGN(crypto_aead_blocksize(aead), 4);
+	if (esp->padlen)
+		align = max_t(u32, align, esp->padlen);
+	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);
 
 error:
-	x->data = esp;
-	esp_destroy(x);
-	x->data = NULL;
-	return -EINVAL;
+	return err;
 }
 
 static struct xfrm_type esp_type =