author	Herbert Xu <herbert@gondor.apana.org.au>	2008-01-28 22:35:05 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-31 22:27:02 -0500
commit	38320c70d282be1997a5204c7c7fe14c3aa6bfaa (patch)
tree	aa86d8c3e26a3cc6d283a31b21116a35c7659f8f /net/ipv6
parent	bf164cc054d568fa7889ffab41d3b091f5758c75 (diff)
[IPSEC]: Use crypto_aead and authenc in ESP
This patch converts ESP to use the crypto_aead interface and in particular
the authenc algorithm. This lays the foundations for future support of
combined mode algorithms.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
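[Editorial note, not part of the commit] For readers new to the authenc interface: the key handed to crypto_aead_setkey() is a single blob in which an rtattr-framed crypto_authenc_key_param announces the cipher key length, followed by the raw authentication key and then the raw encryption key. A minimal sketch of the layout that the new esp6_init_state() builds; auth_key/auth_keylen/enc_key/enc_keylen are placeholder parameters, not names from the patch:

	/* Sketch: build an authenc() key blob the way the new esp6_init_state()
	 * does. Error paths trimmed to the essentials.
	 */
	static int esp_setkey_sketch(struct crypto_aead *aead,
				     const u8 *auth_key, unsigned int auth_keylen,
				     const u8 *enc_key, unsigned int enc_keylen)
	{
		struct crypto_authenc_key_param *param;
		struct rtattr *rta;
		char *key, *p;
		unsigned int keylen;
		int err;

		/* blob layout: [ rtattr header | param | auth key | enc key ] */
		keylen = RTA_SPACE(sizeof(*param)) + auth_keylen + enc_keylen;
		key = kmalloc(keylen, GFP_KERNEL);
		if (!key)
			return -ENOMEM;

		rta = (void *)key;
		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
		rta->rta_len = RTA_LENGTH(sizeof(*param));
		param = RTA_DATA(rta);
		param->enckeylen = cpu_to_be32(enc_keylen);	/* cipher key length, big endian */

		p = key + RTA_SPACE(sizeof(*param));
		memcpy(p, auth_key, auth_keylen);		/* HMAC key first */
		memcpy(p + auth_keylen, enc_key, enc_keylen);	/* cipher key second */

		err = crypto_aead_setkey(aead, key, keylen);
		kfree(key);
		return err;
	}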
Diffstat (limited to 'net/ipv6')
-rw-r--r--	net/ipv6/Kconfig	1
-rw-r--r--	net/ipv6/esp6.c	460
2 files changed, 280 insertions, 181 deletions
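[Editorial note, not part of the commit] The other recurring pattern in the diff below is the per-packet scratch buffer: instead of the old per-state sgbuf and work_icv, both esp6_output() and esp6_input() now make a single GFP_ATOMIC allocation that carries everything one request needs until its (possibly asynchronous) completion. A sketch of how the new helpers carve it up, using the names from the patch (the diagram itself is editorial):

	/*
	 * tmp: [ IV | aead givcrypt request + tfm reqsize | scatterlist[nfrags + 1] ]
	 *        ^ aligned for the cipher                   ^ asg, then sg = asg + 1
	 */
	void *tmp = esp_alloc_tmp(aead, nfrags + 1);
	u8 *iv = esp_tmp_iv(aead, tmp);				/* front: aligned IV */
	struct aead_givcrypt_request *req = esp_tmp_givreq(aead, iv);
	struct scatterlist *asg = esp_givreq_sg(aead, req);	/* ESP header as assoc data */
	struct scatterlist *sg = asg + 1;			/* nfrags entries for payload */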
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index eb0b8085949b..3ffb0323668c 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -85,6 +85,7 @@ config INET6_ESP
 	depends on IPV6
 	select XFRM
 	select CRYPTO
+	select CRYPTO_AEAD
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
 	select CRYPTO_CBC
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 5bd5292ad9fa..dc821acf3d33 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -24,33 +24,124 @@
  * This file is derived from net/ipv4/esp.c
  */
 
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/esp.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/pfkeyv2.h>
 #include <linux/random.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <net/icmp.h>
 #include <net/ipv6.h>
 #include <net/protocol.h>
 #include <linux/icmpv6.h>
 
+struct esp_skb_cb {
+	struct xfrm_skb_cb xfrm;
+	void *tmp;
+};
+
+#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
+
+/*
+ * Allocate an AEAD request structure with extra space for SG and IV.
+ *
+ * For alignment considerations the IV is placed at the front, followed
+ * by the request and finally the SG list.
+ *
+ * TODO: Use spare space in skb for this where possible.
+ */
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
+{
+	unsigned int len;
+
+	len = crypto_aead_ivsize(aead);
+	if (len) {
+		len += crypto_aead_alignmask(aead) &
+		       ~(crypto_tfm_ctx_alignment() - 1);
+		len = ALIGN(len, crypto_tfm_ctx_alignment());
+	}
+
+	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
+	len = ALIGN(len, __alignof__(struct scatterlist));
+
+	len += sizeof(struct scatterlist) * nfrags;
+
+	return kmalloc(len, GFP_ATOMIC);
+}
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
+{
+	return crypto_aead_ivsize(aead) ?
+	       PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
+}
+
+static inline struct aead_givcrypt_request *esp_tmp_givreq(
+	struct crypto_aead *aead, u8 *iv)
+{
+	struct aead_givcrypt_request *req;
+
+	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
+				crypto_tfm_ctx_alignment());
+	aead_givcrypt_set_tfm(req, aead);
+	return req;
+}
+
+static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
+{
+	struct aead_request *req;
+
+	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
+				crypto_tfm_ctx_alignment());
+	aead_request_set_tfm(req, aead);
+	return req;
+}
+
+static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
+					     struct aead_request *req)
+{
+	return (void *)ALIGN((unsigned long)(req + 1) +
+			     crypto_aead_reqsize(aead),
+			     __alignof__(struct scatterlist));
+}
+
+static inline struct scatterlist *esp_givreq_sg(
+	struct crypto_aead *aead, struct aead_givcrypt_request *req)
+{
+	return (void *)ALIGN((unsigned long)(req + 1) +
+			     crypto_aead_reqsize(aead),
+			     __alignof__(struct scatterlist));
+}
+
+static void esp_output_done(struct crypto_async_request *base, int err)
+{
+	struct sk_buff *skb = base->data;
+
+	kfree(ESP_SKB_CB(skb)->tmp);
+	xfrm_output_resume(skb, err);
+}
+
 static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err;
 	struct ip_esp_hdr *esph;
-	struct crypto_blkcipher *tfm;
-	struct blkcipher_desc desc;
+	struct crypto_aead *aead;
+	struct aead_givcrypt_request *req;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
 	struct sk_buff *trailer;
+	void *tmp;
 	int blksize;
 	int clen;
 	int alen;
 	int nfrags;
+	u8 *iv;
 	u8 *tail;
 	struct esp_data *esp = x->data;
 
@@ -60,18 +151,26 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	/* Round to block size */
 	clen = skb->len;
 
-	alen = esp->auth.icv_trunc_len;
-	tfm = esp->conf.tfm;
-	desc.tfm = tfm;
-	desc.flags = 0;
-	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
+	aead = esp->aead;
+	alen = crypto_aead_authsize(aead);
+
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
 	clen = ALIGN(clen + 2, blksize);
-	if (esp->conf.padlen)
-		clen = ALIGN(clen, esp->conf.padlen);
+	if (esp->padlen)
+		clen = ALIGN(clen, esp->padlen);
 
-	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) {
+	if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
 		goto error;
-	}
+	nfrags = err;
+
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
+		goto error;
+
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_givreq(aead, iv);
+	asg = esp_givreq_sg(aead, req);
+	sg = asg + 1;
 
 	/* Fill padding... */
 	tail = skb_tail_pointer(trailer);
@@ -81,86 +180,113 @@
 		tail[i] = i + 1;
 	} while (0);
 	tail[clen-skb->len - 2] = (clen - skb->len) - 2;
-	pskb_put(skb, trailer, clen - skb->len);
+	tail[clen - skb->len - 1] = *skb_mac_header(skb);
+	pskb_put(skb, trailer, clen - skb->len + alen);
 
 	skb_push(skb, -skb_network_offset(skb));
 	esph = ip_esp_hdr(skb);
-	*(skb_tail_pointer(trailer) - 1) = *skb_mac_header(skb);
 	*skb_mac_header(skb) = IPPROTO_ESP;
 
 	esph->spi = x->id.spi;
 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
 
-	spin_lock_bh(&x->lock);
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg,
+		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
+		     clen + alen);
+	sg_init_one(asg, esph, sizeof(*esph));
 
-	if (esp->conf.ivlen) {
-		if (unlikely(!esp->conf.ivinitted)) {
-			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
-			esp->conf.ivinitted = 1;
-		}
-		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
-	}
+	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
+	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
+	aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
+	aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq);
 
-	do {
-		struct scatterlist *sg = &esp->sgbuf[0];
+	ESP_SKB_CB(skb)->tmp = tmp;
+	err = crypto_aead_givencrypt(req);
+	if (err == -EINPROGRESS)
+		goto error;
 
-		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-			if (!sg)
-				goto unlock;
-		}
-		sg_init_table(sg, nfrags);
-		skb_to_sgvec(skb, sg,
-			     esph->enc_data +
-			     esp->conf.ivlen -
-			     skb->data, clen);
-		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
-		if (unlikely(sg != &esp->sgbuf[0]))
-			kfree(sg);
-	} while (0);
+	if (err == -EBUSY)
+		err = NET_XMIT_DROP;
+
+	kfree(tmp);
+
+error:
+	return err;
+}
+
+static int esp_input_done2(struct sk_buff *skb, int err)
+{
+	struct xfrm_state *x = xfrm_input_state(skb);
+	struct esp_data *esp = x->data;
+	struct crypto_aead *aead = esp->aead;
+	int alen = crypto_aead_authsize(aead);
+	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
+	int elen = skb->len - hlen;
+	int hdr_len = skb_network_header_len(skb);
+	int padlen;
+	u8 nexthdr[2];
+
+	kfree(ESP_SKB_CB(skb)->tmp);
 
 	if (unlikely(err))
-		goto unlock;
+		goto out;
 
-	if (esp->conf.ivlen) {
-		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
-		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
-	}
+	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
+		BUG();
 
-	if (esp->auth.icv_full_len) {
-		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
-				     sizeof(*esph) + esp->conf.ivlen + clen);
-		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
+	err = -EINVAL;
+	padlen = nexthdr[0];
+	if (padlen + 2 + alen >= elen) {
+		LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage "
+			       "padlen=%d, elen=%d\n", padlen + 2, elen - alen);
+		goto out;
 	}
 
-unlock:
-	spin_unlock_bh(&x->lock);
+	/* ... check padding bits here. Silly. :-) */
 
-error:
+	pskb_trim(skb, skb->len - alen - padlen - 2);
+	__skb_pull(skb, hlen);
+	skb_set_transport_header(skb, -hdr_len);
+
+	err = nexthdr[1];
+
+	/* RFC4303: Drop dummy packets without any error */
+	if (err == IPPROTO_NONE)
+		err = -EINVAL;
+
+out:
 	return err;
 }
 
+static void esp_input_done(struct crypto_async_request *base, int err)
+{
+	struct sk_buff *skb = base->data;
+
+	xfrm_input_resume(skb, esp_input_done2(skb, err));
+}
+
 static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-	struct ipv6hdr *iph;
 	struct ip_esp_hdr *esph;
 	struct esp_data *esp = x->data;
-	struct crypto_blkcipher *tfm = esp->conf.tfm;
-	struct blkcipher_desc desc = { .tfm = tfm };
+	struct crypto_aead *aead = esp->aead;
+	struct aead_request *req;
 	struct sk_buff *trailer;
-	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
-	int alen = esp->auth.icv_trunc_len;
-	int elen = skb->len - sizeof(*esph) - esp->conf.ivlen - alen;
-	int hdr_len = skb_network_header_len(skb);
+	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
 	int nfrags;
 	int ret = 0;
+	void *tmp;
+	u8 *iv;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
 
 	if (!pskb_may_pull(skb, sizeof(*esph))) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (elen <= 0 || (elen & (blksize-1))) {
+	if (elen <= 0) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -170,86 +296,38 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 		goto out;
 	}
 
-	skb->ip_summed = CHECKSUM_NONE;
-
-	spin_lock(&x->lock);
-
-	/* If integrity check is required, do this. */
-	if (esp->auth.icv_full_len) {
-		u8 sum[alen];
-
-		ret = esp_mac_digest(esp, skb, 0, skb->len - alen);
-		if (ret)
-			goto unlock;
+	ret = -ENOMEM;
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
+		goto out;
 
-		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
-			BUG();
+	ESP_SKB_CB(skb)->tmp = tmp;
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_req(aead, iv);
+	asg = esp_req_sg(aead, req);
+	sg = asg + 1;
 
-		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
-			ret = -EBADMSG;
-			goto unlock;
-		}
-	}
+	skb->ip_summed = CHECKSUM_NONE;
 
 	esph = (struct ip_esp_hdr *)skb->data;
-	iph = ipv6_hdr(skb);
 
 	/* Get ivec. This can be wrong, check against another impls. */
-	if (esp->conf.ivlen)
-		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
-
-	{
-		struct scatterlist *sg = &esp->sgbuf[0];
-
-		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-			if (!sg) {
-				ret = -ENOMEM;
-				goto unlock;
-			}
-		}
-		sg_init_table(sg, nfrags);
-		skb_to_sgvec(skb, sg,
-			     sizeof(*esph) + esp->conf.ivlen,
-			     elen);
-		ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
-		if (unlikely(sg != &esp->sgbuf[0]))
-			kfree(sg);
-	}
+	iv = esph->enc_data;
 
-unlock:
-	spin_unlock(&x->lock);
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
+	sg_init_one(asg, esph, sizeof(*esph));
 
-	if (unlikely(ret))
-		goto out;
+	aead_request_set_callback(req, 0, esp_input_done, skb);
+	aead_request_set_crypt(req, sg, sg, elen, iv);
+	aead_request_set_assoc(req, asg, sizeof(*esph));
 
-	{
-		u8 nexthdr[2];
-		u8 padlen;
-
-		if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
-			BUG();
-
-		padlen = nexthdr[0];
-		if (padlen+2 >= elen) {
-			LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen+2, elen);
-			ret = -EINVAL;
-			goto out;
-		}
-		/* ... check padding bits here. Silly. :-) */
-
-		/* RFC4303: Drop dummy packets without any error */
-		if (nexthdr[1] == IPPROTO_NONE) {
-			ret = -EINVAL;
-			goto out;
-		}
+	ret = crypto_aead_decrypt(req);
+	if (ret == -EINPROGRESS)
+		goto out;
 
-		pskb_trim(skb, skb->len - alen - padlen - 2);
-		ret = nexthdr[1];
-	}
+	ret = esp_input_done2(skb, ret);
 
-	__skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
-	skb_set_transport_header(skb, -hdr_len);
 out:
 	return ret;
 }
@@ -257,11 +335,11 @@ out:
 static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
 {
 	struct esp_data *esp = x->data;
-	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
-	u32 align = max_t(u32, blksize, esp->conf.padlen);
+	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
+	u32 align = max_t(u32, blksize, esp->padlen);
 	u32 rem;
 
-	mtu -= x->props.header_len + esp->auth.icv_trunc_len;
+	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
 	rem = mtu & (align - 1);
 	mtu &= ~(align - 1);
 
@@ -300,81 +378,101 @@ static void esp6_destroy(struct xfrm_state *x)
 	if (!esp)
 		return;
 
-	crypto_free_blkcipher(esp->conf.tfm);
-	esp->conf.tfm = NULL;
-	kfree(esp->conf.ivec);
-	esp->conf.ivec = NULL;
-	crypto_free_hash(esp->auth.tfm);
-	esp->auth.tfm = NULL;
-	kfree(esp->auth.work_icv);
-	esp->auth.work_icv = NULL;
+	crypto_free_aead(esp->aead);
 	kfree(esp);
 }
 
 static int esp6_init_state(struct xfrm_state *x)
 {
 	struct esp_data *esp = NULL;
-	struct crypto_blkcipher *tfm;
+	struct crypto_aead *aead;
+	struct crypto_authenc_key_param *param;
+	struct rtattr *rta;
+	char *key;
+	char *p;
+	char authenc_name[CRYPTO_MAX_ALG_NAME];
+	u32 align;
+	unsigned int keylen;
+	int err;
 
 	if (x->ealg == NULL)
-		goto error;
+		return -EINVAL;
 
 	if (x->encap)
-		goto error;
+		return -EINVAL;
+
+	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
+		     x->aalg ? x->aalg->alg_name : "digest_null",
+		     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return -ENAMETOOLONG;
 
 	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
 	if (esp == NULL)
 		return -ENOMEM;
 
+	x->data = esp;
+
+	aead = crypto_alloc_aead(authenc_name, 0, 0);
+	err = PTR_ERR(aead);
+	if (IS_ERR(aead))
+		goto error;
+
+	esp->aead = aead;
+
+	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
+		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
+	err = -ENOMEM;
+	key = kmalloc(keylen, GFP_KERNEL);
+	if (!key)
+		goto error;
+
+	p = key;
+	rta = (void *)p;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	p += RTA_SPACE(sizeof(*param));
+
 	if (x->aalg) {
 		struct xfrm_algo_desc *aalg_desc;
-		struct crypto_hash *hash;
-
-		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
-					 CRYPTO_ALG_ASYNC);
-		if (IS_ERR(hash))
-			goto error;
 
-		esp->auth.tfm = hash;
-		if (crypto_hash_setkey(hash, x->aalg->alg_key,
-				       (x->aalg->alg_key_len + 7) / 8))
-			goto error;
+		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
+		p += (x->aalg->alg_key_len + 7) / 8;
 
 		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
 		BUG_ON(!aalg_desc);
 
+		err = -EINVAL;
 		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-		    crypto_hash_digestsize(hash)) {
+		    crypto_aead_authsize(aead)) {
 			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
 				 x->aalg->alg_name,
-				 crypto_hash_digestsize(hash),
+				 crypto_aead_authsize(aead),
 				 aalg_desc->uinfo.auth.icv_fullbits/8);
-			goto error;
+			goto free_key;
 		}
 
-		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
-		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
-
-		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
-		if (!esp->auth.work_icv)
-			goto error;
-	}
-	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(tfm))
-		goto error;
-	esp->conf.tfm = tfm;
-	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
-	esp->conf.padlen = 0;
-	if (esp->conf.ivlen) {
-		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
-		if (unlikely(esp->conf.ivec == NULL))
-			goto error;
-		esp->conf.ivinitted = 0;
+		err = crypto_aead_setauthsize(
+			aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
+		if (err)
+			goto free_key;
 	}
-	if (crypto_blkcipher_setkey(tfm, x->ealg->alg_key,
-				    (x->ealg->alg_key_len + 7) / 8))
+
+	esp->padlen = 0;
+
+	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
+	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
+
+	err = crypto_aead_setkey(aead, key, keylen);
+
+free_key:
+	kfree(key);
+
+	if (err)
 		goto error;
-	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
+
+	x->props.header_len = sizeof(struct ip_esp_hdr) +
+			      crypto_aead_ivsize(aead);
 	switch (x->props.mode) {
 	case XFRM_MODE_BEET:
 	case XFRM_MODE_TRANSPORT:
@@ -385,14 +483,14 @@ static int esp6_init_state(struct xfrm_state *x)
 	default:
 		goto error;
 	}
-	x->data = esp;
-	return 0;
+
+	align = ALIGN(crypto_aead_blocksize(aead), 4);
+	if (esp->padlen)
+		align = max_t(u32, align, esp->padlen);
+	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);
 
 error:
-	x->data = esp;
-	esp6_destroy(x);
-	x->data = NULL;
-	return -EINVAL;
+	return err;
 }
 
 static struct xfrm_type esp6_type =
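
[Editorial note, not part of the commit] One behavioural point worth calling out: crypto_aead_givencrypt() and crypto_aead_decrypt() may complete asynchronously by returning -EINPROGRESS, in which case ownership of the skb and the scratch buffer passes to esp_output_done()/esp_input_done(), which free the buffer and re-enter the xfrm stack via xfrm_output_resume()/xfrm_input_resume(). Only a synchronous return is cleaned up in line. Roughly, for the output side (a sketch of the contract; the comments are editorial interpretation):

	err = crypto_aead_givencrypt(req);
	if (err == -EINPROGRESS)
		goto error;		/* async: esp_output_done() will kfree tmp */

	if (err == -EBUSY)		/* transform could not take the request now; */
		err = NET_XMIT_DROP;	/* the patch reports this as a drop */

	kfree(tmp);			/* synchronous completion: free scratch here */
error:
	return err;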