Diffstat (limited to 'net/ipv4/esp4.c')
-rw-r--r--   net/ipv4/esp4.c   105
1 file changed, 61 insertions(+), 44 deletions(-)
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 9bbdd4494551..13b29360d102 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -1,4 +1,4 @@
-#include <linux/config.h>
+#include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
@@ -17,7 +17,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	int err;
 	struct iphdr *top_iph;
 	struct ip_esp_hdr *esph;
-	struct crypto_tfm *tfm;
+	struct crypto_blkcipher *tfm;
+	struct blkcipher_desc desc;
 	struct esp_data *esp;
 	struct sk_buff *trailer;
 	int blksize;
@@ -37,7 +38,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	esp = x->data;
 	alen = esp->auth.icv_trunc_len;
 	tfm = esp->conf.tfm;
-	blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4);
+	desc.tfm = tfm;
+	desc.flags = 0;
+	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
 	clen = ALIGN(clen + 2, blksize);
 	if (esp->conf.padlen)
 		clen = ALIGN(clen, esp->conf.padlen);
@@ -92,8 +95,13 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	esph->seq_no = htonl(++x->replay.oseq);
 	xfrm_aevent_doreplay(x);
 
-	if (esp->conf.ivlen)
-		crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
+	if (esp->conf.ivlen) {
+		if (unlikely(!esp->conf.ivinitted)) {
+			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
+			esp->conf.ivinitted = 1;
+		}
+		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
+	}
 
 	do {
 		struct scatterlist *sg = &esp->sgbuf[0];
@@ -104,26 +112,27 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 				goto error;
 		}
 		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
-		crypto_cipher_encrypt(tfm, sg, sg, clen);
+		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
 		if (unlikely(sg != &esp->sgbuf[0]))
 			kfree(sg);
 	} while (0);
 
+	if (unlikely(err))
+		goto error;
+
 	if (esp->conf.ivlen) {
-		memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
-		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
+		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
+		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
 	}
 
 	if (esp->auth.icv_full_len) {
-		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
-			      sizeof(struct ip_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
-		pskb_put(skb, trailer, alen);
+		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
+				     sizeof(*esph) + esp->conf.ivlen + clen);
+		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
 	}
 
 	ip_send_check(top_iph);
 
-	err = 0;
-
 error:
 	return err;
 }
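
The esp_output() hunks above capture the core of the API conversion: the cipher handle becomes a struct crypto_blkcipher, each operation goes through a struct blkcipher_desc (transform plus request flags), and encryption now returns an error code that must be propagated instead of being void. A minimal, self-contained sketch of that calling pattern follows; the "cbc(aes)" algorithm name and the key/IV/buffer parameters are illustrative assumptions, not values taken from this patch.

/*
 * Sketch only (not part of the patch): the 2.6.19-era blkcipher calling
 * convention that the new esp_output() path relies on.  The algorithm
 * name, key, IV and buffer are hypothetical.
 */
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_blkcipher_encrypt(u8 *buf, unsigned int len,
				     const u8 *key, unsigned int keylen,
				     u8 *iv)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	/* operations are issued through a descriptor, not the bare tfm */
	desc.tfm = tfm;
	desc.flags = 0;
	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));

	/* len must already be padded to a multiple of the block size */
	sg_init_one(&sg, buf, len);
	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
out:
	crypto_free_blkcipher(tfm);
	return err;
}

The same pattern accounts for the new desc.tfm/desc.flags assignments and the added err checks in both esp_output() and esp_input() in this diff.
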
@@ -138,8 +147,10 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 	struct iphdr *iph;
 	struct ip_esp_hdr *esph;
 	struct esp_data *esp = x->data;
+	struct crypto_blkcipher *tfm = esp->conf.tfm;
+	struct blkcipher_desc desc = { .tfm = tfm };
 	struct sk_buff *trailer;
-	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
+	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
 	int alen = esp->auth.icv_trunc_len;
 	int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
 	int nfrags;
@@ -147,6 +158,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 	u8 nexthdr[2];
 	struct scatterlist *sg;
 	int padlen;
+	int err;
 
 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
 		goto out;
@@ -156,15 +168,16 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	/* If integrity check is required, do this. */
 	if (esp->auth.icv_full_len) {
-		u8 sum[esp->auth.icv_full_len];
-		u8 sum1[alen];
-
-		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);
+		u8 sum[alen];
 
-		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
+		err = esp_mac_digest(esp, skb, 0, skb->len - alen);
+		if (err)
+			goto out;
+
+		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
 			BUG();
 
-		if (unlikely(memcmp(sum, sum1, alen))) {
+		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
 			x->stats.integrity_failed++;
 			goto out;
 		}
@@ -179,7 +192,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	/* Get ivec. This can be wrong, check against another impls. */
 	if (esp->conf.ivlen)
-		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));
+		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
 
 	sg = &esp->sgbuf[0];
 
@@ -189,9 +202,11 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 			goto out;
 	}
 	skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
-	crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
+	err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
 	if (unlikely(sg != &esp->sgbuf[0]))
 		kfree(sg);
+	if (unlikely(err))
+		return err;
 
 	if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
 		BUG();
@@ -238,7 +253,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 		 * as per draft-ietf-ipsec-udp-encaps-06,
 		 * section 3.1.2
 		 */
-		if (!x->props.mode)
+		if (x->props.mode == XFRM_MODE_TRANSPORT)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 
@@ -255,9 +270,9 @@ out:
 static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
 {
 	struct esp_data *esp = x->data;
-	u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
+	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
 
-	if (x->props.mode) {
+	if (x->props.mode == XFRM_MODE_TUNNEL) {
 		mtu = ALIGN(mtu + 2, blksize);
 	} else {
 		/* The worst case. */
@@ -294,11 +309,11 @@ static void esp_destroy(struct xfrm_state *x)
 	if (!esp)
 		return;
 
-	crypto_free_tfm(esp->conf.tfm);
+	crypto_free_blkcipher(esp->conf.tfm);
 	esp->conf.tfm = NULL;
 	kfree(esp->conf.ivec);
 	esp->conf.ivec = NULL;
-	crypto_free_tfm(esp->auth.tfm);
+	crypto_free_hash(esp->auth.tfm);
 	esp->auth.tfm = NULL;
 	kfree(esp->auth.work_icv);
 	esp->auth.work_icv = NULL;
@@ -308,6 +323,7 @@ static void esp_destroy(struct xfrm_state *x)
 static int esp_init_state(struct xfrm_state *x)
 {
 	struct esp_data *esp = NULL;
+	struct crypto_blkcipher *tfm;
 
 	/* null auth and encryption can have zero length keys */
 	if (x->aalg) {
@@ -317,30 +333,33 @@ static int esp_init_state(struct xfrm_state *x)
 	if (x->ealg == NULL)
 		goto error;
 
-	esp = kmalloc(sizeof(*esp), GFP_KERNEL);
+	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
 	if (esp == NULL)
 		return -ENOMEM;
 
-	memset(esp, 0, sizeof(*esp));
-
 	if (x->aalg) {
 		struct xfrm_algo_desc *aalg_desc;
+		struct crypto_hash *hash;
 
 		esp->auth.key = x->aalg->alg_key;
 		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
-		esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
-		if (esp->auth.tfm == NULL)
+		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
+					 CRYPTO_ALG_ASYNC);
+		if (IS_ERR(hash))
+			goto error;
+
+		esp->auth.tfm = hash;
+		if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
 			goto error;
-		esp->auth.icv = esp_hmac_digest;
 
 		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
 		BUG_ON(!aalg_desc);
 
 		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-		    crypto_tfm_alg_digestsize(esp->auth.tfm)) {
+		    crypto_hash_digestsize(hash)) {
 			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
 				 x->aalg->alg_name,
-				 crypto_tfm_alg_digestsize(esp->auth.tfm),
+				 crypto_hash_digestsize(hash),
 				 aalg_desc->uinfo.auth.icv_fullbits/8);
 			goto error;
 		}
@@ -354,24 +373,22 @@ static int esp_init_state(struct xfrm_state *x)
 	}
 	esp->conf.key = x->ealg->alg_key;
 	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
-	if (x->props.ealgo == SADB_EALG_NULL)
-		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB);
-	else
-		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
-	if (esp->conf.tfm == NULL)
+	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
 		goto error;
-	esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm);
+	esp->conf.tfm = tfm;
+	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
 	esp->conf.padlen = 0;
 	if (esp->conf.ivlen) {
 		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
 		if (unlikely(esp->conf.ivec == NULL))
 			goto error;
-		get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
+		esp->conf.ivinitted = 0;
 	}
-	if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len))
+	if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
 		goto error;
 	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
-	if (x->props.mode)
+	if (x->props.mode == XFRM_MODE_TUNNEL)
 		x->props.header_len += sizeof(struct iphdr);
 	if (x->encap) {
 		struct xfrm_encap_tmpl *encap = x->encap;
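
The authentication side of the conversion (visible in esp_init_state() and esp_destroy() above) follows the same shape: the HMAC transform is allocated with crypto_alloc_hash(), keyed once with crypto_hash_setkey(), checked against the XFRM algorithm descriptor via crypto_hash_digestsize(), and freed with crypto_free_hash(); per-packet digests then go through the esp_mac_digest() helper rather than the removed esp->auth.icv callback. A sketch of the underlying crypto_hash calling convention, assuming an illustrative "hmac(sha1)" transform and hypothetical key/data buffers (the in-tree helper operates over the skb's scatterlist, which is not reproduced here):

/*
 * Sketch only (not part of the patch): keyed digest through the
 * crypto_hash interface introduced in this API generation.
 */
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_icv(const u8 *key, unsigned int keylen,
		       u8 *data, unsigned int len, u8 *out)
{
	struct crypto_hash *hash;
	struct hash_desc desc;
	struct scatterlist sg;
	int err;

	hash = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	err = crypto_hash_setkey(hash, key, keylen);
	if (err)
		goto out;

	desc.tfm = hash;
	desc.flags = 0;

	sg_init_one(&sg, data, len);
	/* writes crypto_hash_digestsize(hash) bytes into out */
	err = crypto_hash_digest(&desc, &sg, len, out);
out:
	crypto_free_hash(hash);
	return err;
}

Keying the transform once at state setup time, as the patch does in esp_init_state(), keeps the per-packet path down to the digest call itself.
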