Diffstat (limited to 'net/ipv4')
 -rw-r--r--  net/ipv4/Kconfig  |  1
 -rw-r--r--  net/ipv4/ah4.c    | 36
 -rw-r--r--  net/ipv4/esp4.c   | 85
 -rw-r--r--  net/ipv4/ipcomp.c | 25
 4 files changed, 88 insertions, 59 deletions
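
This patch converts the net/ipv4 IPsec transforms from the old monolithic crypto_tfm interface to the typed crypto API: crypto_hash for AH/ESP authentication, crypto_blkcipher for ESP encryption, and crypto_comp for IPComp. As orientation for the hunks below, here is a minimal sketch (illustration only, not code from the patch; example_alloc_auth is a hypothetical name) of the allocation pattern the converted init paths rely on: the typed allocators report failure via ERR_PTR() rather than NULL, and keying is an explicit call on the typed handle.

#include <linux/crypto.h>
#include <linux/err.h>

/* Illustration only: the allocation/keying pattern used by the converted
 * AH and ESP init paths below (hypothetical helper, not from the patch). */
static struct crypto_hash *example_alloc_auth(const char *alg_name,
					      const u8 *key, unsigned int keylen)
{
	/* Typed allocator: failure is reported via ERR_PTR(), not NULL. */
	struct crypto_hash *tfm = crypto_alloc_hash(alg_name, 0, CRYPTO_ALG_ASYNC);

	if (IS_ERR(tfm))
		return tfm;

	/* Keying is now an explicit operation on the typed handle. */
	if (crypto_hash_setkey(tfm, key, keylen)) {
		crypto_free_hash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return tfm;
}
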
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 8514106761b0..3b5d504a74be 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -386,6 +386,7 @@ config INET_ESP
 	select CRYPTO
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
+	select CRYPTO_CBC
 	select CRYPTO_SHA1
 	select CRYPTO_DES
 	---help---
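
The added select CRYPTO_CBC reflects that, with the blkcipher interface used below, chaining modes are separate template modules rather than a mode flag passed to crypto_alloc_tfm(); the cipher is requested by a composed name such as "cbc(des3_ede)". A hedged illustration of that naming follows (the helper and the specific algorithm string are examples, not taken from this diff).

#include <linux/crypto.h>
#include <linux/err.h>

/* Illustration only: with the blkcipher API the mode is encoded in the
 * algorithm name, so the generic CBC template must be available. */
static struct crypto_blkcipher *example_alloc_cbc_cipher(void)
{
	return crypto_alloc_blkcipher("cbc(des3_ede)", 0, CRYPTO_ALG_ASYNC);
}
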
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 1366bc6ce6a5..2b98943e6b02 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -1,3 +1,4 @@
+#include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
@@ -97,7 +98,10 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
 	ah->spi = x->id.spi;
 	ah->seq_no = htonl(++x->replay.oseq);
 	xfrm_aevent_doreplay(x);
-	ahp->icv(ahp, skb, ah->auth_data);
+	err = ah_mac_digest(ahp, skb, ah->auth_data);
+	if (err)
+		goto error;
+	memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len);
 
 	top_iph->tos = iph->tos;
 	top_iph->ttl = iph->ttl;
@@ -119,6 +123,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int ah_hlen;
 	int ihl;
+	int err = -EINVAL;
 	struct iphdr *iph;
 	struct ip_auth_hdr *ah;
 	struct ah_data *ahp;
@@ -166,8 +171,11 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
 	skb_push(skb, ihl);
-	ahp->icv(ahp, skb, ah->auth_data);
-	if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) {
+	err = ah_mac_digest(ahp, skb, ah->auth_data);
+	if (err)
+		goto out;
+	err = -EINVAL;
+	if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) {
 		x->stats.integrity_failed++;
 		goto out;
 	}
@@ -179,7 +187,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 	return 0;
 
 out:
-	return -EINVAL;
+	return err;
 }
 
 static void ah4_err(struct sk_buff *skb, u32 info)
@@ -204,6 +212,7 @@ static int ah_init_state(struct xfrm_state *x)
 {
 	struct ah_data *ahp = NULL;
 	struct xfrm_algo_desc *aalg_desc;
+	struct crypto_hash *tfm;
 
 	if (!x->aalg)
 		goto error;
@@ -221,24 +230,27 @@ static int ah_init_state(struct xfrm_state *x)
 
 	ahp->key = x->aalg->alg_key;
 	ahp->key_len = (x->aalg->alg_key_len+7)/8;
-	ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
-	if (!ahp->tfm)
+	tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
+		goto error;
+
+	ahp->tfm = tfm;
+	if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len))
 		goto error;
-	ahp->icv = ah_hmac_digest;
 
 	/*
 	 * Lookup the algorithm description maintained by xfrm_algo,
 	 * verify crypto transform properties, and store information
 	 * we need for AH processing. This lookup cannot fail here
-	 * after a successful crypto_alloc_tfm().
+	 * after a successful crypto_alloc_hash().
 	 */
 	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
 	BUG_ON(!aalg_desc);
 
 	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-	    crypto_tfm_alg_digestsize(ahp->tfm)) {
+	    crypto_hash_digestsize(tfm)) {
 		printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
-		       x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm),
+		       x->aalg->alg_name, crypto_hash_digestsize(tfm),
 		       aalg_desc->uinfo.auth.icv_fullbits/8);
 		goto error;
 	}
@@ -262,7 +274,7 @@ static int ah_init_state(struct xfrm_state *x)
 error:
 	if (ahp) {
 		kfree(ahp->work_icv);
-		crypto_free_tfm(ahp->tfm);
+		crypto_free_hash(ahp->tfm);
 		kfree(ahp);
 	}
 	return -EINVAL;
@@ -277,7 +289,7 @@ static void ah_destroy(struct xfrm_state *x)
 
 	kfree(ahp->work_icv);
 	ahp->work_icv = NULL;
-	crypto_free_tfm(ahp->tfm);
+	crypto_free_hash(ahp->tfm);
 	ahp->tfm = NULL;
 	kfree(ahp);
 }
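
The converted ah_output()/ah_input() paths call ah_mac_digest() and read the result from ahp->work_icv; that helper is added by the patch but does not appear in the hunks shown here. As a hedged illustration of the primitive it is built on, the sketch below (hash_buffer_sketch is a hypothetical name) computes a digest over a linear buffer with the crypto_hash interface; the real helper additionally has to walk the skb rather than assume linear data.

#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Hedged illustration (not the patch's helper): digest a linear buffer
 * via hash_desc + scatterlist; 'out' plays the role of ahp->work_icv. */
static int hash_buffer_sketch(struct crypto_hash *tfm,
			      u8 *data, unsigned int len, u8 *out)
{
	struct hash_desc desc = { .tfm = tfm, .flags = 0 };
	struct scatterlist sg;

	sg_init_one(&sg, data, len);
	return crypto_hash_digest(&desc, &sg, len, out);
}
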
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index fc2f8ce441de..b428489f6ccd 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -1,3 +1,4 @@
+#include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
@@ -16,7 +17,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	int err;
 	struct iphdr *top_iph;
 	struct ip_esp_hdr *esph;
-	struct crypto_tfm *tfm;
+	struct crypto_blkcipher *tfm;
+	struct blkcipher_desc desc;
 	struct esp_data *esp;
 	struct sk_buff *trailer;
 	int blksize;
@@ -36,7 +38,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	esp = x->data;
 	alen = esp->auth.icv_trunc_len;
 	tfm = esp->conf.tfm;
-	blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4);
+	desc.tfm = tfm;
+	desc.flags = 0;
+	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
 	clen = ALIGN(clen + 2, blksize);
 	if (esp->conf.padlen)
 		clen = ALIGN(clen, esp->conf.padlen);
@@ -92,7 +96,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	xfrm_aevent_doreplay(x);
 
 	if (esp->conf.ivlen)
-		crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
+		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
 
 	do {
 		struct scatterlist *sg = &esp->sgbuf[0];
@@ -103,26 +107,27 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 				goto error;
 		}
 		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
-		crypto_cipher_encrypt(tfm, sg, sg, clen);
+		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
 		if (unlikely(sg != &esp->sgbuf[0]))
 			kfree(sg);
 	} while (0);
 
+	if (unlikely(err))
+		goto error;
+
 	if (esp->conf.ivlen) {
-		memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
-		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
+		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
+		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
 	}
 
 	if (esp->auth.icv_full_len) {
-		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
-			sizeof(struct ip_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
-		pskb_put(skb, trailer, alen);
+		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
+				     sizeof(*esph) + esp->conf.ivlen + clen);
+		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
 	}
 
 	ip_send_check(top_iph);
 
-	err = 0;
-
 error:
 	return err;
 }
@@ -137,8 +142,10 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 	struct iphdr *iph;
 	struct ip_esp_hdr *esph;
 	struct esp_data *esp = x->data;
+	struct crypto_blkcipher *tfm = esp->conf.tfm;
+	struct blkcipher_desc desc = { .tfm = tfm };
 	struct sk_buff *trailer;
-	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
+	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
 	int alen = esp->auth.icv_trunc_len;
 	int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
 	int nfrags;
@@ -146,6 +153,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 	u8 nexthdr[2];
 	struct scatterlist *sg;
 	int padlen;
+	int err;
 
 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
 		goto out;
@@ -155,15 +163,16 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	/* If integrity check is required, do this. */
 	if (esp->auth.icv_full_len) {
-		u8 sum[esp->auth.icv_full_len];
-		u8 sum1[alen];
-
-		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);
+		u8 sum[alen];
 
-		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
+		err = esp_mac_digest(esp, skb, 0, skb->len - alen);
+		if (err)
+			goto out;
+
+		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
 			BUG();
 
-		if (unlikely(memcmp(sum, sum1, alen))) {
+		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
 			x->stats.integrity_failed++;
 			goto out;
 		}
@@ -178,7 +187,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	/* Get ivec. This can be wrong, check against another impls. */
 	if (esp->conf.ivlen)
-		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));
+		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
 
 	sg = &esp->sgbuf[0];
 
@@ -188,9 +197,11 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 		goto out;
 	}
 	skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
-	crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
+	err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
 	if (unlikely(sg != &esp->sgbuf[0]))
 		kfree(sg);
+	if (unlikely(err))
+		return err;
 
 	if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
 		BUG();
@@ -254,7 +265,7 @@ out:
 static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
 {
 	struct esp_data *esp = x->data;
-	u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
+	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
 
 	if (x->props.mode) {
 		mtu = ALIGN(mtu + 2, blksize);
@@ -293,11 +304,11 @@ static void esp_destroy(struct xfrm_state *x)
 	if (!esp)
 		return;
 
-	crypto_free_tfm(esp->conf.tfm);
+	crypto_free_blkcipher(esp->conf.tfm);
 	esp->conf.tfm = NULL;
 	kfree(esp->conf.ivec);
 	esp->conf.ivec = NULL;
-	crypto_free_tfm(esp->auth.tfm);
+	crypto_free_hash(esp->auth.tfm);
 	esp->auth.tfm = NULL;
 	kfree(esp->auth.work_icv);
 	esp->auth.work_icv = NULL;
@@ -307,6 +318,7 @@ static void esp_destroy(struct xfrm_state *x)
 static int esp_init_state(struct xfrm_state *x)
 {
 	struct esp_data *esp = NULL;
+	struct crypto_blkcipher *tfm;
 
 	/* null auth and encryption can have zero length keys */
 	if (x->aalg) {
@@ -322,22 +334,27 @@ static int esp_init_state(struct xfrm_state *x)
 
 	if (x->aalg) {
 		struct xfrm_algo_desc *aalg_desc;
+		struct crypto_hash *hash;
 
 		esp->auth.key = x->aalg->alg_key;
 		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
-		esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
-		if (esp->auth.tfm == NULL)
+		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
+					 CRYPTO_ALG_ASYNC);
+		if (IS_ERR(hash))
+			goto error;
+
+		esp->auth.tfm = hash;
+		if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
 			goto error;
-		esp->auth.icv = esp_hmac_digest;
 
 		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
 		BUG_ON(!aalg_desc);
 
 		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-		    crypto_tfm_alg_digestsize(esp->auth.tfm)) {
+		    crypto_hash_digestsize(hash)) {
 			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
 				 x->aalg->alg_name,
-				 crypto_tfm_alg_digestsize(esp->auth.tfm),
+				 crypto_hash_digestsize(hash),
 				 aalg_desc->uinfo.auth.icv_fullbits/8);
 			goto error;
 		}
@@ -351,13 +368,11 @@ static int esp_init_state(struct xfrm_state *x)
 	}
 	esp->conf.key = x->ealg->alg_key;
 	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
-	if (x->props.ealgo == SADB_EALG_NULL)
-		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB);
-	else
-		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
-	if (esp->conf.tfm == NULL)
+	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
 		goto error;
-	esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm);
+	esp->conf.tfm = tfm;
+	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
 	esp->conf.padlen = 0;
 	if (esp->conf.ivlen) {
 		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
@@ -365,7 +380,7 @@ static int esp_init_state(struct xfrm_state *x)
 			goto error;
 		get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
 	}
-	if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len))
+	if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
 		goto error;
 	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
 	if (x->props.mode)
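
esp_output() and esp_input() now drive the cipher through a stack blkcipher_desc instead of calling crypto_cipher_encrypt()/decrypt() on the tfm directly, and the IV length comes from esp->conf.ivlen rather than being queried per call. The sketch below shows that calling convention in isolation; it is illustrative only (encrypt_buffer_sketch is a hypothetical name), not code from the patch.

#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Illustration of the blkcipher calling convention used by the converted
 * ESP paths: bind the tfm into a desc, set the IV on the tfm, then run
 * encrypt over a scatterlist (in place here). Not code from the patch. */
static int encrypt_buffer_sketch(struct crypto_blkcipher *tfm,
				 u8 *iv, u8 *buf, unsigned int len)
{
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	struct scatterlist sg;

	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
	sg_init_one(&sg, buf, len);
	return crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
}
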
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index a0c28b2b756e..5bb9c9f03fb6 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -32,7 +32,7 @@
 
 struct ipcomp_tfms {
 	struct list_head list;
-	struct crypto_tfm **tfms;
+	struct crypto_comp **tfms;
 	int users;
 };
 
@@ -46,7 +46,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
 	int err, plen, dlen;
 	struct ipcomp_data *ipcd = x->data;
 	u8 *start, *scratch;
-	struct crypto_tfm *tfm;
+	struct crypto_comp *tfm;
 	int cpu;
 
 	plen = skb->len;
@@ -107,7 +107,7 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
 	struct iphdr *iph = skb->nh.iph;
 	struct ipcomp_data *ipcd = x->data;
 	u8 *start, *scratch;
-	struct crypto_tfm *tfm;
+	struct crypto_comp *tfm;
 	int cpu;
 
 	ihlen = iph->ihl * 4;
@@ -302,7 +302,7 @@ static void **ipcomp_alloc_scratches(void)
 	return scratches;
 }
 
-static void ipcomp_free_tfms(struct crypto_tfm **tfms)
+static void ipcomp_free_tfms(struct crypto_comp **tfms)
 {
 	struct ipcomp_tfms *pos;
 	int cpu;
@@ -324,28 +324,28 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms)
 		return;
 
 	for_each_possible_cpu(cpu) {
-		struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
-		crypto_free_tfm(tfm);
+		struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
+		crypto_free_comp(tfm);
 	}
 	free_percpu(tfms);
 }
 
-static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
+static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
 {
 	struct ipcomp_tfms *pos;
-	struct crypto_tfm **tfms;
+	struct crypto_comp **tfms;
 	int cpu;
 
 	/* This can be any valid CPU ID so we don't need locking. */
 	cpu = raw_smp_processor_id();
 
 	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
-		struct crypto_tfm *tfm;
+		struct crypto_comp *tfm;
 
 		tfms = pos->tfms;
 		tfm = *per_cpu_ptr(tfms, cpu);
 
-		if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) {
+		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
 			pos->users++;
 			return tfms;
 		}
@@ -359,12 +359,13 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
 	INIT_LIST_HEAD(&pos->list);
 	list_add(&pos->list, &ipcomp_tfms_list);
 
-	pos->tfms = tfms = alloc_percpu(struct crypto_tfm *);
+	pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
 	if (!tfms)
 		goto error;
 
 	for_each_possible_cpu(cpu) {
-		struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
+		struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
+							    CRYPTO_ALG_ASYNC);
 		if (!tfm)
 			goto error;
 		*per_cpu_ptr(tfms, cpu) = tfm;
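
The ipcomp.c changes are purely a retyping of the per-CPU transforms from struct crypto_tfm to struct crypto_comp; the compression call sites themselves are not in the hunks shown. Below is a hedged sketch of the crypto_comp calling convention they rely on (deflate_one_sketch is a hypothetical name, and the failure check covers both NULL and ERR_PTR returns since the diff above still tests !tfm).

#include <linux/crypto.h>
#include <linux/err.h>

/* Hedged sketch, not code from the patch: allocate a compressor, run one
 * compression pass, and release it. '*dlen' is in/out: capacity of 'dst'
 * on entry, number of bytes produced on success. */
static int deflate_one_sketch(const u8 *src, unsigned int slen,
			      u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm = crypto_alloc_comp("deflate", 0, CRYPTO_ALG_ASYNC);
	int err;

	if (!tfm || IS_ERR(tfm))
		return -ENOENT;

	err = crypto_comp_compress(tfm, src, slen, dst, dlen);
	crypto_free_comp(tfm);
	return err;
}
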