author		Jeff Garzik <jeff@garzik.org>	2006-09-22 20:10:23 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-09-22 20:10:23 -0400
commit		28eb177dfa5982d132edceed891cb3885df258bb (patch)
tree		5f8fdc37ad1d8d0793e9c47da7d908b97c814ffb /net
parent		fd8ae94eea9bb4269d6dff1b47b9dc741bd70d0b (diff)
parent		db392219c5f572610645696e3672f6ea38783a65 (diff)
Merge branch 'master' into upstream
Conflicts:

	net/ieee80211/ieee80211_crypt_tkip.c
	net/ieee80211/ieee80211_crypt_wep.c
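(Annotation, not part of the commit.) This merge pulls in the conversion of net/ to the new crypto layer interfaces: bare struct crypto_tfm handles become typed crypto_cipher, crypto_blkcipher, crypto_hash and crypto_comp handles; allocation failures are reported via ERR_PTR()/IS_ERR() instead of NULL; and the encrypt/decrypt entry points take a desc argument and return an error code instead of void. A minimal sketch of the blkcipher pattern, assuming only the calls visible in the hunks below (the example_* helper name is ours):

/* Illustrative sketch of the new blkcipher usage pattern. */
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/mm.h>	/* virt_to_page(), offset_in_page() */

static int example_arc4_crypt(u8 *key, unsigned int klen,
			      u8 *buf, unsigned int len)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int err;

	/* ciphers are now requested by mode wrapper, e.g. "ecb(arc4)" */
	tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);	/* ERR_PTR-based failure reporting */

	desc.tfm = tfm;
	desc.flags = 0;

	crypto_blkcipher_setkey(tfm, key, klen);
	sg.page = virt_to_page(buf);
	sg.offset = offset_in_page(buf);
	sg.length = len;

	/* encrypt now returns an error code instead of void */
	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);

	crypto_free_blkcipher(tfm);
	return err;
}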
Diffstat (limited to 'net')
-rw-r--r--	net/ieee80211/ieee80211_crypt_ccmp.c	 32
-rw-r--r--	net/ieee80211/ieee80211_crypt_tkip.c	100
-rw-r--r--	net/ieee80211/ieee80211_crypt_wep.c	 35
-rw-r--r--	net/ipv4/Kconfig	  1
-rw-r--r--	net/ipv4/ah4.c	 36
-rw-r--r--	net/ipv4/esp4.c	 85
-rw-r--r--	net/ipv4/ipcomp.c	 25
-rw-r--r--	net/ipv6/Kconfig	  1
-rw-r--r--	net/ipv6/ah6.c	 35
-rw-r--r--	net/ipv6/esp6.c	 90
-rw-r--r--	net/ipv6/ipcomp6.c	 25
-rw-r--r--	net/sctp/endpointola.c	  2
-rw-r--r--	net/sctp/sm_make_chunk.c	 37
-rw-r--r--	net/sctp/socket.c	  6
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_crypto.c	 95
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_mech.c	 24
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_seqnum.c	  4
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_wrap.c	  4
-rw-r--r--	net/sunrpc/auth_gss/gss_spkm3_mech.c	 29
-rw-r--r--	net/xfrm/xfrm_algo.c	 94
-rw-r--r--	net/xfrm/xfrm_user.c	  2
21 files changed, 443 insertions(+), 319 deletions(-)
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c
index 098c66846339..35aa3426c3fa 100644
--- a/net/ieee80211/ieee80211_crypt_ccmp.c
+++ b/net/ieee80211/ieee80211_crypt_ccmp.c
@@ -9,6 +9,7 @@
  * more details.
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -48,7 +49,7 @@ struct ieee80211_ccmp_data {
 
 	int key_idx;
 
-	struct crypto_tfm *tfm;
+	struct crypto_cipher *tfm;
 
 	/* scratch buffers for virt_to_page() (crypto API) */
 	u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN],
@@ -56,20 +57,10 @@ struct ieee80211_ccmp_data {
 	u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
 };
 
-static void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
+static inline void ieee80211_ccmp_aes_encrypt(struct crypto_cipher *tfm,
 				const u8 pt[16], u8 ct[16])
 {
-	struct scatterlist src, dst;
-
-	src.page = virt_to_page(pt);
-	src.offset = offset_in_page(pt);
-	src.length = AES_BLOCK_LEN;
-
-	dst.page = virt_to_page(ct);
-	dst.offset = offset_in_page(ct);
-	dst.length = AES_BLOCK_LEN;
-
-	crypto_cipher_encrypt(tfm, &dst, &src, AES_BLOCK_LEN);
+	crypto_cipher_encrypt_one(tfm, ct, pt);
 }
 
 static void *ieee80211_ccmp_init(int key_idx)
@@ -81,10 +72,11 @@ static void *ieee80211_ccmp_init(int key_idx)
 		goto fail;
 	priv->key_idx = key_idx;
 
-	priv->tfm = crypto_alloc_tfm("aes", 0);
-	if (priv->tfm == NULL) {
+	priv->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(priv->tfm)) {
 		printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate "
 		       "crypto API aes\n");
+		priv->tfm = NULL;
 		goto fail;
 	}
 
@@ -93,7 +85,7 @@ static void *ieee80211_ccmp_init(int key_idx)
       fail:
 	if (priv) {
 		if (priv->tfm)
-			crypto_free_tfm(priv->tfm);
+			crypto_free_cipher(priv->tfm);
 		kfree(priv);
 	}
 
@@ -104,7 +96,7 @@ static void ieee80211_ccmp_deinit(void *priv)
 {
 	struct ieee80211_ccmp_data *_priv = priv;
 	if (_priv && _priv->tfm)
-		crypto_free_tfm(_priv->tfm);
+		crypto_free_cipher(_priv->tfm);
 	kfree(priv);
 }
 
@@ -115,7 +107,7 @@ static inline void xor_block(u8 * b, u8 * a, size_t len)
 		b[i] ^= a[i];
 }
 
-static void ccmp_init_blocks(struct crypto_tfm *tfm,
+static void ccmp_init_blocks(struct crypto_cipher *tfm,
 			     struct ieee80211_hdr_4addr *hdr,
 			     u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0)
 {
@@ -398,7 +390,7 @@ static int ieee80211_ccmp_set_key(void *key, int len, u8 * seq, void *priv)
 {
 	struct ieee80211_ccmp_data *data = priv;
 	int keyidx;
-	struct crypto_tfm *tfm = data->tfm;
+	struct crypto_cipher *tfm = data->tfm;
 
 	keyidx = data->key_idx;
 	memset(data, 0, sizeof(*data));
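(Annotation, not part of the patch.) The ccmp conversion above is the simplest case: for a single 16-byte block there is no scatterlist setup any more; crypto_cipher_encrypt_one() operates directly on virtual addresses. A hedged sketch using only calls visible in the hunks (the helper name is ours):

#include <linux/crypto.h>
#include <linux/err.h>

static int example_aes_encrypt_block(const u8 key[16],
				     const u8 pt[16], u8 ct[16])
{
	struct crypto_cipher *tfm;

	tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_cipher_setkey(tfm, key, 16);
	crypto_cipher_encrypt_one(tfm, ct, pt);	/* one AES block */

	crypto_free_cipher(tfm);
	return 0;
}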
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c
index f2df2f5b3e4c..259572dfd4f1 100644
--- a/net/ieee80211/ieee80211_crypt_tkip.c
+++ b/net/ieee80211/ieee80211_crypt_tkip.c
@@ -9,6 +9,7 @@
  * more details.
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -52,10 +53,10 @@ struct ieee80211_tkip_data {
 
 	int key_idx;
 
-	struct crypto_tfm *tx_tfm_arc4;
-	struct crypto_tfm *tx_tfm_michael;
-	struct crypto_tfm *rx_tfm_arc4;
-	struct crypto_tfm *rx_tfm_michael;
+	struct crypto_blkcipher *rx_tfm_arc4;
+	struct crypto_hash *rx_tfm_michael;
+	struct crypto_blkcipher *tx_tfm_arc4;
+	struct crypto_hash *tx_tfm_michael;
 
 	/* scratch buffers for virt_to_page() (crypto API) */
 	u8 rx_hdr[16], tx_hdr[16];
@@ -87,31 +88,37 @@ static void *ieee80211_tkip_init(int key_idx)
 
 	priv->key_idx = key_idx;
 
-	priv->tx_tfm_arc4 = crypto_alloc_tfm("arc4", 0);
-	if (priv->tx_tfm_arc4 == NULL) {
+	priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
+						CRYPTO_ALG_ASYNC);
+	if (IS_ERR(priv->tx_tfm_arc4)) {
 		printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
 		       "crypto API arc4\n");
+		priv->tfm_arc4 = NULL;
 		goto fail;
 	}
 
-	priv->tx_tfm_michael = crypto_alloc_tfm("michael_mic", 0);
-	if (priv->tx_tfm_michael == NULL) {
+	priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
+						 CRYPTO_ALG_ASYNC);
+	if (IS_ERR(priv->tx_tfm_michael)) {
 		printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
 		       "crypto API michael_mic\n");
 		goto fail;
 	}
 
-	priv->rx_tfm_arc4 = crypto_alloc_tfm("arc4", 0);
-	if (priv->rx_tfm_arc4 == NULL) {
+	priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
+						CRYPTO_ALG_ASYNC);
+	if (IS_ERR(priv->rx_tfm_arc4)) {
 		printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
 		       "crypto API arc4\n");
 		goto fail;
 	}
 
-	priv->rx_tfm_michael = crypto_alloc_tfm("michael_mic", 0);
-	if (priv->rx_tfm_michael == NULL) {
+	priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
+						 CRYPTO_ALG_ASYNC);
+	if (IS_ERR(priv->rx_tfm_michael)) {
 		printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
 		       "crypto API michael_mic\n");
+		priv->tfm_michael = NULL;
 		goto fail;
 	}
 
@@ -120,13 +127,13 @@ static void *ieee80211_tkip_init(int key_idx)
       fail:
 	if (priv) {
 		if (priv->tx_tfm_michael)
-			crypto_free_tfm(priv->tx_tfm_michael);
+			crypto_free_hash(priv->tx_tfm_michael);
 		if (priv->tx_tfm_arc4)
-			crypto_free_tfm(priv->tx_tfm_arc4);
+			crypto_free_blkcipher(priv->tx_tfm_arc4);
 		if (priv->rx_tfm_michael)
-			crypto_free_tfm(priv->rx_tfm_michael);
+			crypto_free_hash(priv->rx_tfm_michael);
 		if (priv->rx_tfm_arc4)
-			crypto_free_tfm(priv->rx_tfm_arc4);
+			crypto_free_blkcipher(priv->rx_tfm_arc4);
 		kfree(priv);
 	}
 
@@ -138,13 +145,13 @@ static void ieee80211_tkip_deinit(void *priv)
 	struct ieee80211_tkip_data *_priv = priv;
 	if (_priv) {
 		if (_priv->tx_tfm_michael)
-			crypto_free_tfm(_priv->tx_tfm_michael);
+			crypto_free_hash(_priv->tx_tfm_michael);
 		if (_priv->tx_tfm_arc4)
-			crypto_free_tfm(_priv->tx_tfm_arc4);
+			crypto_free_blkcipher(_priv->tx_tfm_arc4);
 		if (_priv->rx_tfm_michael)
-			crypto_free_tfm(_priv->rx_tfm_michael);
+			crypto_free_hash(_priv->rx_tfm_michael);
 		if (_priv->rx_tfm_arc4)
-			crypto_free_tfm(_priv->rx_tfm_arc4);
+			crypto_free_blkcipher(_priv->rx_tfm_arc4);
 	}
 	kfree(priv);
 }
@@ -344,6 +351,7 @@ static int ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
 static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 {
 	struct ieee80211_tkip_data *tkey = priv;
+	struct blkcipher_desc desc = { .tfm = tkey->tx_tfm_arc4 };
 	int len;
 	u8 rc4key[16], *pos, *icv;
 	u32 crc;
@@ -377,31 +385,17 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	icv[2] = crc >> 16;
 	icv[3] = crc >> 24;
 
-	crypto_cipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+	crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
 	sg.page = virt_to_page(pos);
 	sg.offset = offset_in_page(pos);
 	sg.length = len + 4;
-	crypto_cipher_encrypt(tkey->tx_tfm_arc4, &sg, &sg, len + 4);
-
-	return 0;
-}
-
-/*
- * deal with seq counter wrapping correctly.
- * refer to timer_after() for jiffies wrapping handling
- */
-static inline int tkip_replay_check(u32 iv32_n, u16 iv16_n,
-				    u32 iv32_o, u16 iv16_o)
-{
-	if ((s32)iv32_n - (s32)iv32_o < 0 ||
-	    (iv32_n == iv32_o && iv16_n <= iv16_o))
-		return 1;
-	return 0;
+	return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
 }
 
 static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 {
 	struct ieee80211_tkip_data *tkey = priv;
+	struct blkcipher_desc desc = { .tfm = tkey->rx_tfm_arc4 };
 	u8 rc4key[16];
 	u8 keyidx, *pos;
 	u32 iv32;
@@ -472,11 +466,18 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 
 	plen = skb->len - hdr_len - 12;
 
-	crypto_cipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+	crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
 	sg.page = virt_to_page(pos);
 	sg.offset = offset_in_page(pos);
 	sg.length = plen + 4;
-	crypto_cipher_decrypt(tkey->rx_tfm_arc4, &sg, &sg, plen + 4);
+	if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG ": TKIP: failed to decrypt "
+			       "received packet from " MAC_FMT "\n",
+			       MAC_ARG(hdr->addr2));
+		}
+		return -7;
+	}
 
 	crc = ~crc32_le(~0, pos, plen);
 	icv[0] = crc;
@@ -510,9 +511,10 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	return keyidx;
 }
 
-static int michael_mic(struct crypto_tfm *tfm_michael, u8 * key, u8 * hdr,
+static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
 		       u8 * data, size_t data_len, u8 * mic)
 {
+	struct hash_desc desc;
 	struct scatterlist sg[2];
 
 	if (tfm_michael == NULL) {
@@ -527,12 +529,12 @@ static int michael_mic(struct crypto_tfm *tfm_michael, u8 * key, u8 * hdr,
 	sg[1].offset = offset_in_page(data);
 	sg[1].length = data_len;
 
-	crypto_digest_init(tfm_michael);
-	crypto_digest_setkey(tfm_michael, key, 8);
-	crypto_digest_update(tfm_michael, sg, 2);
-	crypto_digest_final(tfm_michael, mic);
+	if (crypto_hash_setkey(tfm_michael, key, 8))
+		return -1;
 
-	return 0;
+	desc.tfm = tfm_michael;
+	desc.flags = 0;
+	return crypto_hash_digest(&desc, sg, data_len + 16, mic);
 }
 
 static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
@@ -656,10 +658,10 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 * seq, void *priv)
 {
 	struct ieee80211_tkip_data *tkey = priv;
 	int keyidx;
-	struct crypto_tfm *tfm = tkey->tx_tfm_michael;
-	struct crypto_tfm *tfm2 = tkey->tx_tfm_arc4;
-	struct crypto_tfm *tfm3 = tkey->rx_tfm_michael;
-	struct crypto_tfm *tfm4 = tkey->rx_tfm_arc4;
+	struct crypto_hash *tfm = tkey->tx_tfm_michael;
+	struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
+	struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
+	struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
 
 	keyidx = tkey->key_idx;
 	memset(tkey, 0, sizeof(*tkey));
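(Annotation, not part of the patch.) The michael_mic() change above shows the digest side of the conversion: the stateful crypto_digest_init/setkey/update/final sequence collapses into crypto_hash_setkey() plus one crypto_hash_digest() call driven through a hash_desc. A sketch mirroring the patched function, using only calls visible in the hunks:

#include <linux/crypto.h>

static int example_keyed_digest(struct crypto_hash *tfm, u8 *key,
				struct scatterlist *sg,
				unsigned int nbytes, u8 *out)
{
	struct hash_desc desc;

	if (crypto_hash_setkey(tfm, key, 8))	/* Michael uses an 8-byte key */
		return -1;

	desc.tfm = tfm;
	desc.flags = 0;
	return crypto_hash_digest(&desc, sg, nbytes, out);
}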
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index b435b28857ed..9eeec13c28b0 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -9,6 +9,7 @@
  * more details.
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -32,8 +33,8 @@ struct prism2_wep_data {
 	u8 key[WEP_KEY_LEN + 1];
 	u8 key_len;
 	u8 key_idx;
-	struct crypto_tfm *tx_tfm;
-	struct crypto_tfm *rx_tfm;
+	struct crypto_blkcipher *tx_tfm;
+	struct crypto_blkcipher *rx_tfm;
 };
 
 static void *prism2_wep_init(int keyidx)
@@ -45,15 +46,16 @@ static void *prism2_wep_init(int keyidx)
 		goto fail;
 	priv->key_idx = keyidx;
 
-	priv->tx_tfm = crypto_alloc_tfm("arc4", 0);
-	if (priv->tx_tfm == NULL) {
+	priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(priv->tx_tfm)) {
 		printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
 		       "crypto API arc4\n");
+		priv->tfm = NULL;
 		goto fail;
 	}
 
-	priv->rx_tfm = crypto_alloc_tfm("arc4", 0);
-	if (priv->rx_tfm == NULL) {
+	priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(priv->rx_tfm)) {
 		printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
 		       "crypto API arc4\n");
 		goto fail;
@@ -66,9 +68,9 @@ static void *prism2_wep_init(int keyidx)
       fail:
 	if (priv) {
 		if (priv->tx_tfm)
-			crypto_free_tfm(priv->tx_tfm);
+			crypto_free_blkcipher(priv->tx_tfm);
 		if (priv->rx_tfm)
-			crypto_free_tfm(priv->rx_tfm);
+			crypto_free_blkcipher(priv->rx_tfm);
 		kfree(priv);
 	}
 	return NULL;
@@ -79,9 +81,9 @@ static void prism2_wep_deinit(void *priv)
 	struct prism2_wep_data *_priv = priv;
 	if (_priv) {
 		if (_priv->tx_tfm)
-			crypto_free_tfm(_priv->tx_tfm);
+			crypto_free_blkcipher(_priv->tx_tfm);
 		if (_priv->rx_tfm)
-			crypto_free_tfm(_priv->rx_tfm);
+			crypto_free_blkcipher(_priv->rx_tfm);
 	}
 	kfree(priv);
 }
@@ -133,6 +135,7 @@ static int prism2_wep_build_iv(struct sk_buff *skb, int hdr_len,
 static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 {
 	struct prism2_wep_data *wep = priv;
+	struct blkcipher_desc desc = { .tfm = wep->tx_tfm };
 	u32 crc, klen, len;
 	u8 *pos, *icv;
 	struct scatterlist sg;
@@ -164,13 +167,11 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	icv[2] = crc >> 16;
 	icv[3] = crc >> 24;
 
-	crypto_cipher_setkey(wep->tx_tfm, key, klen);
+	crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
 	sg.page = virt_to_page(pos);
 	sg.offset = offset_in_page(pos);
 	sg.length = len + 4;
-	crypto_cipher_encrypt(wep->tx_tfm, &sg, &sg, len + 4);
-
-	return 0;
+	return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
 }
 
 /* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
@@ -183,6 +184,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 {
 	struct prism2_wep_data *wep = priv;
+	struct blkcipher_desc desc = { .tfm = wep->rx_tfm };
 	u32 crc, klen, plen;
 	u8 key[WEP_KEY_LEN + 3];
 	u8 keyidx, *pos, icv[4];
@@ -207,11 +209,12 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 	/* Apply RC4 to data and compute CRC32 over decrypted data */
 	plen = skb->len - hdr_len - 8;
 
-	crypto_cipher_setkey(wep->rx_tfm, key, klen);
+	crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
 	sg.page = virt_to_page(pos);
 	sg.offset = offset_in_page(pos);
 	sg.length = plen + 4;
-	crypto_cipher_decrypt(wep->rx_tfm, &sg, &sg, plen + 4);
+	if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
+		return -7;
 
 	crc = ~crc32_le(~0, pos, plen);
 	icv[0] = crc;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 8514106761b0..3b5d504a74be 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -386,6 +386,7 @@ config INET_ESP
 	select CRYPTO
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
+	select CRYPTO_CBC
 	select CRYPTO_SHA1
 	select CRYPTO_DES
 	---help---
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 1366bc6ce6a5..2b98943e6b02 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -1,3 +1,4 @@
+#include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
@@ -97,7 +98,10 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
 	ah->spi = x->id.spi;
 	ah->seq_no = htonl(++x->replay.oseq);
 	xfrm_aevent_doreplay(x);
-	ahp->icv(ahp, skb, ah->auth_data);
+	err = ah_mac_digest(ahp, skb, ah->auth_data);
+	if (err)
+		goto error;
+	memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len);
 
 	top_iph->tos = iph->tos;
 	top_iph->ttl = iph->ttl;
@@ -119,6 +123,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int ah_hlen;
 	int ihl;
+	int err = -EINVAL;
 	struct iphdr *iph;
 	struct ip_auth_hdr *ah;
 	struct ah_data *ahp;
@@ -166,8 +171,11 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
 	skb_push(skb, ihl);
-	ahp->icv(ahp, skb, ah->auth_data);
-	if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) {
+	err = ah_mac_digest(ahp, skb, ah->auth_data);
+	if (err)
+		goto out;
+	err = -EINVAL;
+	if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) {
 		x->stats.integrity_failed++;
 		goto out;
 	}
@@ -179,7 +187,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 	return 0;
 
 out:
-	return -EINVAL;
+	return err;
 }
 
 static void ah4_err(struct sk_buff *skb, u32 info)
@@ -204,6 +212,7 @@ static int ah_init_state(struct xfrm_state *x)
 {
 	struct ah_data *ahp = NULL;
 	struct xfrm_algo_desc *aalg_desc;
+	struct crypto_hash *tfm;
 
 	if (!x->aalg)
 		goto error;
@@ -221,24 +230,27 @@ static int ah_init_state(struct xfrm_state *x)
 
 	ahp->key = x->aalg->alg_key;
 	ahp->key_len = (x->aalg->alg_key_len+7)/8;
-	ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
-	if (!ahp->tfm)
+	tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
+		goto error;
+
+	ahp->tfm = tfm;
+	if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len))
 		goto error;
-	ahp->icv = ah_hmac_digest;
 
 	/*
 	 * Lookup the algorithm description maintained by xfrm_algo,
 	 * verify crypto transform properties, and store information
 	 * we need for AH processing. This lookup cannot fail here
-	 * after a successful crypto_alloc_tfm().
+	 * after a successful crypto_alloc_hash().
 	 */
 	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
 	BUG_ON(!aalg_desc);
 
 	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-	    crypto_tfm_alg_digestsize(ahp->tfm)) {
+	    crypto_hash_digestsize(tfm)) {
 		printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
-		       x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm),
+		       x->aalg->alg_name, crypto_hash_digestsize(tfm),
 		       aalg_desc->uinfo.auth.icv_fullbits/8);
 		goto error;
 	}
@@ -262,7 +274,7 @@ static int ah_init_state(struct xfrm_state *x)
 error:
 	if (ahp) {
 		kfree(ahp->work_icv);
-		crypto_free_tfm(ahp->tfm);
+		crypto_free_hash(ahp->tfm);
 		kfree(ahp);
 	}
 	return -EINVAL;
@@ -277,7 +289,7 @@ static void ah_destroy(struct xfrm_state *x)
 
 	kfree(ahp->work_icv);
 	ahp->work_icv = NULL;
-	crypto_free_tfm(ahp->tfm);
+	crypto_free_hash(ahp->tfm);
 	ahp->tfm = NULL;
 	kfree(ahp);
 }
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index fc2f8ce441de..b428489f6ccd 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -1,3 +1,4 @@
+#include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
@@ -16,7 +17,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	int err;
 	struct iphdr *top_iph;
 	struct ip_esp_hdr *esph;
-	struct crypto_tfm *tfm;
+	struct crypto_blkcipher *tfm;
+	struct blkcipher_desc desc;
 	struct esp_data *esp;
 	struct sk_buff *trailer;
 	int blksize;
@@ -36,7 +38,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	esp = x->data;
 	alen = esp->auth.icv_trunc_len;
 	tfm = esp->conf.tfm;
-	blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4);
+	desc.tfm = tfm;
+	desc.flags = 0;
+	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
 	clen = ALIGN(clen + 2, blksize);
 	if (esp->conf.padlen)
 		clen = ALIGN(clen, esp->conf.padlen);
@@ -92,7 +96,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	xfrm_aevent_doreplay(x);
 
 	if (esp->conf.ivlen)
-		crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
+		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
 
 	do {
 		struct scatterlist *sg = &esp->sgbuf[0];
@@ -103,26 +107,27 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 			goto error;
 		}
 		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
-		crypto_cipher_encrypt(tfm, sg, sg, clen);
+		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
 		if (unlikely(sg != &esp->sgbuf[0]))
 			kfree(sg);
 	} while (0);
 
+	if (unlikely(err))
+		goto error;
+
 	if (esp->conf.ivlen) {
-		memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
-		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
+		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
+		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
 	}
 
 	if (esp->auth.icv_full_len) {
-		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
-			      sizeof(struct ip_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
-		pskb_put(skb, trailer, alen);
+		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
+				     sizeof(*esph) + esp->conf.ivlen + clen);
+		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
 	}
 
 	ip_send_check(top_iph);
 
-	err = 0;
-
 error:
 	return err;
 }
@@ -137,8 +142,10 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 	struct iphdr *iph;
 	struct ip_esp_hdr *esph;
 	struct esp_data *esp = x->data;
+	struct crypto_blkcipher *tfm = esp->conf.tfm;
+	struct blkcipher_desc desc = { .tfm = tfm };
 	struct sk_buff *trailer;
-	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
+	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
 	int alen = esp->auth.icv_trunc_len;
 	int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
 	int nfrags;
@@ -146,6 +153,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 	u8 nexthdr[2];
 	struct scatterlist *sg;
 	int padlen;
+	int err;
 
 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
 		goto out;
@@ -155,15 +163,16 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	/* If integrity check is required, do this. */
 	if (esp->auth.icv_full_len) {
-		u8 sum[esp->auth.icv_full_len];
-		u8 sum1[alen];
-
-		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);
+		u8 sum[alen];
 
-		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
+		err = esp_mac_digest(esp, skb, 0, skb->len - alen);
+		if (err)
+			goto out;
+
+		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
 			BUG();
 
-		if (unlikely(memcmp(sum, sum1, alen))) {
+		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
 			x->stats.integrity_failed++;
 			goto out;
 		}
@@ -178,7 +187,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	/* Get ivec. This can be wrong, check against another impls. */
 	if (esp->conf.ivlen)
-		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));
+		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
 
 	sg = &esp->sgbuf[0];
 
@@ -188,9 +197,11 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 		goto out;
 	}
 	skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
-	crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
+	err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
 	if (unlikely(sg != &esp->sgbuf[0]))
 		kfree(sg);
+	if (unlikely(err))
+		return err;
 
 	if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
 		BUG();
@@ -254,7 +265,7 @@ out:
 static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
 {
 	struct esp_data *esp = x->data;
-	u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
+	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
 
 	if (x->props.mode) {
 		mtu = ALIGN(mtu + 2, blksize);
@@ -293,11 +304,11 @@ static void esp_destroy(struct xfrm_state *x)
 	if (!esp)
 		return;
 
-	crypto_free_tfm(esp->conf.tfm);
+	crypto_free_blkcipher(esp->conf.tfm);
 	esp->conf.tfm = NULL;
 	kfree(esp->conf.ivec);
 	esp->conf.ivec = NULL;
-	crypto_free_tfm(esp->auth.tfm);
+	crypto_free_hash(esp->auth.tfm);
 	esp->auth.tfm = NULL;
 	kfree(esp->auth.work_icv);
 	esp->auth.work_icv = NULL;
@@ -307,6 +318,7 @@ static void esp_destroy(struct xfrm_state *x)
 static int esp_init_state(struct xfrm_state *x)
 {
 	struct esp_data *esp = NULL;
+	struct crypto_blkcipher *tfm;
 
 	/* null auth and encryption can have zero length keys */
 	if (x->aalg) {
@@ -322,22 +334,27 @@ static int esp_init_state(struct xfrm_state *x)
 
 	if (x->aalg) {
 		struct xfrm_algo_desc *aalg_desc;
+		struct crypto_hash *hash;
 
 		esp->auth.key = x->aalg->alg_key;
 		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
-		esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
-		if (esp->auth.tfm == NULL)
+		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
+					 CRYPTO_ALG_ASYNC);
+		if (IS_ERR(hash))
+			goto error;
+
+		esp->auth.tfm = hash;
+		if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
 			goto error;
-		esp->auth.icv = esp_hmac_digest;
 
 		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
 		BUG_ON(!aalg_desc);
 
 		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-		    crypto_tfm_alg_digestsize(esp->auth.tfm)) {
+		    crypto_hash_digestsize(hash)) {
 			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
 				 x->aalg->alg_name,
-				 crypto_tfm_alg_digestsize(esp->auth.tfm),
+				 crypto_hash_digestsize(hash),
 				 aalg_desc->uinfo.auth.icv_fullbits/8);
 			goto error;
 		}
@@ -351,13 +368,11 @@ static int esp_init_state(struct xfrm_state *x)
 	}
 	esp->conf.key = x->ealg->alg_key;
 	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
-	if (x->props.ealgo == SADB_EALG_NULL)
-		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB);
-	else
-		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
-	if (esp->conf.tfm == NULL)
+	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
 		goto error;
-	esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm);
+	esp->conf.tfm = tfm;
+	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
 	esp->conf.padlen = 0;
 	if (esp->conf.ivlen) {
 		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
@@ -365,7 +380,7 @@ static int esp_init_state(struct xfrm_state *x)
 			goto error;
 		get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
 	}
-	if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len))
+	if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
 		goto error;
 	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
 	if (x->props.mode)
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index a0c28b2b756e..5bb9c9f03fb6 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -32,7 +32,7 @@
 
 struct ipcomp_tfms {
 	struct list_head list;
-	struct crypto_tfm **tfms;
+	struct crypto_comp **tfms;
 	int users;
 };
 
@@ -46,7 +46,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
 	int err, plen, dlen;
 	struct ipcomp_data *ipcd = x->data;
 	u8 *start, *scratch;
-	struct crypto_tfm *tfm;
+	struct crypto_comp *tfm;
 	int cpu;
 
 	plen = skb->len;
@@ -107,7 +107,7 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
 	struct iphdr *iph = skb->nh.iph;
 	struct ipcomp_data *ipcd = x->data;
 	u8 *start, *scratch;
-	struct crypto_tfm *tfm;
+	struct crypto_comp *tfm;
 	int cpu;
 
 	ihlen = iph->ihl * 4;
@@ -302,7 +302,7 @@ static void **ipcomp_alloc_scratches(void)
 	return scratches;
 }
 
-static void ipcomp_free_tfms(struct crypto_tfm **tfms)
+static void ipcomp_free_tfms(struct crypto_comp **tfms)
 {
 	struct ipcomp_tfms *pos;
 	int cpu;
@@ -324,28 +324,28 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms)
 		return;
 
 	for_each_possible_cpu(cpu) {
-		struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
-		crypto_free_tfm(tfm);
+		struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
+		crypto_free_comp(tfm);
 	}
 	free_percpu(tfms);
 }
 
-static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
+static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
 {
 	struct ipcomp_tfms *pos;
-	struct crypto_tfm **tfms;
+	struct crypto_comp **tfms;
 	int cpu;
 
 	/* This can be any valid CPU ID so we don't need locking. */
 	cpu = raw_smp_processor_id();
 
 	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
-		struct crypto_tfm *tfm;
+		struct crypto_comp *tfm;
 
 		tfms = pos->tfms;
 		tfm = *per_cpu_ptr(tfms, cpu);
 
-		if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) {
+		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
 			pos->users++;
 			return tfms;
 		}
@@ -359,12 +359,13 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
 	INIT_LIST_HEAD(&pos->list);
 	list_add(&pos->list, &ipcomp_tfms_list);
 
-	pos->tfms = tfms = alloc_percpu(struct crypto_tfm *);
+	pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
 	if (!tfms)
 		goto error;
 
 	for_each_possible_cpu(cpu) {
-		struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
+		struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
+							    CRYPTO_ALG_ASYNC);
 		if (!tfm)
 			goto error;
 		*per_cpu_ptr(tfms, cpu) = tfm;
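(Annotation, not part of the patch.) ipcomp keeps one transform per CPU so compression can run on every CPU without locking. With the new crypto_comp type the per-cpu structure is unchanged; note the patch keeps a NULL check on crypto_alloc_comp() rather than switching to IS_ERR() as the cipher and hash sites do. A sketch of the allocation loop, with unwinding left to the caller as in the patch (the helper name is ours):

#include <linux/crypto.h>
#include <linux/percpu.h>

static struct crypto_comp **example_alloc_comp_percpu(const char *alg_name)
{
	struct crypto_comp **tfms;
	int cpu;

	tfms = alloc_percpu(struct crypto_comp *);
	if (!tfms)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
							    CRYPTO_ALG_ASYNC);
		if (!tfm)
			return NULL;	/* caller frees via the free_tfms path */
		*per_cpu_ptr(tfms, cpu) = tfm;
	}
	return tfms;
}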
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index e923d4dea418..0ba06c0c5d39 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -77,6 +77,7 @@ config INET6_ESP
 	select CRYPTO
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
+	select CRYPTO_CBC
 	select CRYPTO_SHA1
 	select CRYPTO_DES
 	---help---
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 9d4831bd4335..00ffa7bc6c9f 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -213,7 +213,10 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
 	ah->spi = x->id.spi;
 	ah->seq_no = htonl(++x->replay.oseq);
 	xfrm_aevent_doreplay(x);
-	ahp->icv(ahp, skb, ah->auth_data);
+	err = ah_mac_digest(ahp, skb, ah->auth_data);
+	if (err)
+		goto error_free_iph;
+	memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len);
 
 	err = 0;
 
@@ -251,6 +254,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 	u16 hdr_len;
 	u16 ah_hlen;
 	int nexthdr;
+	int err = -EINVAL;
 
 	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
 		goto out;
@@ -292,8 +296,11 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
 	memset(ah->auth_data, 0, ahp->icv_trunc_len);
 	skb_push(skb, hdr_len);
-	ahp->icv(ahp, skb, ah->auth_data);
-	if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) {
+	err = ah_mac_digest(ahp, skb, ah->auth_data);
+	if (err)
+		goto free_out;
+	err = -EINVAL;
+	if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) {
 		LIMIT_NETDEBUG(KERN_WARNING "ipsec ah authentication error\n");
 		x->stats.integrity_failed++;
 		goto free_out;
@@ -310,7 +317,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 free_out:
 	kfree(tmp_hdr);
 out:
-	return -EINVAL;
+	return err;
 }
 
 static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -338,6 +345,7 @@ static int ah6_init_state(struct xfrm_state *x)
 {
 	struct ah_data *ahp = NULL;
 	struct xfrm_algo_desc *aalg_desc;
+	struct crypto_hash *tfm;
 
 	if (!x->aalg)
 		goto error;
@@ -355,24 +363,27 @@ static int ah6_init_state(struct xfrm_state *x)
 
 	ahp->key = x->aalg->alg_key;
 	ahp->key_len = (x->aalg->alg_key_len+7)/8;
-	ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
-	if (!ahp->tfm)
+	tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
+		goto error;
+
+	ahp->tfm = tfm;
+	if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len))
 		goto error;
-	ahp->icv = ah_hmac_digest;
 
 	/*
 	 * Lookup the algorithm description maintained by xfrm_algo,
 	 * verify crypto transform properties, and store information
 	 * we need for AH processing. This lookup cannot fail here
-	 * after a successful crypto_alloc_tfm().
+	 * after a successful crypto_alloc_hash().
 	 */
 	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
 	BUG_ON(!aalg_desc);
 
 	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-	    crypto_tfm_alg_digestsize(ahp->tfm)) {
+	    crypto_hash_digestsize(tfm)) {
 		printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
-		       x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm),
+		       x->aalg->alg_name, crypto_hash_digestsize(tfm),
 		       aalg_desc->uinfo.auth.icv_fullbits/8);
 		goto error;
 	}
@@ -396,7 +407,7 @@ static int ah6_init_state(struct xfrm_state *x)
 error:
 	if (ahp) {
 		kfree(ahp->work_icv);
-		crypto_free_tfm(ahp->tfm);
+		crypto_free_hash(ahp->tfm);
 		kfree(ahp);
 	}
 	return -EINVAL;
@@ -411,7 +422,7 @@ static void ah6_destroy(struct xfrm_state *x)
 
 	kfree(ahp->work_icv);
 	ahp->work_icv = NULL;
-	crypto_free_tfm(ahp->tfm);
+	crypto_free_hash(ahp->tfm);
 	ahp->tfm = NULL;
 	kfree(ahp);
 }
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index a278d5e862fe..2ebfd281e721 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -24,6 +24,7 @@
  * This file is derived from net/ipv4/esp.c
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
@@ -44,7 +45,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	int hdr_len;
 	struct ipv6hdr *top_iph;
 	struct ipv6_esp_hdr *esph;
-	struct crypto_tfm *tfm;
+	struct crypto_blkcipher *tfm;
+	struct blkcipher_desc desc;
 	struct esp_data *esp;
 	struct sk_buff *trailer;
 	int blksize;
@@ -67,7 +69,9 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 
 	alen = esp->auth.icv_trunc_len;
 	tfm = esp->conf.tfm;
-	blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4);
+	desc.tfm = tfm;
+	desc.flags = 0;
+	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
 	clen = ALIGN(clen + 2, blksize);
 	if (esp->conf.padlen)
 		clen = ALIGN(clen, esp->conf.padlen);
@@ -96,7 +100,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	xfrm_aevent_doreplay(x);
 
 	if (esp->conf.ivlen)
-		crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
+		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
 
 	do {
 		struct scatterlist *sg = &esp->sgbuf[0];
@@ -107,24 +111,25 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 			goto error;
 		}
 		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
-		crypto_cipher_encrypt(tfm, sg, sg, clen);
+		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
 		if (unlikely(sg != &esp->sgbuf[0]))
 			kfree(sg);
 	} while (0);
 
+	if (unlikely(err))
+		goto error;
+
 	if (esp->conf.ivlen) {
-		memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
-		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
+		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
+		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
 	}
 
 	if (esp->auth.icv_full_len) {
-		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
-			      sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
-		pskb_put(skb, trailer, alen);
+		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
+				     sizeof(*esph) + esp->conf.ivlen + clen);
+		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
 	}
 
-	err = 0;
-
 error:
 	return err;
 }
@@ -134,8 +139,10 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 	struct ipv6hdr *iph;
 	struct ipv6_esp_hdr *esph;
 	struct esp_data *esp = x->data;
+	struct crypto_blkcipher *tfm = esp->conf.tfm;
+	struct blkcipher_desc desc = { .tfm = tfm };
 	struct sk_buff *trailer;
-	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
+	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
 	int alen = esp->auth.icv_trunc_len;
 	int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen;
 
@@ -155,15 +162,16 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	/* If integrity check is required, do this. */
 	if (esp->auth.icv_full_len) {
-		u8 sum[esp->auth.icv_full_len];
-		u8 sum1[alen];
+		u8 sum[alen];
 
-		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);
+		ret = esp_mac_digest(esp, skb, 0, skb->len - alen);
+		if (ret)
+			goto out;
 
-		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
+		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
 			BUG();
 
-		if (unlikely(memcmp(sum, sum1, alen))) {
+		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
 			x->stats.integrity_failed++;
 			ret = -EINVAL;
 			goto out;
@@ -182,7 +190,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	/* Get ivec. This can be wrong, check against another impls. */
 	if (esp->conf.ivlen)
-		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));
+		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
 
 	{
 		u8 nexthdr[2];
@@ -197,9 +205,11 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 		}
 	}
 	skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen);
-	crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
+	ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
 	if (unlikely(sg != &esp->sgbuf[0]))
 		kfree(sg);
+	if (unlikely(ret))
+		goto out;
 
 	if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
 		BUG();
@@ -225,7 +235,7 @@ out:
 static u32 esp6_get_max_size(struct xfrm_state *x, int mtu)
 {
 	struct esp_data *esp = x->data;
-	u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
+	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
 
 	if (x->props.mode) {
 		mtu = ALIGN(mtu + 2, blksize);
@@ -266,11 +276,11 @@ static void esp6_destroy(struct xfrm_state *x)
 	if (!esp)
 		return;
 
-	crypto_free_tfm(esp->conf.tfm);
+	crypto_free_blkcipher(esp->conf.tfm);
 	esp->conf.tfm = NULL;
 	kfree(esp->conf.ivec);
 	esp->conf.ivec = NULL;
-	crypto_free_tfm(esp->auth.tfm);
+	crypto_free_hash(esp->auth.tfm);
 	esp->auth.tfm = NULL;
 	kfree(esp->auth.work_icv);
 	esp->auth.work_icv = NULL;
@@ -280,6 +290,7 @@ static void esp6_destroy(struct xfrm_state *x)
 static int esp6_init_state(struct xfrm_state *x)
 {
 	struct esp_data *esp = NULL;
+	struct crypto_blkcipher *tfm;
 
 	/* null auth and encryption can have zero length keys */
 	if (x->aalg) {
@@ -298,24 +309,29 @@ static int esp6_init_state(struct xfrm_state *x)
 
 	if (x->aalg) {
 		struct xfrm_algo_desc *aalg_desc;
+		struct crypto_hash *hash;
 
 		esp->auth.key = x->aalg->alg_key;
 		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
-		esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
-		if (esp->auth.tfm == NULL)
+		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
+					 CRYPTO_ALG_ASYNC);
+		if (IS_ERR(hash))
+			goto error;
+
+		esp->auth.tfm = hash;
+		if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
 			goto error;
-		esp->auth.icv = esp_hmac_digest;
 
 		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
 		BUG_ON(!aalg_desc);
 
 		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-		    crypto_tfm_alg_digestsize(esp->auth.tfm)) {
-			printk(KERN_INFO "ESP: %s digestsize %u != %hu\n",
+		    crypto_hash_digestsize(hash)) {
+			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
 				 x->aalg->alg_name,
-				 crypto_tfm_alg_digestsize(esp->auth.tfm),
+				 crypto_hash_digestsize(hash),
 				 aalg_desc->uinfo.auth.icv_fullbits/8);
 			goto error;
 		}
 
 		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
@@ -327,13 +343,11 @@ static int esp6_init_state(struct xfrm_state *x)
 	}
 	esp->conf.key = x->ealg->alg_key;
 	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
-	if (x->props.ealgo == SADB_EALG_NULL)
-		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB);
-	else
-		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
-	if (esp->conf.tfm == NULL)
+	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
 		goto error;
-	esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm);
+	esp->conf.tfm = tfm;
+	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
 	esp->conf.padlen = 0;
 	if (esp->conf.ivlen) {
 		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
@@ -341,7 +355,7 @@ static int esp6_init_state(struct xfrm_state *x)
 			goto error;
 		get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
 	}
-	if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len))
+	if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
 		goto error;
 	x->props.header_len = sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
 	if (x->props.mode)
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 7e4d1c17bfbc..a81e9e9d93bd 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -53,7 +53,7 @@
53 53
54struct ipcomp6_tfms { 54struct ipcomp6_tfms {
55 struct list_head list; 55 struct list_head list;
56 struct crypto_tfm **tfms; 56 struct crypto_comp **tfms;
57 int users; 57 int users;
58}; 58};
59 59
@@ -70,7 +70,7 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
70 int plen, dlen; 70 int plen, dlen;
71 struct ipcomp_data *ipcd = x->data; 71 struct ipcomp_data *ipcd = x->data;
72 u8 *start, *scratch; 72 u8 *start, *scratch;
73 struct crypto_tfm *tfm; 73 struct crypto_comp *tfm;
74 int cpu; 74 int cpu;
75 75
76 if (skb_linearize_cow(skb)) 76 if (skb_linearize_cow(skb))
@@ -129,7 +129,7 @@ static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb)
129 struct ipcomp_data *ipcd = x->data; 129 struct ipcomp_data *ipcd = x->data;
130 int plen, dlen; 130 int plen, dlen;
131 u8 *start, *scratch; 131 u8 *start, *scratch;
132 struct crypto_tfm *tfm; 132 struct crypto_comp *tfm;
133 int cpu; 133 int cpu;
134 134
135 hdr_len = skb->h.raw - skb->data; 135 hdr_len = skb->h.raw - skb->data;
@@ -301,7 +301,7 @@ static void **ipcomp6_alloc_scratches(void)
301 return scratches; 301 return scratches;
302} 302}
303 303
304static void ipcomp6_free_tfms(struct crypto_tfm **tfms) 304static void ipcomp6_free_tfms(struct crypto_comp **tfms)
305{ 305{
306 struct ipcomp6_tfms *pos; 306 struct ipcomp6_tfms *pos;
307 int cpu; 307 int cpu;
@@ -323,28 +323,28 @@ static void ipcomp6_free_tfms(struct crypto_tfm **tfms)
323 return; 323 return;
324 324
325 for_each_possible_cpu(cpu) { 325 for_each_possible_cpu(cpu) {
326 struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); 326 struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
327 crypto_free_tfm(tfm); 327 crypto_free_comp(tfm);
328 } 328 }
329 free_percpu(tfms); 329 free_percpu(tfms);
330} 330}
331 331
332static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name) 332static struct crypto_comp **ipcomp6_alloc_tfms(const char *alg_name)
333{ 333{
334 struct ipcomp6_tfms *pos; 334 struct ipcomp6_tfms *pos;
335 struct crypto_tfm **tfms; 335 struct crypto_comp **tfms;
336 int cpu; 336 int cpu;
337 337
338 /* This can be any valid CPU ID so we don't need locking. */ 338 /* This can be any valid CPU ID so we don't need locking. */
339 cpu = raw_smp_processor_id(); 339 cpu = raw_smp_processor_id();
340 340
341 list_for_each_entry(pos, &ipcomp6_tfms_list, list) { 341 list_for_each_entry(pos, &ipcomp6_tfms_list, list) {
342 struct crypto_tfm *tfm; 342 struct crypto_comp *tfm;
343 343
344 tfms = pos->tfms; 344 tfms = pos->tfms;
345 tfm = *per_cpu_ptr(tfms, cpu); 345 tfm = *per_cpu_ptr(tfms, cpu);
346 346
347 if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) { 347 if (!strcmp(crypto_comp_name(tfm), alg_name)) {
348 pos->users++; 348 pos->users++;
349 return tfms; 349 return tfms;
350 } 350 }
@@ -358,12 +358,13 @@ static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name)
358 INIT_LIST_HEAD(&pos->list); 358 INIT_LIST_HEAD(&pos->list);
359 list_add(&pos->list, &ipcomp6_tfms_list); 359 list_add(&pos->list, &ipcomp6_tfms_list);
360 360
361 pos->tfms = tfms = alloc_percpu(struct crypto_tfm *); 361 pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
362 if (!tfms) 362 if (!tfms)
363 goto error; 363 goto error;
364 364
365 for_each_possible_cpu(cpu) { 365 for_each_possible_cpu(cpu) {
366 struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0); 366 struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
367 CRYPTO_ALG_ASYNC);
367 if (!tfm) 368 if (!tfm)
368 goto error; 369 goto error;
369 *per_cpu_ptr(tfms, cpu) = tfm; 370 *per_cpu_ptr(tfms, cpu) = tfm;
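
[Annotation] ipcomp6 keeps one compression transform per possible CPU because a crypto_comp carries per-call working state; the hunks above only retype the per-CPU array from crypto_tfm to crypto_comp. A hedged sketch of that allocation scheme, assuming a 2.6.19-era tree (note crypto_alloc_comp() also returns an ERR_PTR, so this sketch checks IS_ERR() where the hunk tests for NULL; the helper name is illustrative):

        #include <linux/crypto.h>
        #include <linux/err.h>
        #include <linux/percpu.h>

        static struct crypto_comp **example_alloc_comp_percpu(const char *alg_name)
        {
                struct crypto_comp **tfms;
                int cpu;

                tfms = alloc_percpu(struct crypto_comp *);  /* zero-filled */
                if (!tfms)
                        return NULL;

                for_each_possible_cpu(cpu) {
                        struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
                                                                    CRYPTO_ALG_ASYNC);
                        if (IS_ERR(tfm))
                                goto error;
                        *per_cpu_ptr(tfms, cpu) = tfm;
                }
                return tfms;

        error:
                /* slots past the failing CPU are still NULL */
                for_each_possible_cpu(cpu) {
                        struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
                        if (tfm)
                                crypto_free_comp(tfm);
                }
                free_percpu(tfms);
                return NULL;
        }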
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index ffda1d680529..35c49ff2d062 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -173,7 +173,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
173 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); 173 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
174 174
175 /* Free up the HMAC transform. */ 175 /* Free up the HMAC transform. */
176 sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac); 176 crypto_free_hash(sctp_sk(ep->base.sk)->hmac);
177 177
178 /* Cleanup. */ 178 /* Cleanup. */
179 sctp_inq_free(&ep->base.inqueue); 179 sctp_inq_free(&ep->base.inqueue);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 17b509282cf2..7745bdea7817 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1282,10 +1282,8 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
1282 1282
1283 retval = kmalloc(*cookie_len, GFP_ATOMIC); 1283 retval = kmalloc(*cookie_len, GFP_ATOMIC);
1284 1284
1285 if (!retval) { 1285 if (!retval)
1286 *cookie_len = 0;
1287 goto nodata; 1286 goto nodata;
1288 }
1289 1287
1290 /* Clear this memory since we are sending this data structure 1288 /* Clear this memory since we are sending this data structure
1291 * out on the network. 1289 * out on the network.
@@ -1321,19 +1319,29 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
1321 ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len); 1319 ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);
1322 1320
1323 if (sctp_sk(ep->base.sk)->hmac) { 1321 if (sctp_sk(ep->base.sk)->hmac) {
1322 struct hash_desc desc;
1323
1324 /* Sign the message. */ 1324 /* Sign the message. */
1325 sg.page = virt_to_page(&cookie->c); 1325 sg.page = virt_to_page(&cookie->c);
1326 sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE; 1326 sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE;
1327 sg.length = bodysize; 1327 sg.length = bodysize;
1328 keylen = SCTP_SECRET_SIZE; 1328 keylen = SCTP_SECRET_SIZE;
1329 key = (char *)ep->secret_key[ep->current_key]; 1329 key = (char *)ep->secret_key[ep->current_key];
1330 desc.tfm = sctp_sk(ep->base.sk)->hmac;
1331 desc.flags = 0;
1330 1332
1331 sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, 1333 if (crypto_hash_setkey(desc.tfm, key, keylen) ||
1332 &sg, 1, cookie->signature); 1334 crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
1335 goto free_cookie;
1333 } 1336 }
1334 1337
1335nodata:
1336 return retval; 1338 return retval;
1339
1340free_cookie:
1341 kfree(retval);
1342nodata:
1343 *cookie_len = 0;
1344 return NULL;
1337} 1345}
1338 1346
1339/* Unpack the cookie from COOKIE ECHO chunk, recreating the association. */ 1347/* Unpack the cookie from COOKIE ECHO chunk, recreating the association. */
@@ -1354,6 +1362,7 @@ struct sctp_association *sctp_unpack_cookie(
1354 sctp_scope_t scope; 1362 sctp_scope_t scope;
1355 struct sk_buff *skb = chunk->skb; 1363 struct sk_buff *skb = chunk->skb;
1356 struct timeval tv; 1364 struct timeval tv;
1365 struct hash_desc desc;
1357 1366
1358 /* Header size is static data prior to the actual cookie, including 1367 /* Header size is static data prior to the actual cookie, including
1359 * any padding. 1368 * any padding.
@@ -1389,17 +1398,25 @@ struct sctp_association *sctp_unpack_cookie(
1389 sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE; 1398 sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE;
1390 sg.length = bodysize; 1399 sg.length = bodysize;
1391 key = (char *)ep->secret_key[ep->current_key]; 1400 key = (char *)ep->secret_key[ep->current_key];
1401 desc.tfm = sctp_sk(ep->base.sk)->hmac;
1402 desc.flags = 0;
1392 1403
1393 memset(digest, 0x00, SCTP_SIGNATURE_SIZE); 1404 memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
1394 sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, &sg, 1405 if (crypto_hash_setkey(desc.tfm, key, keylen) ||
1395 1, digest); 1406 crypto_hash_digest(&desc, &sg, bodysize, digest)) {
1407 *error = -SCTP_IERROR_NOMEM;
1408 goto fail;
1409 }
1396 1410
1397 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) { 1411 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
1398 /* Try the previous key. */ 1412 /* Try the previous key. */
1399 key = (char *)ep->secret_key[ep->last_key]; 1413 key = (char *)ep->secret_key[ep->last_key];
1400 memset(digest, 0x00, SCTP_SIGNATURE_SIZE); 1414 memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
1401 sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, 1415 if (crypto_hash_setkey(desc.tfm, key, keylen) ||
1402 &sg, 1, digest); 1416 crypto_hash_digest(&desc, &sg, bodysize, digest)) {
1417 *error = -SCTP_IERROR_NOMEM;
1418 goto fail;
1419 }
1403 1420
1404 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) { 1421 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
1405 /* Yikes! Still bad signature! */ 1422 /* Yikes! Still bad signature! */
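
[Annotation] The SCTP hunks replace the old sctp_crypto_hmac() wrapper with a stack-allocated struct hash_desc plus explicit crypto_hash_setkey()/crypto_hash_digest() calls, and both cookie paths now bail out cleanly when either step fails. A minimal sketch of that keyed-digest pattern over a flat buffer, assuming a 2.6.19-era tree ("hmac(sha1)" and the function name are illustrative, not from the commit):

        #include <linux/crypto.h>
        #include <linux/err.h>
        #include <linux/scatterlist.h>

        static int example_hmac(const u8 *key, unsigned int keylen,
                                void *data, unsigned int len, u8 *out)
        {
                struct hash_desc desc;
                struct scatterlist sg;
                int err;

                desc.tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
                if (IS_ERR(desc.tfm))
                        return PTR_ERR(desc.tfm);
                desc.flags = 0;         /* no CRYPTO_TFM_REQ_MAY_SLEEP */

                sg_set_buf(&sg, data, len);

                /* setkey once; digest covers init/update/final in one call */
                err = crypto_hash_setkey(desc.tfm, key, keylen);
                if (!err)
                        err = crypto_hash_digest(&desc, &sg, len, out);

                crypto_free_hash(desc.tfm);
                return err;
        }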
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index dab15949958e..85caf7963886 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4898,7 +4898,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
4898int sctp_inet_listen(struct socket *sock, int backlog) 4898int sctp_inet_listen(struct socket *sock, int backlog)
4899{ 4899{
4900 struct sock *sk = sock->sk; 4900 struct sock *sk = sock->sk;
4901 struct crypto_tfm *tfm=NULL; 4901 struct crypto_hash *tfm = NULL;
4902 int err = -EINVAL; 4902 int err = -EINVAL;
4903 4903
4904 if (unlikely(backlog < 0)) 4904 if (unlikely(backlog < 0))
@@ -4911,7 +4911,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
4911 4911
4912 /* Allocate HMAC for generating cookie. */ 4912 /* Allocate HMAC for generating cookie. */
4913 if (sctp_hmac_alg) { 4913 if (sctp_hmac_alg) {
4914 tfm = sctp_crypto_alloc_tfm(sctp_hmac_alg, 0); 4914 tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
4915 if (!tfm) { 4915 if (!tfm) {
4916 err = -ENOSYS; 4916 err = -ENOSYS;
4917 goto out; 4917 goto out;
@@ -4937,7 +4937,7 @@ out:
4937 sctp_release_sock(sk); 4937 sctp_release_sock(sk);
4938 return err; 4938 return err;
4939cleanup: 4939cleanup:
4940 sctp_crypto_free_tfm(tfm); 4940 crypto_free_hash(tfm);
4941 goto out; 4941 goto out;
4942} 4942}
4943 4943
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 76b969e6904f..e11a40b25cce 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -34,6 +34,7 @@
34 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 34 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
35 */ 35 */
36 36
37#include <linux/err.h>
37#include <linux/types.h> 38#include <linux/types.h>
38#include <linux/mm.h> 39#include <linux/mm.h>
39#include <linux/slab.h> 40#include <linux/slab.h>
@@ -49,7 +50,7 @@
49 50
50u32 51u32
51krb5_encrypt( 52krb5_encrypt(
52 struct crypto_tfm *tfm, 53 struct crypto_blkcipher *tfm,
53 void * iv, 54 void * iv,
54 void * in, 55 void * in,
55 void * out, 56 void * out,
@@ -58,26 +59,27 @@ krb5_encrypt(
58 u32 ret = -EINVAL; 59 u32 ret = -EINVAL;
59 struct scatterlist sg[1]; 60 struct scatterlist sg[1];
60 u8 local_iv[16] = {0}; 61 u8 local_iv[16] = {0};
62 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
61 63
62 dprintk("RPC: krb5_encrypt: input data:\n"); 64 dprintk("RPC: krb5_encrypt: input data:\n");
63 print_hexl((u32 *)in, length, 0); 65 print_hexl((u32 *)in, length, 0);
64 66
65 if (length % crypto_tfm_alg_blocksize(tfm) != 0) 67 if (length % crypto_blkcipher_blocksize(tfm) != 0)
66 goto out; 68 goto out;
67 69
68 if (crypto_tfm_alg_ivsize(tfm) > 16) { 70 if (crypto_blkcipher_ivsize(tfm) > 16) {
69 dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n", 71 dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n",
70 crypto_tfm_alg_ivsize(tfm)); 72 crypto_blkcipher_ivsize(tfm));
71 goto out; 73 goto out;
72 } 74 }
73 75
74 if (iv) 76 if (iv)
75 memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm)); 77 memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
76 78
77 memcpy(out, in, length); 79 memcpy(out, in, length);
78 sg_set_buf(sg, out, length); 80 sg_set_buf(sg, out, length);
79 81
80 ret = crypto_cipher_encrypt_iv(tfm, sg, sg, length, local_iv); 82 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
81 83
82 dprintk("RPC: krb5_encrypt: output data:\n"); 84 dprintk("RPC: krb5_encrypt: output data:\n");
83 print_hexl((u32 *)out, length, 0); 85 print_hexl((u32 *)out, length, 0);
@@ -90,7 +92,7 @@ EXPORT_SYMBOL(krb5_encrypt);
90 92
91u32 93u32
92krb5_decrypt( 94krb5_decrypt(
93 struct crypto_tfm *tfm, 95 struct crypto_blkcipher *tfm,
94 void * iv, 96 void * iv,
95 void * in, 97 void * in,
96 void * out, 98 void * out,
@@ -99,25 +101,26 @@ krb5_decrypt(
99 u32 ret = -EINVAL; 101 u32 ret = -EINVAL;
100 struct scatterlist sg[1]; 102 struct scatterlist sg[1];
101 u8 local_iv[16] = {0}; 103 u8 local_iv[16] = {0};
104 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
102 105
103 dprintk("RPC: krb5_decrypt: input data:\n"); 106 dprintk("RPC: krb5_decrypt: input data:\n");
104 print_hexl((u32 *)in, length, 0); 107 print_hexl((u32 *)in, length, 0);
105 108
106 if (length % crypto_tfm_alg_blocksize(tfm) != 0) 109 if (length % crypto_blkcipher_blocksize(tfm) != 0)
107 goto out; 110 goto out;
108 111
109 if (crypto_tfm_alg_ivsize(tfm) > 16) { 112 if (crypto_blkcipher_ivsize(tfm) > 16) {
110 dprintk("RPC: gss_k5decrypt: tfm iv size to large %d\n", 113 dprintk("RPC: gss_k5decrypt: tfm iv size to large %d\n",
111 crypto_tfm_alg_ivsize(tfm)); 114 crypto_blkcipher_ivsize(tfm));
112 goto out; 115 goto out;
113 } 116 }
114 if (iv) 117 if (iv)
115 memcpy(local_iv,iv, crypto_tfm_alg_ivsize(tfm)); 118 memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
116 119
117 memcpy(out, in, length); 120 memcpy(out, in, length);
118 sg_set_buf(sg, out, length); 121 sg_set_buf(sg, out, length);
119 122
120 ret = crypto_cipher_decrypt_iv(tfm, sg, sg, length, local_iv); 123 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
121 124
122 dprintk("RPC: krb5_decrypt: output_data:\n"); 125 dprintk("RPC: krb5_decrypt: output_data:\n");
123 print_hexl((u32 *)out, length, 0); 126 print_hexl((u32 *)out, length, 0);
@@ -197,11 +200,9 @@ out:
197static int 200static int
198checksummer(struct scatterlist *sg, void *data) 201checksummer(struct scatterlist *sg, void *data)
199{ 202{
200 struct crypto_tfm *tfm = (struct crypto_tfm *)data; 203 struct hash_desc *desc = data;
201 204
202 crypto_digest_update(tfm, sg, 1); 205 return crypto_hash_update(desc, sg, sg->length);
203
204 return 0;
205} 206}
206 207
207/* checksum the plaintext data and hdrlen bytes of the token header */ 208/* checksum the plaintext data and hdrlen bytes of the token header */
@@ -210,8 +211,9 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
210 int body_offset, struct xdr_netobj *cksum) 211 int body_offset, struct xdr_netobj *cksum)
211{ 212{
212 char *cksumname; 213 char *cksumname;
213 struct crypto_tfm *tfm = NULL; /* XXX add to ctx? */ 214 struct hash_desc desc; /* XXX add to ctx? */
214 struct scatterlist sg[1]; 215 struct scatterlist sg[1];
216 int err;
215 217
216 switch (cksumtype) { 218 switch (cksumtype) {
217 case CKSUMTYPE_RSA_MD5: 219 case CKSUMTYPE_RSA_MD5:
@@ -222,25 +224,35 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
222 " unsupported checksum %d", cksumtype); 224 " unsupported checksum %d", cksumtype);
223 return GSS_S_FAILURE; 225 return GSS_S_FAILURE;
224 } 226 }
225 if (!(tfm = crypto_alloc_tfm(cksumname, CRYPTO_TFM_REQ_MAY_SLEEP))) 227 desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
228 if (IS_ERR(desc.tfm))
226 return GSS_S_FAILURE; 229 return GSS_S_FAILURE;
227 cksum->len = crypto_tfm_alg_digestsize(tfm); 230 cksum->len = crypto_hash_digestsize(desc.tfm);
231 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
228 232
229 crypto_digest_init(tfm); 233 err = crypto_hash_init(&desc);
234 if (err)
235 goto out;
230 sg_set_buf(sg, header, hdrlen); 236 sg_set_buf(sg, header, hdrlen);
231 crypto_digest_update(tfm, sg, 1); 237 err = crypto_hash_update(&desc, sg, hdrlen);
232 process_xdr_buf(body, body_offset, body->len - body_offset, 238 if (err)
233 checksummer, tfm); 239 goto out;
234 crypto_digest_final(tfm, cksum->data); 240 err = process_xdr_buf(body, body_offset, body->len - body_offset,
235 crypto_free_tfm(tfm); 241 checksummer, &desc);
236 return 0; 242 if (err)
243 goto out;
244 err = crypto_hash_final(&desc, cksum->data);
245
246out:
247 crypto_free_hash(desc.tfm);
248 return err ? GSS_S_FAILURE : 0;
237} 249}
238 250
239EXPORT_SYMBOL(make_checksum); 251EXPORT_SYMBOL(make_checksum);
240 252
241struct encryptor_desc { 253struct encryptor_desc {
242 u8 iv[8]; /* XXX hard-coded blocksize */ 254 u8 iv[8]; /* XXX hard-coded blocksize */
243 struct crypto_tfm *tfm; 255 struct blkcipher_desc desc;
244 int pos; 256 int pos;
245 struct xdr_buf *outbuf; 257 struct xdr_buf *outbuf;
246 struct page **pages; 258 struct page **pages;
@@ -285,8 +297,8 @@ encryptor(struct scatterlist *sg, void *data)
285 if (thislen == 0) 297 if (thislen == 0)
286 return 0; 298 return 0;
287 299
288 ret = crypto_cipher_encrypt_iv(desc->tfm, desc->outfrags, desc->infrags, 300 ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
289 thislen, desc->iv); 301 desc->infrags, thislen);
290 if (ret) 302 if (ret)
291 return ret; 303 return ret;
292 if (fraglen) { 304 if (fraglen) {
@@ -305,16 +317,18 @@ encryptor(struct scatterlist *sg, void *data)
305} 317}
306 318
307int 319int
308gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset, 320gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
309 struct page **pages) 321 int offset, struct page **pages)
310{ 322{
311 int ret; 323 int ret;
312 struct encryptor_desc desc; 324 struct encryptor_desc desc;
313 325
314 BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0); 326 BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
315 327
316 memset(desc.iv, 0, sizeof(desc.iv)); 328 memset(desc.iv, 0, sizeof(desc.iv));
317 desc.tfm = tfm; 329 desc.desc.tfm = tfm;
330 desc.desc.info = desc.iv;
331 desc.desc.flags = 0;
318 desc.pos = offset; 332 desc.pos = offset;
319 desc.outbuf = buf; 333 desc.outbuf = buf;
320 desc.pages = pages; 334 desc.pages = pages;
@@ -329,7 +343,7 @@ EXPORT_SYMBOL(gss_encrypt_xdr_buf);
329 343
330struct decryptor_desc { 344struct decryptor_desc {
331 u8 iv[8]; /* XXX hard-coded blocksize */ 345 u8 iv[8]; /* XXX hard-coded blocksize */
332 struct crypto_tfm *tfm; 346 struct blkcipher_desc desc;
333 struct scatterlist frags[4]; 347 struct scatterlist frags[4];
334 int fragno; 348 int fragno;
335 int fraglen; 349 int fraglen;
@@ -355,8 +369,8 @@ decryptor(struct scatterlist *sg, void *data)
355 if (thislen == 0) 369 if (thislen == 0)
356 return 0; 370 return 0;
357 371
358 ret = crypto_cipher_decrypt_iv(desc->tfm, desc->frags, desc->frags, 372 ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
359 thislen, desc->iv); 373 desc->frags, thislen);
360 if (ret) 374 if (ret)
361 return ret; 375 return ret;
362 if (fraglen) { 376 if (fraglen) {
@@ -373,15 +387,18 @@ decryptor(struct scatterlist *sg, void *data)
373} 387}
374 388
375int 389int
376gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset) 390gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
391 int offset)
377{ 392{
378 struct decryptor_desc desc; 393 struct decryptor_desc desc;
379 394
380 /* XXXJBF: */ 395 /* XXXJBF: */
381 BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0); 396 BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
382 397
383 memset(desc.iv, 0, sizeof(desc.iv)); 398 memset(desc.iv, 0, sizeof(desc.iv));
384 desc.tfm = tfm; 399 desc.desc.tfm = tfm;
400 desc.desc.info = desc.iv;
401 desc.desc.flags = 0;
385 desc.fragno = 0; 402 desc.fragno = 0;
386 desc.fraglen = 0; 403 desc.fraglen = 0;
387 return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc); 404 return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc);
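
[Annotation] Throughout gss_krb5_crypto.c the IV moves out of the per-call argument list and into struct blkcipher_desc: .tfm carries the transform, .info points at the caller's IV buffer, and the encrypt/decrypt entry points take only the descriptor, the scatterlists and a length. A minimal sketch of an in-place CBC encrypt in that style, assuming a 2.6.19-era tree (buffer and function names are placeholders):

        #include <linux/crypto.h>
        #include <linux/errno.h>
        #include <linux/scatterlist.h>

        static int example_cbc_encrypt(struct crypto_blkcipher *tfm,
                                       void *buf, unsigned int len)
        {
                u8 iv[16] = { 0 };
                struct blkcipher_desc desc = {
                        .tfm   = tfm,
                        .info  = iv,    /* IV travels in the descriptor now */
                        .flags = 0,
                };
                struct scatterlist sg;

                if (len % crypto_blkcipher_blocksize(tfm))
                        return -EINVAL;
                /* same guard the krb5 code above applies */
                if (crypto_blkcipher_ivsize(tfm) > sizeof(iv))
                        return -EINVAL;

                sg_set_buf(&sg, buf, len);
                /* in place: destination and source scatterlists are the same */
                return crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, len);
        }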
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 70e1e53a632b..325e72e4fd31 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -34,6 +34,7 @@
34 * 34 *
35 */ 35 */
36 36
37#include <linux/err.h>
37#include <linux/module.h> 38#include <linux/module.h>
38#include <linux/init.h> 39#include <linux/init.h>
39#include <linux/types.h> 40#include <linux/types.h>
@@ -78,10 +79,10 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
78} 79}
79 80
80static inline const void * 81static inline const void *
81get_key(const void *p, const void *end, struct crypto_tfm **res) 82get_key(const void *p, const void *end, struct crypto_blkcipher **res)
82{ 83{
83 struct xdr_netobj key; 84 struct xdr_netobj key;
84 int alg, alg_mode; 85 int alg;
85 char *alg_name; 86 char *alg_name;
86 87
87 p = simple_get_bytes(p, end, &alg, sizeof(alg)); 88 p = simple_get_bytes(p, end, &alg, sizeof(alg));
@@ -93,18 +94,19 @@ get_key(const void *p, const void *end, struct crypto_tfm **res)
93 94
94 switch (alg) { 95 switch (alg) {
95 case ENCTYPE_DES_CBC_RAW: 96 case ENCTYPE_DES_CBC_RAW:
96 alg_name = "des"; 97 alg_name = "cbc(des)";
97 alg_mode = CRYPTO_TFM_MODE_CBC;
98 break; 98 break;
99 default: 99 default:
100 printk("gss_kerberos_mech: unsupported algorithm %d\n", alg); 100 printk("gss_kerberos_mech: unsupported algorithm %d\n", alg);
101 goto out_err_free_key; 101 goto out_err_free_key;
102 } 102 }
103 if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) { 103 *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
104 if (IS_ERR(*res)) {
104 printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name); 105 printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name);
106 *res = NULL;
105 goto out_err_free_key; 107 goto out_err_free_key;
106 } 108 }
107 if (crypto_cipher_setkey(*res, key.data, key.len)) { 109 if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
108 printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name); 110 printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name);
109 goto out_err_free_tfm; 111 goto out_err_free_tfm;
110 } 112 }
@@ -113,7 +115,7 @@ get_key(const void *p, const void *end, struct crypto_tfm **res)
113 return p; 115 return p;
114 116
115out_err_free_tfm: 117out_err_free_tfm:
116 crypto_free_tfm(*res); 118 crypto_free_blkcipher(*res);
117out_err_free_key: 119out_err_free_key:
118 kfree(key.data); 120 kfree(key.data);
119 p = ERR_PTR(-EINVAL); 121 p = ERR_PTR(-EINVAL);
@@ -172,9 +174,9 @@ gss_import_sec_context_kerberos(const void *p,
172 return 0; 174 return 0;
173 175
174out_err_free_key2: 176out_err_free_key2:
175 crypto_free_tfm(ctx->seq); 177 crypto_free_blkcipher(ctx->seq);
176out_err_free_key1: 178out_err_free_key1:
177 crypto_free_tfm(ctx->enc); 179 crypto_free_blkcipher(ctx->enc);
178out_err_free_mech: 180out_err_free_mech:
179 kfree(ctx->mech_used.data); 181 kfree(ctx->mech_used.data);
180out_err_free_ctx: 182out_err_free_ctx:
@@ -187,8 +189,8 @@ static void
187gss_delete_sec_context_kerberos(void *internal_ctx) { 189gss_delete_sec_context_kerberos(void *internal_ctx) {
188 struct krb5_ctx *kctx = internal_ctx; 190 struct krb5_ctx *kctx = internal_ctx;
189 191
190 crypto_free_tfm(kctx->seq); 192 crypto_free_blkcipher(kctx->seq);
191 crypto_free_tfm(kctx->enc); 193 crypto_free_blkcipher(kctx->enc);
192 kfree(kctx->mech_used.data); 194 kfree(kctx->mech_used.data);
193 kfree(kctx); 195 kfree(kctx);
194} 196}
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index c53ead39118d..c604baf3a5f6 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -41,7 +41,7 @@
41#endif 41#endif
42 42
43s32 43s32
44krb5_make_seq_num(struct crypto_tfm *key, 44krb5_make_seq_num(struct crypto_blkcipher *key,
45 int direction, 45 int direction,
46 s32 seqnum, 46 s32 seqnum,
47 unsigned char *cksum, unsigned char *buf) 47 unsigned char *cksum, unsigned char *buf)
@@ -62,7 +62,7 @@ krb5_make_seq_num(struct crypto_tfm *key,
62} 62}
63 63
64s32 64s32
65krb5_get_seq_num(struct crypto_tfm *key, 65krb5_get_seq_num(struct crypto_blkcipher *key,
66 unsigned char *cksum, 66 unsigned char *cksum,
67 unsigned char *buf, 67 unsigned char *buf,
68 int *direction, s32 * seqnum) 68 int *direction, s32 * seqnum)
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 89d1f3e14128..f179415d0c38 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -149,7 +149,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
149 goto out_err; 149 goto out_err;
150 } 150 }
151 151
152 blocksize = crypto_tfm_alg_blocksize(kctx->enc); 152 blocksize = crypto_blkcipher_blocksize(kctx->enc);
153 gss_krb5_add_padding(buf, offset, blocksize); 153 gss_krb5_add_padding(buf, offset, blocksize);
154 BUG_ON((buf->len - offset) % blocksize); 154 BUG_ON((buf->len - offset) % blocksize);
155 plainlen = blocksize + buf->len - offset; 155 plainlen = blocksize + buf->len - offset;
@@ -346,7 +346,7 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
346 /* Copy the data back to the right position. XXX: Would probably be 346 /* Copy the data back to the right position. XXX: Would probably be
347 * better to copy and encrypt at the same time. */ 347 * better to copy and encrypt at the same time. */
348 348
349 blocksize = crypto_tfm_alg_blocksize(kctx->enc); 349 blocksize = crypto_blkcipher_blocksize(kctx->enc);
350 data_start = ptr + 22 + blocksize; 350 data_start = ptr + 22 + blocksize;
351 orig_start = buf->head[0].iov_base + offset; 351 orig_start = buf->head[0].iov_base + offset;
352 data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; 352 data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 88dcb52d171b..bdedf456bc17 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -34,6 +34,7 @@
34 * 34 *
35 */ 35 */
36 36
37#include <linux/err.h>
37#include <linux/module.h> 38#include <linux/module.h>
38#include <linux/init.h> 39#include <linux/init.h>
39#include <linux/types.h> 40#include <linux/types.h>
@@ -83,10 +84,11 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
83} 84}
84 85
85static inline const void * 86static inline const void *
86get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg) 87get_key(const void *p, const void *end, struct crypto_blkcipher **res,
88 int *resalg)
87{ 89{
88 struct xdr_netobj key = { 0 }; 90 struct xdr_netobj key = { 0 };
89 int alg_mode,setkey = 0; 91 int setkey = 0;
90 char *alg_name; 92 char *alg_name;
91 93
92 p = simple_get_bytes(p, end, resalg, sizeof(*resalg)); 94 p = simple_get_bytes(p, end, resalg, sizeof(*resalg));
@@ -98,14 +100,12 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
98 100
99 switch (*resalg) { 101 switch (*resalg) {
100 case NID_des_cbc: 102 case NID_des_cbc:
101 alg_name = "des"; 103 alg_name = "cbc(des)";
102 alg_mode = CRYPTO_TFM_MODE_CBC;
103 setkey = 1; 104 setkey = 1;
104 break; 105 break;
105 case NID_cast5_cbc: 106 case NID_cast5_cbc:
106 /* XXXX here in name only, not used */ 107 /* XXXX here in name only, not used */
107 alg_name = "cast5"; 108 alg_name = "cbc(cast5)";
108 alg_mode = CRYPTO_TFM_MODE_CBC;
109 setkey = 0; /* XXX will need to set to 1 */ 109 setkey = 0; /* XXX will need to set to 1 */
110 break; 110 break;
111 case NID_md5: 111 case NID_md5:
@@ -113,19 +113,20 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
113 dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n"); 113 dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n");
114 } 114 }
115 alg_name = "md5"; 115 alg_name = "md5";
116 alg_mode = 0;
117 setkey = 0; 116 setkey = 0;
118 break; 117 break;
119 default: 118 default:
120 dprintk("gss_spkm3_mech: unsupported algorithm %d\n", *resalg); 119 dprintk("gss_spkm3_mech: unsupported algorithm %d\n", *resalg);
121 goto out_err_free_key; 120 goto out_err_free_key;
122 } 121 }
123 if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) { 122 *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
123 if (IS_ERR(*res)) {
124 printk("gss_spkm3_mech: unable to initialize crypto algorthm %s\n", alg_name); 124 printk("gss_spkm3_mech: unable to initialize crypto algorthm %s\n", alg_name);
125 *res = NULL;
125 goto out_err_free_key; 126 goto out_err_free_key;
126 } 127 }
127 if (setkey) { 128 if (setkey) {
128 if (crypto_cipher_setkey(*res, key.data, key.len)) { 129 if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
129 printk("gss_spkm3_mech: error setting key for crypto algorthm %s\n", alg_name); 130 printk("gss_spkm3_mech: error setting key for crypto algorthm %s\n", alg_name);
130 goto out_err_free_tfm; 131 goto out_err_free_tfm;
131 } 132 }
@@ -136,7 +137,7 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
136 return p; 137 return p;
137 138
138out_err_free_tfm: 139out_err_free_tfm:
139 crypto_free_tfm(*res); 140 crypto_free_blkcipher(*res);
140out_err_free_key: 141out_err_free_key:
141 if(key.len > 0) 142 if(key.len > 0)
142 kfree(key.data); 143 kfree(key.data);
@@ -204,9 +205,9 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
204 return 0; 205 return 0;
205 206
206out_err_free_key2: 207out_err_free_key2:
207 crypto_free_tfm(ctx->derived_integ_key); 208 crypto_free_blkcipher(ctx->derived_integ_key);
208out_err_free_key1: 209out_err_free_key1:
209 crypto_free_tfm(ctx->derived_conf_key); 210 crypto_free_blkcipher(ctx->derived_conf_key);
210out_err_free_s_key: 211out_err_free_s_key:
211 kfree(ctx->share_key.data); 212 kfree(ctx->share_key.data);
212out_err_free_mech: 213out_err_free_mech:
@@ -223,8 +224,8 @@ static void
223gss_delete_sec_context_spkm3(void *internal_ctx) { 224gss_delete_sec_context_spkm3(void *internal_ctx) {
224 struct spkm3_ctx *sctx = internal_ctx; 225 struct spkm3_ctx *sctx = internal_ctx;
225 226
226 crypto_free_tfm(sctx->derived_integ_key); 227 crypto_free_blkcipher(sctx->derived_integ_key);
227 crypto_free_tfm(sctx->derived_conf_key); 228 crypto_free_blkcipher(sctx->derived_conf_key);
228 kfree(sctx->share_key.data); 229 kfree(sctx->share_key.data);
229 kfree(sctx->mech_used.data); 230 kfree(sctx->mech_used.data);
230 kfree(sctx); 231 kfree(sctx);
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 04e1aea58bc9..5a0dbeb6bbe8 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -30,7 +30,8 @@
30 */ 30 */
31static struct xfrm_algo_desc aalg_list[] = { 31static struct xfrm_algo_desc aalg_list[] = {
32{ 32{
33 .name = "digest_null", 33 .name = "hmac(digest_null)",
34 .compat = "digest_null",
34 35
35 .uinfo = { 36 .uinfo = {
36 .auth = { 37 .auth = {
@@ -47,7 +48,8 @@ static struct xfrm_algo_desc aalg_list[] = {
47 } 48 }
48}, 49},
49{ 50{
50 .name = "md5", 51 .name = "hmac(md5)",
52 .compat = "md5",
51 53
52 .uinfo = { 54 .uinfo = {
53 .auth = { 55 .auth = {
@@ -64,7 +66,8 @@ static struct xfrm_algo_desc aalg_list[] = {
64 } 66 }
65}, 67},
66{ 68{
67 .name = "sha1", 69 .name = "hmac(sha1)",
70 .compat = "sha1",
68 71
69 .uinfo = { 72 .uinfo = {
70 .auth = { 73 .auth = {
@@ -81,7 +84,8 @@ static struct xfrm_algo_desc aalg_list[] = {
81 } 84 }
82}, 85},
83{ 86{
84 .name = "sha256", 87 .name = "hmac(sha256)",
88 .compat = "sha256",
85 89
86 .uinfo = { 90 .uinfo = {
87 .auth = { 91 .auth = {
@@ -98,7 +102,8 @@ static struct xfrm_algo_desc aalg_list[] = {
98 } 102 }
99}, 103},
100{ 104{
101 .name = "ripemd160", 105 .name = "hmac(ripemd160)",
106 .compat = "ripemd160",
102 107
103 .uinfo = { 108 .uinfo = {
104 .auth = { 109 .auth = {
@@ -118,7 +123,8 @@ static struct xfrm_algo_desc aalg_list[] = {
118 123
119static struct xfrm_algo_desc ealg_list[] = { 124static struct xfrm_algo_desc ealg_list[] = {
120{ 125{
121 .name = "cipher_null", 126 .name = "ecb(cipher_null)",
127 .compat = "cipher_null",
122 128
123 .uinfo = { 129 .uinfo = {
124 .encr = { 130 .encr = {
@@ -135,7 +141,8 @@ static struct xfrm_algo_desc ealg_list[] = {
135 } 141 }
136}, 142},
137{ 143{
138 .name = "des", 144 .name = "cbc(des)",
145 .compat = "des",
139 146
140 .uinfo = { 147 .uinfo = {
141 .encr = { 148 .encr = {
@@ -152,7 +159,8 @@ static struct xfrm_algo_desc ealg_list[] = {
152 } 159 }
153}, 160},
154{ 161{
155 .name = "des3_ede", 162 .name = "cbc(des3_ede)",
163 .compat = "des3_ede",
156 164
157 .uinfo = { 165 .uinfo = {
158 .encr = { 166 .encr = {
@@ -169,7 +177,8 @@ static struct xfrm_algo_desc ealg_list[] = {
169 } 177 }
170}, 178},
171{ 179{
172 .name = "cast128", 180 .name = "cbc(cast128)",
181 .compat = "cast128",
173 182
174 .uinfo = { 183 .uinfo = {
175 .encr = { 184 .encr = {
@@ -186,7 +195,8 @@ static struct xfrm_algo_desc ealg_list[] = {
186 } 195 }
187}, 196},
188{ 197{
189 .name = "blowfish", 198 .name = "cbc(blowfish)",
199 .compat = "blowfish",
190 200
191 .uinfo = { 201 .uinfo = {
192 .encr = { 202 .encr = {
@@ -203,7 +213,8 @@ static struct xfrm_algo_desc ealg_list[] = {
203 } 213 }
204}, 214},
205{ 215{
206 .name = "aes", 216 .name = "cbc(aes)",
217 .compat = "aes",
207 218
208 .uinfo = { 219 .uinfo = {
209 .encr = { 220 .encr = {
@@ -220,7 +231,8 @@ static struct xfrm_algo_desc ealg_list[] = {
220 } 231 }
221}, 232},
222{ 233{
223 .name = "serpent", 234 .name = "cbc(serpent)",
235 .compat = "serpent",
224 236
225 .uinfo = { 237 .uinfo = {
226 .encr = { 238 .encr = {
@@ -237,7 +249,8 @@ static struct xfrm_algo_desc ealg_list[] = {
237 } 249 }
238}, 250},
239{ 251{
240 .name = "twofish", 252 .name = "cbc(twofish)",
253 .compat = "twofish",
241 254
242 .uinfo = { 255 .uinfo = {
243 .encr = { 256 .encr = {
@@ -350,8 +363,8 @@ struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
350EXPORT_SYMBOL_GPL(xfrm_calg_get_byid); 363EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
351 364
352static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list, 365static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
353 int entries, char *name, 366 int entries, u32 type, u32 mask,
354 int probe) 367 char *name, int probe)
355{ 368{
356 int i, status; 369 int i, status;
357 370
@@ -359,7 +372,8 @@ static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
359 return NULL; 372 return NULL;
360 373
361 for (i = 0; i < entries; i++) { 374 for (i = 0; i < entries; i++) {
362 if (strcmp(name, list[i].name)) 375 if (strcmp(name, list[i].name) &&
376 (!list[i].compat || strcmp(name, list[i].compat)))
363 continue; 377 continue;
364 378
365 if (list[i].available) 379 if (list[i].available)
@@ -368,7 +382,7 @@ static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
368 if (!probe) 382 if (!probe)
369 break; 383 break;
370 384
371 status = crypto_alg_available(name, 0); 385 status = crypto_has_alg(name, type, mask | CRYPTO_ALG_ASYNC);
372 if (!status) 386 if (!status)
373 break; 387 break;
374 388
@@ -380,19 +394,25 @@ static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
380 394
381struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe) 395struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
382{ 396{
383 return xfrm_get_byname(aalg_list, aalg_entries(), name, probe); 397 return xfrm_get_byname(aalg_list, aalg_entries(),
398 CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_HASH_MASK,
399 name, probe);
384} 400}
385EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname); 401EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
386 402
387struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe) 403struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
388{ 404{
389 return xfrm_get_byname(ealg_list, ealg_entries(), name, probe); 405 return xfrm_get_byname(ealg_list, ealg_entries(),
406 CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK,
407 name, probe);
390} 408}
391EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname); 409EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
392 410
393struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe) 411struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
394{ 412{
395 return xfrm_get_byname(calg_list, calg_entries(), name, probe); 413 return xfrm_get_byname(calg_list, calg_entries(),
414 CRYPTO_ALG_TYPE_COMPRESS, CRYPTO_ALG_TYPE_MASK,
415 name, probe);
396} 416}
397EXPORT_SYMBOL_GPL(xfrm_calg_get_byname); 417EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
398 418
@@ -427,19 +447,22 @@ void xfrm_probe_algs(void)
427 BUG_ON(in_softirq()); 447 BUG_ON(in_softirq());
428 448
429 for (i = 0; i < aalg_entries(); i++) { 449 for (i = 0; i < aalg_entries(); i++) {
430 status = crypto_alg_available(aalg_list[i].name, 0); 450 status = crypto_has_hash(aalg_list[i].name, 0,
451 CRYPTO_ALG_ASYNC);
431 if (aalg_list[i].available != status) 452 if (aalg_list[i].available != status)
432 aalg_list[i].available = status; 453 aalg_list[i].available = status;
433 } 454 }
434 455
435 for (i = 0; i < ealg_entries(); i++) { 456 for (i = 0; i < ealg_entries(); i++) {
436 status = crypto_alg_available(ealg_list[i].name, 0); 457 status = crypto_has_blkcipher(ealg_list[i].name, 0,
458 CRYPTO_ALG_ASYNC);
437 if (ealg_list[i].available != status) 459 if (ealg_list[i].available != status)
438 ealg_list[i].available = status; 460 ealg_list[i].available = status;
439 } 461 }
440 462
441 for (i = 0; i < calg_entries(); i++) { 463 for (i = 0; i < calg_entries(); i++) {
442 status = crypto_alg_available(calg_list[i].name, 0); 464 status = crypto_has_comp(calg_list[i].name, 0,
465 CRYPTO_ALG_ASYNC);
443 if (calg_list[i].available != status) 466 if (calg_list[i].available != status)
444 calg_list[i].available = status; 467 calg_list[i].available = status;
445 } 468 }
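
[Annotation] xfrm_probe_algs() now probes with the type-specific crypto_has_*() helpers instead of crypto_alg_available(), again with CRYPTO_ALG_ASYNC in the mask to accept only synchronous implementations. A small sketch of those probes, assuming a 2.6.19-era tree ("hmac(sha1)" and "cbc(aes)" come from the tables above; "deflate" is assumed as the usual IPComp algorithm, and the wrapper is illustrative):

        #include <linux/crypto.h>
        #include <linux/kernel.h>

        static void example_probe_algs(void)
        {
                /* template instantiations are looked up by their full names */
                int hash = crypto_has_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
                int ciph = crypto_has_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
                int comp = crypto_has_comp("deflate", 0, CRYPTO_ALG_ASYNC);

                printk(KERN_DEBUG "avail: hmac(sha1)=%d cbc(aes)=%d deflate=%d\n",
                       hash, ciph, comp);
        }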
@@ -471,11 +494,12 @@ EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
471 494
472/* Move to common area: it is shared with AH. */ 495/* Move to common area: it is shared with AH. */
473 496
474void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm, 497int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
475 int offset, int len, icv_update_fn_t icv_update) 498 int offset, int len, icv_update_fn_t icv_update)
476{ 499{
477 int start = skb_headlen(skb); 500 int start = skb_headlen(skb);
478 int i, copy = start - offset; 501 int i, copy = start - offset;
502 int err;
479 struct scatterlist sg; 503 struct scatterlist sg;
480 504
481 /* Checksum header. */ 505 /* Checksum header. */
@@ -487,10 +511,12 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
487 sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; 511 sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
488 sg.length = copy; 512 sg.length = copy;
489 513
490 icv_update(tfm, &sg, 1); 514 err = icv_update(desc, &sg, copy);
515 if (unlikely(err))
516 return err;
491 517
492 if ((len -= copy) == 0) 518 if ((len -= copy) == 0)
493 return; 519 return 0;
494 offset += copy; 520 offset += copy;
495 } 521 }
496 522
@@ -510,10 +536,12 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
510 sg.offset = frag->page_offset + offset-start; 536 sg.offset = frag->page_offset + offset-start;
511 sg.length = copy; 537 sg.length = copy;
512 538
513 icv_update(tfm, &sg, 1); 539 err = icv_update(desc, &sg, copy);
540 if (unlikely(err))
541 return err;
514 542
515 if (!(len -= copy)) 543 if (!(len -= copy))
516 return; 544 return 0;
517 offset += copy; 545 offset += copy;
518 } 546 }
519 start = end; 547 start = end;
@@ -531,15 +559,19 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
531 if ((copy = end - offset) > 0) { 559 if ((copy = end - offset) > 0) {
532 if (copy > len) 560 if (copy > len)
533 copy = len; 561 copy = len;
534 skb_icv_walk(list, tfm, offset-start, copy, icv_update); 562 err = skb_icv_walk(list, desc, offset-start,
563 copy, icv_update);
564 if (unlikely(err))
565 return err;
535 if ((len -= copy) == 0) 566 if ((len -= copy) == 0)
536 return; 567 return 0;
537 offset += copy; 568 offset += copy;
538 } 569 }
539 start = end; 570 start = end;
540 } 571 }
541 } 572 }
542 BUG_ON(len); 573 BUG_ON(len);
574 return 0;
543} 575}
544EXPORT_SYMBOL_GPL(skb_icv_walk); 576EXPORT_SYMBOL_GPL(skb_icv_walk);
545 577
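
[Annotation] skb_icv_walk() now returns int and aborts the walk as soon as the supplied icv_update callback fails, instead of silently ignoring errors; the callback takes a struct hash_desc and a byte count rather than a bare tfm and fragment count. Under that signature, inferred from the call sites above, the usual callback is just a thin wrapper over crypto_hash_update(), as in this sketch (the name is illustrative):

        #include <linux/crypto.h>
        #include <linux/scatterlist.h>

        /* matches the shape skb_icv_walk() expects of icv_update above */
        static int example_icv_update(struct hash_desc *desc,
                                      struct scatterlist *sg, unsigned int len)
        {
                return crypto_hash_update(desc, sg, len);
        }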
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 3e6a722d072e..fa79ddc4239e 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -10,6 +10,7 @@
10 * 10 *
11 */ 11 */
12 12
13#include <linux/crypto.h>
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/types.h> 16#include <linux/types.h>
@@ -212,6 +213,7 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
212 return -ENOMEM; 213 return -ENOMEM;
213 214
214 memcpy(p, ualg, len); 215 memcpy(p, ualg, len);
216 strcpy(p->alg_name, algo->name);
215 *algpp = p; 217 *algpp = p;
216 return 0; 218 return 0;
217} 219}
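
[Annotation] The one-line xfrm_user.c change closes the loop on the renaming: userspace may still hand in a legacy name such as "md5", the lookup matches it through the .compat field added above, and attach_one_algo() writes the canonical template name back into the state so every later allocation sees "hmac(md5)". A sketch of that normalization for the auth case, assuming this tree's struct xfrm_algo and xfrm_aalg_get_byname() (the wrapper itself is illustrative):

        #include <linux/errno.h>
        #include <linux/string.h>
        #include <net/xfrm.h>

        static int example_canonicalize(struct xfrm_algo *ualg)
        {
                /* matches either "hmac(md5)" or the legacy "md5" spelling;
                 * probe=1 allows a module load attempt */
                struct xfrm_algo_desc *algo = xfrm_aalg_get_byname(ualg->alg_name, 1);

                if (!algo)
                        return -ENOSYS;

                /* write the canonical template name back, e.g. "hmac(md5)" */
                strcpy(ualg->alg_name, algo->name);
                return 0;
        }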