author	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-25 19:43:35 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-25 19:43:35 -0400
commit	62606c224d72a98c35d21a849f95cccf95b0a252 (patch)
tree	6f6f3466451edf9baa2ea8b5f9fc558aa555c69a /net
parent	24ed334f33666f2ae929ccc08f72e7e72e353c64 (diff)
parent	a1c6fd4308d37f072e939a2782f24214115fc7e8 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Remove VLA usage
   - Add cryptostat user-space interface
   - Add notifier for new crypto algorithms

  Algorithms:
   - Add OFB mode
   - Remove speck

  Drivers:
   - Remove x86/sha*-mb as they are buggy
   - Remove pcbc(aes) from x86/aesni
   - Improve performance of arm/ghash-ce by up to 85%
   - Implement CTS-CBC in arm64/aes-blk, faster by up to 50%
   - Remove PMULL based arm64/crc32 driver
   - Use PMULL in arm64/crct10dif
   - Add aes-ctr support in s5p-sss
   - Add caam/qi2 driver

  Others:
   - Pick better transform if one becomes available in crc-t10dif"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (124 commits)
  crypto: chelsio - Update ntx queue received from cxgb4
  crypto: ccree - avoid implicit enum conversion
  crypto: caam - add SPDX license identifier to all files
  crypto: caam/qi - simplify CGR allocation, freeing
  crypto: mxs-dcp - make symbols 'sha1_null_hash' and 'sha256_null_hash' static
  crypto: arm64/aes-blk - ensure XTS mask is always loaded
  crypto: testmgr - fix sizeof() on COMP_BUF_SIZE
  crypto: chtls - remove set but not used variable 'csk'
  crypto: axis - fix platform_no_drv_owner.cocci warnings
  crypto: x86/aes-ni - fix build error following fpu template removal
  crypto: arm64/aes - fix handling sub-block CTS-CBC inputs
  crypto: caam/qi2 - avoid double export
  crypto: mxs-dcp - Fix AES issues
  crypto: mxs-dcp - Fix SHA null hashes and output length
  crypto: mxs-dcp - Implement sha import/export
  crypto: aegis/generic - fix for big endian systems
  crypto: morus/generic - fix for big endian systems
  crypto: lrw - fix rebase error after out of bounds fix
  crypto: cavium/nitrox - use pci_alloc_irq_vectors() while enabling MSI-X.
  crypto: cavium/nitrox - NITROX command queue changes.
  ...
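Every net/ hunk below applies the same mechanical conversion from the "Remove VLA usage" work: synchronous-only users move from crypto_skcipher (allocated with CRYPTO_ALG_ASYNC masked out) to the crypto_sync_skcipher wrapper, whose bounded request size lets the fixed-size SYNC_SKCIPHER_REQUEST_ON_STACK replace the VLA-based SKCIPHER_REQUEST_ON_STACK. A minimal sketch of the resulting calling pattern, distilled from the hunks below; the function and its parameters are illustrative, not part of the patch:

/* Sketch only: post-conversion sync skcipher usage, error paths abbreviated. */
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_encrypt_in_place(const u8 *key, unsigned int keylen,
				    u8 *buf, unsigned int len, u8 *iv)
{
	struct crypto_sync_skcipher *tfm;
	struct scatterlist sg;
	int err;

	/* was: crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC) */
	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* was: crypto_skcipher_setkey() */
	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	{
		/* fixed-size stack request: no VLA, since a sync tfm
		 * guarantees a bounded request context size */
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		sg_init_one(&sg, buf, len);
		/* was: skcipher_request_set_tfm() */
		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		err = crypto_skcipher_encrypt(req);
	}
out:
	crypto_free_sync_skcipher(tfm);	/* was: crypto_free_skcipher() */
	return err;
}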
Diffstat (limited to 'net')
-rw-r--r--	net/ceph/crypto.c	12
-rw-r--r--	net/ceph/crypto.h	2
-rw-r--r--	net/mac802154/llsec.c	16
-rw-r--r--	net/mac802154/llsec.h	2
-rw-r--r--	net/rxrpc/ar-internal.h	2
-rw-r--r--	net/rxrpc/rxkad.c	44
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_crypto.c	87
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_keys.c	9
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_mech.c	53
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_seqnum.c	18
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_wrap.c	20
11 files changed, 132 insertions(+), 133 deletions(-)
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 02172c408ff2..5d6724cee38f 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -46,9 +46,9 @@ static int set_secret(struct ceph_crypto_key *key, void *buf)
 		goto fail;
 	}
 
-	/* crypto_alloc_skcipher() allocates with GFP_KERNEL */
+	/* crypto_alloc_sync_skcipher() allocates with GFP_KERNEL */
 	noio_flag = memalloc_noio_save();
-	key->tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+	key->tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
 	memalloc_noio_restore(noio_flag);
 	if (IS_ERR(key->tfm)) {
 		ret = PTR_ERR(key->tfm);
@@ -56,7 +56,7 @@ static int set_secret(struct ceph_crypto_key *key, void *buf)
 		goto fail;
 	}
 
-	ret = crypto_skcipher_setkey(key->tfm, key->key, key->len);
+	ret = crypto_sync_skcipher_setkey(key->tfm, key->key, key->len);
 	if (ret)
 		goto fail;
 
@@ -136,7 +136,7 @@ void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
 	if (key) {
 		kfree(key->key);
 		key->key = NULL;
-		crypto_free_skcipher(key->tfm);
+		crypto_free_sync_skcipher(key->tfm);
 		key->tfm = NULL;
 	}
 }
@@ -216,7 +216,7 @@ static void teardown_sgtable(struct sg_table *sgt)
 static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
 			  void *buf, int buf_len, int in_len, int *pout_len)
 {
-	SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
 	struct sg_table sgt;
 	struct scatterlist prealloc_sg;
 	char iv[AES_BLOCK_SIZE] __aligned(8);
@@ -232,7 +232,7 @@ static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
 		return ret;
 
 	memcpy(iv, aes_iv, AES_BLOCK_SIZE);
-	skcipher_request_set_tfm(req, key->tfm);
+	skcipher_request_set_sync_tfm(req, key->tfm);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);
 
diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
index bb45c7d43739..96ef4d860bc9 100644
--- a/net/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -13,7 +13,7 @@ struct ceph_crypto_key {
 	struct ceph_timespec created;
 	int len;
 	void *key;
-	struct crypto_skcipher *tfm;
+	struct crypto_sync_skcipher *tfm;
 };
 
 int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index 2fb703d70803..7e29f88dbf6a 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -146,18 +146,18 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template)
 		goto err_tfm;
 	}
 
-	key->tfm0 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
+	key->tfm0 = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
 	if (IS_ERR(key->tfm0))
 		goto err_tfm;
 
-	if (crypto_skcipher_setkey(key->tfm0, template->key,
-				   IEEE802154_LLSEC_KEY_SIZE))
+	if (crypto_sync_skcipher_setkey(key->tfm0, template->key,
+					IEEE802154_LLSEC_KEY_SIZE))
 		goto err_tfm0;
 
 	return key;
 
 err_tfm0:
-	crypto_free_skcipher(key->tfm0);
+	crypto_free_sync_skcipher(key->tfm0);
 err_tfm:
 	for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
 		if (key->tfm[i])
@@ -177,7 +177,7 @@ static void llsec_key_release(struct kref *ref)
 	for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
 		crypto_free_aead(key->tfm[i]);
 
-	crypto_free_skcipher(key->tfm0);
+	crypto_free_sync_skcipher(key->tfm0);
 	kzfree(key);
 }
 
@@ -622,7 +622,7 @@ llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
 {
 	u8 iv[16];
 	struct scatterlist src;
-	SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
 	int err, datalen;
 	unsigned char *data;
 
@@ -632,7 +632,7 @@ llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
 	datalen = skb_tail_pointer(skb) - data;
 	sg_init_one(&src, data, datalen);
 
-	skcipher_request_set_tfm(req, key->tfm0);
+	skcipher_request_set_sync_tfm(req, key->tfm0);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, &src, &src, datalen, iv);
 	err = crypto_skcipher_encrypt(req);
@@ -840,7 +840,7 @@ llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
 	unsigned char *data;
 	int datalen;
 	struct scatterlist src;
-	SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
 	int err;
 
 	llsec_geniv(iv, dev_addr, &hdr->sec);
@@ -849,7 +849,7 @@ llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
 
 	sg_init_one(&src, data, datalen);
 
-	skcipher_request_set_tfm(req, key->tfm0);
+	skcipher_request_set_sync_tfm(req, key->tfm0);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, &src, &src, datalen, iv);
 
diff --git a/net/mac802154/llsec.h b/net/mac802154/llsec.h
index 6f3b658e3279..8be46d74dc39 100644
--- a/net/mac802154/llsec.h
+++ b/net/mac802154/llsec.h
@@ -29,7 +29,7 @@ struct mac802154_llsec_key {
 
 	/* one tfm for each authsize (4/8/16) */
 	struct crypto_aead *tfm[3];
-	struct crypto_skcipher *tfm0;
+	struct crypto_sync_skcipher *tfm0;
 
 	struct kref ref;
 };
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 0a7c49e8e053..382196e57a26 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -435,7 +435,7 @@ struct rxrpc_connection {
 	struct sk_buff_head	rx_queue;	/* received conn-level packets */
 	const struct rxrpc_security *security;	/* applied security module */
 	struct key		*server_key;	/* security for this service */
-	struct crypto_skcipher	*cipher;	/* encryption handle */
+	struct crypto_sync_skcipher *cipher;	/* encryption handle */
 	struct rxrpc_crypt	csum_iv;	/* packet checksum base */
 	unsigned long		flags;
 	unsigned long		events;
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index cea16838d588..cbef9ea43dec 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -46,7 +46,7 @@ struct rxkad_level2_hdr {
  * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE
  * packets
  */
-static struct crypto_skcipher *rxkad_ci;
+static struct crypto_sync_skcipher *rxkad_ci;
 static DEFINE_MUTEX(rxkad_ci_mutex);
 
 /*
@@ -54,7 +54,7 @@ static DEFINE_MUTEX(rxkad_ci_mutex);
  */
 static int rxkad_init_connection_security(struct rxrpc_connection *conn)
 {
-	struct crypto_skcipher *ci;
+	struct crypto_sync_skcipher *ci;
 	struct rxrpc_key_token *token;
 	int ret;
 
@@ -63,14 +63,14 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn)
 	token = conn->params.key->payload.data[0];
 	conn->security_ix = token->security_index;
 
-	ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+	ci = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0);
 	if (IS_ERR(ci)) {
 		_debug("no cipher");
 		ret = PTR_ERR(ci);
 		goto error;
 	}
 
-	if (crypto_skcipher_setkey(ci, token->kad->session_key,
-				   sizeof(token->kad->session_key)) < 0)
+	if (crypto_sync_skcipher_setkey(ci, token->kad->session_key,
+					sizeof(token->kad->session_key)) < 0)
 		BUG();
 
@@ -104,7 +104,7 @@ error:
 static int rxkad_prime_packet_security(struct rxrpc_connection *conn)
 {
 	struct rxrpc_key_token *token;
-	SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
 	struct scatterlist sg;
 	struct rxrpc_crypt iv;
 	__be32 *tmpbuf;
@@ -128,7 +128,7 @@ static int rxkad_prime_packet_security(struct rxrpc_connection *conn)
 	tmpbuf[3] = htonl(conn->security_ix);
 
 	sg_init_one(&sg, tmpbuf, tmpsize);
-	skcipher_request_set_tfm(req, conn->cipher);
+	skcipher_request_set_sync_tfm(req, conn->cipher);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, &sg, &sg, tmpsize, iv.x);
 	crypto_skcipher_encrypt(req);
@@ -167,7 +167,7 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
 	memset(&iv, 0, sizeof(iv));
 
 	sg_init_one(&sg, sechdr, 8);
-	skcipher_request_set_tfm(req, call->conn->cipher);
+	skcipher_request_set_sync_tfm(req, call->conn->cipher);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
 	crypto_skcipher_encrypt(req);
@@ -212,7 +212,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
 	memcpy(&iv, token->kad->session_key, sizeof(iv));
 
 	sg_init_one(&sg[0], sechdr, sizeof(rxkhdr));
-	skcipher_request_set_tfm(req, call->conn->cipher);
+	skcipher_request_set_sync_tfm(req, call->conn->cipher);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, &sg[0], &sg[0], sizeof(rxkhdr), iv.x);
 	crypto_skcipher_encrypt(req);
@@ -250,7 +250,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
 			       void *sechdr)
 {
 	struct rxrpc_skb_priv *sp;
-	SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
 	struct rxrpc_crypt iv;
 	struct scatterlist sg;
 	u32 x, y;
@@ -279,7 +279,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
 	call->crypto_buf[1] = htonl(x);
 
 	sg_init_one(&sg, call->crypto_buf, 8);
-	skcipher_request_set_tfm(req, call->conn->cipher);
+	skcipher_request_set_sync_tfm(req, call->conn->cipher);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
 	crypto_skcipher_encrypt(req);
@@ -352,7 +352,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
 	/* start the decryption afresh */
 	memset(&iv, 0, sizeof(iv));
 
-	skcipher_request_set_tfm(req, call->conn->cipher);
+	skcipher_request_set_sync_tfm(req, call->conn->cipher);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, sg, sg, 8, iv.x);
 	crypto_skcipher_decrypt(req);
@@ -450,7 +450,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
 	token = call->conn->params.key->payload.data[0];
 	memcpy(&iv, token->kad->session_key, sizeof(iv));
 
-	skcipher_request_set_tfm(req, call->conn->cipher);
+	skcipher_request_set_sync_tfm(req, call->conn->cipher);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, sg, sg, len, iv.x);
 	crypto_skcipher_decrypt(req);
@@ -506,7 +506,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
 			       unsigned int offset, unsigned int len,
 			       rxrpc_seq_t seq, u16 expected_cksum)
 {
-	SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
 	struct rxrpc_crypt iv;
 	struct scatterlist sg;
 	bool aborted;
@@ -529,7 +529,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
 	call->crypto_buf[1] = htonl(x);
 
 	sg_init_one(&sg, call->crypto_buf, 8);
-	skcipher_request_set_tfm(req, call->conn->cipher);
+	skcipher_request_set_sync_tfm(req, call->conn->cipher);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
 	crypto_skcipher_encrypt(req);
@@ -755,7 +755,7 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn,
 				   struct rxkad_response *resp,
 				   const struct rxkad_key *s2)
 {
-	SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
 	struct rxrpc_crypt iv;
 	struct scatterlist sg[1];
 
@@ -764,7 +764,7 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn,
 
 	sg_init_table(sg, 1);
 	sg_set_buf(sg, &resp->encrypted, sizeof(resp->encrypted));
-	skcipher_request_set_tfm(req, conn->cipher);
+	skcipher_request_set_sync_tfm(req, conn->cipher);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x);
 	crypto_skcipher_encrypt(req);
@@ -1021,7 +1021,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
 				   struct rxkad_response *resp,
 				   const struct rxrpc_crypt *session_key)
 {
-	SKCIPHER_REQUEST_ON_STACK(req, rxkad_ci);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, rxkad_ci);
 	struct scatterlist sg[1];
 	struct rxrpc_crypt iv;
 
@@ -1031,7 +1031,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
 	ASSERT(rxkad_ci != NULL);
 
 	mutex_lock(&rxkad_ci_mutex);
-	if (crypto_skcipher_setkey(rxkad_ci, session_key->x,
-				   sizeof(*session_key)) < 0)
+	if (crypto_sync_skcipher_setkey(rxkad_ci, session_key->x,
+					sizeof(*session_key)) < 0)
 		BUG();
 
@@ -1039,7 +1039,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
 
 	sg_init_table(sg, 1);
 	sg_set_buf(sg, &resp->encrypted, sizeof(resp->encrypted));
-	skcipher_request_set_tfm(req, rxkad_ci);
+	skcipher_request_set_sync_tfm(req, rxkad_ci);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x);
 	crypto_skcipher_decrypt(req);
@@ -1218,7 +1218,7 @@ static void rxkad_clear(struct rxrpc_connection *conn)
 	_enter("");
 
 	if (conn->cipher)
-		crypto_free_skcipher(conn->cipher);
+		crypto_free_sync_skcipher(conn->cipher);
 }
 
 /*
@@ -1228,7 +1228,7 @@ static int rxkad_init(void)
 {
 	/* pin the cipher we need so that the crypto layer doesn't invoke
 	 * keventd to go get it */
-	rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+	rxkad_ci = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0);
 	return PTR_ERR_OR_ZERO(rxkad_ci);
 }
 
@@ -1238,7 +1238,7 @@ static int rxkad_init(void)
 static void rxkad_exit(void)
 {
 	if (rxkad_ci)
-		crypto_free_skcipher(rxkad_ci);
+		crypto_free_sync_skcipher(rxkad_ci);
 }
 
 /*
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 0220e1ca5280..4f43383971ba 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -53,7 +53,7 @@
 
 u32
 krb5_encrypt(
-	struct crypto_skcipher *tfm,
+	struct crypto_sync_skcipher *tfm,
 	void * iv,
 	void * in,
 	void * out,
@@ -62,24 +62,24 @@ krb5_encrypt(
 	u32 ret = -EINVAL;
 	struct scatterlist sg[1];
 	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
-	if (length % crypto_skcipher_blocksize(tfm) != 0)
+	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
 		goto out;
 
-	if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
+	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
 		dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
-			crypto_skcipher_ivsize(tfm));
+			crypto_sync_skcipher_ivsize(tfm));
 		goto out;
 	}
 
 	if (iv)
-		memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));
+		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));
 
 	memcpy(out, in, length);
 	sg_init_one(sg, out, length);
 
-	skcipher_request_set_tfm(req, tfm);
+	skcipher_request_set_sync_tfm(req, tfm);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, sg, sg, length, local_iv);
 
@@ -92,7 +92,7 @@ out:
 
 u32
 krb5_decrypt(
-	struct crypto_skcipher *tfm,
+	struct crypto_sync_skcipher *tfm,
 	void * iv,
 	void * in,
 	void * out,
@@ -101,23 +101,23 @@ krb5_decrypt(
 	u32 ret = -EINVAL;
 	struct scatterlist sg[1];
 	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
-	if (length % crypto_skcipher_blocksize(tfm) != 0)
+	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
 		goto out;
 
-	if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
+	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
 		dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
-			crypto_skcipher_ivsize(tfm));
+			crypto_sync_skcipher_ivsize(tfm));
 		goto out;
 	}
 	if (iv)
-		memcpy(local_iv,iv, crypto_skcipher_ivsize(tfm));
+		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));
 
 	memcpy(out, in, length);
 	sg_init_one(sg, out, length);
 
-	skcipher_request_set_tfm(req, tfm);
+	skcipher_request_set_sync_tfm(req, tfm);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, sg, sg, length, local_iv);
 
@@ -466,7 +466,8 @@ encryptor(struct scatterlist *sg, void *data)
 {
 	struct encryptor_desc *desc = data;
 	struct xdr_buf *outbuf = desc->outbuf;
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
+	struct crypto_sync_skcipher *tfm =
+		crypto_sync_skcipher_reqtfm(desc->req);
 	struct page *in_page;
 	int thislen = desc->fraglen + sg->length;
 	int fraglen, ret;
@@ -492,7 +493,7 @@ encryptor(struct scatterlist *sg, void *data)
 	desc->fraglen += sg->length;
 	desc->pos += sg->length;
 
-	fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
+	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
 	thislen -= fraglen;
 
 	if (thislen == 0)
@@ -526,16 +527,16 @@ encryptor(struct scatterlist *sg, void *data)
 }
 
 int
-gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
+gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
 		    int offset, struct page **pages)
 {
 	int ret;
 	struct encryptor_desc desc;
-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
-	BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);
+	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);
 
-	skcipher_request_set_tfm(req, tfm);
+	skcipher_request_set_sync_tfm(req, tfm);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 
 	memset(desc.iv, 0, sizeof(desc.iv));
@@ -567,7 +568,8 @@ decryptor(struct scatterlist *sg, void *data)
 {
 	struct decryptor_desc *desc = data;
 	int thislen = desc->fraglen + sg->length;
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
+	struct crypto_sync_skcipher *tfm =
+		crypto_sync_skcipher_reqtfm(desc->req);
 	int fraglen, ret;
 
 	/* Worst case is 4 fragments: head, end of page 1, start
@@ -578,7 +580,7 @@ decryptor(struct scatterlist *sg, void *data)
 	desc->fragno++;
 	desc->fraglen += sg->length;
 
-	fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
+	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
 	thislen -= fraglen;
 
 	if (thislen == 0)
@@ -608,17 +610,17 @@ decryptor(struct scatterlist *sg, void *data)
 }
 
 int
-gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
+gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
 		    int offset)
 {
 	int ret;
 	struct decryptor_desc desc;
-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
 	/* XXXJBF: */
-	BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);
+	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);
 
-	skcipher_request_set_tfm(req, tfm);
+	skcipher_request_set_sync_tfm(req, tfm);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 
 	memset(desc.iv, 0, sizeof(desc.iv));
@@ -672,12 +674,12 @@ xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
 }
 
 static u32
-gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
+gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
 		   u32 offset, u8 *iv, struct page **pages, int encrypt)
 {
 	u32 ret;
 	struct scatterlist sg[1];
-	SKCIPHER_REQUEST_ON_STACK(req, cipher);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(req, cipher);
 	u8 *data;
 	struct page **save_pages;
 	u32 len = buf->len - offset;
@@ -706,7 +708,7 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
 
 	sg_init_one(sg, data, len);
 
-	skcipher_request_set_tfm(req, cipher);
+	skcipher_request_set_sync_tfm(req, cipher);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, sg, sg, len, iv);
 
@@ -735,7 +737,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
 	struct xdr_netobj hmac;
 	u8 *cksumkey;
 	u8 *ecptr;
-	struct crypto_skcipher *cipher, *aux_cipher;
+	struct crypto_sync_skcipher *cipher, *aux_cipher;
 	int blocksize;
 	struct page **save_pages;
 	int nblocks, nbytes;
@@ -754,7 +756,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
 		cksumkey = kctx->acceptor_integ;
 		usage = KG_USAGE_ACCEPTOR_SEAL;
 	}
-	blocksize = crypto_skcipher_blocksize(cipher);
+	blocksize = crypto_sync_skcipher_blocksize(cipher);
 
 	/* hide the gss token header and insert the confounder */
 	offset += GSS_KRB5_TOK_HDR_LEN;
@@ -807,7 +809,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
 	memset(desc.iv, 0, sizeof(desc.iv));
 
 	if (cbcbytes) {
-		SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
 
 		desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
 		desc.fragno = 0;
@@ -816,7 +818,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
 		desc.outbuf = buf;
 		desc.req = req;
 
-		skcipher_request_set_tfm(req, aux_cipher);
+		skcipher_request_set_sync_tfm(req, aux_cipher);
 		skcipher_request_set_callback(req, 0, NULL, NULL);
 
 		sg_init_table(desc.infrags, 4);
@@ -855,7 +857,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
 	struct xdr_buf subbuf;
 	u32 ret = 0;
 	u8 *cksum_key;
-	struct crypto_skcipher *cipher, *aux_cipher;
+	struct crypto_sync_skcipher *cipher, *aux_cipher;
 	struct xdr_netobj our_hmac_obj;
 	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
 	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
@@ -874,7 +876,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
 		cksum_key = kctx->initiator_integ;
 		usage = KG_USAGE_INITIATOR_SEAL;
 	}
-	blocksize = crypto_skcipher_blocksize(cipher);
+	blocksize = crypto_sync_skcipher_blocksize(cipher);
 
 
 	/* create a segment skipping the header and leaving out the checksum */
@@ -891,13 +893,13 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
 	memset(desc.iv, 0, sizeof(desc.iv));
 
 	if (cbcbytes) {
-		SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
 
 		desc.fragno = 0;
 		desc.fraglen = 0;
 		desc.req = req;
 
-		skcipher_request_set_tfm(req, aux_cipher);
+		skcipher_request_set_sync_tfm(req, aux_cipher);
 		skcipher_request_set_callback(req, 0, NULL, NULL);
 
 		sg_init_table(desc.frags, 4);
@@ -946,7 +948,8 @@ out_err:
  * Set the key of the given cipher.
  */
 int
-krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
+krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
+		       struct crypto_sync_skcipher *cipher,
 		       unsigned char *cksum)
 {
 	struct crypto_shash *hmac;
@@ -994,7 +997,7 @@ krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
 	if (err)
 		goto out_err;
 
-	err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
+	err = crypto_sync_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
 	if (err)
 		goto out_err;
 
@@ -1012,7 +1015,8 @@ out_err:
  * Set the key of cipher kctx->enc.
  */
 int
-krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
+krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
+		       struct crypto_sync_skcipher *cipher,
 		       s32 seqnum)
 {
 	struct crypto_shash *hmac;
@@ -1069,7 +1073,8 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
 	if (err)
 		goto out_err;
 
-	err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
+	err = crypto_sync_skcipher_setkey(cipher, Kcrypt,
+					  kctx->gk5e->keylength);
 	if (err)
 		goto out_err;
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c
index f7fe2d2b851f..550fdf18d3b3 100644
--- a/net/sunrpc/auth_gss/gss_krb5_keys.c
+++ b/net/sunrpc/auth_gss/gss_krb5_keys.c
@@ -147,7 +147,7 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
 	size_t blocksize, keybytes, keylength, n;
 	unsigned char *inblockdata, *outblockdata, *rawkey;
 	struct xdr_netobj inblock, outblock;
-	struct crypto_skcipher *cipher;
+	struct crypto_sync_skcipher *cipher;
 	u32 ret = EINVAL;
 
 	blocksize = gk5e->blocksize;
@@ -157,11 +157,10 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
 	if ((inkey->len != keylength) || (outkey->len != keylength))
 		goto err_return;
 
-	cipher = crypto_alloc_skcipher(gk5e->encrypt_name, 0,
-				       CRYPTO_ALG_ASYNC);
+	cipher = crypto_alloc_sync_skcipher(gk5e->encrypt_name, 0, 0);
 	if (IS_ERR(cipher))
 		goto err_return;
-	if (crypto_skcipher_setkey(cipher, inkey->data, inkey->len))
+	if (crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len))
 		goto err_return;
 
 	/* allocate and set up buffers */
@@ -238,7 +237,7 @@ err_free_in:
 	memset(inblockdata, 0, blocksize);
 	kfree(inblockdata);
 err_free_cipher:
-	crypto_free_skcipher(cipher);
+	crypto_free_sync_skcipher(cipher);
err_return:
 	return ret;
 }
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 7bb2514aadd9..7f0424dfa8f6 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -218,7 +218,7 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
 
 static inline const void *
 get_key(const void *p, const void *end,
-	struct krb5_ctx *ctx, struct crypto_skcipher **res)
+	struct krb5_ctx *ctx, struct crypto_sync_skcipher **res)
 {
 	struct xdr_netobj	key;
 	int			alg;
@@ -246,15 +246,14 @@ get_key(const void *p, const void *end,
 	if (IS_ERR(p))
 		goto out_err;
 
-	*res = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
-				     CRYPTO_ALG_ASYNC);
+	*res = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0);
 	if (IS_ERR(*res)) {
 		printk(KERN_WARNING "gss_kerberos_mech: unable to initialize "
 		       "crypto algorithm %s\n", ctx->gk5e->encrypt_name);
 		*res = NULL;
 		goto out_err_free_key;
 	}
-	if (crypto_skcipher_setkey(*res, key.data, key.len)) {
+	if (crypto_sync_skcipher_setkey(*res, key.data, key.len)) {
 		printk(KERN_WARNING "gss_kerberos_mech: error setting key for "
 		       "crypto algorithm %s\n", ctx->gk5e->encrypt_name);
 		goto out_err_free_tfm;
@@ -264,7 +263,7 @@ get_key(const void *p, const void *end,
 	return p;
 
 out_err_free_tfm:
-	crypto_free_skcipher(*res);
+	crypto_free_sync_skcipher(*res);
 out_err_free_key:
 	kfree(key.data);
 	p = ERR_PTR(-EINVAL);
@@ -336,30 +335,30 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
 	return 0;
 
 out_err_free_key2:
-	crypto_free_skcipher(ctx->seq);
+	crypto_free_sync_skcipher(ctx->seq);
 out_err_free_key1:
-	crypto_free_skcipher(ctx->enc);
+	crypto_free_sync_skcipher(ctx->enc);
 out_err_free_mech:
 	kfree(ctx->mech_used.data);
 out_err:
 	return PTR_ERR(p);
 }
 
-static struct crypto_skcipher *
+static struct crypto_sync_skcipher *
 context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key)
 {
-	struct crypto_skcipher *cp;
+	struct crypto_sync_skcipher *cp;
 
-	cp = crypto_alloc_skcipher(cname, 0, CRYPTO_ALG_ASYNC);
+	cp = crypto_alloc_sync_skcipher(cname, 0, 0);
 	if (IS_ERR(cp)) {
 		dprintk("gss_kerberos_mech: unable to initialize "
 			"crypto algorithm %s\n", cname);
 		return NULL;
 	}
-	if (crypto_skcipher_setkey(cp, key, ctx->gk5e->keylength)) {
+	if (crypto_sync_skcipher_setkey(cp, key, ctx->gk5e->keylength)) {
 		dprintk("gss_kerberos_mech: error setting key for "
 			"crypto algorithm %s\n", cname);
-		crypto_free_skcipher(cp);
+		crypto_free_sync_skcipher(cp);
 		return NULL;
 	}
 	return cp;
@@ -413,9 +412,9 @@ context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask)
 	return 0;
 
 out_free_enc:
-	crypto_free_skcipher(ctx->enc);
+	crypto_free_sync_skcipher(ctx->enc);
 out_free_seq:
-	crypto_free_skcipher(ctx->seq);
+	crypto_free_sync_skcipher(ctx->seq);
 out_err:
 	return -EINVAL;
 }
@@ -469,17 +468,15 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
 	/*
 	 * allocate hash, and skciphers for data and seqnum encryption
 	 */
-	ctx->enc = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
-					 CRYPTO_ALG_ASYNC);
+	ctx->enc = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0);
 	if (IS_ERR(ctx->enc)) {
 		err = PTR_ERR(ctx->enc);
 		goto out_err_free_hmac;
 	}
 
-	ctx->seq = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
-					 CRYPTO_ALG_ASYNC);
+	ctx->seq = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0);
 	if (IS_ERR(ctx->seq)) {
-		crypto_free_skcipher(ctx->enc);
+		crypto_free_sync_skcipher(ctx->enc);
 		err = PTR_ERR(ctx->seq);
 		goto out_err_free_hmac;
 	}
@@ -591,7 +588,7 @@ context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
 			context_v2_alloc_cipher(ctx, "cbc(aes)",
 						ctx->acceptor_seal);
 		if (ctx->acceptor_enc_aux == NULL) {
-			crypto_free_skcipher(ctx->initiator_enc_aux);
+			crypto_free_sync_skcipher(ctx->initiator_enc_aux);
 			goto out_free_acceptor_enc;
 		}
 	}
@@ -599,9 +596,9 @@ context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
 	return 0;
 
 out_free_acceptor_enc:
-	crypto_free_skcipher(ctx->acceptor_enc);
+	crypto_free_sync_skcipher(ctx->acceptor_enc);
 out_free_initiator_enc:
-	crypto_free_skcipher(ctx->initiator_enc);
+	crypto_free_sync_skcipher(ctx->initiator_enc);
 out_err:
 	return -EINVAL;
 }
@@ -713,12 +710,12 @@ static void
 gss_delete_sec_context_kerberos(void *internal_ctx) {
 	struct krb5_ctx *kctx = internal_ctx;
 
-	crypto_free_skcipher(kctx->seq);
-	crypto_free_skcipher(kctx->enc);
-	crypto_free_skcipher(kctx->acceptor_enc);
-	crypto_free_skcipher(kctx->initiator_enc);
-	crypto_free_skcipher(kctx->acceptor_enc_aux);
-	crypto_free_skcipher(kctx->initiator_enc_aux);
+	crypto_free_sync_skcipher(kctx->seq);
+	crypto_free_sync_skcipher(kctx->enc);
+	crypto_free_sync_skcipher(kctx->acceptor_enc);
+	crypto_free_sync_skcipher(kctx->initiator_enc);
+	crypto_free_sync_skcipher(kctx->acceptor_enc_aux);
+	crypto_free_sync_skcipher(kctx->initiator_enc_aux);
 	kfree(kctx->mech_used.data);
 	kfree(kctx);
 }
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index c8b9082f4a9d..fb6656295204 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -43,13 +43,12 @@ static s32
 krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
 		      unsigned char *cksum, unsigned char *buf)
 {
-	struct crypto_skcipher *cipher;
+	struct crypto_sync_skcipher *cipher;
 	unsigned char plain[8];
 	s32 code;
 
 	dprintk("RPC:       %s:\n", __func__);
-	cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
-				       CRYPTO_ALG_ASYNC);
+	cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name, 0, 0);
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
@@ -68,12 +67,12 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
 
 	code = krb5_encrypt(cipher, cksum, plain, buf, 8);
 out:
-	crypto_free_skcipher(cipher);
+	crypto_free_sync_skcipher(cipher);
 	return code;
 }
 s32
 krb5_make_seq_num(struct krb5_ctx *kctx,
-		  struct crypto_skcipher *key,
+		  struct crypto_sync_skcipher *key,
 		  int direction,
 		  u32 seqnum,
 		  unsigned char *cksum, unsigned char *buf)
@@ -101,13 +100,12 @@ static s32
 krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
 		     unsigned char *buf, int *direction, s32 *seqnum)
 {
-	struct crypto_skcipher *cipher;
+	struct crypto_sync_skcipher *cipher;
 	unsigned char plain[8];
 	s32 code;
 
 	dprintk("RPC:       %s:\n", __func__);
-	cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
-				       CRYPTO_ALG_ASYNC);
+	cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name, 0, 0);
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
@@ -130,7 +128,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
 	*seqnum = ((plain[0] << 24) | (plain[1] << 16) |
 					(plain[2] << 8) | (plain[3]));
 out:
-	crypto_free_skcipher(cipher);
+	crypto_free_sync_skcipher(cipher);
 	return code;
 }
 
@@ -142,7 +140,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
 {
 	s32 code;
 	unsigned char plain[8];
-	struct crypto_skcipher *key = kctx->seq;
+	struct crypto_sync_skcipher *key = kctx->seq;
 
 	dprintk("RPC:       krb5_get_seq_num:\n");
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 39a2e672900b..3d975a4013d2 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -174,7 +174,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 
 	now = get_seconds();
 
-	blocksize = crypto_skcipher_blocksize(kctx->enc);
+	blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
 	gss_krb5_add_padding(buf, offset, blocksize);
 	BUG_ON((buf->len - offset) % blocksize);
 	plainlen = conflen + buf->len - offset;
@@ -239,10 +239,10 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 		return GSS_S_FAILURE;
 
 	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
-		struct crypto_skcipher *cipher;
+		struct crypto_sync_skcipher *cipher;
 		int err;
-		cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
-					       CRYPTO_ALG_ASYNC);
+		cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name,
+						    0, 0);
 		if (IS_ERR(cipher))
 			return GSS_S_FAILURE;
 
@@ -250,7 +250,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 
 		err = gss_encrypt_xdr_buf(cipher, buf,
 					  offset + headlen - conflen, pages);
-		crypto_free_skcipher(cipher);
+		crypto_free_sync_skcipher(cipher);
 		if (err)
 			return GSS_S_FAILURE;
 	} else {
@@ -327,18 +327,18 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 		return GSS_S_BAD_SIG;
 
 	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
-		struct crypto_skcipher *cipher;
+		struct crypto_sync_skcipher *cipher;
 		int err;
 
-		cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
-					       CRYPTO_ALG_ASYNC);
+		cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name,
+						    0, 0);
 		if (IS_ERR(cipher))
 			return GSS_S_FAILURE;
 
 		krb5_rc4_setup_enc_key(kctx, cipher, seqnum);
 
 		err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
-		crypto_free_skcipher(cipher);
+		crypto_free_sync_skcipher(cipher);
 		if (err)
 			return GSS_S_DEFECTIVE_TOKEN;
 	} else {
@@ -371,7 +371,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	/* Copy the data back to the right position. XXX: Would probably be
 	 * better to copy and encrypt at the same time. */
 
-	blocksize = crypto_skcipher_blocksize(kctx->enc);
+	blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
 	data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
 					conflen;
 	orig_start = buf->head[0].iov_base + offset;