-rw-r--r--  drivers/crypto/padlock-sha.c           |  4
-rw-r--r--  drivers/md/dm-crypt.c                  |  2
-rw-r--r--  drivers/net/ppp_mppe.c                 | 10
-rw-r--r--  drivers/scsi/iscsi_tcp.c               |  5
-rw-r--r--  fs/ecryptfs/crypto.c                   |  2
-rw-r--r--  net/ipv4/esp4.c                        |  7
-rw-r--r--  net/ipv6/esp6.c                        |  8
-rw-r--r--  net/rxrpc/rxkad.c                      | 66
-rw-r--r--  net/sctp/auth.c                        |  3
-rw-r--r--  net/sctp/sm_make_chunk.c               |  6
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c  | 34
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_seal.c   |  2
-rw-r--r--  net/sunrpc/xdr.c                       |  2
13 files changed, 93 insertions, 58 deletions
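
The common thread across the hunks below is the move from open-coded scatterlist setup (memset() of the array, struct copies, or a bare sg_set_buf()) to the chained-scatterlist API: sg_init_table()/sg_init_one() to initialise entries and, where only part of a table is filled, sg_mark_end() to terminate it. A minimal sketch of that pattern, assuming the 2.6.24-era interfaces used in this diff (including the two-argument sg_mark_end()); the helper name is illustrative only, not part of the patch:

#include <linux/scatterlist.h>

/* Illustrative helper: describe a single buffer in a two-entry table
 * the way the converted callers now do it. */
static void example_fill_sg(struct scatterlist sg[2], void *buf,
			    unsigned int len)
{
	sg_init_table(sg, 2);		/* zero the entries, set termination markers */
	sg_set_buf(&sg[0], buf, len);	/* point entry 0 at the buffer */
	sg_mark_end(sg, 1);		/* only one entry is actually used */

	/* For the common one-entry case the callers simply use
	 * sg_init_one(&sg[0], buf, len), which combines the steps above. */
}
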
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 4e8de162fc12..c666b4e0933e 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -55,7 +55,7 @@ static void padlock_sha_bypass(struct crypto_tfm *tfm)
 	if (ctx(tfm)->data && ctx(tfm)->used) {
 		struct scatterlist sg;
 
-		sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used);
+		sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
 		crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
 	}
 
@@ -79,7 +79,7 @@ static void padlock_sha_update(struct crypto_tfm *tfm,
 
 	if (unlikely(ctx(tfm)->bypass)) {
 		struct scatterlist sg;
-		sg_set_buf(&sg, (uint8_t *)data, length);
+		sg_init_one(&sg, (uint8_t *)data, length);
 		crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
 		return;
 	}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 1c159ac68c98..28c6ae095c56 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -168,7 +168,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 		return -ENOMEM;
 	}
 
-	sg_set_buf(&sg, cc->key, cc->key_size);
+	sg_init_one(&sg, cc->key, cc->key_size);
 	desc.tfm = hash_tfm;
 	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index bcb0885011c8..b35d79449500 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -68,7 +68,7 @@ MODULE_VERSION("1.0.2");
 static unsigned int
 setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
 {
-	sg_init_one(sg, address, length);
+	sg_set_buf(sg, address, length);
 	return length;
 }
 
@@ -140,6 +140,8 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
 	struct scatterlist sg[4];
 	unsigned int nbytes;
 
+	sg_init_table(sg, 4);
+
 	nbytes = setup_sg(&sg[0], state->master_key, state->keylen);
 	nbytes += setup_sg(&sg[1], sha_pad->sha_pad1,
 			   sizeof(sha_pad->sha_pad1));
@@ -166,6 +168,8 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
 	if (!initial_key) {
 		crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
 					state->keylen);
+		sg_init_table(sg_in, 1);
+		sg_init_table(sg_out, 1);
 		setup_sg(sg_in, state->sha1_digest, state->keylen);
 		setup_sg(sg_out, state->session_key, state->keylen);
 		if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
@@ -421,6 +425,8 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
 	isize -= 2;
 
 	/* Encrypt packet */
+	sg_init_table(sg_in, 1);
+	sg_init_table(sg_out, 1);
 	setup_sg(sg_in, ibuf, isize);
 	setup_sg(sg_out, obuf, osize);
 	if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) {
@@ -608,6 +614,8 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
 	 * Decrypt the first byte in order to check if it is
 	 * a compressed or uncompressed protocol field.
 	 */
+	sg_init_table(sg_in, 1);
+	sg_init_table(sg_out, 1);
 	setup_sg(sg_in, ibuf, 1);
 	setup_sg(sg_out, obuf, 1);
 	if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) {
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 097a136398cb..4bcf916c21a7 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -674,9 +674,8 @@ partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg,
 {
 	struct scatterlist temp;
 
-	memcpy(&temp, sg, sizeof(struct scatterlist));
-	temp.offset = offset;
-	temp.length = length;
+	sg_init_table(&temp, 1);
+	sg_set_page(&temp, sg_page(sg), length, offset);
 	crypto_hash_update(desc, &temp, length);
 }
 
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 7a472b129997..9d70289f7df3 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -279,6 +279,8 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
 	int offset;
 	int remainder_of_page;
 
+	sg_init_table(sg, sg_size);
+
 	while (size > 0 && i < sg_size) {
 		pg = virt_to_page(addr);
 		offset = offset_in_page(addr);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 23b647c668f1..cad4278025ad 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -111,7 +111,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 			goto unlock;
 		}
 		sg_init_table(sg, nfrags);
-		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
+		sg_mark_end(sg, skb_to_sgvec(skb, sg, esph->enc_data +
+					     esp->conf.ivlen -
+					     skb->data, clen));
 		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
 		if (unlikely(sg != &esp->sgbuf[0]))
 			kfree(sg);
@@ -203,7 +205,8 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 			goto out;
 		}
 		sg_init_table(sg, nfrags);
-		skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen);
+		sg_mark_end(sg, skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen,
+					     elen));
 		err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
 		if (unlikely(sg != &esp->sgbuf[0]))
 			kfree(sg);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index f8bb136d3711..ab17b5e62355 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -110,7 +110,9 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 			goto unlock;
 		}
 		sg_init_table(sg, nfrags);
-		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
+		sg_mark_end(sg, skb_to_sgvec(skb, sg, esph->enc_data +
+					     esp->conf.ivlen -
+					     skb->data, clen));
 		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
 		if (unlikely(sg != &esp->sgbuf[0]))
 			kfree(sg);
@@ -207,7 +209,9 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 			}
 		}
 		sg_init_table(sg, nfrags);
-		skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen);
+		sg_mark_end(sg, skb_to_sgvec(skb, sg,
+					     sizeof(*esph) + esp->conf.ivlen,
+					     elen));
 		ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
 		if (unlikely(sg != &esp->sgbuf[0]))
 			kfree(sg);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index ac3cabdca78c..eebefb6ef139 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -135,9 +135,8 @@ static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
 	tmpbuf.x[2] = 0;
 	tmpbuf.x[3] = htonl(conn->security_ix);
 
-	memset(sg, 0, sizeof(sg));
-	sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
-	sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
 	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
 
 	memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv));
@@ -180,9 +179,8 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
 	desc.info = iv.x;
 	desc.flags = 0;
 
-	memset(sg, 0, sizeof(sg));
-	sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
-	sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
 	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
 
 	memcpy(sechdr, &tmpbuf, sizeof(tmpbuf));
@@ -227,9 +225,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
 	desc.info = iv.x;
 	desc.flags = 0;
 
-	memset(sg, 0, sizeof(sg[0]) * 2);
-	sg_set_buf(&sg[0], sechdr, sizeof(rxkhdr));
-	sg_set_buf(&sg[1], &rxkhdr, sizeof(rxkhdr));
+	sg_init_one(&sg[0], sechdr, sizeof(rxkhdr));
+	sg_init_one(&sg[1], &rxkhdr, sizeof(rxkhdr));
 	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr));
 
 	/* we want to encrypt the skbuff in-place */
@@ -240,7 +237,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
 	len = data_size + call->conn->size_align - 1;
 	len &= ~(call->conn->size_align - 1);
 
-	skb_to_sgvec(skb, sg, 0, len);
+	sg_init_table(sg, skb_to_sgvec(skb, sg, 0, len));
 	crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
 
 	_leave(" = 0");
@@ -290,9 +287,8 @@ static int rxkad_secure_packet(const struct rxrpc_call *call,
 	tmpbuf.x[0] = sp->hdr.callNumber;
 	tmpbuf.x[1] = x;
 
-	memset(&sg, 0, sizeof(sg));
-	sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
-	sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
 	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
 
 	x = ntohl(tmpbuf.x[1]);
@@ -332,20 +328,23 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
 	struct rxrpc_skb_priv *sp;
 	struct blkcipher_desc desc;
 	struct rxrpc_crypt iv;
-	struct scatterlist sg[2];
+	struct scatterlist sg[16];
 	struct sk_buff *trailer;
 	u32 data_size, buf;
 	u16 check;
+	int nsg;
 
 	_enter("");
 
 	sp = rxrpc_skb(skb);
 
 	/* we want to decrypt the skbuff in-place */
-	if (skb_cow_data(skb, 0, &trailer) < 0)
+	nsg = skb_cow_data(skb, 0, &trailer);
+	if (nsg < 0 || nsg > 16)
 		goto nomem;
 
-	skb_to_sgvec(skb, sg, 0, 8);
+	sg_init_table(sg, nsg);
+	sg_mark_end(sg, skb_to_sgvec(skb, sg, 0, 8));
 
 	/* start the decryption afresh */
 	memset(&iv, 0, sizeof(iv));
@@ -426,7 +425,8 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
 			goto nomem;
 	}
 
-	skb_to_sgvec(skb, sg, 0, skb->len);
+	sg_init_table(sg, nsg);
+	sg_mark_end(sg, skb_to_sgvec(skb, sg, 0, skb->len));
 
 	/* decrypt from the session key */
 	payload = call->conn->key->payload.data;
@@ -521,9 +521,8 @@ static int rxkad_verify_packet(const struct rxrpc_call *call,
 	tmpbuf.x[0] = call->call_id;
 	tmpbuf.x[1] = x;
 
-	memset(&sg, 0, sizeof(sg));
-	sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
-	sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
 	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
 
 	x = ntohl(tmpbuf.x[1]);
@@ -690,16 +689,20 @@ static void rxkad_calc_response_checksum(struct rxkad_response *response)
 static void rxkad_sg_set_buf2(struct scatterlist sg[2],
 			      void *buf, size_t buflen)
 {
+	int nsg = 1;
 
-	memset(sg, 0, sizeof(sg));
+	sg_init_table(sg, 2);
 
 	sg_set_buf(&sg[0], buf, buflen);
 	if (sg[0].offset + buflen > PAGE_SIZE) {
 		/* the buffer was split over two pages */
 		sg[0].length = PAGE_SIZE - sg[0].offset;
 		sg_set_buf(&sg[1], buf + sg[0].length, buflen - sg[0].length);
+		nsg++;
 	}
 
+	sg_mark_end(sg, nsg);
+
 	ASSERTCMP(sg[0].length + sg[1].length, ==, buflen);
 }
 
@@ -712,7 +715,7 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn,
 {
 	struct blkcipher_desc desc;
 	struct rxrpc_crypt iv;
-	struct scatterlist ssg[2], dsg[2];
+	struct scatterlist sg[2];
 
 	/* continue encrypting from where we left off */
 	memcpy(&iv, s2->session_key, sizeof(iv));
@@ -720,9 +723,8 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn,
 	desc.info = iv.x;
 	desc.flags = 0;
 
-	rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted));
-	memcpy(dsg, ssg, sizeof(dsg));
-	crypto_blkcipher_encrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted));
+	rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted));
+	crypto_blkcipher_encrypt_iv(&desc, sg, sg, sizeof(resp->encrypted));
 }
 
 /*
@@ -817,7 +819,7 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
 {
 	struct blkcipher_desc desc;
 	struct rxrpc_crypt iv, key;
-	struct scatterlist ssg[1], dsg[1];
+	struct scatterlist sg[1];
 	struct in_addr addr;
 	unsigned life;
 	time_t issue, now;
@@ -850,9 +852,8 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
 	desc.info = iv.x;
 	desc.flags = 0;
 
-	sg_init_one(&ssg[0], ticket, ticket_len);
-	memcpy(dsg, ssg, sizeof(dsg));
-	crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, ticket_len);
+	sg_init_one(&sg[0], ticket, ticket_len);
+	crypto_blkcipher_decrypt_iv(&desc, sg, sg, ticket_len);
 
 	p = ticket;
 	end = p + ticket_len;
@@ -961,7 +962,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
 				   const struct rxrpc_crypt *session_key)
 {
 	struct blkcipher_desc desc;
-	struct scatterlist ssg[2], dsg[2];
+	struct scatterlist sg[2];
 	struct rxrpc_crypt iv;
 
 	_enter(",,%08x%08x",
@@ -979,9 +980,8 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
 	desc.info = iv.x;
 	desc.flags = 0;
 
-	rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted));
-	memcpy(dsg, ssg, sizeof(dsg));
-	crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted));
+	rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted));
+	crypto_blkcipher_decrypt_iv(&desc, sg, sg, sizeof(resp->encrypted));
 	mutex_unlock(&rxkad_ci_mutex);
 
 	_leave("");
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index c9dbc3afa99f..8af1004abefe 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -726,8 +726,7 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
 
 	/* set up scatter list */
 	end = skb_tail_pointer(skb);
-	sg_init_table(&sg, 1);
-	sg_set_buf(&sg, auth, end - (unsigned char *)auth);
+	sg_init_one(&sg, auth, end - (unsigned char *)auth);
 
 	desc.tfm = asoc->ep->auth_hmacs[hmac_id];
 	desc.flags = 0;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index c055212875f6..c377e4e8f653 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1513,8 +1513,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
 	struct hash_desc desc;
 
 	/* Sign the message. */
-	sg_init_table(&sg, 1);
-	sg_set_buf(&sg, &cookie->c, bodysize);
+	sg_init_one(&sg, &cookie->c, bodysize);
 	keylen = SCTP_SECRET_SIZE;
 	key = (char *)ep->secret_key[ep->current_key];
 	desc.tfm = sctp_sk(ep->base.sk)->hmac;
@@ -1584,8 +1583,7 @@ struct sctp_association *sctp_unpack_cookie(
 
 	/* Check the signature. */
 	keylen = SCTP_SECRET_SIZE;
-	sg_init_table(&sg, 1);
-	sg_set_buf(&sg, bear_cookie, bodysize);
+	sg_init_one(&sg, bear_cookie, bodysize);
 	key = (char *)ep->secret_key[ep->current_key];
 	desc.tfm = sctp_sk(ep->base.sk)->hmac;
 	desc.flags = 0;
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 24711be4b2dc..91cd8f0d1e10 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -75,7 +75,7 @@ krb5_encrypt(
 	memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
 
 	memcpy(out, in, length);
-	sg_set_buf(sg, out, length);
+	sg_init_one(sg, out, length);
 
 	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
 out:
@@ -110,7 +110,7 @@ krb5_decrypt(
 	memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
 
 	memcpy(out, in, length);
-	sg_set_buf(sg, out, length);
+	sg_init_one(sg, out, length);
 
 	ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
 out:
@@ -146,7 +146,7 @@ make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
 	err = crypto_hash_init(&desc);
 	if (err)
 		goto out;
-	sg_set_buf(sg, header, hdrlen);
+	sg_init_one(sg, header, hdrlen);
 	err = crypto_hash_update(&desc, sg, hdrlen);
 	if (err)
 		goto out;
@@ -188,8 +188,6 @@ encryptor(struct scatterlist *sg, void *data)
 	/* Worst case is 4 fragments: head, end of page 1, start
 	 * of page 2, tail. Anything more is a bug. */
 	BUG_ON(desc->fragno > 3);
-	desc->infrags[desc->fragno] = *sg;
-	desc->outfrags[desc->fragno] = *sg;
 
 	page_pos = desc->pos - outbuf->head[0].iov_len;
 	if (page_pos >= 0 && page_pos < outbuf->page_len) {
@@ -199,7 +197,10 @@ encryptor(struct scatterlist *sg, void *data)
 	} else {
 		in_page = sg_page(sg);
 	}
-	sg_assign_page(&desc->infrags[desc->fragno], in_page);
+	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
+		    sg->offset);
+	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
+		    sg->offset);
 	desc->fragno++;
 	desc->fraglen += sg->length;
 	desc->pos += sg->length;
@@ -210,10 +211,17 @@ encryptor(struct scatterlist *sg, void *data)
 	if (thislen == 0)
 		return 0;
 
+	sg_mark_end(desc->infrags, desc->fragno);
+	sg_mark_end(desc->outfrags, desc->fragno);
+
 	ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
 					  desc->infrags, thislen);
 	if (ret)
 		return ret;
+
+	sg_init_table(desc->infrags, 4);
+	sg_init_table(desc->outfrags, 4);
+
 	if (fraglen) {
 		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
 			    sg->offset + sg->length - fraglen);
@@ -247,6 +255,9 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
 	desc.fragno = 0;
 	desc.fraglen = 0;
 
+	sg_init_table(desc.infrags, 4);
+	sg_init_table(desc.outfrags, 4);
+
 	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
 	return ret;
 }
@@ -271,7 +282,8 @@ decryptor(struct scatterlist *sg, void *data)
 	/* Worst case is 4 fragments: head, end of page 1, start
 	 * of page 2, tail. Anything more is a bug. */
 	BUG_ON(desc->fragno > 3);
-	desc->frags[desc->fragno] = *sg;
+	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
+		    sg->offset);
 	desc->fragno++;
 	desc->fraglen += sg->length;
 
@@ -281,10 +293,15 @@ decryptor(struct scatterlist *sg, void *data)
 	if (thislen == 0)
 		return 0;
 
+	sg_mark_end(desc->frags, desc->fragno);
+
 	ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
 					  desc->frags, thislen);
 	if (ret)
 		return ret;
+
+	sg_init_table(desc->frags, 4);
+
 	if (fraglen) {
 		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
 			    sg->offset + sg->length - fraglen);
@@ -312,6 +329,9 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
 	desc.desc.flags = 0;
 	desc.fragno = 0;
 	desc.fraglen = 0;
+
+	sg_init_table(desc.frags, 4);
+
 	return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
 }
 
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
index d158635de6c0..abf17ce2e3b1 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c
@@ -173,7 +173,7 @@ make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header,
 	if (err)
 		goto out;
 
-	sg_set_buf(sg, header, hdrlen);
+	sg_init_one(sg, header, hdrlen);
 	crypto_hash_update(&desc, sg, sg->length);
 
 	xdr_process_buf(body, body_offset, body->len - body_offset,
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index f38dac30481b..fdc5e6d7562b 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1030,6 +1030,8 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
 	unsigned page_len, thislen, page_offset;
 	struct scatterlist sg[1];
 
+	sg_init_table(sg, 1);
+
 	if (offset >= buf->head[0].iov_len) {
 		offset -= buf->head[0].iov_len;
 	} else {