-rw-r--r--  crypto/ahash.c                   4
-rw-r--r--  crypto/async_tx/async_memcpy.c   8
-rw-r--r--  crypto/blkcipher.c               8
-rw-r--r--  crypto/ccm.c                     4
-rw-r--r--  crypto/scatterwalk.c             8
-rw-r--r--  crypto/shash.c                   8
-rw-r--r--  include/crypto/scatterwalk.h    28
7 files changed, 23 insertions, 45 deletions
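
All seven files apply the same substitution: the crypto_kmap()/crypto_kunmap()
wrappers, which derived a km_type slot from an "out" flag, and the direct
kmap_atomic(page, KM_*) callers are converted to the slot-less
kmap_atomic()/kunmap_atomic() interface. A minimal sketch of the new calling
convention follows; the helper name and its parameters are hypothetical, and
only the mapping calls mirror the hunks below (in particular async_memcpy.c):

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: copy len bytes between two (possibly highmem) pages. */
static void copy_page_chunk(struct page *dst_page, struct page *src_page,
			    unsigned int dst_off, unsigned int src_off,
			    unsigned int len)
{
	void *dst = kmap_atomic(dst_page) + dst_off;	/* was kmap_atomic(dst_page, KM_USER0) */
	void *src = kmap_atomic(src_page) + src_off;	/* was kmap_atomic(src_page, KM_USER1) */

	memcpy(dst, src, len);

	/* Atomic mappings are still released in reverse order of mapping. */
	kunmap_atomic(src);				/* was kunmap_atomic(src, KM_USER1) */
	kunmap_atomic(dst);				/* was kunmap_atomic(dst, KM_USER0) */
}
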
diff --git a/crypto/ahash.c b/crypto/ahash.c
index ac93c99cfae8..33bc9b62e9ae 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -46,7 +46,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
 	unsigned int nbytes = min(walk->entrylen,
 				  ((unsigned int)(PAGE_SIZE)) - offset);
 
-	walk->data = crypto_kmap(walk->pg, 0);
+	walk->data = kmap_atomic(walk->pg);
 	walk->data += offset;
 
 	if (offset & alignmask) {
@@ -93,7 +93,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 		return nbytes;
 	}
 
-	crypto_kunmap(walk->data, 0);
+	kunmap_atomic(walk->data);
 	crypto_yield(walk->flags);
 
 	if (err)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 0d5a90ca6501..361b5e8239bc 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -79,13 +79,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	/* wait for any prerequisite operations */
 	async_tx_quiesce(&submit->depend_tx);
 
-	dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
-	src_buf = kmap_atomic(src, KM_USER1) + src_offset;
+	dest_buf = kmap_atomic(dest) + dest_offset;
+	src_buf = kmap_atomic(src) + src_offset;
 
 	memcpy(dest_buf, src_buf, len);
 
-	kunmap_atomic(src_buf, KM_USER1);
-	kunmap_atomic(dest_buf, KM_USER0);
+	kunmap_atomic(src_buf);
+	kunmap_atomic(dest_buf);
 
 	async_tx_sync_epilog(submit);
 }
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 1e61d1a888b2..4dd80c725498 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -43,22 +43,22 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
 
 static inline void blkcipher_map_src(struct blkcipher_walk *walk)
 {
-	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
+	walk->src.virt.addr = scatterwalk_map(&walk->in);
 }
 
 static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
 {
-	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
+	walk->dst.virt.addr = scatterwalk_map(&walk->out);
 }
 
 static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
 {
-	scatterwalk_unmap(walk->src.virt.addr, 0);
+	scatterwalk_unmap(walk->src.virt.addr);
 }
 
 static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
 {
-	scatterwalk_unmap(walk->dst.virt.addr, 1);
+	scatterwalk_unmap(walk->dst.virt.addr);
 }
 
 /* Get a spot of the specified length that does not straddle a page.
diff --git a/crypto/ccm.c b/crypto/ccm.c
index c36d654cf56a..32fe1bb5decb 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -216,12 +216,12 @@ static void get_data_to_compute(struct crypto_cipher *tfm,
 			scatterwalk_start(&walk, sg_next(walk.sg));
 			n = scatterwalk_clamp(&walk, len);
 		}
-		data_src = scatterwalk_map(&walk, 0);
+		data_src = scatterwalk_map(&walk);
 
 		compute_mac(tfm, data_src, n, pctx);
 		len -= n;
 
-		scatterwalk_unmap(data_src, 0);
+		scatterwalk_unmap(data_src);
 		scatterwalk_advance(&walk, n);
 		scatterwalk_done(&walk, 0, len);
 		if (len)
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 41e529af0773..7281b8a93ad3 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -40,9 +40,9 @@ void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
 }
 EXPORT_SYMBOL_GPL(scatterwalk_start);
 
-void *scatterwalk_map(struct scatter_walk *walk, int out)
+void *scatterwalk_map(struct scatter_walk *walk)
 {
-	return crypto_kmap(scatterwalk_page(walk), out) +
+	return kmap_atomic(scatterwalk_page(walk)) +
 	       offset_in_page(walk->offset);
 }
 EXPORT_SYMBOL_GPL(scatterwalk_map);
@@ -83,9 +83,9 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
 		if (len_this_page > nbytes)
 			len_this_page = nbytes;
 
-		vaddr = scatterwalk_map(walk, out);
+		vaddr = scatterwalk_map(walk);
 		memcpy_dir(buf, vaddr, len_this_page, out);
-		scatterwalk_unmap(vaddr, out);
+		scatterwalk_unmap(vaddr);
 
 		scatterwalk_advance(walk, len_this_page);
 
diff --git a/crypto/shash.c b/crypto/shash.c
index 9100912716ae..21fc12e2378f 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -281,10 +281,10 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 	if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
 		void *data;
 
-		data = crypto_kmap(sg_page(sg), 0);
+		data = kmap_atomic(sg_page(sg));
 		err = crypto_shash_digest(desc, data + offset, nbytes,
 					  req->result);
-		crypto_kunmap(data, 0);
+		kunmap_atomic(data);
 		crypto_yield(desc->flags);
 	} else
 		err = crypto_shash_init(desc) ?:
@@ -420,9 +420,9 @@ static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
 
 		desc->flags = hdesc->flags;
 
-		data = crypto_kmap(sg_page(sg), 0);
+		data = kmap_atomic(sg_page(sg));
 		err = crypto_shash_digest(desc, data + offset, nbytes, out);
-		crypto_kunmap(data, 0);
+		kunmap_atomic(data);
 		crypto_yield(desc->flags);
 		goto out;
 	}
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 4fd95a323beb..3744d2a642df 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -25,28 +25,6 @@
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 
-static inline enum km_type crypto_kmap_type(int out)
-{
-	enum km_type type;
-
-	if (in_softirq())
-		type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0;
-	else
-		type = out * (KM_USER1 - KM_USER0) + KM_USER0;
-
-	return type;
-}
-
-static inline void *crypto_kmap(struct page *page, int out)
-{
-	return kmap_atomic(page, crypto_kmap_type(out));
-}
-
-static inline void crypto_kunmap(void *vaddr, int out)
-{
-	kunmap_atomic(vaddr, crypto_kmap_type(out));
-}
-
 static inline void crypto_yield(u32 flags)
 {
 	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
@@ -121,15 +99,15 @@ static inline struct page *scatterwalk_page(struct scatter_walk *walk)
 	return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
 }
 
-static inline void scatterwalk_unmap(void *vaddr, int out)
+static inline void scatterwalk_unmap(void *vaddr)
 {
-	crypto_kunmap(vaddr, out);
+	kunmap_atomic(vaddr);
 }
 
 void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
 void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
 			    size_t nbytes, int out);
-void *scatterwalk_map(struct scatter_walk *walk, int out);
+void *scatterwalk_map(struct scatter_walk *walk);
 void scatterwalk_done(struct scatter_walk *walk, int out, int more);
 
 void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
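
Note that the "int out" argument survives where it still carries data-direction
information (scatterwalk_copychunks(), scatterwalk_done()); it is dropped only
where it merely selected a kmap slot. A hypothetical caller of the narrowed
scatterwalk_map()/scatterwalk_unmap() interface, loosely modelled on the ccm.c
hunk above (the function name, "n" and the processing step are illustrative):

#include <crypto/scatterwalk.h>

/* Illustrative only: map the current scatterlist page, use it, move on. */
static void consume_chunk(struct scatter_walk *walk, unsigned int n)
{
	u8 *vaddr = scatterwalk_map(walk);	/* was scatterwalk_map(walk, 0) */

	/* ... operate on the n mapped bytes at vaddr ... */

	scatterwalk_unmap(vaddr);		/* was scatterwalk_unmap(vaddr, 0) */
	scatterwalk_advance(walk, n);
}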