summaryrefslogtreecommitdiffstats
path: root/crypto
diff options
context:
space:
mode:
author    Herbert Xu <herbert@gondor.apana.org.au>    2014-05-21 08:56:12 -0400
committer Herbert Xu <herbert@gondor.apana.org.au>    2014-05-21 08:56:12 -0400
commit    75ecb231ff45b54afa9f4ec9137965c3c00868f4 (patch)
tree      eb185e64a01a81de14fdc3e380f7273b09bf76a4 /crypto
parent    0118a552137506a68ac062981967d8b5147e6028 (diff)
crypto: hash - Add real ahash walk interface
Although the existing hash walk interface has already been used by a number of ahash crypto drivers, it turns out that none of them were really asynchronous. They were all essentially polling for completion. That's why nobody has noticed until now that the walk interface couldn't work with a real asynchronous driver since the memory is mapped using kmap_atomic. As we now have a use-case for a real ahash implementation on x86, this patch creates a minimal ahash walk interface. Basically it just calls kmap instead of kmap_atomic and does away with the crypto_yield call. Real ahash crypto drivers don't need to yield since by definition they won't be hogging the CPU. Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/ahash.c | 41
1 file changed, 36 insertions(+), 5 deletions(-)
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 6e7223392e80..f2a5d8f656ff 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -15,6 +15,7 @@
15 15
16#include <crypto/internal/hash.h> 16#include <crypto/internal/hash.h>
17#include <crypto/scatterwalk.h> 17#include <crypto/scatterwalk.h>
18#include <linux/bug.h>
18#include <linux/err.h> 19#include <linux/err.h>
19#include <linux/kernel.h> 20#include <linux/kernel.h>
20#include <linux/module.h> 21#include <linux/module.h>
@@ -46,7 +47,10 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
46 unsigned int nbytes = min(walk->entrylen, 47 unsigned int nbytes = min(walk->entrylen,
47 ((unsigned int)(PAGE_SIZE)) - offset); 48 ((unsigned int)(PAGE_SIZE)) - offset);
48 49
49 walk->data = kmap_atomic(walk->pg); 50 if (walk->flags & CRYPTO_ALG_ASYNC)
51 walk->data = kmap(walk->pg);
52 else
53 walk->data = kmap_atomic(walk->pg);
50 walk->data += offset; 54 walk->data += offset;
51 55
52 if (offset & alignmask) { 56 if (offset & alignmask) {
@@ -93,8 +97,16 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
93 return nbytes; 97 return nbytes;
94 } 98 }
95 99
96 kunmap_atomic(walk->data); 100 if (walk->flags & CRYPTO_ALG_ASYNC)
97 crypto_yield(walk->flags); 101 kunmap(walk->pg);
102 else {
103 kunmap_atomic(walk->data);
104 /*
105 * The may sleep test only makes sense for sync users.
106 * Async users don't need to sleep here anyway.
107 */
108 crypto_yield(walk->flags);
109 }
98 110
99 if (err) 111 if (err)
100 return err; 112 return err;
@@ -124,12 +136,31 @@ int crypto_hash_walk_first(struct ahash_request *req,
124 136
125 walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); 137 walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
126 walk->sg = req->src; 138 walk->sg = req->src;
127 walk->flags = req->base.flags; 139 walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
128 140
129 return hash_walk_new_entry(walk); 141 return hash_walk_new_entry(walk);
130} 142}
131EXPORT_SYMBOL_GPL(crypto_hash_walk_first); 143EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
132 144
145int crypto_ahash_walk_first(struct ahash_request *req,
146 struct crypto_hash_walk *walk)
147{
148 walk->total = req->nbytes;
149
150 if (!walk->total)
151 return 0;
152
153 walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
154 walk->sg = req->src;
155 walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
156 walk->flags |= CRYPTO_ALG_ASYNC;
157
158 BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);
159
160 return hash_walk_new_entry(walk);
161}
162EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
163
133int crypto_hash_walk_first_compat(struct hash_desc *hdesc, 164int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
134 struct crypto_hash_walk *walk, 165 struct crypto_hash_walk *walk,
135 struct scatterlist *sg, unsigned int len) 166 struct scatterlist *sg, unsigned int len)
@@ -141,7 +172,7 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
141 172
142 walk->alignmask = crypto_hash_alignmask(hdesc->tfm); 173 walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
143 walk->sg = sg; 174 walk->sg = sg;
144 walk->flags = hdesc->flags; 175 walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;
145 176
146 return hash_walk_new_entry(walk); 177 return hash_walk_new_entry(walk);
147} 178}