aboutsummaryrefslogtreecommitdiffstats
path: root/crypto
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2010-05-19 00:13:07 -0400
committerHerbert Xu <herbert@gondor.apana.org.au>2010-05-19 00:13:07 -0400
commitbf06099db18a1244957076e51847c644cfe46808 (patch)
tree80a4e17f939e4dfaf5b2c3b7180c1bd700401831 /crypto
parenta8f1a05292db8b410be47fa905669672011f0343 (diff)
crypto: skcipher - Add ablkcipher_walk interfaces
These are akin to the blkcipher_walk helpers. The main differences in the async variant are: 1) Only physical walking is supported. We can't hold on to kmap mappings across the async operation to support virtual ablkcipher_walk operations anyway. 2) Bounce buffers used for async mode need to be persistent and freed at a later point in time when the async op completes. Therefore we maintain a list of writeback buffers and require that the ablkcipher_walk user call the 'complete' operation so we can copy the bounce buffers out to the real buffers and free up the bounce buffer chunks. These interfaces will be used by the new Niagara2 crypto driver. Signed-off-by: David S. Miller <davem@davemloft.net> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'crypto')
-rw-r--r--crypto/ablkcipher.c277
1 files changed, 277 insertions, 0 deletions
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index fe980dae1727..98a66103f4f2 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -24,10 +24,287 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/seq_file.h> 25#include <linux/seq_file.h>
26 26
27#include <crypto/scatterwalk.h>
28
27#include "internal.h" 29#include "internal.h"
28 30
29static const char *skcipher_default_geniv __read_mostly; 31static const char *skcipher_default_geniv __read_mostly;
30 32
/*
 * Bookkeeping for one bounce buffer used by a slow walk step.
 * Entries are queued on walk->buffers and copied out to the real
 * destination scatterlist by __ablkcipher_walk_complete().
 */
struct ablkcipher_buffer {
	struct list_head	entry;	/* link in walk->buffers */
	struct scatter_walk	dst;	/* output position to copy back to */
	unsigned int		len;	/* number of bytes at ->data */
	void			*data;	/* the bounce buffer itself */
};
39
/* Bits for ablkcipher_walk.flags. */
enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,	/* current step uses a bounce buffer */
};
43
/* Copy a queued bounce buffer out to its real destination scatterlist. */
static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}
48
49void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
50{
51 struct ablkcipher_buffer *p, *tmp;
52
53 list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
54 ablkcipher_buffer_write(p);
55 list_del(&p->entry);
56 kfree(p);
57 }
58}
59EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
60
/*
 * Snapshot the current output position into the bounce-buffer entry and
 * queue it for the copy-back done by __ablkcipher_walk_complete().
 */
static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}
67
/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	/* Start of the last page touched by [start, start + len). */
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	/* If the range crosses a page boundary, begin at that page
	 * instead, so the whole spot lies within a single page. */
	return max(start, end_page);
}
76
/*
 * Finish a slow step: advance the output walk past the bsize bytes that
 * were processed via a bounce buffer.  The data itself is copied back
 * later by __ablkcipher_walk_complete().  Returns bsize.
 */
static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
						unsigned int bsize)
{
	unsigned int n = bsize;

	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		/* NOTE(review): advances by n rather than len_this_page.
		 * This looks harmless because scatterwalk_start() below
		 * resets the walk whenever n > len_this_page, but the
		 * intent is worth confirming. */
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
	}

	return bsize;
}
96
/* Finish a fast (aligned, no bounce buffer) step: advance both walks. */
static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
						unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}
105
/* Forward declaration: sets up the next walk step (defined below). */
static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);
108
109int ablkcipher_walk_done(struct ablkcipher_request *req,
110 struct ablkcipher_walk *walk, int err)
111{
112 struct crypto_tfm *tfm = req->base.tfm;
113 unsigned int nbytes = 0;
114
115 if (likely(err >= 0)) {
116 unsigned int n = walk->nbytes - err;
117
118 if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
119 n = ablkcipher_done_fast(walk, n);
120 else if (WARN_ON(err)) {
121 err = -EINVAL;
122 goto err;
123 } else
124 n = ablkcipher_done_slow(walk, n);
125
126 nbytes = walk->total - n;
127 err = 0;
128 }
129
130 scatterwalk_done(&walk->in, 0, nbytes);
131 scatterwalk_done(&walk->out, 1, nbytes);
132
133err:
134 walk->total = nbytes;
135 walk->nbytes = nbytes;
136
137 if (nbytes) {
138 crypto_yield(req->base.flags);
139 return ablkcipher_walk_next(req, walk);
140 }
141
142 if (walk->iv != req->info)
143 memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
144 if (walk->iv_buffer)
145 kfree(walk->iv_buffer);
146
147 return err;
148}
149EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
150
151static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
152 struct ablkcipher_walk *walk,
153 unsigned int bsize,
154 unsigned int alignmask,
155 void **src_p, void **dst_p)
156{
157 unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
158 struct ablkcipher_buffer *p;
159 void *src, *dst, *base;
160 unsigned int n;
161
162 n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
163 n += (aligned_bsize * 3 - (alignmask + 1) +
164 (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));
165
166 p = kmalloc(n, GFP_ATOMIC);
167 if (!p)
168 ablkcipher_walk_done(req, walk, -ENOMEM);
169
170 base = p + 1;
171
172 dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
173 src = dst = ablkcipher_get_spot(dst, bsize);
174
175 p->len = bsize;
176 p->data = dst;
177
178 scatterwalk_copychunks(src, &walk->in, bsize, 0);
179
180 ablkcipher_queue_write(walk, p);
181
182 walk->nbytes = bsize;
183 walk->flags |= ABLKCIPHER_WALK_SLOW;
184
185 *src_p = src;
186 *dst_p = dst;
187
188 return 0;
189}
190
/*
 * Copy the request IV into a freshly allocated, alignment-respecting
 * bounce buffer and point walk->iv at the copy.  Used when the caller's
 * IV does not satisfy the algorithm's alignment mask.  Returns 0 or
 * -ENOMEM; the buffer is freed in ablkcipher_walk_done().
 */
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	/* Worst-case room for two aligned block-sized spots plus an
	 * ivsize spot, none straddling a page boundary. */
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	/* Skip two block-sized scratch spots, then place the IV in a
	 * non-page-straddling spot of its own. */
	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}
215
216static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
217 struct ablkcipher_walk *walk)
218{
219 walk->src.page = scatterwalk_page(&walk->in);
220 walk->src.offset = offset_in_page(walk->in.offset);
221 walk->dst.page = scatterwalk_page(&walk->out);
222 walk->dst.offset = offset_in_page(walk->out.offset);
223
224 return 0;
225}
226
/*
 * Set up the next walk step.  Chooses the fast path when at least one
 * full block is contiguous and aligned in both scatterlists; otherwise
 * falls back to a bounce buffer via ablkcipher_next_slow().  Since this
 * is a physical walk, slow-path buffers are translated to page/offset
 * pairs before returning.
 */
static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	/* Less than one block left is a caller error. */
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	/* Clamp to what is contiguous within the current sg entries. */
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	/* Slow path: report the bounce buffer as page + offset, as
	 * expected for a physical walk. */
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}
271
/*
 * Begin a walk: validate context, bounce the IV if it is misaligned,
 * initialise both scatter walks, and set up the first step.
 */
static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	/* Walking may sleep (allocation, yielding) — not IRQ-safe. */
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	walk->iv = req->info;
	/* A misaligned caller IV is copied into an aligned buffer that
	 * ablkcipher_walk_done() frees at the end of the walk. */
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}
299
300int ablkcipher_walk_phys(struct ablkcipher_request *req,
301 struct ablkcipher_walk *walk)
302{
303 walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
304 return ablkcipher_walk_first(req, walk);
305}
306EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
307
31static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, 308static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
32 unsigned int keylen) 309 unsigned int keylen)
33{ 310{