diff options
Diffstat (limited to 'crypto/ahash.c')
-rw-r--r-- | crypto/ahash.c | 147 |
1 file changed, 94 insertions, 53 deletions
diff --git a/crypto/ahash.c b/crypto/ahash.c index a92dc382f781..6e7223392e80 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c | |||
@@ -190,6 +190,75 @@ static inline unsigned int ahash_align_buffer_size(unsigned len, | |||
190 | return len + (mask & ~(crypto_tfm_ctx_alignment() - 1)); | 190 | return len + (mask & ~(crypto_tfm_ctx_alignment() - 1)); |
191 | } | 191 | } |
192 | 192 | ||
193 | static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) | ||
194 | { | ||
195 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
196 | unsigned long alignmask = crypto_ahash_alignmask(tfm); | ||
197 | unsigned int ds = crypto_ahash_digestsize(tfm); | ||
198 | struct ahash_request_priv *priv; | ||
199 | |||
200 | priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), | ||
201 | (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | ||
202 | GFP_KERNEL : GFP_ATOMIC); | ||
203 | if (!priv) | ||
204 | return -ENOMEM; | ||
205 | |||
206 | /* | ||
207 | * WARNING: Voodoo programming below! | ||
208 | * | ||
209 | * The code below is obscure and hard to understand, thus explanation | ||
210 | * is necessary. See include/crypto/hash.h and include/linux/crypto.h | ||
211 | * to understand the layout of structures used here! | ||
212 | * | ||
213 | * The code here will replace portions of the ORIGINAL request with | ||
214 | * pointers to new code and buffers so the hashing operation can store | ||
215 | * the result in aligned buffer. We will call the modified request | ||
216 | * an ADJUSTED request. | ||
217 | * | ||
218 | * The newly mangled request will look as such: | ||
219 | * | ||
220 | * req { | ||
221 | * .result = ADJUSTED[new aligned buffer] | ||
222 | * .base.complete = ADJUSTED[pointer to completion function] | ||
223 | * .base.data = ADJUSTED[*req (pointer to self)] | ||
224 | * .priv = ADJUSTED[new priv] { | ||
225 | * .result = ORIGINAL(result) | ||
226 | * .complete = ORIGINAL(base.complete) | ||
227 | * .data = ORIGINAL(base.data) | ||
228 | * } | ||
229 | */ | ||
230 | |||
231 | priv->result = req->result; | ||
232 | priv->complete = req->base.complete; | ||
233 | priv->data = req->base.data; | ||
234 | /* | ||
235 | * WARNING: We do not backup req->priv here! The req->priv | ||
236 | * is for internal use of the Crypto API and the | ||
237 | * user must _NOT_ _EVER_ depend on it's content! | ||
238 | */ | ||
239 | |||
240 | req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); | ||
241 | req->base.complete = cplt; | ||
242 | req->base.data = req; | ||
243 | req->priv = priv; | ||
244 | |||
245 | return 0; | ||
246 | } | ||
247 | |||
248 | static void ahash_restore_req(struct ahash_request *req) | ||
249 | { | ||
250 | struct ahash_request_priv *priv = req->priv; | ||
251 | |||
252 | /* Restore the original crypto request. */ | ||
253 | req->result = priv->result; | ||
254 | req->base.complete = priv->complete; | ||
255 | req->base.data = priv->data; | ||
256 | req->priv = NULL; | ||
257 | |||
258 | /* Free the req->priv.priv from the ADJUSTED request. */ | ||
259 | kzfree(priv); | ||
260 | } | ||
261 | |||
193 | static void ahash_op_unaligned_finish(struct ahash_request *req, int err) | 262 | static void ahash_op_unaligned_finish(struct ahash_request *req, int err) |
194 | { | 263 | { |
195 | struct ahash_request_priv *priv = req->priv; | 264 | struct ahash_request_priv *priv = req->priv; |
@@ -201,47 +270,37 @@ static void ahash_op_unaligned_finish(struct ahash_request *req, int err) | |||
201 | memcpy(priv->result, req->result, | 270 | memcpy(priv->result, req->result, |
202 | crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); | 271 | crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); |
203 | 272 | ||
204 | kzfree(priv); | 273 | ahash_restore_req(req); |
205 | } | 274 | } |
206 | 275 | ||
207 | static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) | 276 | static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) |
208 | { | 277 | { |
209 | struct ahash_request *areq = req->data; | 278 | struct ahash_request *areq = req->data; |
210 | struct ahash_request_priv *priv = areq->priv; | ||
211 | crypto_completion_t complete = priv->complete; | ||
212 | void *data = priv->data; | ||
213 | 279 | ||
214 | ahash_op_unaligned_finish(areq, err); | 280 | /* |
281 | * Restore the original request, see ahash_op_unaligned() for what | ||
282 | * goes where. | ||
283 | * | ||
284 | * The "struct ahash_request *req" here is in fact the "req.base" | ||
285 | * from the ADJUSTED request from ahash_op_unaligned(), thus as it | ||
286 | * is a pointer to self, it is also the ADJUSTED "req" . | ||
287 | */ | ||
215 | 288 | ||
216 | areq->base.complete = complete; | 289 | /* First copy req->result into req->priv.result */ |
217 | areq->base.data = data; | 290 | ahash_op_unaligned_finish(areq, err); |
218 | 291 | ||
219 | complete(&areq->base, err); | 292 | /* Complete the ORIGINAL request. */ |
293 | areq->base.complete(&areq->base, err); | ||
220 | } | 294 | } |
221 | 295 | ||
222 | static int ahash_op_unaligned(struct ahash_request *req, | 296 | static int ahash_op_unaligned(struct ahash_request *req, |
223 | int (*op)(struct ahash_request *)) | 297 | int (*op)(struct ahash_request *)) |
224 | { | 298 | { |
225 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
226 | unsigned long alignmask = crypto_ahash_alignmask(tfm); | ||
227 | unsigned int ds = crypto_ahash_digestsize(tfm); | ||
228 | struct ahash_request_priv *priv; | ||
229 | int err; | 299 | int err; |
230 | 300 | ||
231 | priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), | 301 | err = ahash_save_req(req, ahash_op_unaligned_done); |
232 | (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | 302 | if (err) |
233 | GFP_KERNEL : GFP_ATOMIC); | 303 | return err; |
234 | if (!priv) | ||
235 | return -ENOMEM; | ||
236 | |||
237 | priv->result = req->result; | ||
238 | priv->complete = req->base.complete; | ||
239 | priv->data = req->base.data; | ||
240 | |||
241 | req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); | ||
242 | req->base.complete = ahash_op_unaligned_done; | ||
243 | req->base.data = req; | ||
244 | req->priv = priv; | ||
245 | 304 | ||
246 | err = op(req); | 305 | err = op(req); |
247 | ahash_op_unaligned_finish(req, err); | 306 | ahash_op_unaligned_finish(req, err); |
@@ -290,19 +349,16 @@ static void ahash_def_finup_finish2(struct ahash_request *req, int err) | |||
290 | memcpy(priv->result, req->result, | 349 | memcpy(priv->result, req->result, |
291 | crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); | 350 | crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); |
292 | 351 | ||
293 | kzfree(priv); | 352 | ahash_restore_req(req); |
294 | } | 353 | } |
295 | 354 | ||
296 | static void ahash_def_finup_done2(struct crypto_async_request *req, int err) | 355 | static void ahash_def_finup_done2(struct crypto_async_request *req, int err) |
297 | { | 356 | { |
298 | struct ahash_request *areq = req->data; | 357 | struct ahash_request *areq = req->data; |
299 | struct ahash_request_priv *priv = areq->priv; | ||
300 | crypto_completion_t complete = priv->complete; | ||
301 | void *data = priv->data; | ||
302 | 358 | ||
303 | ahash_def_finup_finish2(areq, err); | 359 | ahash_def_finup_finish2(areq, err); |
304 | 360 | ||
305 | complete(data, err); | 361 | areq->base.complete(&areq->base, err); |
306 | } | 362 | } |
307 | 363 | ||
308 | static int ahash_def_finup_finish1(struct ahash_request *req, int err) | 364 | static int ahash_def_finup_finish1(struct ahash_request *req, int err) |
@@ -322,38 +378,23 @@ out: | |||
322 | static void ahash_def_finup_done1(struct crypto_async_request *req, int err) | 378 | static void ahash_def_finup_done1(struct crypto_async_request *req, int err) |
323 | { | 379 | { |
324 | struct ahash_request *areq = req->data; | 380 | struct ahash_request *areq = req->data; |
325 | struct ahash_request_priv *priv = areq->priv; | ||
326 | crypto_completion_t complete = priv->complete; | ||
327 | void *data = priv->data; | ||
328 | 381 | ||
329 | err = ahash_def_finup_finish1(areq, err); | 382 | err = ahash_def_finup_finish1(areq, err); |
330 | 383 | ||
331 | complete(data, err); | 384 | areq->base.complete(&areq->base, err); |
332 | } | 385 | } |
333 | 386 | ||
334 | static int ahash_def_finup(struct ahash_request *req) | 387 | static int ahash_def_finup(struct ahash_request *req) |
335 | { | 388 | { |
336 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 389 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
337 | unsigned long alignmask = crypto_ahash_alignmask(tfm); | 390 | int err; |
338 | unsigned int ds = crypto_ahash_digestsize(tfm); | ||
339 | struct ahash_request_priv *priv; | ||
340 | |||
341 | priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), | ||
342 | (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | ||
343 | GFP_KERNEL : GFP_ATOMIC); | ||
344 | if (!priv) | ||
345 | return -ENOMEM; | ||
346 | |||
347 | priv->result = req->result; | ||
348 | priv->complete = req->base.complete; | ||
349 | priv->data = req->base.data; | ||
350 | 391 | ||
351 | req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); | 392 | err = ahash_save_req(req, ahash_def_finup_done1); |
352 | req->base.complete = ahash_def_finup_done1; | 393 | if (err) |
353 | req->base.data = req; | 394 | return err; |
354 | req->priv = priv; | ||
355 | 395 | ||
356 | return ahash_def_finup_finish1(req, tfm->update(req)); | 396 | err = tfm->update(req); |
397 | return ahash_def_finup_finish1(req, err); | ||
357 | } | 398 | } |
358 | 399 | ||
359 | static int ahash_no_export(struct ahash_request *req, void *out) | 400 | static int ahash_no_export(struct ahash_request *req, void *out) |