author    Linus Torvalds <torvalds@linux-foundation.org>  2017-03-15 12:26:04 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-03-15 12:26:04 -0400
commit    defc7d752265c2d8d212aee9fb499243260883e9 (patch)
tree      8a6f1bfd8dc5c38ab350f96e08abe5a34e8d73ec
parent    ae50dfd61665086e617cc9e554a1285d52765670 (diff)
parent    28b62b1458685d8f68f67d9b2d511bf8fa32b746 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:

 - self-test failure of crc32c on powerpc

 - regressions of ecb(aes) when used with xts/lrw in s5p-sss

 - a number of bugs in the omap RNG driver

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: s5p-sss - Fix spinlock recursion on LRW(AES)
  hwrng: omap - Do not access INTMASK_REG on EIP76
  hwrng: omap - use devm_clk_get() instead of of_clk_get()
  hwrng: omap - write registers after enabling the clock
  crypto: s5p-sss - Fix completing crypto request in IRQ handler
  crypto: powerpc - Fix initialisation of crc32c context
-rw-r--r--  arch/powerpc/crypto/crc32c-vpmsum_glue.c |   2
-rw-r--r--  drivers/char/hw_random/omap-rng.c        |  16
-rw-r--r--  drivers/crypto/s5p-sss.c                 | 132
3 files changed, 100 insertions(+), 50 deletions(-)
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 9fa046d56eba..411994551afc 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
 {
 	u32 *key = crypto_tfm_ctx(tfm);
 
-	*key = 0;
+	*key = ~0;
 
 	return 0;
 }
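
Note on the crc32c hunk above: CRC-32C, like the other common CRC-32 variants, is defined with an all-ones initial value and a final inversion, so that leading zero bytes still affect the checksum; seeding the context with 0 is what broke the crypto self-test. A minimal software sketch of the idea, assuming a plain bit-by-bit implementation (crc32c_sw() is illustrative only, not the vpmsum code being patched):

#include <stddef.h>
#include <stdint.h>

/* Illustrative bit-by-bit CRC-32C using the reflected Castagnoli
 * polynomial 0x82F63B78 (not the accelerated vpmsum implementation).
 */
static uint32_t crc32c_sw(uint32_t crc, const uint8_t *buf, size_t len)
{
	int i;

	while (len--) {
		crc ^= *buf++;
		for (i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78 : crc >> 1;
	}
	return crc;
}

/* With crc seeded to 0, every leading 0x00 byte leaves crc at 0, so inputs
 * that differ only in their number of leading zeros collide.  Seeding with
 * ~0 (0xFFFFFFFF), as cra_init now does, avoids that; the crc32c digest is
 * then inverted again at finalisation.
 */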
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 3ad86fdf954e..b1ad12552b56 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -397,9 +397,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
 				irq, err);
 			return err;
 		}
-		omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
 
-		priv->clk = of_clk_get(pdev->dev.of_node, 0);
+		priv->clk = devm_clk_get(&pdev->dev, NULL);
 		if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
 		if (!IS_ERR(priv->clk)) {
@@ -408,6 +407,19 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
 				dev_err(&pdev->dev, "unable to enable the clk, "
 					"err = %d\n", err);
 		}
+
+		/*
+		 * On OMAP4, enabling the shutdown_oflo interrupt is
+		 * done in the interrupt mask register. There is no
+		 * such register on EIP76, and it's enabled by the
+		 * same bit in the control register
+		 */
+		if (priv->pdata->regs[RNG_INTMASK_REG])
+			omap_rng_write(priv, RNG_INTMASK_REG,
+				       RNG_SHUTDOWN_OFLO_MASK);
+		else
+			omap_rng_write(priv, RNG_CONTROL_REG,
+				       RNG_SHUTDOWN_OFLO_MASK);
 	}
 	return 0;
 }
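
Note on the omap-rng hunks above: the three fixes share one theme. The clock is now owned by the device (devm_clk_get() needs no clk_put() on error paths, unlike of_clk_get()), it is enabled before any register access, and the shutdown/overflow interrupt is unmasked through INTMASK_REG only where that register exists (EIP76 uses the same bit in CONTROL_REG instead). A minimal sketch of the "clock first, registers second" ordering, assuming a hypothetical driver (demo_rng_enable_hw() and the register offset are illustrative):

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Hypothetical probe helper: acquire and enable the clock, then write registers. */
static int demo_rng_enable_hw(struct platform_device *pdev, void __iomem *base)
{
	struct clk *clk;
	int err;

	clk = devm_clk_get(&pdev->dev, NULL);	/* lifetime tied to the device */
	if (IS_ERR(clk))
		return PTR_ERR(clk);		/* also propagates -EPROBE_DEFER */

	err = clk_prepare_enable(clk);
	if (err)
		return err;

	/* Register writes are only safe once the interface clock is running. */
	writel(0x1, base + 0x40);		/* 0x40: illustrative offset, not a real OMAP register */

	return 0;
}

The driver itself is more forgiving (it only bails out on -EPROBE_DEFER and otherwise continues without a clock), but the ordering constraint is the same.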
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index dce1af0ce85c..1b9da3dc799b 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -270,7 +270,7 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
 	scatterwalk_done(&walk, out, 0);
 }
 
-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+static void s5p_sg_done(struct s5p_aes_dev *dev)
 {
 	if (dev->sg_dst_cpy) {
 		dev_dbg(dev->dev,
@@ -281,8 +281,11 @@ static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
 	}
 	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
 	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+}
 
-	/* holding a lock outside */
+/* Calls the completion. Cannot be called with dev->lock hold. */
+static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+{
 	dev->req->base.complete(&dev->req->base, err);
 	dev->busy = false;
 }
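
Note on the split above: the old s5p_aes_complete() both freed the scatterlist copies and invoked the crypto completion callback while dev->lock was held; the callback can re-enter the driver (notably for lrw/xts built on ecb(aes)) and try to take the same lock, which is the reported spinlock recursion. After the split, s5p_sg_done() is safe under the lock and s5p_aes_complete() must be called without it. A sketch of the intended calling pattern (demo_finish_request() is a hypothetical caller, not driver code):

/* Hypothetical caller illustrating the locking discipline. */
static void demo_finish_request(struct s5p_aes_dev *dev, int err)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	s5p_sg_done(dev);			/* touches only driver-private state */
	spin_unlock_irqrestore(&dev->lock, flags);

	s5p_aes_complete(dev, err);		/* callback may re-enter the driver */
}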
@@ -368,51 +371,44 @@ exit:
 }
 
 /*
- * Returns true if new transmitting (output) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_outdata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new transmitting (output) data is ready and its address+length
+ *    have to be written to device (by calling s5p_set_dma_outdata()).
  */
-static bool s5p_aes_tx(struct s5p_aes_dev *dev)
+static int s5p_aes_tx(struct s5p_aes_dev *dev)
 {
-	int err = 0;
-	bool ret = false;
+	int ret = 0;
 
 	s5p_unset_outdata(dev);
 
 	if (!sg_is_last(dev->sg_dst)) {
-		err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
-		if (err)
-			s5p_aes_complete(dev, err);
-		else
-			ret = true;
-	} else {
-		s5p_aes_complete(dev, err);
-
-		dev->busy = true;
-		tasklet_schedule(&dev->tasklet);
+		ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
+		if (!ret)
+			ret = 1;
 	}
 
 	return ret;
 }
 
 /*
- * Returns true if new receiving (input) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_indata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new receiving (input) data is ready and its address+length
+ *    have to be written to device (by calling s5p_set_dma_indata()).
  */
-static bool s5p_aes_rx(struct s5p_aes_dev *dev)
+static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
 {
-	int err;
-	bool ret = false;
+	int ret = 0;
 
 	s5p_unset_indata(dev);
 
 	if (!sg_is_last(dev->sg_src)) {
-		err = s5p_set_indata(dev, sg_next(dev->sg_src));
-		if (err)
-			s5p_aes_complete(dev, err);
-		else
-			ret = true;
+		ret = s5p_set_indata(dev, sg_next(dev->sg_src));
+		if (!ret)
+			ret = 1;
 	}
 
 	return ret;
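
Note on the return-value change above: s5p_aes_tx() and s5p_aes_rx() no longer call s5p_aes_complete() themselves (that now happens in the interrupt handler, outside the lock) and instead report three outcomes: a negative errno when mapping the next scatterlist entry fails, 0 when the scatterlist is exhausted, and 1 when a new entry is mapped and the DMA block still has to be programmed. A hypothetical consumer of the rx side, mirroring how the handler below uses these values:

/* Hypothetical consumer of the -errno / 0 / 1 convention. */
static int demo_consume_rx(struct s5p_aes_dev *dev)
{
	int ret = s5p_aes_rx(dev);

	if (ret < 0)
		return ret;				/* propagate the mapping error */
	if (ret == 1)
		s5p_set_dma_indata(dev, dev->sg_src);	/* program the next DMA block */

	return 0;					/* no more input data */
}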
@@ -422,33 +418,73 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
 {
 	struct platform_device *pdev = dev_id;
 	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
-	bool set_dma_tx = false;
-	bool set_dma_rx = false;
+	int err_dma_tx = 0;
+	int err_dma_rx = 0;
+	bool tx_end = false;
 	unsigned long flags;
 	uint32_t status;
+	int err;
 
 	spin_lock_irqsave(&dev->lock, flags);
 
+	/*
+	 * Handle rx or tx interrupt. If there is still data (scatterlist did not
+	 * reach end), then map next scatterlist entry.
+	 * In case of such mapping error, s5p_aes_complete() should be called.
+	 *
+	 * If there is no more data in tx scatter list, call s5p_aes_complete()
+	 * and schedule new tasklet.
+	 */
 	status = SSS_READ(dev, FCINTSTAT);
 	if (status & SSS_FCINTSTAT_BRDMAINT)
-		set_dma_rx = s5p_aes_rx(dev);
-	if (status & SSS_FCINTSTAT_BTDMAINT)
-		set_dma_tx = s5p_aes_tx(dev);
+		err_dma_rx = s5p_aes_rx(dev);
+
+	if (status & SSS_FCINTSTAT_BTDMAINT) {
+		if (sg_is_last(dev->sg_dst))
+			tx_end = true;
+		err_dma_tx = s5p_aes_tx(dev);
+	}
 
 	SSS_WRITE(dev, FCINTPEND, status);
 
-	/*
-	 * Writing length of DMA block (either receiving or transmitting)
-	 * will start the operation immediately, so this should be done
-	 * at the end (even after clearing pending interrupts to not miss the
-	 * interrupt).
-	 */
-	if (set_dma_tx)
-		s5p_set_dma_outdata(dev, dev->sg_dst);
-	if (set_dma_rx)
-		s5p_set_dma_indata(dev, dev->sg_src);
+	if (err_dma_rx < 0) {
+		err = err_dma_rx;
+		goto error;
+	}
+	if (err_dma_tx < 0) {
+		err = err_dma_tx;
+		goto error;
+	}
+
+	if (tx_end) {
+		s5p_sg_done(dev);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		s5p_aes_complete(dev, 0);
+		dev->busy = true;
+		tasklet_schedule(&dev->tasklet);
+	} else {
+		/*
+		 * Writing length of DMA block (either receiving or
+		 * transmitting) will start the operation immediately, so this
+		 * should be done at the end (even after clearing pending
+		 * interrupts to not miss the interrupt).
+		 */
+		if (err_dma_tx == 1)
+			s5p_set_dma_outdata(dev, dev->sg_dst);
+		if (err_dma_rx == 1)
+			s5p_set_dma_indata(dev, dev->sg_src);
 
+		spin_unlock_irqrestore(&dev->lock, flags);
+	}
+
+	return IRQ_HANDLED;
+
+error:
+	s5p_sg_done(dev);
 	spin_unlock_irqrestore(&dev->lock, flags);
+	s5p_aes_complete(dev, err);
 
 	return IRQ_HANDLED;
 }
@@ -597,8 +633,9 @@ outdata_error:
 	s5p_unset_indata(dev);
 
 indata_error:
-	s5p_aes_complete(dev, err);
+	s5p_sg_done(dev);
 	spin_unlock_irqrestore(&dev->lock, flags);
+	s5p_aes_complete(dev, err);
 }
 
 static void s5p_tasklet_cb(unsigned long data)
@@ -805,8 +842,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
 		dev_warn(dev, "feed control interrupt is not available.\n");
 		goto err_irq;
 	}
-	err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
-			       IRQF_SHARED, pdev->name, pdev);
+	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
+					s5p_aes_interrupt, IRQF_ONESHOT,
+					pdev->name, pdev);
 	if (err < 0) {
 		dev_warn(dev, "feed control interrupt is not available.\n");
 		goto err_irq;
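
Note on the probe change above: requesting the feed-control interrupt with devm_request_threaded_irq(), a NULL primary handler and IRQF_ONESHOT moves s5p_aes_interrupt() into a kernel thread; IRQF_ONESHOT is mandatory with a NULL primary handler and keeps the interrupt masked until the thread function returns, and the thread (process) context is where calling the request's completion callback is reasonable. A generic sketch of the same request shape, with hypothetical demo_* names:

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t demo_irq_thread(int irq, void *dev_id)
{
	/* Runs in process context; completion callbacks and sleeping are fine here. */
	return IRQ_HANDLED;
}

static int demo_request_irq(struct device *dev, unsigned int irq, void *cookie)
{
	return devm_request_threaded_irq(dev, irq,
					 NULL,			/* no hard-IRQ handler */
					 demo_irq_thread,	/* threaded handler */
					 IRQF_ONESHOT,		/* required with a NULL handler */
					 dev_name(dev), cookie);
}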