 drivers/crypto/s5p-sss.c | 127 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 82 insertions(+), 45 deletions(-)

diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index a668286d62cb..1b9da3dc799b 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -270,7 +270,7 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
         scatterwalk_done(&walk, out, 0);
 }
 
-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+static void s5p_sg_done(struct s5p_aes_dev *dev)
 {
         if (dev->sg_dst_cpy) {
                 dev_dbg(dev->dev,
@@ -281,8 +281,11 @@ static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
         }
         s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
         s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+}
 
-        /* holding a lock outside */
+/* Calls the completion. Cannot be called with dev->lock hold. */
+static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+{
         dev->req->base.complete(&dev->req->base, err);
         dev->busy = false;
 }
@@ -368,51 +371,44 @@ exit:
 }
 
 /*
- * Returns true if new transmitting (output) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_outdata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new transmitting (output) data is ready and its address+length
+ *    have to be written to device (by calling s5p_set_dma_outdata()).
  */
-static bool s5p_aes_tx(struct s5p_aes_dev *dev)
+static int s5p_aes_tx(struct s5p_aes_dev *dev)
 {
-        int err = 0;
-        bool ret = false;
+        int ret = 0;
 
         s5p_unset_outdata(dev);
 
         if (!sg_is_last(dev->sg_dst)) {
-                err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
-                if (err)
-                        s5p_aes_complete(dev, err);
-                else
-                        ret = true;
-        } else {
-                s5p_aes_complete(dev, err);
-
-                dev->busy = true;
-                tasklet_schedule(&dev->tasklet);
+                ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
+                if (!ret)
+                        ret = 1;
         }
 
         return ret;
 }
 
 /*
- * Returns true if new receiving (input) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_indata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new receiving (input) data is ready and its address+length
+ *    have to be written to device (by calling s5p_set_dma_indata()).
  */
-static bool s5p_aes_rx(struct s5p_aes_dev *dev)
+static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
 {
-        int err;
-        bool ret = false;
+        int ret = 0;
 
         s5p_unset_indata(dev);
 
         if (!sg_is_last(dev->sg_src)) {
-                err = s5p_set_indata(dev, sg_next(dev->sg_src));
-                if (err)
-                        s5p_aes_complete(dev, err);
-                else
-                        ret = true;
+                ret = s5p_set_indata(dev, sg_next(dev->sg_src));
+                if (!ret)
+                        ret = 1;
         }
 
         return ret;
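
The rewritten comment blocks above define a small tri-state contract for s5p_aes_tx()/s5p_aes_rx(): a negative -ERRNO when mapping the next scatterlist entry fails, 0 when the scatterlist is exhausted, and 1 when the caller still has to program a DMA block. The standalone sketch below only models that convention so it can be compiled and run on its own; struct toy_sg, toy_advance() and the simulated "mapping" are invented for illustration and are not driver code.

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct toy_sg {
        int remaining;          /* entries left after the current one */
        bool fail_mapping;      /* simulate a DMA mapping failure */
};

/* Mirrors the shape of s5p_aes_rx()/s5p_aes_tx() after this patch. */
static int toy_advance(struct toy_sg *sg)
{
        int ret = 0;

        if (sg->remaining > 0) {                /* i.e. !sg_is_last() */
                if (sg->fail_mapping)
                        return -ENOMEM;         /* mapping of new data failed */
                sg->remaining--;                /* "map" the next entry */
                ret = 1;                        /* caller must program DMA */
        }
        return ret;                             /* 0: no more data */
}

int main(void)
{
        struct toy_sg sg = { .remaining = 2, .fail_mapping = false };
        int ret;

        while ((ret = toy_advance(&sg)) == 1)
                puts("program DMA for the next scatterlist entry");

        printf("finished, ret=%d (0 means scatterlist exhausted)\n", ret);
        return 0;
}
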
@@ -422,33 +418,73 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
 {
         struct platform_device *pdev = dev_id;
         struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
-        bool set_dma_tx = false;
-        bool set_dma_rx = false;
+        int err_dma_tx = 0;
+        int err_dma_rx = 0;
+        bool tx_end = false;
         unsigned long flags;
         uint32_t status;
+        int err;
 
         spin_lock_irqsave(&dev->lock, flags);
 
+        /*
+         * Handle rx or tx interrupt. If there is still data (scatterlist did not
+         * reach end), then map next scatterlist entry.
+         * In case of such mapping error, s5p_aes_complete() should be called.
+         *
+         * If there is no more data in tx scatter list, call s5p_aes_complete()
+         * and schedule new tasklet.
+         */
         status = SSS_READ(dev, FCINTSTAT);
         if (status & SSS_FCINTSTAT_BRDMAINT)
-                set_dma_rx = s5p_aes_rx(dev);
-        if (status & SSS_FCINTSTAT_BTDMAINT)
-                set_dma_tx = s5p_aes_tx(dev);
+                err_dma_rx = s5p_aes_rx(dev);
+
+        if (status & SSS_FCINTSTAT_BTDMAINT) {
+                if (sg_is_last(dev->sg_dst))
+                        tx_end = true;
+                err_dma_tx = s5p_aes_tx(dev);
+        }
 
         SSS_WRITE(dev, FCINTPEND, status);
 
-        /*
-         * Writing length of DMA block (either receiving or transmitting)
-         * will start the operation immediately, so this should be done
-         * at the end (even after clearing pending interrupts to not miss the
-         * interrupt).
-         */
-        if (set_dma_tx)
-                s5p_set_dma_outdata(dev, dev->sg_dst);
-        if (set_dma_rx)
-                s5p_set_dma_indata(dev, dev->sg_src);
+        if (err_dma_rx < 0) {
+                err = err_dma_rx;
+                goto error;
+        }
+        if (err_dma_tx < 0) {
+                err = err_dma_tx;
+                goto error;
+        }
+
+        if (tx_end) {
+                s5p_sg_done(dev);
+
+                spin_unlock_irqrestore(&dev->lock, flags);
+
+                s5p_aes_complete(dev, 0);
+                dev->busy = true;
+                tasklet_schedule(&dev->tasklet);
+        } else {
+                /*
+                 * Writing length of DMA block (either receiving or
+                 * transmitting) will start the operation immediately, so this
+                 * should be done at the end (even after clearing pending
+                 * interrupts to not miss the interrupt).
+                 */
+                if (err_dma_tx == 1)
+                        s5p_set_dma_outdata(dev, dev->sg_dst);
+                if (err_dma_rx == 1)
+                        s5p_set_dma_indata(dev, dev->sg_src);
 
+                spin_unlock_irqrestore(&dev->lock, flags);
+        }
+
+        return IRQ_HANDLED;
+
+error:
+        s5p_sg_done(dev);
         spin_unlock_irqrestore(&dev->lock, flags);
+        s5p_aes_complete(dev, err);
 
         return IRQ_HANDLED;
 }
@@ -597,8 +633,9 @@ outdata_error:
         s5p_unset_indata(dev);
 
 indata_error:
-        s5p_aes_complete(dev, err);
+        s5p_sg_done(dev);
         spin_unlock_irqrestore(&dev->lock, flags);
+        s5p_aes_complete(dev, err);
 }
 
 static void s5p_tasklet_cb(unsigned long data)
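
Every path in the patch now follows the same ordering: s5p_sg_done() runs while dev->lock is still held, the spinlock is released, and only then is s5p_aes_complete() invoked, matching the new comment that the completion cannot be called with dev->lock held. The minimal userspace analogy below sketches why that ordering matters; it substitutes a pthread mutex for the driver's spinlock, and completion_cb()/finish_request() are invented names used only for this illustration, not driver code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for a crypto completion callback that re-enters the code
 * protected by 'lock' (for example to queue the next block). */
static void completion_cb(void)
{
        pthread_mutex_lock(&lock);      /* would never return if the caller
                                         * still held 'lock' */
        puts("callback re-entered and took the lock");
        pthread_mutex_unlock(&lock);
}

/* The fixed ordering: update internal state under the lock (the
 * s5p_sg_done() part), drop the lock, only then run the callback. */
static void finish_request(void)
{
        pthread_mutex_lock(&lock);
        /* ...free scatterlist copies, clear per-request state... */
        pthread_mutex_unlock(&lock);

        completion_cb();                /* safe: lock already released */
}

int main(void)
{
        finish_request();
        return 0;
}
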
