author	Varadarajan Narayanan <varada@codeaurora.org>	2017-07-28 02:52:59 -0400
committer	Mark Brown <broonie@kernel.org>	2017-08-08 07:15:46 -0400
commit	5884e17ef3cb3dac2e83e466246cf033bfba0e2f
tree	36df15a12a35a74f7179cd36469077ac5b6b0ec4
parent	a841b24e627ca2d3b6a23ca00a4908bfe8f3a5ef
spi: qup: allow multiple DMA transactions per spi xfer
Much like the block mode changes, we are breaking up DMA transactions
into 64K chunks so we can reset the QUP engine.
Signed-off-by: Matthew McClintock <mmcclint@codeaurora.org>
Signed-off-by: Varadarajan Narayanan <varada@codeaurora.org>
Signed-off-by: Mark Brown <broonie@kernel.org>
-rw-r--r--	drivers/spi/spi-qup.c | 92
1 file changed, 66 insertions(+), 26 deletions(-)
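To make the 64K-chunking idea from the commit message concrete before the diff, here is a minimal userspace sketch of the same walk-and-advance pattern. It is not driver code: struct seg, MAX_CHUNK and get_nents_len() are hypothetical stand-ins for the kernel's struct scatterlist, the driver's per-pass transfer cap and the new spi_qup_sgl_get_nents_len(), and the map/issue/wait DMA step is reduced to a printf().

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a scatterlist entry: sg_dma_len() and
 * sg_next() become plain field accesses. */
struct seg {
	uint32_t len;
	struct seg *next;
};

#define MAX_CHUNK (64U * 1024)	/* assumed per-pass cap ("64K chunks") */

/* Same logic as spi_qup_sgl_get_nents_len(): count how many segments
 * fit in one chunk and return their total length, guarding against
 * u32 overflow as well as the limit. */
static uint32_t get_nents_len(const struct seg *s, uint32_t max,
			      uint32_t *nents)
{
	uint32_t total = 0;

	*nents = 0;
	for (; s; s = s->next) {
		if (total + s->len < total || total + s->len > max)
			break;
		total += s->len;
		(*nents)++;
	}
	return total;
}

int main(void)
{
	/* Three 32 KiB segments: together larger than one 64 KiB chunk. */
	struct seg c = { 32768, NULL };
	struct seg b = { 32768, &c };
	struct seg a = { 32768, &b };
	const struct seg *cur = &a;
	int pass = 0;

	do {	/* same shape as the new do/while loop in spi_qup_do_dma() */
		uint32_t nents;
		uint32_t len = get_nents_len(cur, MAX_CHUNK, &nents);

		printf("pass %d: %u segment(s), %u bytes\n", ++pass, nents, len);
		/* the driver would map, issue and await this chunk's DMA here */
		while (cur && nents--)	/* skip past the consumed segments */
			cur = cur->next;
	} while (cur);

	return 0;
}

With this input the first pass takes two segments (65536 bytes, exactly the cap) and a second pass takes the remaining 32768, mirroring how the loop in spi_qup_do_dma() below reprograms and re-runs the QUP engine once per chunk.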
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 1af3b41ac12d..3c2c2c0ed9ab 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -418,12 +418,35 @@ static void spi_qup_dma_terminate(struct spi_master *master,
 	dmaengine_terminate_all(master->dma_rx);
 }
 
+static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
+				     u32 *nents)
+{
+	struct scatterlist *sg;
+	u32 total = 0;
+
+	*nents = 0;
+
+	for (sg = sgl; sg; sg = sg_next(sg)) {
+		unsigned int len = sg_dma_len(sg);
+
+		/* check for overflow as well as limit */
+		if (((total + len) < total) || ((total + len) > max))
+			break;
+
+		total += len;
+		(*nents)++;
+	}
+
+	return total;
+}
+
 static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
 {
 	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
 	struct spi_master *master = spi->master;
 	struct spi_qup *qup = spi_master_get_devdata(master);
+	struct scatterlist *tx_sgl, *rx_sgl;
 	int ret;
 
 	if (xfer->rx_buf)
@@ -431,40 +454,57 @@ static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
 	else if (xfer->tx_buf)
 		tx_done = spi_qup_dma_done;
 
-	ret = spi_qup_io_config(spi, xfer);
-	if (ret)
-		return ret;
+	rx_sgl = xfer->rx_sg.sgl;
+	tx_sgl = xfer->tx_sg.sgl;
 
-	/* before issuing the descriptors, set the QUP to run */
-	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
-	if (ret) {
-		dev_warn(qup->dev, "%s(%d): cannot set RUN state\n",
-			 __func__, __LINE__);
-		return ret;
-	}
+	do {
+		u32 rx_nents, tx_nents;
+
+		if (rx_sgl)
+			qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
+				SPI_MAX_XFER, &rx_nents) / qup->w_size;
+		if (tx_sgl)
+			qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
+				SPI_MAX_XFER, &tx_nents) / qup->w_size;
+		if (!qup->n_words)
+			return -EIO;
 
-	if (xfer->rx_buf) {
-		ret = spi_qup_prep_sg(master, xfer->rx_sg.sgl,
-				      xfer->rx_sg.nents, DMA_DEV_TO_MEM,
-				      rx_done);
+		ret = spi_qup_io_config(spi, xfer);
 		if (ret)
 			return ret;
 
-		dma_async_issue_pending(master->dma_rx);
-	}
-
-	if (xfer->tx_buf) {
-		ret = spi_qup_prep_sg(master, xfer->tx_sg.sgl,
-				      xfer->tx_sg.nents, DMA_MEM_TO_DEV,
-				      tx_done);
-		if (ret)
+		/* before issuing the descriptors, set the QUP to run */
+		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+		if (ret) {
+			dev_warn(qup->dev, "cannot set RUN state\n");
 			return ret;
+		}
+		if (rx_sgl) {
+			ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
+					      DMA_DEV_TO_MEM, rx_done);
+			if (ret)
+				return ret;
+			dma_async_issue_pending(master->dma_rx);
+		}
 
-		dma_async_issue_pending(master->dma_tx);
-	}
+		if (tx_sgl) {
+			ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
+					      DMA_MEM_TO_DEV, tx_done);
+			if (ret)
+				return ret;
+
+			dma_async_issue_pending(master->dma_tx);
+		}
+
+		if (!wait_for_completion_timeout(&qup->done, timeout))
+			return -ETIMEDOUT;
+
+		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
+			;
+		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
+			;
 
-	if (!wait_for_completion_timeout(&qup->done, timeout))
-		return -ETIMEDOUT;
+	} while (rx_sgl || tx_sgl);
 
 	return 0;
 }