about summary refs log tree commit diff stats
path: root/drivers/mmc
diff options
context:
space:
mode:
authorGuennadi Liakhovetski <g.liakhovetski@gmx.de>2011-03-07 05:33:11 -0500
committerChris Ball <cjb@laptop.org>2011-03-25 10:39:05 -0400
commit5f52c3552946eafa13f89c949449d16ce2b58b24 (patch)
tree193ab366bf12a261235a2f8c0ae4c5ae182b941d /drivers/mmc
parent51fc7b2ce74d31b7d9a6bd3c57bf6c1cc77e837d (diff)
mmc: tmio: use PIO for short transfers
This patch allows transferring of some requests in PIO and some in DMA mode and defaults to using DMA only for transfers longer than 8 bytes. This is especially useful with SDIO, which can have lots of 2- and 4-byte transfers, creating unnecessarily high overhead when executed in DMA. Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de> Acked-by: Paul Mundt <lethal@linux-sh.org> Signed-off-by: Chris Ball <cjb@laptop.org>
Diffstat (limited to 'drivers/mmc')
-rw-r--r--drivers/mmc/host/tmio_mmc.c33
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e88627b3337..32ab1452d97 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -100,6 +100,8 @@
100 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) 100 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
101#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) 101#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
102 102
103#define TMIO_MIN_DMA_LEN 8
104
103#define enable_mmc_irqs(host, i) \ 105#define enable_mmc_irqs(host, i) \
104 do { \ 106 do { \
105 u32 mask;\ 107 u32 mask;\
@@ -147,6 +149,7 @@ struct tmio_mmc_host {
147 struct platform_device *pdev; 149 struct platform_device *pdev;
148 150
149 /* DMA support */ 151 /* DMA support */
152 bool force_pio;
150 struct dma_chan *chan_rx; 153 struct dma_chan *chan_rx;
151 struct dma_chan *chan_tx; 154 struct dma_chan *chan_tx;
152 struct tasklet_struct dma_complete; 155 struct tasklet_struct dma_complete;
@@ -385,6 +388,7 @@ static void tmio_mmc_reset_work(struct work_struct *work)
385 host->cmd = NULL; 388 host->cmd = NULL;
386 host->data = NULL; 389 host->data = NULL;
387 host->mrq = NULL; 390 host->mrq = NULL;
391 host->force_pio = false;
388 392
389 spin_unlock_irqrestore(&host->lock, flags); 393 spin_unlock_irqrestore(&host->lock, flags);
390 394
@@ -404,6 +408,7 @@ tmio_mmc_finish_request(struct tmio_mmc_host *host)
404 host->mrq = NULL; 408 host->mrq = NULL;
405 host->cmd = NULL; 409 host->cmd = NULL;
406 host->data = NULL; 410 host->data = NULL;
411 host->force_pio = false;
407 412
408 cancel_delayed_work(&host->delayed_reset_work); 413 cancel_delayed_work(&host->delayed_reset_work);
409 414
@@ -485,7 +490,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
485 unsigned int count; 490 unsigned int count;
486 unsigned long flags; 491 unsigned long flags;
487 492
488 if (host->chan_tx || host->chan_rx) { 493 if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
489 pr_err("PIO IRQ in DMA mode!\n"); 494 pr_err("PIO IRQ in DMA mode!\n");
490 return; 495 return;
491 } else if (!data) { 496 } else if (!data) {
@@ -551,15 +556,11 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
551 */ 556 */
552 557
553 if (data->flags & MMC_DATA_READ) { 558 if (data->flags & MMC_DATA_READ) {
554 if (!host->chan_rx) 559 if (host->chan_rx && !host->force_pio)
555 disable_mmc_irqs(host, TMIO_MASK_READOP);
556 else
557 tmio_check_bounce_buffer(host); 560 tmio_check_bounce_buffer(host);
558 dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", 561 dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
559 host->mrq); 562 host->mrq);
560 } else { 563 } else {
561 if (!host->chan_tx)
562 disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
563 dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", 564 dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
564 host->mrq); 565 host->mrq);
565 } 566 }
@@ -583,7 +584,7 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
583 if (!data) 584 if (!data)
584 goto out; 585 goto out;
585 586
586 if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) { 587 if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
587 /* 588 /*
588 * Has all data been written out yet? Testing on SuperH showed, 589 * Has all data been written out yet? Testing on SuperH showed,
589 * that in most cases the first interrupt comes already with the 590 * that in most cases the first interrupt comes already with the
@@ -596,11 +597,12 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
596 disable_mmc_irqs(host, TMIO_STAT_DATAEND); 597 disable_mmc_irqs(host, TMIO_STAT_DATAEND);
597 tasklet_schedule(&host->dma_complete); 598 tasklet_schedule(&host->dma_complete);
598 } 599 }
599 } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) { 600 } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
600 disable_mmc_irqs(host, TMIO_STAT_DATAEND); 601 disable_mmc_irqs(host, TMIO_STAT_DATAEND);
601 tasklet_schedule(&host->dma_complete); 602 tasklet_schedule(&host->dma_complete);
602 } else { 603 } else {
603 tmio_mmc_do_data_irq(host); 604 tmio_mmc_do_data_irq(host);
605 disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
604 } 606 }
605out: 607out:
606 spin_unlock(&host->lock); 608 spin_unlock(&host->lock);
@@ -649,12 +651,12 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
649 */ 651 */
650 if (host->data && !cmd->error) { 652 if (host->data && !cmd->error) {
651 if (host->data->flags & MMC_DATA_READ) { 653 if (host->data->flags & MMC_DATA_READ) {
652 if (!host->chan_rx) 654 if (host->force_pio || !host->chan_rx)
653 enable_mmc_irqs(host, TMIO_MASK_READOP); 655 enable_mmc_irqs(host, TMIO_MASK_READOP);
654 else 656 else
655 tasklet_schedule(&host->dma_issue); 657 tasklet_schedule(&host->dma_issue);
656 } else { 658 } else {
657 if (!host->chan_tx) 659 if (host->force_pio || !host->chan_tx)
658 enable_mmc_irqs(host, TMIO_MASK_WRITEOP); 660 enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
659 else 661 else
660 tasklet_schedule(&host->dma_issue); 662 tasklet_schedule(&host->dma_issue);
@@ -810,6 +812,11 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
810 goto pio; 812 goto pio;
811 } 813 }
812 814
815 if (sg->length < TMIO_MIN_DMA_LEN) {
816 host->force_pio = true;
817 return;
818 }
819
813 disable_mmc_irqs(host, TMIO_STAT_RXRDY); 820 disable_mmc_irqs(host, TMIO_STAT_RXRDY);
814 821
815 /* The only sg element can be unaligned, use our bounce buffer then */ 822 /* The only sg element can be unaligned, use our bounce buffer then */
@@ -878,6 +885,11 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
878 goto pio; 885 goto pio;
879 } 886 }
880 887
888 if (sg->length < TMIO_MIN_DMA_LEN) {
889 host->force_pio = true;
890 return;
891 }
892
881 disable_mmc_irqs(host, TMIO_STAT_TXRQ); 893 disable_mmc_irqs(host, TMIO_STAT_TXRQ);
882 894
883 /* The only sg element can be unaligned, use our bounce buffer then */ 895 /* The only sg element can be unaligned, use our bounce buffer then */
@@ -1119,6 +1131,7 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
1119 1131
1120fail: 1132fail:
1121 host->mrq = NULL; 1133 host->mrq = NULL;
1134 host->force_pio = false;
1122 mrq->cmd->error = ret; 1135 mrq->cmd->error = ret;
1123 mmc_request_done(mmc, mrq); 1136 mmc_request_done(mmc, mrq);
1124} 1137}