author		Guennadi Liakhovetski <g.liakhovetski@gmx.de>	2010-12-22 06:02:15 -0500
committer	Chris Ball <cjb@laptop.org>	2011-01-08 23:52:29 -0500
commit		93173054f2979de41b1912b19f0b57edfb35fcdc (patch)
tree		14c2872048dde7e15d386f1211836c86b1b6c69f /drivers/mmc/host
parent		e0bc6ff8b8d5c066d978d23e690d5599db4cb2b3 (diff)
mmc: tmio_mmc: implement a bounce buffer for unaligned DMA
For example, with SDIO WLAN cards, some transfers happen with buffers at
odd addresses, whereas the SH-Mobile DMA engine requires even addresses
for SDHI. This patch extends the tmio driver with a bounce buffer, which
is used for single-entry scatter-gather lists for both sending and
receiving. If we ever encounter unaligned transfers with multi-element
sg lists, this patch will have to be extended. For now it simply falls
back to PIO in this and other unsupported cases.
Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Samuel Ortiz <sameo@linux.intel.com>
Signed-off-by: Chris Ball <cjb@laptop.org>
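
To make the alignment rule from the commit message concrete, here is a minimal, self-contained sketch (plain user-space C, not driver code; the helper name and the 2-byte SDHI alignment value are assumptions for illustration) of the per-element decision the patch performs: an aligned buffer is handed to DMA directly, an unaligned start address is copied through an aligned bounce buffer, and an unaligned length forces the PIO fallback.

#include <stdio.h>

/* Assumed 2-byte alignment requirement, matching the "even addresses"
 * constraint described above. */
#define ALIGNMENT_SHIFT	1
#define ALIGN_MASK	((1u << ALIGNMENT_SHIFT) - 1)

enum xfer_mode { XFER_DMA, XFER_DMA_BOUNCE, XFER_PIO };

/* Hypothetical helper mirroring the patch's per-sg-element checks:
 * an odd length cannot be fixed by copying, so fall back to PIO;
 * an odd start offset can be handled via an aligned bounce buffer. */
static enum xfer_mode pick_mode(unsigned int offset, unsigned int length)
{
	if (length & ALIGN_MASK)
		return XFER_PIO;
	if (offset & ALIGN_MASK)
		return XFER_DMA_BOUNCE;
	return XFER_DMA;
}

int main(void)
{
	printf("offset 4, len 512 -> %d (direct DMA)\n", pick_mode(4, 512));
	printf("offset 3, len 512 -> %d (bounce buffer)\n", pick_mode(3, 512));
	printf("offset 4, len 511 -> %d (PIO fallback)\n", pick_mode(4, 511));
	return 0;
}
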
Diffstat (limited to 'drivers/mmc/host')
-rw-r--r--	drivers/mmc/host/tmio_mmc.c	89
1 file changed, 83 insertions, 6 deletions
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e04c032abb1c..595b7b3f160d 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -111,6 +111,8 @@
 		sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
 	} while (0)
 
+/* This is arbitrary, just noone needed any higher alignment yet */
+#define MAX_ALIGN 4
 
 struct tmio_mmc_host {
 	void __iomem *ctl;
@@ -127,6 +129,7 @@ struct tmio_mmc_host {
 
 	/* pio related stuff */
 	struct scatterlist *sg_ptr;
+	struct scatterlist *sg_orig;
 	unsigned int sg_len;
 	unsigned int sg_off;
 
@@ -139,9 +142,13 @@ struct tmio_mmc_host {
 	struct tasklet_struct dma_issue;
 #ifdef CONFIG_TMIO_MMC_DMA
 	unsigned int dma_sglen;
+	u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
+	struct scatterlist bounce_sg;
 #endif
 };
 
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
+
 static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
 {
 	return readw(host->ctl + (addr << host->bus_shift));
@@ -180,6 +187,7 @@ static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
 {
 	host->sg_len = data->sg_len;
 	host->sg_ptr = data->sg;
+	host->sg_orig = data->sg;
 	host->sg_off = 0;
 }
 
@@ -438,6 +446,8 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 	if (data->flags & MMC_DATA_READ) {
 		if (!host->chan_rx)
 			disable_mmc_irqs(host, TMIO_MASK_READOP);
+		else
+			tmio_check_bounce_buffer(host);
 		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
 			host->mrq);
 	} else {
@@ -529,8 +539,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 		if (!host->chan_rx)
 			enable_mmc_irqs(host, TMIO_MASK_READOP);
 	} else {
-		struct dma_chan *chan = host->chan_tx;
-		if (!chan)
+		if (!host->chan_tx)
 			enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
 		else
 			tasklet_schedule(&host->dma_issue);
@@ -612,6 +621,16 @@ out:
 }
 
 #ifdef CONFIG_TMIO_MMC_DMA
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
+{
+	if (host->sg_ptr == &host->bounce_sg) {
+		unsigned long flags;
+		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
+		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
+		tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+	}
+}
+
 static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
 {
 #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
@@ -634,11 +653,35 @@ static void tmio_dma_complete(void *arg)
 
 static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 {
-	struct scatterlist *sg = host->sg_ptr;
+	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_rx;
+	struct mfd_cell *cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
 	dma_cookie_t cookie;
-	int ret;
+	int ret, i;
+	bool aligned = true, multiple = true;
+	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
+	for_each_sg(sg, sg_tmp, host->sg_len, i) {
+		if (sg_tmp->offset & align)
+			aligned = false;
+		if (sg_tmp->length & align) {
+			multiple = false;
+			break;
+		}
+	}
+
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+			  align >= MAX_ALIGN)) || !multiple)
+		goto pio;
+
+	/* The only sg element can be unaligned, use our bounce buffer then */
+	if (!aligned) {
+		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+		host->sg_ptr = &host->bounce_sg;
+		sg = host->sg_ptr;
+	}
 
 	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
 	if (ret > 0) {
@@ -661,6 +704,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
 		__func__, host->sg_len, ret, cookie, host->mrq);
 
+pio:
 	if (!desc) {
 		/* DMA failed, fall back to PIO */
 		if (ret >= 0)
@@ -684,11 +728,39 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 
 static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 {
-	struct scatterlist *sg = host->sg_ptr;
+	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_tx;
+	struct mfd_cell *cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
 	dma_cookie_t cookie;
-	int ret;
+	int ret, i;
+	bool aligned = true, multiple = true;
+	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
+	for_each_sg(sg, sg_tmp, host->sg_len, i) {
+		if (sg_tmp->offset & align)
+			aligned = false;
+		if (sg_tmp->length & align) {
+			multiple = false;
+			break;
+		}
+	}
+
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+			  align >= MAX_ALIGN)) || !multiple)
+		goto pio;
+
+	/* The only sg element can be unaligned, use our bounce buffer then */
+	if (!aligned) {
+		unsigned long flags;
+		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
+		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
+		tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+		host->sg_ptr = &host->bounce_sg;
+		sg = host->sg_ptr;
+	}
 
 	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
 	if (ret > 0) {
@@ -709,6 +781,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
 		__func__, host->sg_len, ret, cookie, host->mrq);
 
+pio:
 	if (!desc) {
 		/* DMA failed, fall back to PIO */
 		if (ret >= 0)
@@ -822,6 +895,10 @@ static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
 	}
 }
 #else
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
+{
+}
+
 static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
 			       struct mmc_data *data)
 {