author		Apelete Seketeli <apelete@seketeli.net>	2014-07-21 00:37:44 -0400
committer	Ulf Hansson <ulf.hansson@linaro.org>	2014-09-09 07:58:59 -0400
commit		7ca27a6f80a4042666a28977ff8ee3aa527c6cd4 (patch)
tree		0a4ed5285c9c81216ad90c9c8b1b2784259c29ad
parent		f629ba2c04c949aa62c85b48c0b73b915b98defc (diff)
mmc: jz4740: add dma infrastructure for data transfers
Until now the MMC driver for the JZ4740 SoC relied on PIO mode only for
data transfers.

This patch allows the use of DMA for data transfers in addition to PIO
mode by relying on the DMA engine framework. DMA transfer performance
might be further improved by taking advantage of the asynchronous
request capability of the MMC framework.

Signed-off-by: Apelete Seketeli <apelete@seketeli.net>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
-rw-r--r--	drivers/mmc/host/jz4740_mmc.c	174
 1 file changed, 166 insertions(+), 8 deletions(-)
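Note: the commit message above leaves the MMC core's asynchronous request
support as a possible follow-up. The sketch below is not part of this patch;
it only illustrates what such a follow-up could look like, assuming the
pre_req()/post_req() callbacks of struct mmc_host_ops as they existed in
kernels of this vintage (the extra is_first_req argument was dropped in later
kernels), and reusing jz4740_mmc_get_dma_dir() and jz4740_mmc_dma_unmap()
from the diff. The names jz4740_mmc_pre_request() and
jz4740_mmc_post_request() are hypothetical.

/* Sketch only: pre-map the scatterlist of the next request while the
 * current one is still in flight, so jz4740_mmc_start_dma_transfer()
 * would not have to call dma_map_sg() on the hot path.  A complete
 * implementation would also record the mapped entry count and teach
 * jz4740_mmc_start_dma_transfer() to skip the mapping when
 * data->host_cookie is already set.
 */
static void jz4740_mmc_pre_request(struct mmc_host *mmc,
				   struct mmc_request *mrq,
				   bool is_first_req)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct dma_chan *chan;

	if (!host->use_dma || !data)
		return;

	chan = (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx;

	if (dma_map_sg(chan->device->dev, data->sg, data->sg_len,
		       jz4740_mmc_get_dma_dir(data)) > 0)
		data->host_cookie = 1;	/* mark the request as pre-mapped */
}

static void jz4740_mmc_post_request(struct mmc_host *mmc,
				    struct mmc_request *mrq, int err)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!host->use_dma || !data || !data->host_cookie)
		return;

	jz4740_mmc_dma_unmap(host, data);	/* undo the pre-mapping */
	data->host_cookie = 0;
}

/* These would then be wired into the driver's mmc_host_ops:
 *	.pre_req	= jz4740_mmc_pre_request,
 *	.post_req	= jz4740_mmc_post_request,
 */

The gain would come from overlapping the cache maintenance done by
dma_map_sg() for the next request with the DMA transfer of the current one,
which is what the MMC core's asynchronous request path is designed to exploit.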
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 537d6c7a5ae4..049b13353917 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -30,7 +30,9 @@
 #include <asm/mach-jz4740/gpio.h>
 #include <asm/cacheflush.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 
+#include <asm/mach-jz4740/dma.h>
 #include <asm/mach-jz4740/jz4740_mmc.h>
 
 #define JZ_REG_MMC_STRPCL	0x00
@@ -122,6 +124,7 @@ struct jz4740_mmc_host {
 	int card_detect_irq;
 
 	void __iomem *base;
+	struct resource *mem_res;
 	struct mmc_request *req;
 	struct mmc_command *cmd;
 
@@ -136,8 +139,138 @@ struct jz4740_mmc_host {
 	struct timer_list timeout_timer;
 	struct sg_mapping_iter miter;
 	enum jz4740_mmc_state state;
+
+	/* DMA support */
+	struct dma_chan *dma_rx;
+	struct dma_chan *dma_tx;
+	bool use_dma;
+	int sg_len;
+
+/* The DMA trigger level is 8 words, that is to say, the DMA read
+ * trigger is when data words in MSC_RXFIFO is >= 8 and the DMA write
+ * trigger is when data words in MSC_TXFIFO is < 8.
+ */
+#define JZ4740_MMC_FIFO_HALF_SIZE 8
 };
 
+/*----------------------------------------------------------------------------*/
+/* DMA infrastructure */
+
+static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
+{
+	if (!host->use_dma)
+		return;
+
+	dma_release_channel(host->dma_tx);
+	dma_release_channel(host->dma_rx);
+}
+
+static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
+{
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	host->dma_tx = dma_request_channel(mask, NULL, host);
+	if (!host->dma_tx) {
+		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
+		return -ENODEV;
+	}
+
+	host->dma_rx = dma_request_channel(mask, NULL, host);
+	if (!host->dma_rx) {
+		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
+		goto free_master_write;
+	}
+
+	return 0;
+
+free_master_write:
+	dma_release_channel(host->dma_tx);
+	return -ENODEV;
+}
+
+static inline int jz4740_mmc_get_dma_dir(struct mmc_data *data)
+{
+	return (data->flags & MMC_DATA_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+}
+
+static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
+				 struct mmc_data *data)
+{
+	struct dma_chan *chan;
+	enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
+
+	if (dir == DMA_TO_DEVICE)
+		chan = host->dma_tx;
+	else
+		chan = host->dma_rx;
+
+	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
+					 struct mmc_data *data)
+{
+	struct dma_chan *chan;
+	struct dma_async_tx_descriptor *desc;
+	struct dma_slave_config conf = {
+		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
+		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
+	};
+	enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
+
+	if (dir == DMA_TO_DEVICE) {
+		conf.direction = DMA_MEM_TO_DEV;
+		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
+		conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
+		chan = host->dma_tx;
+	} else {
+		conf.direction = DMA_DEV_TO_MEM;
+		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
+		conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE;
+		chan = host->dma_rx;
+	}
+
+	host->sg_len = dma_map_sg(chan->device->dev,
+				  data->sg,
+				  data->sg_len,
+				  dir);
+
+	if (host->sg_len == 0) {
+		dev_err(mmc_dev(host->mmc),
+			"Failed to map scatterlist for DMA operation\n");
+		return -EINVAL;
+	}
+
+	dmaengine_slave_config(chan, &conf);
+	desc = dmaengine_prep_slave_sg(chan,
+				       data->sg,
+				       host->sg_len,
+				       conf.direction,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc) {
+		dev_err(mmc_dev(host->mmc),
+			"Failed to allocate DMA %s descriptor",
+			conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
+		goto dma_unmap;
+	}
+
+	dmaengine_submit(desc);
+	dma_async_issue_pending(chan);
+
+	return 0;
+
+dma_unmap:
+	jz4740_mmc_dma_unmap(host, data);
+	return -ENOMEM;
+}
+
+/*----------------------------------------------------------------------------*/
+
 static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
 		unsigned int irq, bool enabled)
 {
@@ -442,6 +575,8 @@ static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
 			cmdat |= JZ_MMC_CMDAT_WRITE;
 		if (cmd->data->flags & MMC_DATA_STREAM)
 			cmdat |= JZ_MMC_CMDAT_STREAM;
+		if (host->use_dma)
+			cmdat |= JZ_MMC_CMDAT_DMA_EN;
 
 		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
 		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
@@ -474,6 +609,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
 	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
 	struct mmc_command *cmd = host->req->cmd;
 	struct mmc_request *req = host->req;
+	struct mmc_data *data = cmd->data;
 	bool timeout = false;
 
 	if (cmd->error)
@@ -484,23 +620,32 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
 		if (cmd->flags & MMC_RSP_PRESENT)
 			jz4740_mmc_read_response(host, cmd);
 
-		if (!cmd->data)
+		if (!data)
 			break;
 
 		jz_mmc_prepare_data_transfer(host);
 
 	case JZ4740_MMC_STATE_TRANSFER_DATA:
-		if (cmd->data->flags & MMC_DATA_READ)
-			timeout = jz4740_mmc_read_data(host, cmd->data);
+		if (host->use_dma) {
+			/* Use DMA if enabled, data transfer direction was
+			 * defined before in jz_mmc_prepare_data_transfer().
+			 */
+			timeout = jz4740_mmc_start_dma_transfer(host, data);
+			data->bytes_xfered = data->blocks * data->blksz;
+		} else if (data->flags & MMC_DATA_READ)
+			/* If DMA is not enabled, rely on data flags
+			 * to establish data transfer direction.
+			 */
+			timeout = jz4740_mmc_read_data(host, data);
 		else
-			timeout = jz4740_mmc_write_data(host, cmd->data);
+			timeout = jz4740_mmc_write_data(host, data);
 
 		if (unlikely(timeout)) {
 			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
 			break;
 		}
 
-		jz4740_mmc_transfer_check_state(host, cmd->data);
+		jz4740_mmc_transfer_check_state(host, data);
 
 		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
 		if (unlikely(timeout)) {
@@ -757,7 +902,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
 	struct mmc_host *mmc;
 	struct jz4740_mmc_host *host;
 	struct jz4740_mmc_platform_data *pdata;
-	struct resource *res;
 
 	pdata = pdev->dev.platform_data;
 
@@ -784,10 +928,11 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
 		goto err_free_host;
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	host->base = devm_ioremap_resource(&pdev->dev, res);
+	host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
 	if (IS_ERR(host->base)) {
 		ret = PTR_ERR(host->base);
+		dev_err(&pdev->dev, "Failed to ioremap base memory\n");
 		goto err_free_host;
 	}
 
@@ -834,6 +979,10 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
 	/* It is not important when it times out, it just needs to timeout. */
 	set_timer_slack(&host->timeout_timer, HZ);
 
+	host->use_dma = true;
+	if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
+		host->use_dma = false;
+
 	platform_set_drvdata(pdev, host);
 	ret = mmc_add_host(mmc);
 
@@ -843,6 +992,10 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
 	}
 	dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n");
 
+	dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
+		 host->use_dma ? "DMA" : "PIO",
+		 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
+
 	return 0;
 
 err_free_irq:
@@ -850,6 +1003,8 @@ err_free_irq:
 err_free_gpios:
 	jz4740_mmc_free_gpios(pdev);
 err_gpio_bulk_free:
+	if (host->use_dma)
+		jz4740_mmc_release_dma_channels(host);
 	jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
 err_free_host:
 	mmc_free_host(mmc);
@@ -872,6 +1027,9 @@ static int jz4740_mmc_remove(struct platform_device *pdev)
 	jz4740_mmc_free_gpios(pdev);
 	jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
 
+	if (host->use_dma)
+		jz4740_mmc_release_dma_channels(host);
+
 	mmc_free_host(host->mmc);
 
 	return 0;