author		Bastian Hecht <hechtb@googlemail.com>	2012-10-19 06:15:35 -0400
committer	Artem Bityutskiy <artem.bityutskiy@linux.intel.com>	2012-11-15 08:37:50 -0500
commit		83738d87e3a0a4096e1419a65b8228130d183df6 (patch)
tree		c0a837ba214d479f4f1fa26476aef11d7da58751 /drivers/mtd
parent		e8a9d8f31c592eea89f1b0d3fd425e7a96944e88 (diff)
mtd: sh_flctl: Add DMA capability
The code probes whether DMA channels can be allocated and tears them
down again on removal or on probe failure. When channels are available,
they are used to transfer the data part of a page (not the ECC); on any
DMA failure the driver falls back to PIO mode. Based on Guennadi
Liakhovetski's code from the sh_mmcif driver.

Signed-off-by: Bastian Hecht <hechtb@gmail.com>
Reviewed-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
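The fallback shape used in read_fiforeg()/write_ec_fiforeg() below is:
attempt DMA only when channels were successfully requested and the
transfer is at least 32 bytes, and treat anything but a positive return
as the cue to run the polling loop instead. A minimal, self-contained
userspace sketch of that control flow (xfer_dma() and xfer_pio() are
hypothetical stand-ins, not the driver's actual FIFO helpers):

        #include <stdbool.h>
        #include <stdio.h>

        static bool dma_ready = true;   /* set when channel setup succeeded */

        /* Hypothetical DMA path: returns > 0 on success, <= 0 on failure. */
        static int xfer_dma(const void *buf, int len)
        {
                return -1;      /* pretend descriptor setup failed */
        }

        /* Hypothetical polling path: always works, just slower. */
        static void xfer_pio(const void *buf, int len)
        {
                printf("PIO transfer of %d bytes\n", len);
        }

        static void transfer(const void *buf, int len)
        {
                /* Same shape as read_fiforeg(): DMA only for >= 32 bytes. */
                if (dma_ready && len >= 32 && xfer_dma(buf, len) > 0)
                        return; /* DMA success */

                xfer_pio(buf, len);
        }

        int main(void)
        {
                char page[64] = { 0 };

                transfer(page, sizeof(page));
                return 0;
        }

The 32-byte cutoff keeps short transfers on the PIO path, presumably
because DMA setup overhead would dominate there.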
Diffstat (limited to 'drivers/mtd')
-rw-r--r--	drivers/mtd/nand/sh_flctl.c | 173
1 file changed, 171 insertions(+), 2 deletions(-)
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 78d18c0f132f..6dc0369aa44b 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -23,11 +23,15 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/completion.h>
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 
@@ -106,6 +110,84 @@ static void wait_completion(struct sh_flctl *flctl)
 	writeb(0x0, FLTRCR(flctl));
 }
 
+static void flctl_dma_complete(void *param)
+{
+	struct sh_flctl *flctl = param;
+
+	complete(&flctl->dma_complete);
+}
+
+static void flctl_release_dma(struct sh_flctl *flctl)
+{
+	if (flctl->chan_fifo0_rx) {
+		dma_release_channel(flctl->chan_fifo0_rx);
+		flctl->chan_fifo0_rx = NULL;
+	}
+	if (flctl->chan_fifo0_tx) {
+		dma_release_channel(flctl->chan_fifo0_tx);
+		flctl->chan_fifo0_tx = NULL;
+	}
+}
+
+static void flctl_setup_dma(struct sh_flctl *flctl)
+{
+	dma_cap_mask_t mask;
+	struct dma_slave_config cfg;
+	struct platform_device *pdev = flctl->pdev;
+	struct sh_flctl_platform_data *pdata = pdev->dev.platform_data;
+	int ret;
+
+	if (!pdata)
+		return;
+
+	if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
+		return;
+
+	/* We can only either use DMA for both Tx and Rx or not use it at all */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
+				(void *)pdata->slave_id_fifo0_tx);
+	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
+		flctl->chan_fifo0_tx);
+
+	if (!flctl->chan_fifo0_tx)
+		return;
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.slave_id = pdata->slave_id_fifo0_tx;
+	cfg.direction = DMA_MEM_TO_DEV;
+	cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
+	cfg.src_addr = 0;
+	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
+	if (ret < 0)
+		goto err;
+
+	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
+				(void *)pdata->slave_id_fifo0_rx);
+	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
+		flctl->chan_fifo0_rx);
+
+	if (!flctl->chan_fifo0_rx)
+		goto err;
+
+	cfg.slave_id = pdata->slave_id_fifo0_rx;
+	cfg.direction = DMA_DEV_TO_MEM;
+	cfg.dst_addr = 0;
+	cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
+	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
+	if (ret < 0)
+		goto err;
+
+	init_completion(&flctl->dma_complete);
+
+	return;
+
+err:
+	flctl_release_dma(flctl);
+}
+
 static void set_addr(struct mtd_info *mtd, int column, int page_addr)
 {
 	struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -261,6 +343,70 @@ static void wait_wecfifo_ready(struct sh_flctl *flctl)
 	timeout_error(flctl, __func__);
 }
 
+static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
+					int len, enum dma_data_direction dir)
+{
+	struct dma_async_tx_descriptor *desc = NULL;
+	struct dma_chan *chan;
+	enum dma_transfer_direction tr_dir;
+	dma_addr_t dma_addr;
+	dma_cookie_t cookie = -EINVAL;
+	uint32_t reg;
+	int ret;
+
+	if (dir == DMA_FROM_DEVICE) {
+		chan = flctl->chan_fifo0_rx;
+		tr_dir = DMA_DEV_TO_MEM;
+	} else {
+		chan = flctl->chan_fifo0_tx;
+		tr_dir = DMA_MEM_TO_DEV;
+	}
+
+	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
+
+	if (dma_addr)
+		desc = dmaengine_prep_slave_single(chan, dma_addr, len,
+			tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+	if (desc) {
+		reg = readl(FLINTDMACR(flctl));
+		reg |= DREQ0EN;
+		writel(reg, FLINTDMACR(flctl));
+
+		desc->callback = flctl_dma_complete;
+		desc->callback_param = flctl;
+		cookie = dmaengine_submit(desc);
+
+		dma_async_issue_pending(chan);
+	} else {
+		/* DMA failed, fall back to PIO */
+		flctl_release_dma(flctl);
+		dev_warn(&flctl->pdev->dev,
+			 "DMA failed, falling back to PIO\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	ret = wait_for_completion_timeout(&flctl->dma_complete,
+				msecs_to_jiffies(3000));
+
+	if (ret <= 0) {
+		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+		dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
+	}
+
+out:
+	reg = readl(FLINTDMACR(flctl));
+	reg &= ~DREQ0EN;
+	writel(reg, FLINTDMACR(flctl));
+
+	dma_unmap_single(chan->device->dev, dma_addr, len, dir);
+
+	/* ret > 0 is success */
+	return ret;
+}
+
 static void read_datareg(struct sh_flctl *flctl, int offset)
 {
 	unsigned long data;
@@ -279,11 +425,20 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
 
 	len_4align = (rlen + 3) / 4;
 
+	/* initiate DMA transfer */
+	if (flctl->chan_fifo0_rx && rlen >= 32 &&
+		flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
+			goto convert;	/* DMA success */
+
+	/* do polling transfer */
 	for (i = 0; i < len_4align; i++) {
 		wait_rfifo_ready(flctl);
 		buf[i] = readl(FLDTFIFO(flctl));
-		buf[i] = be32_to_cpu(buf[i]);
 	}
+
+convert:
+	for (i = 0; i < len_4align; i++)
+		buf[i] = be32_to_cpu(buf[i]);
 }
 
 static enum flctl_ecc_res_t read_ecfiforeg
@@ -325,9 +480,19 @@ static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
 	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
 
 	len_4align = (rlen + 3) / 4;
+
+	for (i = 0; i < len_4align; i++)
+		buf[i] = cpu_to_be32(buf[i]);
+
+	/* initiate DMA transfer */
+	if (flctl->chan_fifo0_tx && rlen >= 32 &&
+		flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
+			return;	/* DMA success */
+
+	/* do polling transfer */
 	for (i = 0; i < len_4align; i++) {
 		wait_wecfifo_ready(flctl);
-		writel(cpu_to_be32(buf[i]), FLECFIFO(flctl));
+		writel(buf[i], FLECFIFO(flctl));
 	}
 }
 
@@ -925,6 +1090,8 @@ static int __devinit flctl_probe(struct platform_device *pdev)
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_resume(&pdev->dev);
 
+	flctl_setup_dma(flctl);
+
 	ret = nand_scan_ident(flctl_mtd, 1, NULL);
 	if (ret)
 		goto err_chip;
@@ -942,6 +1109,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
 	return 0;
 
 err_chip:
+	flctl_release_dma(flctl);
 	pm_runtime_disable(&pdev->dev);
 	free_irq(irq, flctl);
 err_flste:
@@ -955,6 +1123,7 @@ static int __devexit flctl_remove(struct platform_device *pdev)
 {
 	struct sh_flctl *flctl = platform_get_drvdata(pdev);
 
+	flctl_release_dma(flctl);
 	nand_release(&flctl->mtd);
 	pm_runtime_disable(&pdev->dev);
 	free_irq(platform_get_irq(pdev, 0), flctl);