path: root/drivers/mtd
author     Linus Torvalds <torvalds@linux-foundation.org>  2012-08-01 19:41:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-08-01 19:41:07 -0400
commit     a6dc77254b3c3eb0307b372b77b861d5cd2ead08
tree       5770a808b0527eebeff43f16508ea8f03e459b58
parent     02a6ec6a24077ffda33b99cb193e8a536b90711d
parent     0e52d987c0b242fe3fe4c8e9732bd663cce0e50b
Merge branch 'dmaengine' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM DMA engine updates from Russell King:
 "This looks scary at first glance, but what it is is:

   - a rework of the sa11x0 DMA engine driver merged during the previous
     cycle, to extract a common set of helper functions for DMA engine
     implementations.

   - conversion of amba-pl08x.c to use these helper functions.

   - addition of OMAP DMA engine driver (using these helper functions),
     and conversion of some of the OMAP DMA users to use DMA engine.

  Nothing in the helper functions is ARM specific, so I hope that other
  implementations can consolidate some of their code by making use of
  these helpers.

  This has been sitting in linux-next most of the merge cycle, and has
  been tested by several OMAP folk.  I've tested it on sa11x0 platforms,
  and given it my best shot on my broken platforms which have the
  amba-pl08x controller.

  The last point is the addition to feature-removal-schedule.txt, which
  will have a merge conflict.  Between myself and TI, we're planning to
  remove the old TI DMA implementation next year."

Fix up trivial add/add conflicts in Documentation/feature-removal-schedule.txt
and drivers/dma/{Kconfig,Makefile}

* 'dmaengine' of git://git.linaro.org/people/rmk/linux-arm: (53 commits)
  ARM: 7481/1: OMAP2+: omap2plus_defconfig: enable OMAP DMA engine
  ARM: 7464/1: mmc: omap_hsmmc: ensure probe returns error if DMA channel request fails
  Add feature removal of old OMAP private DMA implementation
  mtd: omap2: remove private DMA API implementation
  mtd: omap2: add DMA engine support
  spi: omap2-mcspi: remove private DMA API implementation
  spi: omap2-mcspi: add DMA engine support
  ARM: omap: remove mmc platform data dma_mask and initialization
  mmc: omap: remove private DMA API implementation
  mmc: omap: add DMA engine support
  mmc: omap_hsmmc: remove private DMA API implementation
  mmc: omap_hsmmc: add DMA engine support
  dmaengine: omap: add support for cyclic DMA
  dmaengine: omap: add support for setting fi
  dmaengine: omap: add support for returning residue in tx_state method
  dmaengine: add OMAP DMA engine driver
  dmaengine: sa11x0-dma: add cyclic DMA support
  dmaengine: sa11x0-dma: fix DMA residue support
  dmaengine: PL08x: ensure all descriptors are freed when channel is released
  dmaengine: PL08x: get rid of write only pool_ctr and free_txd locking
  ...
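For the drivers/mtd part of this pull, the omap2 NAND driver switches from the private OMAP DMA calls (omap_set_dma_*_params(), omap_start_dma()) to the generic dmaengine slave API. The following is a minimal sketch of that flow, assuming a channel has already been obtained with dma_request_channel(); the names do_slave_xfer() and xfer_done() are illustrative and not part of the driver, and the GPMC prefetch setup the real driver performs between submit and issue is omitted:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>

static void xfer_done(void *data)
{
        /* runs from the DMA driver's completion context */
        complete((struct completion *) data);
}

static int do_slave_xfer(struct dma_chan *chan, void *buf, size_t len,
                         bool to_device, struct completion *comp)
{
        enum dma_data_direction dir = to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
        struct dma_async_tx_descriptor *tx;
        struct scatterlist sg;

        /* map the buffer for the DMA engine's device */
        sg_init_one(&sg, buf, len);
        if (!dma_map_sg(chan->device->dev, &sg, 1, dir))
                return -ENOMEM;

        /* build a slave descriptor for the mapped buffer */
        tx = dmaengine_prep_slave_sg(chan, &sg, 1,
                        to_device ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx) {
                dma_unmap_sg(chan->device->dev, &sg, 1, dir);
                return -EIO;
        }

        init_completion(comp);
        tx->callback = xfer_done;
        tx->callback_param = comp;
        dmaengine_submit(tx);           /* queue the descriptor on the channel */
        dma_async_issue_pending(chan);  /* kick the engine */
        wait_for_completion(comp);

        dma_unmap_sg(chan->device->dev, &sg, 1, dir);
        return 0;
}

This is the same prepare/submit/issue sequence that omap_nand_dma_transfer() adopts in the diff below, with the GPMC prefetch engine enabled between dmaengine_submit() and dma_async_issue_pending().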
Diffstat (limited to 'drivers/mtd')
 -rw-r--r--   drivers/mtd/nand/omap2.c | 106
 1 file changed, 56 insertions(+), 50 deletions(-)
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index d7f681d0c9b9..e9309b3659e7 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/platform_device.h>
+#include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/module.h>
@@ -18,6 +19,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
+#include <linux/omap-dma.h>
 #include <linux/io.h>
 #include <linux/slab.h>
 
@@ -123,7 +125,7 @@ struct omap_nand_info {
         int gpmc_cs;
         unsigned long phys_base;
         struct completion comp;
-        int dma_ch;
+        struct dma_chan *dma;
         int gpmc_irq;
         enum {
                 OMAP_NAND_IO_READ = 0, /* read */
@@ -336,12 +338,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 }
 
 /*
- * omap_nand_dma_cb: callback on the completion of dma transfer
- * @lch: logical channel
- * @ch_satuts: channel status
+ * omap_nand_dma_callback: callback on the completion of dma transfer
  * @data: pointer to completion data structure
  */
-static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
+static void omap_nand_dma_callback(void *data)
 {
         complete((struct completion *) data);
 }
@@ -358,17 +358,13 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 {
         struct omap_nand_info *info = container_of(mtd,
                                         struct omap_nand_info, mtd);
+        struct dma_async_tx_descriptor *tx;
         enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
                                                         DMA_FROM_DEVICE;
-        dma_addr_t dma_addr;
-        int ret;
+        struct scatterlist sg;
         unsigned long tim, limit;
-
-        /* The fifo depth is 64 bytes max.
-         * But configure the FIFO-threahold to 32 to get a sync at each frame
-         * and frame length is 32 bytes.
-         */
-        int buf_len = len >> 6;
+        unsigned n;
+        int ret;
 
         if (addr >= high_memory) {
                 struct page *p1;
@@ -382,40 +378,33 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
                 addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
         }
 
-        dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
-        if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
+        sg_init_one(&sg, addr, len);
+        n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
+        if (n == 0) {
                 dev_err(&info->pdev->dev,
                         "Couldn't DMA map a %d byte buffer\n", len);
                 goto out_copy;
         }
 
-        if (is_write) {
-            omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
-                                                info->phys_base, 0, 0);
-            omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
-                                                        dma_addr, 0, 0);
-            omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
-                                        0x10, buf_len, OMAP_DMA_SYNC_FRAME,
-                                        OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
-        } else {
-            omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
-                                                info->phys_base, 0, 0);
-            omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
-                                                        dma_addr, 0, 0);
-            omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
-                                        0x10, buf_len, OMAP_DMA_SYNC_FRAME,
-                                        OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
-        }
-        /* configure and start prefetch transfer */
+        tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
+                is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+        if (!tx)
+                goto out_copy_unmap;
+
+        tx->callback = omap_nand_dma_callback;
+        tx->callback_param = &info->comp;
+        dmaengine_submit(tx);
+
+        /* configure and start prefetch transfer */
         ret = gpmc_prefetch_enable(info->gpmc_cs,
                         PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
         if (ret)
                 /* PFPW engine is busy, use cpu copy method */
                 goto out_copy_unmap;
 
         init_completion(&info->comp);
-
-        omap_start_dma(info->dma_ch);
+        dma_async_issue_pending(info->dma);
 
         /* setup and start DMA using dma_addr */
         wait_for_completion(&info->comp);
@@ -427,11 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
         /* disable and stop the PFPW engine */
         gpmc_prefetch_reset(info->gpmc_cs);
 
-        dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
+        dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
         return 0;
 
 out_copy_unmap:
-        dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
+        dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
 out_copy:
         if (info->nand.options & NAND_BUSWIDTH_16)
                 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -1164,6 +1153,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
         struct omap_nand_platform_data *pdata;
         int err;
         int i, offset;
+        dma_cap_mask_t mask;
+        unsigned sig;
 
         pdata = pdev->dev.platform_data;
         if (pdata == NULL) {
@@ -1244,18 +1235,31 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
                 break;
 
         case NAND_OMAP_PREFETCH_DMA:
-                err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
-                                omap_nand_dma_cb, &info->comp, &info->dma_ch);
-                if (err < 0) {
-                        info->dma_ch = -1;
-                        dev_err(&pdev->dev, "DMA request failed!\n");
+                dma_cap_zero(mask);
+                dma_cap_set(DMA_SLAVE, mask);
+                sig = OMAP24XX_DMA_GPMC;
+                info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+                if (!info->dma) {
+                        dev_err(&pdev->dev, "DMA engine request failed\n");
+                        err = -ENXIO;
                         goto out_release_mem_region;
                 } else {
-                        omap_set_dma_dest_burst_mode(info->dma_ch,
-                                        OMAP_DMA_DATA_BURST_16);
-                        omap_set_dma_src_burst_mode(info->dma_ch,
-                                        OMAP_DMA_DATA_BURST_16);
-
+                        struct dma_slave_config cfg;
+                        int rc;
+
+                        memset(&cfg, 0, sizeof(cfg));
+                        cfg.src_addr = info->phys_base;
+                        cfg.dst_addr = info->phys_base;
+                        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+                        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+                        cfg.src_maxburst = 16;
+                        cfg.dst_maxburst = 16;
+                        rc = dmaengine_slave_config(info->dma, &cfg);
+                        if (rc) {
+                                dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
+                                        rc);
+                                goto out_release_mem_region;
+                        }
                         info->nand.read_buf = omap_read_buf_dma_pref;
                         info->nand.write_buf = omap_write_buf_dma_pref;
                 }
@@ -1358,6 +1362,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
         return 0;
 
 out_release_mem_region:
+        if (info->dma)
+                dma_release_channel(info->dma);
         release_mem_region(info->phys_base, NAND_IO_SIZE);
 out_free_info:
         kfree(info);
@@ -1373,8 +1379,8 @@ static int omap_nand_remove(struct platform_device *pdev)
         omap3_free_bch(&info->mtd);
 
         platform_set_drvdata(pdev, NULL);
-        if (info->dma_ch != -1)
-                omap_free_dma(info->dma_ch);
+        if (info->dma)
+                dma_release_channel(info->dma);
 
         if (info->gpmc_irq)
                 free_irq(info->gpmc_irq, info);
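For completeness, the channel setup that the probe hunk above introduces can be read in isolation as the sketch below. The helper name request_gpmc_dma() is illustrative and not part of the driver; the 4-byte access width and maxburst of 16 mirror the old OMAP_DMA_DATA_TYPE_S32 / OMAP_DMA_DATA_BURST_16 settings that the patch removes, and the matching dma_release_channel() is what the new out_release_mem_region path and omap_nand_remove() call:

#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/string.h>

/* Illustrative helper, not part of omap2.c; phys_base is the GPMC NAND data
 * register address that the driver keeps in info->phys_base. */
static struct dma_chan *request_gpmc_dma(unsigned long phys_base)
{
        dma_cap_mask_t mask;
        unsigned sig = OMAP24XX_DMA_GPMC;       /* DMA request line, as in the probe hunk */
        struct dma_slave_config cfg;
        struct dma_chan *chan;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
        if (!chan)
                return NULL;

        /* 32-bit accesses in bursts of 16, same as the old private-API setup */
        memset(&cfg, 0, sizeof(cfg));
        cfg.src_addr = phys_base;
        cfg.dst_addr = phys_base;
        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        cfg.src_maxburst = 16;
        cfg.dst_maxburst = 16;
        if (dmaengine_slave_config(chan, &cfg)) {
                dma_release_channel(chan);      /* release on config failure ... */
                return NULL;
        }
        return chan;                            /* ... and again on remove/error paths */
}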