author     Vipin Kumar <vipin.kumar@st.com>          2012-03-14 02:17:18 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>  2012-03-26 20:00:24 -0400
commit     4774fb0a48aacfec206e6d54ecf58706f6a5320a (patch)
tree       073282f1f2514cb524019042fc081c62f5daec8c /drivers/mtd
parent     604e75444fa82cfdcba339e3bd4da1dfd6947539 (diff)
mtd: nand/fsmc: Add DMA support
The fsmc_nand driver uses the CPU to read from and write to the device. This is inefficient for two reasons:
- the CPU gets locked on the AHB bus while reading from the NAND
- the CPU is used unnecessarily when DMA can do the job

This patch adds support for accessing the device through DMA.

Signed-off-by: Vipin Kumar <vipin.kumar@st.com>
Reviewed-by: Viresh Kumar <viresh.kumar@st.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/mtd')
-rw-r--r--   drivers/mtd/nand/fsmc_nand.c | 168
1 file changed, 163 insertions(+), 5 deletions(-)
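The DMA path is opt-in: the probe below only installs fsmc_read_buf_dma()/fsmc_write_buf_dma() when the platform data selects USE_DMA_ACCESS, and read_dma_priv/write_dma_priv are the opaque cookies that filter() stashes in chan->private while the channels are requested. As a rough sketch of that wiring (assumptions, not code from this commit: everything in struct fsmc_nand_platform_data beyond the fields the probe dereferences here, and the dw_dma_slave channel descriptors), a board file might set up:

        #include <linux/mtd/fsmc.h>
        #include <linux/dw_dmac.h>

        /*
         * Hypothetical board-file fragment, not part of this commit.  The two
         * *_dma_priv pointers are opaque to fsmc_nand: filter() just stores
         * them in chan->private when dma_request_channel() walks the channels,
         * so they carry whatever slave configuration the platform's DMA
         * controller driver expects (dw_dma_slave is assumed here).
         */
        static struct dw_dma_slave nand_read_dma;
        static struct dw_dma_slave nand_write_dma;

        static struct fsmc_nand_platform_data nand_platform_data = {
                .width          = FSMC_NAND_BW16,  /* probe sets NAND_BUSWIDTH_16 from this */
                .mode           = USE_DMA_ACCESS,  /* USE_WORD_ACCESS keeps the PIO read/write_buf */
                .read_dma_priv  = &nand_read_dma,
                .write_dma_priv = &nand_write_dma,
        };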
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 81fc8e6b8cb8..d20a0c63251e 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -17,6 +17,10 @@
  */
 
 #include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -282,6 +286,11 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
  * @bank: Bank number for probed device.
  * @clk: Clock structure for FSMC.
  *
+ * @read_dma_chan: DMA channel for read access
+ * @write_dma_chan: DMA channel for write access to NAND
+ * @dma_access_complete: Completion structure
+ *
+ * @data_pa: NAND Physical port for Data.
  * @data_va: NAND port for Data.
  * @cmd_va: NAND port for Command.
  * @addr_va: NAND port for Address.
@@ -297,10 +306,17 @@ struct fsmc_nand_data {
         struct fsmc_eccplace *ecc_place;
         unsigned int bank;
         struct device *dev;
+        enum access_mode mode;
         struct clk *clk;
 
+        /* DMA related objects */
+        struct dma_chan *read_dma_chan;
+        struct dma_chan *write_dma_chan;
+        struct completion dma_access_complete;
+
         struct fsmc_nand_timings *dev_timings;
 
+        dma_addr_t data_pa;
         void __iomem *data_va;
         void __iomem *cmd_va;
         void __iomem *addr_va;
@@ -523,6 +539,77 @@ static int count_written_bits(uint8_t *buff, int size, int max_bits)
         return written_bits;
 }
 
+static void dma_complete(void *param)
+{
+        struct fsmc_nand_data *host = param;
+
+        complete(&host->dma_access_complete);
+}
+
+static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
+                enum dma_data_direction direction)
+{
+        struct dma_chan *chan;
+        struct dma_device *dma_dev;
+        struct dma_async_tx_descriptor *tx;
+        dma_addr_t dma_dst, dma_src, dma_addr;
+        dma_cookie_t cookie;
+        unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+        int ret;
+
+        if (direction == DMA_TO_DEVICE)
+                chan = host->write_dma_chan;
+        else if (direction == DMA_FROM_DEVICE)
+                chan = host->read_dma_chan;
+        else
+                return -EINVAL;
+
+        dma_dev = chan->device;
+        dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
+
+        if (direction == DMA_TO_DEVICE) {
+                dma_src = dma_addr;
+                dma_dst = host->data_pa;
+                flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP;
+        } else {
+                dma_src = host->data_pa;
+                dma_dst = dma_addr;
+                flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP;
+        }
+
+        tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
+                        len, flags);
+
+        if (!tx) {
+                dev_err(host->dev, "device_prep_dma_memcpy error\n");
+                dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
+                return -EIO;
+        }
+
+        tx->callback = dma_complete;
+        tx->callback_param = host;
+        cookie = tx->tx_submit(tx);
+
+        ret = dma_submit_error(cookie);
+        if (ret) {
+                dev_err(host->dev, "dma_submit_error %d\n", cookie);
+                return ret;
+        }
+
+        dma_async_issue_pending(chan);
+
+        ret =
+        wait_for_completion_interruptible_timeout(&host->dma_access_complete,
+                        msecs_to_jiffies(3000));
+        if (ret <= 0) {
+                chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+                dev_err(host->dev, "wait_for_completion_timeout\n");
+                return ret ? ret : -ETIMEDOUT;
+        }
+
+        return 0;
+}
+
 /*
  * fsmc_write_buf - write buffer to chip
  * @mtd: MTD device structure
@@ -570,6 +657,35 @@ static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
 }
 
 /*
+ * fsmc_read_buf_dma - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+        struct fsmc_nand_data *host;
+
+        host = container_of(mtd, struct fsmc_nand_data, mtd);
+        dma_xfer(host, buf, len, DMA_FROM_DEVICE);
+}
+
+/*
+ * fsmc_write_buf_dma - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
+                int len)
+{
+        struct fsmc_nand_data *host;
+
+        host = container_of(mtd, struct fsmc_nand_data, mtd);
+        dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
+}
+
+/*
  * fsmc_read_page_hwecc
  * @mtd: mtd info structure
  * @chip: nand chip info structure
@@ -731,6 +847,12 @@ static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
         return i;
 }
 
+static bool filter(struct dma_chan *chan, void *slave)
+{
+        chan->private = slave;
+        return true;
+}
+
 /*
  * fsmc_nand_probe - Probe function
  * @pdev: platform device structure
@@ -743,6 +865,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
         struct nand_chip *nand;
         struct fsmc_regs *regs;
         struct resource *res;
+        dma_cap_mask_t mask;
         int ret = 0;
         u32 pid;
         int i;
@@ -769,6 +892,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
                 return -ENOENT;
         }
 
+        host->data_pa = (dma_addr_t)res->start;
         host->data_va = devm_ioremap(&pdev->dev, res->start,
                         resource_size(res));
         if (!host->data_va) {
@@ -847,6 +971,11 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
         host->nr_partitions = pdata->nr_partitions;
         host->dev = &pdev->dev;
         host->dev_timings = pdata->nand_timings;
+        host->mode = pdata->mode;
+
+        if (host->mode == USE_DMA_ACCESS)
+                init_completion(&host->dma_access_complete);
+
         regs = host->regs_va;
 
         /* Link all private pointers */
@@ -871,13 +1000,31 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
         if (pdata->width == FSMC_NAND_BW16)
                 nand->options |= NAND_BUSWIDTH_16;
 
-        /*
-         * use customized (word by word) version of read_buf, write_buf if
-         * access_with_dev_width is reset supported
-         */
-        if (pdata->mode == USE_WORD_ACCESS) {
+        switch (host->mode) {
+        case USE_DMA_ACCESS:
+                dma_cap_zero(mask);
+                dma_cap_set(DMA_MEMCPY, mask);
+                host->read_dma_chan = dma_request_channel(mask, filter,
+                                pdata->read_dma_priv);
+                if (!host->read_dma_chan) {
+                        dev_err(&pdev->dev, "Unable to get read dma channel\n");
+                        goto err_req_read_chnl;
+                }
+                host->write_dma_chan = dma_request_channel(mask, filter,
+                                pdata->write_dma_priv);
+                if (!host->write_dma_chan) {
+                        dev_err(&pdev->dev, "Unable to get write dma channel\n");
+                        goto err_req_write_chnl;
+                }
+                nand->read_buf = fsmc_read_buf_dma;
+                nand->write_buf = fsmc_write_buf_dma;
+                break;
+
+        default:
+        case USE_WORD_ACCESS:
                 nand->read_buf = fsmc_read_buf;
                 nand->write_buf = fsmc_write_buf;
+                break;
         }
 
         fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16,
@@ -978,6 +1125,12 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
 
 err_probe:
 err_scan_ident:
+        if (host->mode == USE_DMA_ACCESS)
+                dma_release_channel(host->write_dma_chan);
+err_req_write_chnl:
+        if (host->mode == USE_DMA_ACCESS)
+                dma_release_channel(host->read_dma_chan);
+err_req_read_chnl:
         clk_disable(host->clk);
 err_clk_enable:
         clk_put(host->clk);
@@ -995,6 +1148,11 @@ static int fsmc_nand_remove(struct platform_device *pdev)
 
         if (host) {
                 nand_release(&host->mtd);
+
+                if (host->mode == USE_DMA_ACCESS) {
+                        dma_release_channel(host->write_dma_chan);
+                        dma_release_channel(host->read_dma_chan);
+                }
                 clk_disable(host->clk);
                 clk_put(host->clk);
         }