author		Viresh Kumar <viresh.kumar@st.com>	2011-08-05 06:02:42 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2011-08-25 10:03:39 -0400
commit		0a2356572b1910cc977f4ccf3c9ee1ecab08327a (patch)
tree		a2e5c45222c72d2f9d5b89e8e25e9e463fafb319 /drivers/dma
parent		036f05fd6dcdb6a6b9e55703cb663112fa4c4e42 (diff)
dmaengine/amba-pl08x: Pass flow controller information with slave channel data
On SPEAr platforms there is at least one peripheral, JPEG, which can act as the flow controller for a DMA transfer. The DMA controller driver currently has no support for configurations where the peripheral is the flow controller. This patch adds a device_fc field to struct pl08x_channel_data; it is used only for slave transfers and is ignored for mem2mem transfers.

Signed-off-by: Viresh Kumar <viresh.kumar@st.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
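For context, a minimal sketch of how a platform might mark such a channel, assuming a SPEAr-style board file; the bus_id, request-signal numbers, and variable name below are illustrative, not taken from an actual board file:

/*
 * Hypothetical slave channel descriptor for a JPEG RX channel. Setting
 * device_fc tells the pl08x driver to program the peripheral (not the
 * DMAC) as flow controller, so the peripheral ends the transfer.
 */
static struct pl08x_channel_data jpeg_rx_channel = {
	.bus_id		= "from_jpeg",	/* assumed channel name */
	.min_signal	= 2,		/* assumed DMA request line */
	.max_signal	= 2,
	.muxval		= 0,
	.periph_buses	= PL08X_AHB1,	/* peripheral sits on AHB1 */
	.device_fc	= true,		/* peripheral is flow controller */
};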
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/amba-pl08x.c	61
1 file changed, 53 insertions(+), 8 deletions(-)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index f70aa574c58f..a59c3c47286c 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -66,11 +66,6 @@
  * after the final transfer signalled by LBREQ or LSREQ. The DMAC
  * will then move to the next LLI entry.
  *
- * Only the former works sanely with scatter lists, so we only implement
- * the DMAC flow control method. However, peripherals which use the LBREQ
- * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
- * these hardware restrictions prevents them from using scatter DMA.
- *
  * Global TODO:
  * - Break out common code from arch/arm/mach-s3c64xx and share
  */
@@ -618,6 +613,49 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 			sbus == &bd.srcbus ? "src" : "dst");
 
 		/*
+		 * Zero length is only allowed if all these requirements are met:
+		 * - flow controller is the peripheral
+		 * - src.addr is aligned to src.width
+		 * - dst.addr is aligned to dst.width
+		 *
+		 * sg_len == 1 should be true, as there can be two cases here:
+		 * - Memory addresses are contiguous and are not scattered. Here only
+		 *   one sg will be passed by the user driver, with the memory address
+		 *   and zero length. We pass this to the controller; after the
+		 *   transfer it will receive the last burst request from the
+		 *   peripheral and so the transfer finishes.
+		 *
+		 * - Memory addresses are scattered and are not contiguous. As the
+		 *   DMA controller doesn't know when an LLI's transfer is over, it
+		 *   can't load the next LLI. So in this case there has to be an
+		 *   assumption that only one LLI is supported. Thus, we can't have
+		 *   scattered addresses.
+		 */
+		if (!bd.remainder) {
+			u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
+				PL080_CONFIG_FLOW_CONTROL_SHIFT;
+			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
+					(fc <= PL080_FLOW_SRC2DST_SRC))) {
+				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
+					__func__);
+				return 0;
+			}
+
+			if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
+					(bd.dstbus.addr % bd.dstbus.buswidth)) {
+				dev_err(&pl08x->adev->dev,
+					"%s src & dst address must be aligned to src"
+					" & dst width if peripheral is flow controller",
+					__func__);
+				return 0;
+			}
+
+			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+					bd.dstbus.buswidth, 0);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
+		}
+
+		/*
 		 * Send byte by byte for following cases
 		 * - Less than a bus width available
 		 * - until master bus is aligned
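The range check above accepts exactly the four CONFIG-register flow-control encodings in which a peripheral, rather than the DMAC, acts as flow controller. For reference, the encodings as defined in include/linux/amba/pl080.h (the per-line comments are added here for illustration and follow the PL080 TRM):

#define PL080_FLOW_MEM2MEM	(0x0)	/* mem-to-mem, DMAC is flow controller */
#define PL080_FLOW_MEM2PER	(0x1)	/* mem-to-per, DMAC FC */
#define PL080_FLOW_PER2MEM	(0x2)	/* per-to-mem, DMAC FC */
#define PL080_FLOW_SRC2DST	(0x3)	/* per-to-per, DMAC FC */
#define PL080_FLOW_SRC2DST_DST	(0x4)	/* per-to-per, destination peripheral FC */
#define PL080_FLOW_MEM2PER_PER	(0x5)	/* mem-to-per, peripheral FC */
#define PL080_FLOW_PER2MEM_PER	(0x6)	/* per-to-mem, peripheral FC */
#define PL080_FLOW_SRC2DST_SRC	(0x7)	/* per-to-per, source peripheral FC */

So fc >= PL080_FLOW_SRC2DST_DST && fc <= PL080_FLOW_SRC2DST_SRC is the 0x4..0x7 band, i.e. every mode where the peripheral is flow controller.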
@@ -1250,7 +1288,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_txd *txd;
-	int ret;
+	int ret, tmp;
 
 	/*
 	 * Current implementation ASSUMES only one sg
@@ -1284,12 +1322,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	txd->len = sgl->length;
 
 	if (direction == DMA_TO_DEVICE) {
-		txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 		txd->cctl = plchan->dst_cctl;
 		txd->src_addr = sgl->dma_address;
 		txd->dst_addr = plchan->dst_addr;
 	} else if (direction == DMA_FROM_DEVICE) {
-		txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 		txd->cctl = plchan->src_cctl;
 		txd->src_addr = plchan->src_addr;
 		txd->dst_addr = sgl->dma_address;
@@ -1299,6 +1335,15 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		return NULL;
 	}
 
+	if (plchan->cd->device_fc)
+		tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
+				PL080_FLOW_PER2MEM_PER;
+	else
+		tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
+				PL080_FLOW_PER2MEM;
+
+	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+
 	ret = pl08x_prep_channel_resources(plchan, txd);
 	if (ret)
 		return NULL;
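Putting the pieces together, a hypothetical client of a device_fc channel could then submit a single zero-length sg entry and let the peripheral flag the last burst. A sketch against the 2011-era dmaengine API; chan, buf_dma, and the flag choice are assumptions, and error handling is omitted:

struct scatterlist sg;
struct dma_async_tx_descriptor *desc;

sg_init_table(&sg, 1);
sg_dma_address(&sg) = buf_dma;	/* contiguous, width-aligned DMA buffer */
sg_dma_len(&sg) = 0;		/* length unknown: peripheral signals the end */

/* dmaengine_prep_slave_sg() did not exist yet; call the device hook directly */
desc = chan->device->device_prep_slave_sg(chan, &sg, 1,
		DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);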