author    Andy Shevchenko <andriy.shevchenko@linux.intel.com>    2012-09-21 08:05:47 -0400
committer Vinod Koul <vinod.koul@linux.intel.com>    2012-09-27 06:05:23 -0400
commit    4a63a8b3e8d2e4f56174deb728085010aa3ac2a1 (patch)
tree      0c6d4fb4cd88f9c7fff748909e5956ea55793ef0 /drivers/dma/dw_dmac.c
parent    482c67ea7bab80b956185a3e7553151820bc5876 (diff)
dw_dmac: autoconfigure block_size or use platform data

The maximum block size is a configurable parameter of the chip, so the driver
first tries to get it from the encoded component parameters; otherwise it
falls back to the platform data.

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
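
For reference, a minimal standalone sketch (not part of the patch) of the decoding that the
last hunk below performs: each channel has a 4-bit encoded value in the MAX_BLK_SIZE
parameter register, and (4 << encoded) - 1 gives the block size in transfer widths, so 0x0
yields 3 and 0xa yields 4095 (the old DWC_MAX_COUNT). The register value used here is made
up purely for illustration.

    #include <stdio.h>

    /* Same decode as the patch: one 4-bit encoded value per channel,
     * 0x0 -> 3 words ... 0xa -> 4095 words (the old DWC_MAX_COUNT). */
    static unsigned int decode_block_size(unsigned int max_blk_size, unsigned int channel)
    {
            return (4U << ((max_blk_size >> (4 * channel)) & 0xf)) - 1;
    }

    int main(void)
    {
            unsigned int max_blk_size = 0x7a;  /* hypothetical value: ch0 = 0xa, ch1 = 0x7 */

            printf("ch0 block_size = %u\n", decode_block_size(max_blk_size, 0));  /* 4095 */
            printf("ch1 block_size = %u\n", decode_block_size(max_blk_size, 1));  /* 511 */
            return 0;
    }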
Diffstat (limited to 'drivers/dma/dw_dmac.c')
-rw-r--r--    drivers/dma/dw_dmac.c    37
1 file changed, 21 insertions(+), 16 deletions(-)
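
The block size bounds how many words a single descriptor may carry, so longer requests are
split across several descriptors. Below is a rough standalone sketch (again not driver code,
with made-up values) of the splitting loop that dwc_prep_dma_memcpy() keeps in the patch,
only with DWC_MAX_COUNT replaced by the per-channel dwc->block_size.

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
            size_t len = 10000;            /* total bytes to copy (example value) */
            unsigned int src_width = 2;    /* log2 of the word size: 4-byte words */
            size_t block_size = 1024;      /* max words per descriptor (example value) */
            size_t offset, xfer_count;

            for (offset = 0; offset < len; offset += xfer_count << src_width) {
                    size_t remaining = (len - offset) >> src_width;

                    /* each descriptor carries at most block_size words */
                    xfer_count = remaining < block_size ? remaining : block_size;
                    printf("descriptor at offset %zu: %zu words\n", offset, xfer_count);
            }
            return 0;
    }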
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index d71bc7167891..c143b7e40716 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -56,16 +56,6 @@
 	})
 
 /*
- * This is configuration-dependent and usually a funny size like 4095.
- *
- * Note that this is a transfer count, i.e. if we transfer 32-bit
- * words, we can do 16380 bytes per descriptor.
- *
- * This parameter is also system-specific.
- */
-#define DWC_MAX_COUNT	4095U
-
-/*
  * Number of descriptors to allocate for each channel. This should be
  * made configurable somehow; preferably, the clients (at least the
  * ones using slave transfers) should be able to give us a hint.
@@ -672,7 +662,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
 	for (offset = 0; offset < len; offset += xfer_count << src_width) {
 		xfer_count = min_t(size_t, (len - offset) >> src_width,
-				   DWC_MAX_COUNT);
+				   dwc->block_size);
 
 		desc = dwc_desc_get(dwc);
 		if (!desc)
@@ -773,8 +763,8 @@ slave_sg_todev_fill_desc:
 			desc->lli.sar = mem;
 			desc->lli.dar = reg;
 			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
-			if ((len >> mem_width) > DWC_MAX_COUNT) {
-				dlen = DWC_MAX_COUNT << mem_width;
+			if ((len >> mem_width) > dwc->block_size) {
+				dlen = dwc->block_size << mem_width;
 				mem += dlen;
 				len -= dlen;
 			} else {
@@ -833,8 +823,8 @@ slave_sg_fromdev_fill_desc:
 			desc->lli.sar = reg;
 			desc->lli.dar = mem;
 			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
-			if ((len >> reg_width) > DWC_MAX_COUNT) {
-				dlen = DWC_MAX_COUNT << reg_width;
+			if ((len >> reg_width) > dwc->block_size) {
+				dlen = dwc->block_size << reg_width;
 				mem += dlen;
 				len -= dlen;
 			} else {
@@ -1217,7 +1207,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 	periods = buf_len / period_len;
 
 	/* Check for too big/unaligned periods and unaligned DMA buffer. */
-	if (period_len > (DWC_MAX_COUNT << reg_width))
+	if (period_len > (dwc->block_size << reg_width))
 		goto out_err;
 	if (unlikely(period_len & ((1 << reg_width) - 1)))
 		goto out_err;
@@ -1383,6 +1373,7 @@ static int __devinit dw_probe(struct platform_device *pdev)
 	bool autocfg;
 	unsigned int dw_params;
 	unsigned int nr_channels;
+	unsigned int max_blk_size = 0;
 	int irq;
 	int err;
 	int i;
@@ -1423,6 +1414,10 @@ static int __devinit dw_probe(struct platform_device *pdev)
 
 	dw->regs = regs;
 
+	/* get hardware configuration parameters */
+	if (autocfg)
+		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
+
 	/* Calculate all channel mask before DMA setup */
 	dw->all_chan_mask = (1 << nr_channels) - 1;
 
@@ -1468,6 +1463,16 @@ static int __devinit dw_probe(struct platform_device *pdev)
 		INIT_LIST_HEAD(&dwc->free_list);
 
 		channel_clear_bit(dw, CH_EN, dwc->mask);
+
+		/* hardware configuration */
+		if (autocfg)
+			/* Decode maximum block size for given channel. The
+			 * stored 4 bit value represents blocks from 0x00 for 3
+			 * up to 0x0a for 4095. */
+			dwc->block_size =
+				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
+		else
+			dwc->block_size = pdata->block_size;
 	}
 
 	/* Clear all interrupts on all channels. */