author		Linus Torvalds <torvalds@linux-foundation.org>	2013-02-26 12:24:48 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-26 12:24:48 -0500
commit		5115f3c19d17851aaff5a857f55b4a019c908775 (patch)
tree		0d02cf01e12e86365f4f5e3b234f986daef181a7 /drivers/dma/edma.c
parent		c41b3810c09e60664433548c5218cc6ece6a8903 (diff)
parent		17166a3b6e88b93189e6be5f7e1335a3cc4fa965 (diff)
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
 "This is fairly big pull by my standards as I had missed last merge
  window. So we have the support for device tree for slave-dmaengine,
  large updates to dw_dmac driver from Andy for reusing on different
  architectures. Along with this we have fixes on bunch of the drivers"

Fix up trivial conflicts, usually due to #include line movement next to
each other.

* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (111 commits)
  Revert "ARM: SPEAr13xx: Pass DW DMAC platform data from DT"
  ARM: dts: pl330: Add #dma-cells for generic dma binding support
  DMA: PL330: Register the DMA controller with the generic DMA helpers
  DMA: PL330: Add xlate function
  DMA: PL330: Add new pl330 filter for DT case.
  dma: tegra20-apb-dma: remove unnecessary assignment
  edma: do not waste memory for dma_mask
  dma: coh901318: set residue only if dma is in progress
  dma: coh901318: avoid unbalanced locking
  dmaengine.h: remove redundant else keyword
  dma: of-dma: protect list write operation by spin_lock
  dmaengine: ste_dma40: do not remove descriptors for cyclic transfers
  dma: of-dma.c: fix memory leakage
  dw_dmac: apply default dma_mask if needed
  dmaengine: ioat - fix spare sparse complain
  dmaengine: move drivers/of/dma.c -> drivers/dma/of-dma.c
  ioatdma: fix race between updating ioat->head and IOAT_COMPLETION_PENDING
  dw_dmac: add support for Lynxpoint DMA controllers
  dw_dmac: return proper residue value
  dw_dmac: fill individual length of descriptor
  ...
Diffstat (limited to 'drivers/dma/edma.c')
-rw-r--r--	drivers/dma/edma.c	61
1 file changed, 31 insertions(+), 30 deletions(-)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index f424298f1ac5..cd7e3280fadd 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -69,9 +69,7 @@ struct edma_chan {
 	int				ch_num;
 	bool				alloced;
 	int				slot[EDMA_MAX_SLOTS];
-	dma_addr_t			addr;
-	int				addr_width;
-	int				maxburst;
+	struct dma_slave_config		cfg;
 };
 
 struct edma_cc {
@@ -178,29 +176,14 @@ static int edma_terminate_all(struct edma_chan *echan)
 	return 0;
 }
 
-
 static int edma_slave_config(struct edma_chan *echan,
-	struct dma_slave_config *config)
+	struct dma_slave_config *cfg)
 {
-	if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) ||
-	    (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
+	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
 		return -EINVAL;
 
-	if (config->direction == DMA_MEM_TO_DEV) {
-		if (config->dst_addr)
-			echan->addr = config->dst_addr;
-		if (config->dst_addr_width)
-			echan->addr_width = config->dst_addr_width;
-		if (config->dst_maxburst)
-			echan->maxburst = config->dst_maxburst;
-	} else if (config->direction == DMA_DEV_TO_MEM) {
-		if (config->src_addr)
-			echan->addr = config->src_addr;
-		if (config->src_addr_width)
-			echan->addr_width = config->src_addr_width;
-		if (config->src_maxburst)
-			echan->maxburst = config->src_maxburst;
-	}
+	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
 
 	return 0;
 }
@@ -235,6 +218,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 	struct edma_chan *echan = to_edma_chan(chan);
 	struct device *dev = chan->device->dev;
 	struct edma_desc *edesc;
+	dma_addr_t dev_addr;
+	enum dma_slave_buswidth dev_width;
+	u32 burst;
 	struct scatterlist *sg;
 	int i;
 	int acnt, bcnt, ccnt, src, dst, cidx;
@@ -243,7 +229,20 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 	if (unlikely(!echan || !sgl || !sg_len))
 		return NULL;
 
-	if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+	if (direction == DMA_DEV_TO_MEM) {
+		dev_addr = echan->cfg.src_addr;
+		dev_width = echan->cfg.src_addr_width;
+		burst = echan->cfg.src_maxburst;
+	} else if (direction == DMA_MEM_TO_DEV) {
+		dev_addr = echan->cfg.dst_addr;
+		dev_width = echan->cfg.dst_addr_width;
+		burst = echan->cfg.dst_maxburst;
+	} else {
+		dev_err(dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
 		dev_err(dev, "Undefined slave buswidth\n");
 		return NULL;
 	}
@@ -275,14 +274,14 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 			}
 		}
 
-		acnt = echan->addr_width;
+		acnt = dev_width;
 
 		/*
 		 * If the maxburst is equal to the fifo width, use
 		 * A-synced transfers. This allows for large contiguous
 		 * buffer transfers using only one PaRAM set.
 		 */
-		if (echan->maxburst == 1) {
+		if (burst == 1) {
 			edesc->absync = false;
 			ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
 			bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
@@ -302,7 +301,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 		 */
 		} else {
 			edesc->absync = true;
-			bcnt = echan->maxburst;
+			bcnt = burst;
 			ccnt = sg_dma_len(sg) / (acnt * bcnt);
 			if (ccnt > (SZ_64K - 1)) {
 				dev_err(dev, "Exceeded max SG segment size\n");
@@ -313,13 +312,13 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 
 		if (direction == DMA_MEM_TO_DEV) {
 			src = sg_dma_address(sg);
-			dst = echan->addr;
+			dst = dev_addr;
 			src_bidx = acnt;
 			src_cidx = cidx;
 			dst_bidx = 0;
 			dst_cidx = 0;
 		} else {
-			src = echan->addr;
+			src = dev_addr;
 			dst = sg_dma_address(sg);
 			src_bidx = 0;
 			src_cidx = 0;
@@ -621,13 +620,11 @@ static struct platform_device *pdev0, *pdev1;
 static const struct platform_device_info edma_dev_info0 = {
 	.name = "edma-dma-engine",
 	.id = 0,
-	.dma_mask = DMA_BIT_MASK(32),
 };
 
 static const struct platform_device_info edma_dev_info1 = {
 	.name = "edma-dma-engine",
 	.id = 1,
-	.dma_mask = DMA_BIT_MASK(32),
 };
 
 static int edma_init(void)
@@ -641,6 +638,8 @@ static int edma_init(void)
 			ret = PTR_ERR(pdev0);
 			goto out;
 		}
+		pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
+		pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	}
 
 	if (EDMA_CTLRS == 2) {
@@ -650,6 +649,8 @@ static int edma_init(void)
 			platform_device_unregister(pdev0);
 			ret = PTR_ERR(pdev1);
 		}
+		pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
+		pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	}
 
 out:
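
The edma.c change above makes the driver cache the whole struct dma_slave_config and pick the
direction-specific fields (dev_addr, dev_width, burst) at prep time in edma_prep_slave_sg(). A
minimal client-side sketch of how a peripheral driver would exercise that path through the
generic dmaengine API is shown below; the function name example_setup_tx and the FIFO_TX_REG
address are hypothetical placeholders, not part of this commit.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Hypothetical peripheral TX FIFO address; not taken from this commit. */
#define FIFO_TX_REG	0x01d0c000

static int example_setup_tx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int nents)
{
	struct dma_slave_config cfg = {
		/* edma_prep_slave_sg() reads the dst_* fields for MEM_TO_DEV. */
		.dst_addr	= FIFO_TX_REG,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 1,	/* burst == 1 selects the A-synced PaRAM setup */
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	/* Stored by edma_slave_config() via memcpy() into echan->cfg. */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EIO;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}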