author     Linus Torvalds <torvalds@linux-foundation.org>   2013-02-26 12:24:48 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-02-26 12:24:48 -0500
commit     5115f3c19d17851aaff5a857f55b4a019c908775 (patch)
tree       0d02cf01e12e86365f4f5e3b234f986daef181a7 /drivers/dma/dw_dmac.c
parent     c41b3810c09e60664433548c5218cc6ece6a8903 (diff)
parent     17166a3b6e88b93189e6be5f7e1335a3cc4fa965 (diff)
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
"This is fairly big pull by my standards as I had missed last merge
window. So we have the support for device tree for slave-dmaengine,
large updates to dw_dmac driver from Andy for reusing on different
architectures. Along with this we have fixes on bunch of the drivers"
Fix up trivial conflicts, usually due to #include line movement next to
each other.
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (111 commits)
Revert "ARM: SPEAr13xx: Pass DW DMAC platform data from DT"
ARM: dts: pl330: Add #dma-cells for generic dma binding support
DMA: PL330: Register the DMA controller with the generic DMA helpers
DMA: PL330: Add xlate function
DMA: PL330: Add new pl330 filter for DT case.
dma: tegra20-apb-dma: remove unnecessary assignment
edma: do not waste memory for dma_mask
dma: coh901318: set residue only if dma is in progress
dma: coh901318: avoid unbalanced locking
dmaengine.h: remove redundant else keyword
dma: of-dma: protect list write operation by spin_lock
dmaengine: ste_dma40: do not remove descriptors for cyclic transfers
dma: of-dma.c: fix memory leakage
dw_dmac: apply default dma_mask if needed
dmaengine: ioat - fix spare sparse complain
dmaengine: move drivers/of/dma.c -> drivers/dma/of-dma.c
ioatdma: fix race between updating ioat->head and IOAT_COMPLETION_PENDING
dw_dmac: add support for Lynxpoint DMA controllers
dw_dmac: return proper residue value
dw_dmac: fill individual length of descriptor
...
Diffstat (limited to 'drivers/dma/dw_dmac.c')
 -rw-r--r--   drivers/dma/dw_dmac.c   523
 1 file changed, 368 insertions(+), 155 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index b33d1f6e1333..51c3ea2ed41a 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1,6 +1,5 @@
 /*
- * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
- * AVR32 systems.)
+ * Core driver for the Synopsys DesignWare DMA Controller
  *
  * Copyright (C) 2007-2008 Atmel Corporation
  * Copyright (C) 2010-2011 ST Microelectronics
@@ -9,11 +8,13 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -47,15 +48,32 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
 	return slave ? slave->src_master : 1;
 }
 
+#define SRC_MASTER	0
+#define DST_MASTER	1
+
+static inline unsigned int dwc_get_master(struct dma_chan *chan, int master)
+{
+	struct dw_dma *dw = to_dw_dma(chan->device);
+	struct dw_dma_slave *dws = chan->private;
+	unsigned int m;
+
+	if (master == SRC_MASTER)
+		m = dwc_get_sms(dws);
+	else
+		m = dwc_get_dms(dws);
+
+	return min_t(unsigned int, dw->nr_masters - 1, m);
+}
+
 #define DWC_DEFAULT_CTLLO(_chan) ({				\
-		struct dw_dma_slave *__slave = (_chan->private);	\
 		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
 		struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
-		int _dms = dwc_get_dms(__slave);		\
-		int _sms = dwc_get_sms(__slave);		\
-		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
+		bool _is_slave = is_slave_direction(_dwc->direction);	\
+		int _dms = dwc_get_master(_chan, DST_MASTER);	\
+		int _sms = dwc_get_master(_chan, SRC_MASTER);	\
+		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
 			DW_DMA_MSIZE_16;			\
-		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
+		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
 			DW_DMA_MSIZE_16;			\
 		\
 		(DWC_CTLL_DST_MSIZE(_dmsize)			\
@@ -73,15 +91,14 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
  */
 #define NR_DESCS_PER_CHANNEL	64
 
-/*----------------------------------------------------------------------*/
+static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master)
+{
+	struct dw_dma *dw = to_dw_dma(chan->device);
 
-/*
- * Because we're not relying on writeback from the controller (it may not
- * even be configured into the core!) we don't need to use dma_pool.  These
- * descriptors -- and associated data -- are cacheable.  We do need to make
- * sure their dcache entries are written back before handing them off to
- * the controller, though.
- */
+	return dw->data_width[dwc_get_master(chan, master)];
+}
+
+/*----------------------------------------------------------------------*/
 
 static struct device *chan2dev(struct dma_chan *chan)
 {
@@ -94,7 +111,7 @@ static struct device *chan2parent(struct dma_chan *chan)
 
 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
 {
-	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
+	return to_dw_desc(dwc->active_list.next);
 }
 
 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
@@ -121,19 +138,6 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 	return ret;
 }
 
-static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
-{
-	struct dw_desc *child;
-
-	list_for_each_entry(child, &desc->tx_list, desc_node)
-		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
-				child->txd.phys, sizeof(child->lli),
-				DMA_TO_DEVICE);
-	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
-			desc->txd.phys, sizeof(desc->lli),
-			DMA_TO_DEVICE);
-}
-
 /*
  * Move a descriptor, including any children, to the free list.
  * `desc' must not be on any lists.
@@ -145,8 +149,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 	if (desc) {
 		struct dw_desc *child;
 
-		dwc_sync_desc_for_cpu(dwc, desc);
-
 		spin_lock_irqsave(&dwc->lock, flags);
 		list_for_each_entry(child, &desc->tx_list, desc_node)
 			dev_vdbg(chan2dev(&dwc->chan),
@@ -179,9 +181,9 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 		cfghi = dws->cfg_hi;
 		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
 	} else {
-		if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
+		if (dwc->direction == DMA_MEM_TO_DEV)
 			cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
-		else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
+		else if (dwc->direction == DMA_DEV_TO_MEM)
 			cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
 	}
 
@@ -223,7 +225,6 @@ static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
 		channel_readl(dwc, CTL_LO));
 }
 
-
 static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
 {
 	channel_clear_bit(dw, CH_EN, dwc->mask);
@@ -249,6 +250,9 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
 	channel_writel(dwc, CTL_LO, ctllo);
 	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
 	channel_set_bit(dw, CH_EN, dwc->mask);
+
+	/* Move pointer to next descriptor */
+	dwc->tx_node_active = dwc->tx_node_active->next;
 }
 
 /* Called with dwc->lock held and bh disabled */
@@ -279,9 +283,10 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 
 		dwc_initialize(dwc);
 
-		dwc->tx_list = &first->tx_list;
-		dwc->tx_node_active = first->tx_list.next;
+		dwc->residue = first->total_len;
+		dwc->tx_node_active = &first->tx_list;
 
+		/* Submit first block */
 		dwc_do_single_block(dwc, first);
 
 		return;
@@ -317,8 +322,6 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 		param = txd->callback_param;
 	}
 
-	dwc_sync_desc_for_cpu(dwc, desc);
-
 	/* async_tx_ack */
 	list_for_each_entry(child, &desc->tx_list, desc_node)
 		async_tx_ack(&child->txd);
@@ -327,29 +330,29 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 	list_splice_init(&desc->tx_list, &dwc->free_list);
 	list_move(&desc->desc_node, &dwc->free_list);
 
-	if (!dwc->chan.private) {
+	if (!is_slave_direction(dwc->direction)) {
 		struct device *parent = chan2parent(&dwc->chan);
 		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
 				dma_unmap_single(parent, desc->lli.dar,
-						desc->len, DMA_FROM_DEVICE);
+					desc->total_len, DMA_FROM_DEVICE);
 			else
 				dma_unmap_page(parent, desc->lli.dar,
-						desc->len, DMA_FROM_DEVICE);
+					desc->total_len, DMA_FROM_DEVICE);
 		}
 		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
 			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
 				dma_unmap_single(parent, desc->lli.sar,
-						desc->len, DMA_TO_DEVICE);
+					desc->total_len, DMA_TO_DEVICE);
 			else
 				dma_unmap_page(parent, desc->lli.sar,
-						desc->len, DMA_TO_DEVICE);
+					desc->total_len, DMA_TO_DEVICE);
 		}
 	}
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
-	if (callback_required && callback)
+	if (callback)
 		callback(param);
 }
 
@@ -384,6 +387,15 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		dwc_descriptor_complete(dwc, desc, true);
 }
 
+/* Returns how many bytes were already received from source */
+static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
+{
+	u32 ctlhi = channel_readl(dwc, CTL_HI);
+	u32 ctllo = channel_readl(dwc, CTL_LO);
+
+	return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
+}
+
 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 {
 	dma_addr_t llp;
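The dwc_get_sent() helper added above turns raw channel registers into a byte
count: CTL_HI.BLOCK_TS holds how many items of the source transfer width have
been moved, and CTL_LO bits 6:4 hold log2 of that width in bytes. A minimal
standalone sketch of the same arithmetic (the 0xfff field mask here is an
assumption for illustration; the driver uses its own DWC_CTLH_BLOCK_TS_MASK):

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_TS_MASK 0xfff  /* assumed width of the BLOCK_TS field */

    /* Mirrors dwc_get_sent(): items completed * bytes per item. */
    static uint32_t sent_bytes(uint32_t ctlhi, uint32_t ctllo)
    {
            return (ctlhi & BLOCK_TS_MASK) * (1u << (ctllo >> 4 & 7));
    }

    int main(void)
    {
            /* 256 items moved at 32-bit width (SRC_TR_WIDTH = 2) */
            printf("%u\n", sent_bytes(256, 2 << 4)); /* prints 1024 */
            return 0;
    }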
@@ -399,6 +411,39 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	if (status_xfer & dwc->mask) {
 		/* Everything we've submitted is done */
 		dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+			struct list_head *head, *active = dwc->tx_node_active;
+
+			/*
+			 * We are inside first active descriptor.
+			 * Otherwise something is really wrong.
+			 */
+			desc = dwc_first_active(dwc);
+
+			head = &desc->tx_list;
+			if (active != head) {
+				/* Update desc to reflect last sent one */
+				if (active != head->next)
+					desc = to_dw_desc(active->prev);
+
+				dwc->residue -= desc->len;
+
+				child = to_dw_desc(active);
+
+				/* Submit next block */
+				dwc_do_single_block(dwc, child);
+
+				spin_unlock_irqrestore(&dwc->lock, flags);
+				return;
+			}
+
+			/* We are done here */
+			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+		}
+
+		dwc->residue = 0;
+
 		spin_unlock_irqrestore(&dwc->lock, flags);
 
 		dwc_complete_all(dw, dwc);
@@ -406,6 +451,13 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	}
 
 	if (list_empty(&dwc->active_list)) {
+		dwc->residue = 0;
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return;
+	}
+
+	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
 		spin_unlock_irqrestore(&dwc->lock, flags);
 		return;
 	}
@@ -414,6 +466,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		(unsigned long long)llp);
 
 	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
+		/* initial residue value */
+		dwc->residue = desc->total_len;
+
 		/* check first descriptors addr */
 		if (desc->txd.phys == llp) {
 			spin_unlock_irqrestore(&dwc->lock, flags);
@@ -423,16 +478,21 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		/* check first descriptors llp */
 		if (desc->lli.llp == llp) {
 			/* This one is currently in progress */
+			dwc->residue -= dwc_get_sent(dwc);
 			spin_unlock_irqrestore(&dwc->lock, flags);
 			return;
 		}
 
-		list_for_each_entry(child, &desc->tx_list, desc_node)
+		dwc->residue -= desc->len;
+		list_for_each_entry(child, &desc->tx_list, desc_node) {
 			if (child->lli.llp == llp) {
 				/* Currently in progress */
+				dwc->residue -= dwc_get_sent(dwc);
 				spin_unlock_irqrestore(&dwc->lock, flags);
 				return;
 			}
+			dwc->residue -= child->len;
+		}
 
 		/*
 		 * No descriptors so far seem to be in progress, i.e.
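Taken together, the walk above computes the residue as: total transfer length,
minus the full length of every block the hardware LLP pointer has already
moved past, minus whatever the controller reports as sent for the block in
flight. A toy standalone model of that accounting, with made-up block lengths:

    #include <stdint.h>
    #include <stdio.h>

    /* Completed blocks are subtracted in full; the in-flight block
     * only by the bytes already sent (cf. dwc_get_sent() above). */
    static uint32_t residue(const uint32_t *len, unsigned int in_flight,
                            uint32_t total, uint32_t sent)
    {
            uint32_t res = total;
            unsigned int i;

            for (i = 0; i < in_flight; i++)
                    res -= len[i];
            return res - sent;
    }

    int main(void)
    {
            uint32_t len[] = { 4096, 4096, 2048 }; /* total 10240 bytes */

            /* second block in flight, 1000 bytes of it already sent */
            printf("%u\n", residue(len, 1, 10240, 1000)); /* prints 5144 */
            return 0;
    }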
@@ -458,9 +518,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 
 static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
 {
-	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
-			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
-			lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
+	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
+		 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
 }
 
 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -488,16 +547,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		dwc_dostart(dwc, dwc_first_active(dwc));
 
 	/*
-	 * KERN_CRITICAL may seem harsh, but since this only happens
+	 * WARN may seem harsh, but since this only happens
 	 * when someone submits a bad physical address in a
 	 * descriptor, we should consider ourselves lucky that the
 	 * controller flagged an error instead of scribbling over
 	 * random memory locations.
 	 */
-	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
-			"Bad descriptor submitted for DMA!\n");
-	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
-			"  cookie: %d\n", bad_desc->txd.cookie);
+	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
+				       "  cookie: %d\n", bad_desc->txd.cookie);
 	dwc_dump_lli(dwc, &bad_desc->lli);
 	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
 		dwc_dump_lli(dwc, &child->lli);
@@ -598,36 +655,8 @@ static void dw_dma_tasklet(unsigned long data)
 			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
 		else if (status_err & (1 << i))
 			dwc_handle_error(dw, dwc);
-		else if (status_xfer & (1 << i)) {
-			unsigned long flags;
-
-			spin_lock_irqsave(&dwc->lock, flags);
-			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
-				if (dwc->tx_node_active != dwc->tx_list) {
-					struct dw_desc *desc =
-						list_entry(dwc->tx_node_active,
-							   struct dw_desc,
-							   desc_node);
-
-					dma_writel(dw, CLEAR.XFER, dwc->mask);
-
-					/* move pointer to next descriptor */
-					dwc->tx_node_active =
-						dwc->tx_node_active->next;
-
-					dwc_do_single_block(dwc, desc);
-
-					spin_unlock_irqrestore(&dwc->lock, flags);
-					continue;
-				} else {
-					/* we are done here */
-					clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
-				}
-			}
-			spin_unlock_irqrestore(&dwc->lock, flags);
-
+		else if (status_xfer & (1 << i))
 			dwc_scan_descriptors(dw, dwc);
-		}
 	}
 
 	/*
@@ -709,7 +738,6 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		size_t len, unsigned long flags)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	struct dw_dma_slave *dws = chan->private;
 	struct dw_desc *desc;
 	struct dw_desc *first;
 	struct dw_desc *prev;
@@ -730,8 +758,10 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		return NULL;
 	}
 
-	data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)],
-			   dwc->dw->data_width[dwc_get_dms(dws)]);
+	dwc->direction = DMA_MEM_TO_MEM;
+
+	data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER),
+			   dwc_get_data_width(chan, DST_MASTER));
 
 	src_width = dst_width = min_t(unsigned int, data_width,
 				      dwc_fast_fls(src | dest | len));
@@ -756,32 +786,25 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		desc->lli.dar = dest + offset;
 		desc->lli.ctllo = ctllo;
 		desc->lli.ctlhi = xfer_count;
+		desc->len = xfer_count << src_width;
 
 		if (!first) {
 			first = desc;
 		} else {
 			prev->lli.llp = desc->txd.phys;
-			dma_sync_single_for_device(chan2parent(chan),
-					prev->txd.phys, sizeof(prev->lli),
-					DMA_TO_DEVICE);
 			list_add_tail(&desc->desc_node,
 					&first->tx_list);
 		}
 		prev = desc;
 	}
 
-
 	if (flags & DMA_PREP_INTERRUPT)
 		/* Trigger interrupt after last block */
 		prev->lli.ctllo |= DWC_CTLL_INT_EN;
 
 	prev->lli.llp = 0;
-	dma_sync_single_for_device(chan2parent(chan),
-			prev->txd.phys, sizeof(prev->lli),
-			DMA_TO_DEVICE);
-
 	first->txd.flags = flags;
-	first->len = len;
+	first->total_len = len;
 
 	return &first->txd;
 
@@ -796,7 +819,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned long flags, void *context)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	struct dw_dma_slave *dws = chan->private;
 	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
 	struct dw_desc *prev;
 	struct dw_desc *first;
@@ -811,9 +833,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
-	if (unlikely(!dws || !sg_len))
+	if (unlikely(!is_slave_direction(direction) || !sg_len))
 		return NULL;
 
+	dwc->direction = direction;
+
 	prev = first = NULL;
 
 	switch (direction) {
@@ -828,7 +852,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
 			DWC_CTLL_FC(DW_DMA_FC_D_M2P);
 
-		data_width = dwc->dw->data_width[dwc_get_sms(dws)];
+		data_width = dwc_get_data_width(chan, SRC_MASTER);
 
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc *desc;
@@ -861,15 +885,12 @@ slave_sg_todev_fill_desc:
 			}
 
 			desc->lli.ctlhi = dlen >> mem_width;
+			desc->len = dlen;
 
 			if (!first) {
 				first = desc;
 			} else {
 				prev->lli.llp = desc->txd.phys;
-				dma_sync_single_for_device(chan2parent(chan),
-						prev->txd.phys,
-						sizeof(prev->lli),
-						DMA_TO_DEVICE);
 				list_add_tail(&desc->desc_node,
 						&first->tx_list);
 			}
@@ -891,7 +912,7 @@ slave_sg_todev_fill_desc:
 		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
 			DWC_CTLL_FC(DW_DMA_FC_D_P2M);
 
-		data_width = dwc->dw->data_width[dwc_get_dms(dws)];
+		data_width = dwc_get_data_width(chan, DST_MASTER);
 
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc *desc;
@@ -923,15 +944,12 @@ slave_sg_fromdev_fill_desc:
 				len = 0;
 			}
 			desc->lli.ctlhi = dlen >> reg_width;
+			desc->len = dlen;
 
 			if (!first) {
 				first = desc;
 			} else {
 				prev->lli.llp = desc->txd.phys;
-				dma_sync_single_for_device(chan2parent(chan),
-						prev->txd.phys,
-						sizeof(prev->lli),
-						DMA_TO_DEVICE);
 				list_add_tail(&desc->desc_node,
 						&first->tx_list);
 			}
@@ -951,11 +969,7 @@ slave_sg_fromdev_fill_desc:
 		prev->lli.ctllo |= DWC_CTLL_INT_EN;
 
 	prev->lli.llp = 0;
-	dma_sync_single_for_device(chan2parent(chan),
-			prev->txd.phys, sizeof(prev->lli),
-			DMA_TO_DEVICE);
-
-	first->len = total_len;
+	first->total_len = total_len;
 
 	return &first->txd;
 
@@ -985,11 +999,12 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 
-	/* Check if it is chan is configured for slave transfers */
-	if (!chan->private)
+	/* Check if chan will be configured for slave transfers */
+	if (!is_slave_direction(sconfig->direction))
 		return -EINVAL;
 
 	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+	dwc->direction = sconfig->direction;
 
 	convert_burst(&dwc->dma_sconfig.src_maxburst);
 	convert_burst(&dwc->dma_sconfig.dst_maxburst);
@@ -997,6 +1012,26 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 	return 0;
 }
 
+static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
+{
+	u32 cfglo = channel_readl(dwc, CFG_LO);
+
+	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+		cpu_relax();
+
+	dwc->paused = true;
+}
+
+static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
+{
+	u32 cfglo = channel_readl(dwc, CFG_LO);
+
+	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+
+	dwc->paused = false;
+}
+
 static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		       unsigned long arg)
 {
@@ -1004,18 +1039,13 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct dw_dma *dw = to_dw_dma(chan->device);
 	struct dw_desc *desc, *_desc;
 	unsigned long flags;
-	u32 cfglo;
 	LIST_HEAD(list);
 
 	if (cmd == DMA_PAUSE) {
 		spin_lock_irqsave(&dwc->lock, flags);
 
-		cfglo = channel_readl(dwc, CFG_LO);
-		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
-		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
-			cpu_relax();
+		dwc_chan_pause(dwc);
 
-		dwc->paused = true;
 		spin_unlock_irqrestore(&dwc->lock, flags);
 	} else if (cmd == DMA_RESUME) {
 		if (!dwc->paused)
@@ -1023,9 +1053,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 		spin_lock_irqsave(&dwc->lock, flags);
 
-		cfglo = channel_readl(dwc, CFG_LO);
-		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
-		dwc->paused = false;
+		dwc_chan_resume(dwc);
 
 		spin_unlock_irqrestore(&dwc->lock, flags);
 	} else if (cmd == DMA_TERMINATE_ALL) {
@@ -1035,7 +1063,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 		dwc_chan_disable(dw, dwc);
 
-		dwc->paused = false;
+		dwc_chan_resume(dwc);
 
 		/* active_list entries will end up before queued entries */
 		list_splice_init(&dwc->queue, &list);
@@ -1055,6 +1083,21 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	return 0;
 }
 
+static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
+{
+	unsigned long flags;
+	u32 residue;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	residue = dwc->residue;
+	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
+		residue -= dwc_get_sent(dwc);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	return residue;
+}
+
 static enum dma_status
 dwc_tx_status(struct dma_chan *chan,
 	      dma_cookie_t cookie,
@@ -1071,7 +1114,7 @@ dwc_tx_status(struct dma_chan *chan,
 	}
 
 	if (ret != DMA_SUCCESS)
-		dma_set_residue(txstate, dwc_first_active(dwc)->len);
+		dma_set_residue(txstate, dwc_get_residue(dwc));
 
 	if (dwc->paused)
 		return DMA_PAUSED;
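With dwc_tx_status() now reporting a live residue instead of the whole
descriptor length, a dmaengine client can poll transfer progress through the
standard API. A hedged fragment (chan, cookie, and dev are assumed to come
from an earlier channel request and prep/submit):

    struct dma_tx_state state;
    enum dma_status status;

    status = dmaengine_tx_status(chan, cookie, &state);
    if (status == DMA_SUCCESS)
            dev_dbg(dev, "transfer complete\n");
    else
            dev_dbg(dev, "%u bytes still outstanding\n", state.residue);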
@@ -1114,22 +1157,22 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	spin_lock_irqsave(&dwc->lock, flags);
 	i = dwc->descs_allocated;
 	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
+		dma_addr_t phys;
+
 		spin_unlock_irqrestore(&dwc->lock, flags);
 
-		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
-		if (!desc) {
-			dev_info(chan2dev(chan),
-				"only allocated %d descriptors\n", i);
-			spin_lock_irqsave(&dwc->lock, flags);
-			break;
-		}
+		desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
+		if (!desc)
+			goto err_desc_alloc;
+
+		memset(desc, 0, sizeof(struct dw_desc));
 
 		INIT_LIST_HEAD(&desc->tx_list);
 		dma_async_tx_descriptor_init(&desc->txd, chan);
 		desc->txd.tx_submit = dwc_tx_submit;
 		desc->txd.flags = DMA_CTRL_ACK;
-		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
-				sizeof(desc->lli), DMA_TO_DEVICE);
+		desc->txd.phys = phys;
+
 		dwc_desc_put(dwc, desc);
 
 		spin_lock_irqsave(&dwc->lock, flags);
@@ -1141,6 +1184,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
 
 	return i;
+
+err_desc_alloc:
+	dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
+
+	return i;
 }
 
 static void dwc_free_chan_resources(struct dma_chan *chan)
@@ -1172,14 +1220,56 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 
 	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
 		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
-		dma_unmap_single(chan2parent(chan), desc->txd.phys,
-				sizeof(desc->lli), DMA_TO_DEVICE);
-		kfree(desc);
+		dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
 	}
 
 	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
 }
 
+bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
+{
+	struct dw_dma *dw = to_dw_dma(chan->device);
+	static struct dw_dma *last_dw;
+	static char *last_bus_id;
+	int i = -1;
+
+	/*
+	 * dmaengine framework calls this routine for all channels of all dma
+	 * controller, until true is returned. If 'param' bus_id is not
+	 * registered with a dma controller (dw), then there is no need of
+	 * running below function for all channels of dw.
+	 *
+	 * This block of code does this by saving the parameters of last
+	 * failure. If dw and param are same, i.e. trying on same dw with
+	 * different channel, return false.
+	 */
+	if ((last_dw == dw) && (last_bus_id == param))
+		return false;
+	/*
+	 * Return true:
+	 * - If dw_dma's platform data is not filled with slave info, then all
+	 *   dma controllers are fine for transfer.
+	 * - Or if param is NULL
+	 */
+	if (!dw->sd || !param)
+		return true;
+
+	while (++i < dw->sd_count) {
+		if (!strcmp(dw->sd[i].bus_id, param)) {
+			chan->private = &dw->sd[i];
+			last_dw = NULL;
+			last_bus_id = NULL;
+
+			return true;
+		}
+	}
+
+	last_dw = dw;
+	last_bus_id = param;
+	return false;
+}
+EXPORT_SYMBOL(dw_dma_generic_filter);
+
 /* --------------------- Cyclic DMA API extensions -------------------- */
 
 /**
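Clients would normally reach dw_dma_generic_filter() through
dma_request_channel(); a minimal sketch, where "uart0_tx" is a hypothetical
bus_id registered in the controller's slave info:

    dma_cap_mask_t mask;
    struct dma_chan *chan;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);

    chan = dma_request_channel(mask, dw_dma_generic_filter, "uart0_tx");
    if (!chan)
            pr_err("no matching dw_dmac channel\n");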
@@ -1299,6 +1389,11 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 
 	retval = ERR_PTR(-EINVAL);
 
+	if (unlikely(!is_slave_direction(direction)))
+		goto out_err;
+
+	dwc->direction = direction;
+
 	if (direction == DMA_MEM_TO_DEV)
 		reg_width = __ffs(sconfig->dst_addr_width);
 	else
@@ -1313,8 +1408,6 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 		goto out_err;
 	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
 		goto out_err;
-	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
-		goto out_err;
 
 	retval = ERR_PTR(-ENOMEM);
 
@@ -1372,20 +1465,14 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 		desc->lli.ctlhi = (period_len >> reg_width);
 		cdesc->desc[i] = desc;
 
-		if (last) {
+		if (last)
 			last->lli.llp = desc->txd.phys;
-			dma_sync_single_for_device(chan2parent(chan),
-					last->txd.phys, sizeof(last->lli),
-					DMA_TO_DEVICE);
-		}
 
 		last = desc;
 	}
 
 	/* lets make a cyclic list */
 	last->lli.llp = cdesc->desc[0]->txd.phys;
-	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
-			sizeof(last->lli), DMA_TO_DEVICE);
 
 	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
 			"period %zu periods %d\n", (unsigned long long)buf_addr,
@@ -1463,6 +1550,91 @@ static void dw_dma_off(struct dw_dma *dw)
 		dw->chan[i].initialized = false;
 }
 
+#ifdef CONFIG_OF
+static struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+	struct device_node *sn, *cn, *np = pdev->dev.of_node;
+	struct dw_dma_platform_data *pdata;
+	struct dw_dma_slave *sd;
+	u32 tmp, arr[4];
+
+	if (!np) {
+		dev_err(&pdev->dev, "Missing DT data\n");
+		return NULL;
+	}
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	if (of_property_read_u32(np, "nr_channels", &pdata->nr_channels))
+		return NULL;
+
+	if (of_property_read_bool(np, "is_private"))
+		pdata->is_private = true;
+
+	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
+		pdata->chan_allocation_order = (unsigned char)tmp;
+
+	if (!of_property_read_u32(np, "chan_priority", &tmp))
+		pdata->chan_priority = tmp;
+
+	if (!of_property_read_u32(np, "block_size", &tmp))
+		pdata->block_size = tmp;
+
+	if (!of_property_read_u32(np, "nr_masters", &tmp)) {
+		if (tmp > 4)
+			return NULL;
+
+		pdata->nr_masters = tmp;
+	}
+
+	if (!of_property_read_u32_array(np, "data_width", arr,
+				pdata->nr_masters))
+		for (tmp = 0; tmp < pdata->nr_masters; tmp++)
+			pdata->data_width[tmp] = arr[tmp];
+
+	/* parse slave data */
+	sn = of_find_node_by_name(np, "slave_info");
+	if (!sn)
+		return pdata;
+
+	/* calculate number of slaves */
+	tmp = of_get_child_count(sn);
+	if (!tmp)
+		return NULL;
+
+	sd = devm_kzalloc(&pdev->dev, sizeof(*sd) * tmp, GFP_KERNEL);
+	if (!sd)
+		return NULL;
+
+	pdata->sd = sd;
+	pdata->sd_count = tmp;
+
+	for_each_child_of_node(sn, cn) {
+		sd->dma_dev = &pdev->dev;
+		of_property_read_string(cn, "bus_id", &sd->bus_id);
+		of_property_read_u32(cn, "cfg_hi", &sd->cfg_hi);
+		of_property_read_u32(cn, "cfg_lo", &sd->cfg_lo);
+		if (!of_property_read_u32(cn, "src_master", &tmp))
+			sd->src_master = tmp;
+
+		if (!of_property_read_u32(cn, "dst_master", &tmp))
+			sd->dst_master = tmp;
+		sd++;
+	}
+
+	return pdata;
+}
+#else
+static inline struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+	return NULL;
+}
+#endif
+
 static int dw_probe(struct platform_device *pdev)
 {
 	struct dw_dma_platform_data *pdata;
@@ -1478,10 +1650,6 @@ static int dw_probe(struct platform_device *pdev)
 	int			err;
 	int			i;
 
-	pdata = dev_get_platdata(&pdev->dev);
-	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
-		return -EINVAL;
-
 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!io)
 		return -EINVAL;
@@ -1494,9 +1662,33 @@ static int dw_probe(struct platform_device *pdev)
 	if (IS_ERR(regs))
 		return PTR_ERR(regs);
 
+	/* Apply default dma_mask if needed */
+	if (!pdev->dev.dma_mask) {
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	}
+
 	dw_params = dma_read_byaddr(regs, DW_PARAMS);
 	autocfg = dw_params >> DW_PARAMS_EN & 0x1;
 
+	dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params);
+
+	pdata = dev_get_platdata(&pdev->dev);
+	if (!pdata)
+		pdata = dw_dma_parse_dt(pdev);
+
+	if (!pdata && autocfg) {
+		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+		if (!pdata)
+			return -ENOMEM;
+
+		/* Fill platform data with the default values */
+		pdata->is_private = true;
+		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
+		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
+	} else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
+		return -EINVAL;
+
 	if (autocfg)
 		nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
 	else
@@ -1513,6 +1705,8 @@ static int dw_probe(struct platform_device *pdev)
 	clk_prepare_enable(dw->clk);
 
 	dw->regs = regs;
+	dw->sd = pdata->sd;
+	dw->sd_count = pdata->sd_count;
 
 	/* get hardware configuration parameters */
 	if (autocfg) {
@@ -1544,6 +1738,14 @@ static int dw_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, dw);
 
+	/* create a pool of consistent memory blocks for hardware descriptors */
+	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
+					 sizeof(struct dw_desc), 4, 0);
+	if (!dw->desc_pool) {
+		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
+		return -ENOMEM;
+	}
+
 	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
 
 	INIT_LIST_HEAD(&dw->dma.channels);
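Because the pool created above hands out coherent memory, descriptors are
visible to the controller as soon as they are written, which is why the
dma_sync_single_for_device() calls are dropped throughout this patch. The
alloc/free pairing, as a sketch of the pattern the driver now follows:

    dma_addr_t phys;
    struct dw_desc *desc;

    /* coherent allocation: 'phys' is the address the LLP register sees */
    desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
    if (desc) {
            memset(desc, 0, sizeof(*desc));
            desc->txd.phys = phys;
            /* ... chain into a list and submit ... */
            dma_pool_free(dw->desc_pool, desc, phys);
    }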
@@ -1575,7 +1777,7 @@ static int dw_probe(struct platform_device *pdev)
 
 		channel_clear_bit(dw, CH_EN, dwc->mask);
 
-		dwc->dw = dw;
+		dwc->direction = DMA_TRANS_NONE;
 
 		/* hardware configuration */
 		if (autocfg) {
@@ -1584,6 +1786,9 @@ static int dw_probe(struct platform_device *pdev)
 			dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
 						     DWC_PARAMS);
 
+			dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
+					    dwc_params);
+
 			/* Decode maximum block size for given channel. The
 			 * stored 4 bit value represents blocks from 0x00 for 3
 			 * up to 0x0a for 4095. */
@@ -1627,8 +1832,8 @@ static int dw_probe(struct platform_device *pdev)
 
 	dma_writel(dw, CFG, DW_CFG_DMA_EN);
 
-	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
-			dev_name(&pdev->dev), nr_channels);
+	dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
+		 nr_channels);
 
 	dma_async_device_register(&dw->dma);
 
@@ -1658,7 +1863,7 @@ static void dw_shutdown(struct platform_device *pdev)
 {
 	struct dw_dma *dw = platform_get_drvdata(pdev);
 
-	dw_dma_off(platform_get_drvdata(pdev));
+	dw_dma_off(dw);
 	clk_disable_unprepare(dw->clk);
 }
 
@@ -1667,7 +1872,7 @@ static int dw_suspend_noirq(struct device *dev)
 	struct platform_device *pdev = to_platform_device(dev);
 	struct dw_dma *dw = platform_get_drvdata(pdev);
 
-	dw_dma_off(platform_get_drvdata(pdev));
+	dw_dma_off(dw);
 	clk_disable_unprepare(dw->clk);
 
 	return 0;
@@ -1680,6 +1885,7 @@ static int dw_resume_noirq(struct device *dev)
 
 	clk_prepare_enable(dw->clk);
 	dma_writel(dw, CFG, DW_CFG_DMA_EN);
+
 	return 0;
 }
 
@@ -1700,7 +1906,13 @@ static const struct of_device_id dw_dma_id_table[] = {
 MODULE_DEVICE_TABLE(of, dw_dma_id_table);
 #endif
 
+static const struct platform_device_id dw_dma_ids[] = {
+	{ "INTL9C60", 0 },
+	{ }
+};
+
 static struct platform_driver dw_driver = {
+	.probe		= dw_probe,
 	.remove		= dw_remove,
 	.shutdown	= dw_shutdown,
 	.driver = {
@@ -1708,11 +1920,12 @@ static struct platform_driver dw_driver = {
 		.pm	= &dw_dev_pm_ops,
 		.of_match_table = of_match_ptr(dw_dma_id_table),
 	},
+	.id_table	= dw_dma_ids,
 };
 
 static int __init dw_init(void)
 {
-	return platform_driver_probe(&dw_driver, dw_probe);
+	return platform_driver_register(&dw_driver);
 }
 subsys_initcall(dw_init);
 