author     Linus Torvalds <torvalds@linux-foundation.org>  2011-05-28 15:35:15 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-05-28 15:35:15 -0400
commit     4cb865deec59ef31d966622d1ec87411ae32dfab (patch)
tree       e060d515f62e4f334aded38c9079485d50166693
parent     55f08e1baa3ef11c952b626dbc7ef9e3e8332a63 (diff)
parent     19d78a61be6dd707dcec298c486303d4ba2c840a (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (33 commits)
x86: poll waiting for I/OAT DMA channel status
maintainers: add dma engine tree details
dmaengine: add TODO items for future work on dma drivers
dmaengine: Add API documentation for slave dma usage
dmaengine/dw_dmac: Update maintainer-ship
dmaengine: move link order
dmaengine/dw_dmac: implement pause and resume in dwc_control
dmaengine/dw_dmac: Replace spin_lock* with irqsave variants and enable submission from callback
dmaengine/dw_dmac: Divide one sg to many desc, if sg len is greater than DWC_MAX_COUNT
dmaengine/dw_dmac: set residue as total len in dwc_tx_status if status is !DMA_SUCCESS
dmaengine/dw_dmac: don't call callback routine in case dmaengine_terminate_all() is called
dmaengine: at_hdmac: pause: no need to wait for FIFO empty
pch_dma: modify pci device table definition
pch_dma: Support new device ML7223 IOH
pch_dma: Support I2S for ML7213 IOH
pch_dma: Fix DMA setting issue
pch_dma: modify for checkpatch
pch_dma: fix dma direction issue for ML7213 IOH video-in
dmaengine: at_hdmac: use descriptor chaining help function
dmaengine: at_hdmac: implement pause and resume in atc_control
...
Fix up trivial conflict in drivers/dma/dw_dmac.c
-rw-r--r--  Documentation/dmaengine.txt    97
-rw-r--r--  MAINTAINERS                     9
-rw-r--r--  drivers/Makefile                4
-rw-r--r--  drivers/dma/Kconfig            12
-rw-r--r--  drivers/dma/TODO               14
-rw-r--r--  drivers/dma/at_hdmac.c        376
-rw-r--r--  drivers/dma/at_hdmac_regs.h    30
-rw-r--r--  drivers/dma/coh901318.c         2
-rw-r--r--  drivers/dma/dw_dmac.c         272
-rw-r--r--  drivers/dma/dw_dmac_regs.h      2
-rw-r--r--  drivers/dma/intel_mid_dma.c    17
-rw-r--r--  drivers/dma/ioat/dma_v2.c       8
-rw-r--r--  drivers/dma/iop-adma.c          6
-rw-r--r--  drivers/dma/mv_xor.c            6
-rw-r--r--  drivers/dma/pch_dma.c          96
-rw-r--r--  drivers/dma/ppc4xx/adma.c       8
-rw-r--r--  drivers/dma/ste_dma40.c         4
-rw-r--r--  include/linux/dw_dmac.h         1
18 files changed, 727 insertions, 237 deletions
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt
index 0c1c2f63c0a9..5a0cb1ef6164 100644
--- a/Documentation/dmaengine.txt
+++ b/Documentation/dmaengine.txt
@@ -1 +1,96 @@ | |||
1 | See Documentation/crypto/async-tx-api.txt | 1 | DMA Engine API Guide |
2 | ==================== | ||
3 | |||
4 | Vinod Koul <vinod dot koul at intel.com> | ||
5 | |||
6 | NOTE: For DMA Engine usage in async_tx please see: | ||
7 | Documentation/crypto/async-tx-api.txt | ||
8 | |||
9 | |||
10 | Below is a guide for device driver writers on how to use the slave-DMA API of ||
11 | the DMA Engine. It applies to slave DMA usage only. ||
12 | |||
13 | Slave DMA usage consists of the following steps: ||
14 | 1. Allocate a DMA slave channel | ||
15 | 2. Set slave and controller specific parameters | ||
16 | 3. Get a descriptor for the transaction ||
17 | 4. Submit the transaction and wait for callback notification | ||
18 | |||
19 | 1. Allocate a DMA slave channel | ||
20 | Channel allocation is slightly different in the slave DMA context: client ||
21 | drivers typically need a channel from a particular DMA controller, and in ||
22 | some cases a specific channel is desired. To request a channel, the ||
23 | dma_request_channel() API is used. ||
24 | |||
25 | Interface: | ||
26 | struct dma_chan *dma_request_channel(dma_cap_mask_t mask, | ||
27 | dma_filter_fn filter_fn, | ||
28 | void *filter_param); | ||
29 | where dma_filter_fn is defined as: | ||
30 | typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); | ||
31 | |||
32 | When the optional 'filter_fn' parameter is set to NULL, dma_request_channel ||
33 | simply returns the first channel that satisfies the capability mask. Otherwise, ||
34 | when the mask parameter is insufficient for selecting the necessary channel, ||
35 | the filter_fn routine can be used to select among the available channels in the ||
36 | system. The filter_fn routine is called once for each free channel in the ||
37 | system. Upon seeing a suitable channel, filter_fn returns 'true' and that ||
38 | channel becomes the return value from dma_request_channel. A channel ||
39 | allocated via this interface is exclusive to the caller until ||
40 | dma_release_channel() is called. ||
41 | |||
42 | 2. Set slave and controller specific parameters | ||
43 | The next step is always to pass some specific information to the DMA driver. ||
44 | Most of the generic information which a slave DMA can use is in struct ||
45 | dma_slave_config. It allows the clients to specify DMA direction, DMA ||
46 | addresses, bus widths, DMA burst lengths etc. If a DMA controller has more ||
47 | parameters to be sent, it should embed struct dma_slave_config in its ||
48 | controller-specific structure. That gives the client the flexibility to pass ||
49 | more parameters, if required. ||
50 | |||
51 | Interface: | ||
52 | int dmaengine_slave_config(struct dma_chan *chan, | ||
53 | struct dma_slave_config *config) | ||
54 | |||
55 | 3. Get a descriptor for the transaction ||
56 | For slave usage, the various modes of slave transfer supported by the ||
57 | DMA engine are: ||
58 | slave_sg - DMA a list of scatter-gather buffers from/to a peripheral ||
59 | dma_cyclic - perform a cyclic DMA operation from/to a peripheral until the ||
60 | operation is explicitly stopped. ||
61 | A non-NULL return from this transfer API represents a "descriptor" for the ||
62 | given transaction. ||
63 | |||
64 | Interface: | ||
65 | struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_sg)( | ||
66 | struct dma_chan *chan, | ||
67 | struct scatterlist *dst_sg, unsigned int dst_nents, | ||
68 | struct scatterlist *src_sg, unsigned int src_nents, | ||
69 | unsigned long flags); | ||
70 | struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_cyclic)( | ||
71 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | ||
72 | size_t period_len, enum dma_data_direction direction); | ||
73 | |||
74 | 4. Submit the transaction and wait for callback notification | ||
75 | To schedule the transaction on the DMA device, the "descriptor" returned in ||
76 | (3) above needs to be submitted. ||
77 | To tell the dma driver that a transaction is ready to be serviced, the ||
78 | descriptor->submit() callback needs to be invoked. This chains the descriptor ||
79 | to the pending queue. ||
80 | The transactions in the pending queue can be activated by calling the ||
81 | issue_pending API. If the channel is idle, the first transaction in the queue ||
82 | is started and subsequent ones are queued up. ||
83 | On completion of the DMA operation, the next descriptor in the queue is ||
84 | submitted and a tasklet is triggered. The tasklet then calls the client ||
85 | driver's completion callback routine for notification, if one is set. ||
86 | Interface: | ||
87 | void dma_async_issue_pending(struct dma_chan *chan); | ||
88 | |||
89 | ============================================================================== | ||
90 | |||
91 | Additional usage notes for DMA driver writers ||
92 | 1/ Although the DMA engine API specifies that completion callback routines ||
93 | cannot submit any new operations, for slave DMA the next transaction is often ||
94 | not available for submission until the callback has run, so this restriction ||
95 | does not apply to DMA-slave devices. They should, however, take care to drop ||
96 | any spin-lock they might be holding before calling the callback routine. ||
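Below, a consolidated sketch of the four steps in the guide above, for a hypothetical client driver "foo". The foo_dev structure, its dma_dev/fifo_phys fields, and the filter match are illustrative assumptions, not part of this patch; only the documented entry points (dma_request_channel, dmaengine_slave_config, the device_prep_slave_sg hook, tx_submit, dma_async_issue_pending) are taken from the API described above.

	#include <linux/completion.h>
	#include <linux/device.h>
	#include <linux/dmaengine.h>
	#include <linux/scatterlist.h>

	struct foo_dev {			/* hypothetical client device */
		struct completion dma_done;
		struct device *dma_dev;		/* wanted DMA controller, from platform data */
		dma_addr_t fifo_phys;		/* peripheral FIFO bus address */
	};

	/* step 1: claim only a channel sitting on the wanted controller */
	static bool foo_filter(struct dma_chan *chan, void *param)
	{
		return chan->device->dev == param;	/* true == take this channel */
	}

	/* step 4: completion callback, invoked from the DMA driver's tasklet */
	static void foo_dma_complete(void *param)
	{
		struct foo_dev *foo = param;

		complete(&foo->dma_done);
	}

	static int foo_receive(struct foo_dev *foo, struct scatterlist *sgl,
			       unsigned int sg_len)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_FROM_DEVICE,
			.src_addr	= foo->fifo_phys,
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 4,
		};
		struct dma_async_tx_descriptor *desc;
		struct dma_chan *chan;
		dma_cap_mask_t mask;
		int ret = 0;

		/* 1. allocate a DMA slave channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		chan = dma_request_channel(mask, foo_filter, foo->dma_dev);
		if (!chan)
			return -ENODEV;

		/* 2. set slave and controller specific parameters */
		dmaengine_slave_config(chan, &cfg);

		/* 3. get a descriptor for the transaction */
		desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
				DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			ret = -ENOMEM;
			goto out;
		}
		desc->callback = foo_dma_complete;
		desc->callback_param = foo;

		/* 4. submit, kick the pending queue, wait for the callback */
		init_completion(&foo->dma_done);
		if (dma_submit_error(desc->tx_submit(desc))) {
			ret = -EIO;
			goto out;
		}
		dma_async_issue_pending(chan);
		wait_for_completion(&foo->dma_done);
	out:
		dma_release_channel(chan);
		return ret;
	}

And a minimal sketch of note 1/ from a dmaengine driver writer's side: snapshot the callback under the channel lock, drop the lock, then call it, so that a slave client may submit its next transaction from the callback. This is the pattern the dw_dmac rework later in this merge adopts; the foo_dma_chan/foo_dma_desc types are illustrative.

	/* hypothetical driver-side types */
	struct foo_dma_desc {
		struct dma_async_tx_descriptor	txd;
		struct list_head		desc_node;
	};

	struct foo_dma_chan {
		spinlock_t		lock;
		struct list_head	free_list;
	};

	static void foo_descriptor_complete(struct foo_dma_chan *fc,
					    struct foo_dma_desc *desc)
	{
		dma_async_tx_callback callback;
		void *param;
		unsigned long flags;

		spin_lock_irqsave(&fc->lock, flags);
		callback = desc->txd.callback;		/* snapshot under the lock */
		param = desc->txd.callback_param;
		list_add(&desc->desc_node, &fc->free_list);
		spin_unlock_irqrestore(&fc->lock, flags);

		if (callback)		/* lock dropped: the client may now resubmit */
			callback(param);
	}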
diff --git a/MAINTAINERS b/MAINTAINERS
index b9f5aee36375..2e94220898d7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2178,6 +2178,8 @@ M: Dan Williams <dan.j.williams@intel.com> | |||
2178 | S: Supported | 2178 | S: Supported |
2179 | F: drivers/dma/ | 2179 | F: drivers/dma/ |
2180 | F: include/linux/dma* | 2180 | F: include/linux/dma* |
2181 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx.git | ||
2182 | T: git git://git.infradead.org/users/vkoul/slave-dma.git (slave-dma) | ||
2181 | 2183 | ||
2182 | DME1737 HARDWARE MONITOR DRIVER | 2184 | DME1737 HARDWARE MONITOR DRIVER |
2183 | M: Juerg Haefliger <juergh@gmail.com> | 2185 | M: Juerg Haefliger <juergh@gmail.com> |
@@ -5451,6 +5453,13 @@ L: linux-serial@vger.kernel.org | |||
5451 | S: Maintained | 5453 | S: Maintained |
5452 | F: drivers/tty/serial | 5454 | F: drivers/tty/serial |
5453 | 5455 | ||
5456 | SYNOPSYS DESIGNWARE DMAC DRIVER | ||
5457 | M: Viresh Kumar <viresh.kumar@st.com> | ||
5458 | S: Maintained | ||
5459 | F: include/linux/dw_dmac.h | ||
5460 | F: drivers/dma/dw_dmac_regs.h | ||
5461 | F: drivers/dma/dw_dmac.c | ||
5462 | |||
5454 | TIMEKEEPING, NTP | 5463 | TIMEKEEPING, NTP |
5455 | M: John Stultz <johnstul@us.ibm.com> | 5464 | M: John Stultz <johnstul@us.ibm.com> |
5456 | M: Thomas Gleixner <tglx@linutronix.de> | 5465 | M: Thomas Gleixner <tglx@linutronix.de> |
diff --git a/drivers/Makefile b/drivers/Makefile
index 6b17f5864340..09f3232bcdcd 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -17,6 +17,9 @@ obj-$(CONFIG_SFI) += sfi/ | |||
17 | # was used and do nothing if so | 17 | # was used and do nothing if so |
18 | obj-$(CONFIG_PNP) += pnp/ | 18 | obj-$(CONFIG_PNP) += pnp/ |
19 | obj-$(CONFIG_ARM_AMBA) += amba/ | 19 | obj-$(CONFIG_ARM_AMBA) += amba/ |
20 | # Many drivers will want to use DMA so this has to be made available | ||
21 | # really early. | ||
22 | obj-$(CONFIG_DMA_ENGINE) += dma/ | ||
20 | 23 | ||
21 | obj-$(CONFIG_VIRTIO) += virtio/ | 24 | obj-$(CONFIG_VIRTIO) += virtio/ |
22 | obj-$(CONFIG_XEN) += xen/ | 25 | obj-$(CONFIG_XEN) += xen/ |
@@ -92,7 +95,6 @@ obj-$(CONFIG_EISA) += eisa/ | |||
92 | obj-y += lguest/ | 95 | obj-y += lguest/ |
93 | obj-$(CONFIG_CPU_FREQ) += cpufreq/ | 96 | obj-$(CONFIG_CPU_FREQ) += cpufreq/ |
94 | obj-$(CONFIG_CPU_IDLE) += cpuidle/ | 97 | obj-$(CONFIG_CPU_IDLE) += cpuidle/ |
95 | obj-$(CONFIG_DMA_ENGINE) += dma/ | ||
96 | obj-$(CONFIG_MMC) += mmc/ | 98 | obj-$(CONFIG_MMC) += mmc/ |
97 | obj-$(CONFIG_MEMSTICK) += memstick/ | 99 | obj-$(CONFIG_MEMSTICK) += memstick/ |
98 | obj-y += leds/ | 100 | obj-y += leds/ |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a572600e44eb..25cf327cd1cb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -200,16 +200,18 @@ config PL330_DMA | |||
200 | platform_data for a dma-pl330 device. | 200 | platform_data for a dma-pl330 device. |
201 | 201 | ||
202 | config PCH_DMA | 202 | config PCH_DMA |
203 | tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support" | 203 | tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support" |
204 | depends on PCI && X86 | 204 | depends on PCI && X86 |
205 | select DMA_ENGINE | 205 | select DMA_ENGINE |
206 | help | 206 | help |
207 | Enable support for Intel EG20T PCH DMA engine. | 207 | Enable support for Intel EG20T PCH DMA engine. |
208 | 208 | ||
209 | This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/ | 209 | This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ |
210 | Output Hub) which is for IVI(In-Vehicle Infotainment) use. | 210 | Output Hub), ML7213 and ML7223. |
211 | ML7213 is companion chip for Intel Atom E6xx series. | 211 | ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is |
212 | ML7213 is completely compatible for Intel EG20T PCH. | 212 | for MP(Media Phone) use. |
213 | ML7213/ML7223 is companion chip for Intel Atom E6xx series. | ||
214 | ML7213/ML7223 is completely compatible for Intel EG20T PCH. | ||
213 | 215 | ||
214 | config IMX_SDMA | 216 | config IMX_SDMA |
215 | tristate "i.MX SDMA support" | 217 | tristate "i.MX SDMA support" |
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
new file mode 100644
index 000000000000..a4af8589330c
--- /dev/null
+++ b/drivers/dma/TODO
@@ -0,0 +1,14 @@ | |||
1 | TODO for slave dma | ||
2 | |||
3 | 1. Move remaining drivers to use new slave interface | ||
4 | 2. Remove old slave pointer mechanism ||
5 | 3. Make issue_pending start the transaction in the drivers below ||
6 | - mpc512x_dma | ||
7 | - imx-dma | ||
8 | - imx-sdma | ||
9 | - mxs-dma.c | ||
10 | - dw_dmac | ||
11 | - intel_mid_dma | ||
12 | - ste_dma40 | ||
13 | 4. Check other subsystems for dma drivers and merge/move to dmaengine | ||
14 | 5. Remove dma_slave_config's dma direction. | ||
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 235f53bf494e..36144f88d718 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -37,8 +37,8 @@ | |||
37 | 37 | ||
38 | #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) | 38 | #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) |
39 | #define ATC_DEFAULT_CTRLA (0) | 39 | #define ATC_DEFAULT_CTRLA (0) |
40 | #define ATC_DEFAULT_CTRLB (ATC_SIF(0) \ | 40 | #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ |
41 | |ATC_DIF(1)) | 41 | |ATC_DIF(AT_DMA_MEM_IF)) |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * Initial number of descriptors to allocate for each channel. This could | 44 | * Initial number of descriptors to allocate for each channel. This could |
@@ -165,6 +165,29 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) | |||
165 | } | 165 | } |
166 | 166 | ||
167 | /** | 167 | /** |
168 | * atc_desc_chain - build chain adding a descriptor ||
169 | * @first: address of first descriptor of the chain ||
170 | * @prev: address of previous descriptor of the chain ||
171 | * @desc: descriptor to queue | ||
172 | * | ||
173 | * Called from prep_* functions | ||
174 | */ | ||
175 | static void atc_desc_chain(struct at_desc **first, struct at_desc **prev, | ||
176 | struct at_desc *desc) | ||
177 | { | ||
178 | if (!(*first)) { | ||
179 | *first = desc; | ||
180 | } else { | ||
181 | /* inform the HW lli about chaining */ | ||
182 | (*prev)->lli.dscr = desc->txd.phys; | ||
183 | /* insert the link descriptor to the LD ring */ | ||
184 | list_add_tail(&desc->desc_node, | ||
185 | &(*first)->tx_list); | ||
186 | } | ||
187 | *prev = desc; | ||
188 | } | ||
189 | |||
190 | /** | ||
168 | * atc_assign_cookie - compute and assign new cookie | 191 | * atc_assign_cookie - compute and assign new cookie |
169 | * @atchan: channel we work on | 192 | * @atchan: channel we work on |
170 | * @desc: descriptor to assign cookie for | 193 | * @desc: descriptor to assign cookie for |
@@ -237,16 +260,12 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) | |||
237 | static void | 260 | static void |
238 | atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) | 261 | atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) |
239 | { | 262 | { |
240 | dma_async_tx_callback callback; | ||
241 | void *param; | ||
242 | struct dma_async_tx_descriptor *txd = &desc->txd; | 263 | struct dma_async_tx_descriptor *txd = &desc->txd; |
243 | 264 | ||
244 | dev_vdbg(chan2dev(&atchan->chan_common), | 265 | dev_vdbg(chan2dev(&atchan->chan_common), |
245 | "descriptor %u complete\n", txd->cookie); | 266 | "descriptor %u complete\n", txd->cookie); |
246 | 267 | ||
247 | atchan->completed_cookie = txd->cookie; | 268 | atchan->completed_cookie = txd->cookie; |
248 | callback = txd->callback; | ||
249 | param = txd->callback_param; | ||
250 | 269 | ||
251 | /* move children to free_list */ | 270 | /* move children to free_list */ |
252 | list_splice_init(&desc->tx_list, &atchan->free_list); | 271 | list_splice_init(&desc->tx_list, &atchan->free_list); |
@@ -278,12 +297,19 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) | |||
278 | } | 297 | } |
279 | } | 298 | } |
280 | 299 | ||
281 | /* | 300 | /* for cyclic transfers, |
282 | * The API requires that no submissions are done from a | 301 | * no need to replay callback function while stopping */ |
283 | * callback, so we don't need to drop the lock here | 302 | if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) { |
284 | */ | 303 | dma_async_tx_callback callback = txd->callback; |
285 | if (callback) | 304 | void *param = txd->callback_param; |
286 | callback(param); | 305 | |
306 | /* | ||
307 | * The API requires that no submissions are done from a | ||
308 | * callback, so we don't need to drop the lock here | ||
309 | */ | ||
310 | if (callback) | ||
311 | callback(param); | ||
312 | } | ||
287 | 313 | ||
288 | dma_run_dependencies(txd); | 314 | dma_run_dependencies(txd); |
289 | } | 315 | } |
@@ -419,6 +445,26 @@ static void atc_handle_error(struct at_dma_chan *atchan) | |||
419 | atc_chain_complete(atchan, bad_desc); | 445 | atc_chain_complete(atchan, bad_desc); |
420 | } | 446 | } |
421 | 447 | ||
448 | /** | ||
449 | * atc_handle_cyclic - at the end of a period, run callback function | ||
450 | * @atchan: channel used for cyclic operations | ||
451 | * | ||
452 | * Called with atchan->lock held and bh disabled | ||
453 | */ | ||
454 | static void atc_handle_cyclic(struct at_dma_chan *atchan) | ||
455 | { | ||
456 | struct at_desc *first = atc_first_active(atchan); | ||
457 | struct dma_async_tx_descriptor *txd = &first->txd; | ||
458 | dma_async_tx_callback callback = txd->callback; | ||
459 | void *param = txd->callback_param; | ||
460 | |||
461 | dev_vdbg(chan2dev(&atchan->chan_common), | ||
462 | "new cyclic period llp 0x%08x\n", | ||
463 | channel_readl(atchan, DSCR)); | ||
464 | |||
465 | if (callback) | ||
466 | callback(param); | ||
467 | } | ||
422 | 468 | ||
423 | /*-- IRQ & Tasklet ---------------------------------------------------*/ | 469 | /*-- IRQ & Tasklet ---------------------------------------------------*/ |
424 | 470 | ||
@@ -426,16 +472,11 @@ static void atc_tasklet(unsigned long data) | |||
426 | { | 472 | { |
427 | struct at_dma_chan *atchan = (struct at_dma_chan *)data; | 473 | struct at_dma_chan *atchan = (struct at_dma_chan *)data; |
428 | 474 | ||
429 | /* Channel cannot be enabled here */ | ||
430 | if (atc_chan_is_enabled(atchan)) { | ||
431 | dev_err(chan2dev(&atchan->chan_common), | ||
432 | "BUG: channel enabled in tasklet\n"); | ||
433 | return; | ||
434 | } | ||
435 | |||
436 | spin_lock(&atchan->lock); | 475 | spin_lock(&atchan->lock); |
437 | if (test_and_clear_bit(0, &atchan->error_status)) | 476 | if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) |
438 | atc_handle_error(atchan); | 477 | atc_handle_error(atchan); |
478 | else if (test_bit(ATC_IS_CYCLIC, &atchan->status)) | ||
479 | atc_handle_cyclic(atchan); | ||
439 | else | 480 | else |
440 | atc_advance_work(atchan); | 481 | atc_advance_work(atchan); |
441 | 482 | ||
@@ -464,12 +505,13 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id) | |||
464 | 505 | ||
465 | for (i = 0; i < atdma->dma_common.chancnt; i++) { | 506 | for (i = 0; i < atdma->dma_common.chancnt; i++) { |
466 | atchan = &atdma->chan[i]; | 507 | atchan = &atdma->chan[i]; |
467 | if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) { | 508 | if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) { |
468 | if (pending & AT_DMA_ERR(i)) { | 509 | if (pending & AT_DMA_ERR(i)) { |
469 | /* Disable channel on AHB error */ | 510 | /* Disable channel on AHB error */ |
470 | dma_writel(atdma, CHDR, atchan->mask); | 511 | dma_writel(atdma, CHDR, |
512 | AT_DMA_RES(i) | atchan->mask); | ||
471 | /* Give information to tasklet */ | 513 | /* Give information to tasklet */ |
472 | set_bit(0, &atchan->error_status); | 514 | set_bit(ATC_IS_ERROR, &atchan->status); |
473 | } | 515 | } |
474 | tasklet_schedule(&atchan->tasklet); | 516 | tasklet_schedule(&atchan->tasklet); |
475 | ret = IRQ_HANDLED; | 517 | ret = IRQ_HANDLED; |
@@ -549,7 +591,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
549 | } | 591 | } |
550 | 592 | ||
551 | ctrla = ATC_DEFAULT_CTRLA; | 593 | ctrla = ATC_DEFAULT_CTRLA; |
552 | ctrlb = ATC_DEFAULT_CTRLB | 594 | ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
553 | | ATC_SRC_ADDR_MODE_INCR | 595 | | ATC_SRC_ADDR_MODE_INCR |
554 | | ATC_DST_ADDR_MODE_INCR | 596 | | ATC_DST_ADDR_MODE_INCR |
555 | | ATC_FC_MEM2MEM; | 597 | | ATC_FC_MEM2MEM; |
@@ -584,16 +626,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
584 | 626 | ||
585 | desc->txd.cookie = 0; | 627 | desc->txd.cookie = 0; |
586 | 628 | ||
587 | if (!first) { | 629 | atc_desc_chain(&first, &prev, desc); |
588 | first = desc; | ||
589 | } else { | ||
590 | /* inform the HW lli about chaining */ | ||
591 | prev->lli.dscr = desc->txd.phys; | ||
592 | /* insert the link descriptor to the LD ring */ | ||
593 | list_add_tail(&desc->desc_node, | ||
594 | &first->tx_list); | ||
595 | } | ||
596 | prev = desc; | ||
597 | } | 630 | } |
598 | 631 | ||
599 | /* First descriptor of the chain embeds additional information | 632 |
@@ -639,7 +672,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
639 | struct scatterlist *sg; | 672 | struct scatterlist *sg; |
640 | size_t total_len = 0; | 673 | size_t total_len = 0; |
641 | 674 | ||
642 | dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n", | 675 | dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n", |
676 | sg_len, | ||
643 | direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", | 677 | direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", |
644 | flags); | 678 | flags); |
645 | 679 | ||
@@ -651,14 +685,15 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
651 | reg_width = atslave->reg_width; | 685 | reg_width = atslave->reg_width; |
652 | 686 | ||
653 | ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; | 687 | ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; |
654 | ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN; | 688 | ctrlb = ATC_IEN; |
655 | 689 | ||
656 | switch (direction) { | 690 | switch (direction) { |
657 | case DMA_TO_DEVICE: | 691 | case DMA_TO_DEVICE: |
658 | ctrla |= ATC_DST_WIDTH(reg_width); | 692 | ctrla |= ATC_DST_WIDTH(reg_width); |
659 | ctrlb |= ATC_DST_ADDR_MODE_FIXED | 693 | ctrlb |= ATC_DST_ADDR_MODE_FIXED |
660 | | ATC_SRC_ADDR_MODE_INCR | 694 | | ATC_SRC_ADDR_MODE_INCR |
661 | | ATC_FC_MEM2PER; | 695 | | ATC_FC_MEM2PER |
696 | | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF); | ||
662 | reg = atslave->tx_reg; | 697 | reg = atslave->tx_reg; |
663 | for_each_sg(sgl, sg, sg_len, i) { | 698 | for_each_sg(sgl, sg, sg_len, i) { |
664 | struct at_desc *desc; | 699 | struct at_desc *desc; |
@@ -682,16 +717,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
682 | | len >> mem_width; | 717 | | len >> mem_width; |
683 | desc->lli.ctrlb = ctrlb; | 718 | desc->lli.ctrlb = ctrlb; |
684 | 719 | ||
685 | if (!first) { | 720 | atc_desc_chain(&first, &prev, desc); |
686 | first = desc; | ||
687 | } else { | ||
688 | /* inform the HW lli about chaining */ | ||
689 | prev->lli.dscr = desc->txd.phys; | ||
690 | /* insert the link descriptor to the LD ring */ | ||
691 | list_add_tail(&desc->desc_node, | ||
692 | &first->tx_list); | ||
693 | } | ||
694 | prev = desc; | ||
695 | total_len += len; | 721 | total_len += len; |
696 | } | 722 | } |
697 | break; | 723 | break; |
@@ -699,7 +725,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
699 | ctrla |= ATC_SRC_WIDTH(reg_width); | 725 | ctrla |= ATC_SRC_WIDTH(reg_width); |
700 | ctrlb |= ATC_DST_ADDR_MODE_INCR | 726 | ctrlb |= ATC_DST_ADDR_MODE_INCR |
701 | | ATC_SRC_ADDR_MODE_FIXED | 727 | | ATC_SRC_ADDR_MODE_FIXED |
702 | | ATC_FC_PER2MEM; | 728 | | ATC_FC_PER2MEM |
729 | | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF); | ||
703 | 730 | ||
704 | reg = atslave->rx_reg; | 731 | reg = atslave->rx_reg; |
705 | for_each_sg(sgl, sg, sg_len, i) { | 732 | for_each_sg(sgl, sg, sg_len, i) { |
@@ -724,16 +751,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
724 | | len >> reg_width; | 751 | | len >> reg_width; |
725 | desc->lli.ctrlb = ctrlb; | 752 | desc->lli.ctrlb = ctrlb; |
726 | 753 | ||
727 | if (!first) { | 754 | atc_desc_chain(&first, &prev, desc); |
728 | first = desc; | ||
729 | } else { | ||
730 | /* inform the HW lli about chaining */ | ||
731 | prev->lli.dscr = desc->txd.phys; | ||
732 | /* insert the link descriptor to the LD ring */ | ||
733 | list_add_tail(&desc->desc_node, | ||
734 | &first->tx_list); | ||
735 | } | ||
736 | prev = desc; | ||
737 | total_len += len; | 755 | total_len += len; |
738 | } | 756 | } |
739 | break; | 757 | break; |
@@ -759,41 +777,211 @@ err_desc_get: | |||
759 | return NULL; | 777 | return NULL; |
760 | } | 778 | } |
761 | 779 | ||
780 | /** | ||
781 | * atc_dma_cyclic_check_values | ||
782 | * Check for too big/unaligned periods and unaligned DMA buffer | ||
783 | */ | ||
784 | static int | ||
785 | atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, | ||
786 | size_t period_len, enum dma_data_direction direction) | ||
787 | { | ||
788 | if (period_len > (ATC_BTSIZE_MAX << reg_width)) | ||
789 | goto err_out; | ||
790 | if (unlikely(period_len & ((1 << reg_width) - 1))) | ||
791 | goto err_out; | ||
792 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) | ||
793 | goto err_out; | ||
794 | if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) | ||
795 | goto err_out; | ||
796 | |||
797 | return 0; | ||
798 | |||
799 | err_out: | ||
800 | return -EINVAL; | ||
801 | } | ||
802 | |||
803 | /** | ||
804 | * atc_dma_cyclic_fill_desc - Fill one period descriptor ||
805 | */ | ||
806 | static int | ||
807 | atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, | ||
808 | unsigned int period_index, dma_addr_t buf_addr, | ||
809 | size_t period_len, enum dma_data_direction direction) | ||
810 | { | ||
811 | u32 ctrla; | ||
812 | unsigned int reg_width = atslave->reg_width; | ||
813 | |||
814 | /* prepare common CTRLA value */ ||
815 | ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla | ||
816 | | ATC_DST_WIDTH(reg_width) | ||
817 | | ATC_SRC_WIDTH(reg_width) | ||
818 | | period_len >> reg_width; | ||
819 | |||
820 | switch (direction) { | ||
821 | case DMA_TO_DEVICE: | ||
822 | desc->lli.saddr = buf_addr + (period_len * period_index); | ||
823 | desc->lli.daddr = atslave->tx_reg; | ||
824 | desc->lli.ctrla = ctrla; | ||
825 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED | ||
826 | | ATC_SRC_ADDR_MODE_INCR | ||
827 | | ATC_FC_MEM2PER | ||
828 | | ATC_SIF(AT_DMA_MEM_IF) | ||
829 | | ATC_DIF(AT_DMA_PER_IF); | ||
830 | break; | ||
831 | |||
832 | case DMA_FROM_DEVICE: | ||
833 | desc->lli.saddr = atslave->rx_reg; | ||
834 | desc->lli.daddr = buf_addr + (period_len * period_index); | ||
835 | desc->lli.ctrla = ctrla; | ||
836 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR | ||
837 | | ATC_SRC_ADDR_MODE_FIXED | ||
838 | | ATC_FC_PER2MEM | ||
839 | | ATC_SIF(AT_DMA_PER_IF) | ||
840 | | ATC_DIF(AT_DMA_MEM_IF); | ||
841 | break; | ||
842 | |||
843 | default: | ||
844 | return -EINVAL; | ||
845 | } | ||
846 | |||
847 | return 0; | ||
848 | } | ||
849 | |||
850 | /** | ||
851 | * atc_prep_dma_cyclic - prepare the cyclic DMA transfer | ||
852 | * @chan: the DMA channel to prepare | ||
853 | * @buf_addr: physical DMA address where the buffer starts | ||
854 | * @buf_len: total number of bytes for the entire buffer | ||
855 | * @period_len: number of bytes for each period | ||
856 | * @direction: transfer direction, to or from device | ||
857 | */ | ||
858 | static struct dma_async_tx_descriptor * | ||
859 | atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | ||
860 | size_t period_len, enum dma_data_direction direction) | ||
861 | { | ||
862 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | ||
863 | struct at_dma_slave *atslave = chan->private; | ||
864 | struct at_desc *first = NULL; | ||
865 | struct at_desc *prev = NULL; | ||
866 | unsigned long was_cyclic; | ||
867 | unsigned int periods = buf_len / period_len; | ||
868 | unsigned int i; | ||
869 | |||
870 | dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", | ||
871 | direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", | ||
872 | buf_addr, | ||
873 | periods, buf_len, period_len); | ||
874 | |||
875 | if (unlikely(!atslave || !buf_len || !period_len)) { | ||
876 | dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n"); | ||
877 | return NULL; | ||
878 | } | ||
879 | |||
880 | was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status); | ||
881 | if (was_cyclic) { | ||
882 | dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n"); | ||
883 | return NULL; | ||
884 | } | ||
885 | |||
886 | /* Check for too big/unaligned periods and unaligned DMA buffer */ | ||
887 | if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr, | ||
888 | period_len, direction)) | ||
889 | goto err_out; | ||
890 | |||
891 | /* build cyclic linked list */ | ||
892 | for (i = 0; i < periods; i++) { | ||
893 | struct at_desc *desc; | ||
894 | |||
895 | desc = atc_desc_get(atchan); | ||
896 | if (!desc) | ||
897 | goto err_desc_get; | ||
898 | |||
899 | if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr, | ||
900 | period_len, direction)) | ||
901 | goto err_desc_get; | ||
902 | |||
903 | atc_desc_chain(&first, &prev, desc); | ||
904 | } | ||
905 | |||
906 | /* let's make a cyclic list */ ||
907 | prev->lli.dscr = first->txd.phys; | ||
908 | |||
909 | /* First descriptor of the chain embeds additional information */ ||
910 | first->txd.cookie = -EBUSY; | ||
911 | first->len = buf_len; | ||
912 | |||
913 | return &first->txd; | ||
914 | |||
915 | err_desc_get: | ||
916 | dev_err(chan2dev(chan), "not enough descriptors available\n"); | ||
917 | atc_desc_put(atchan, first); | ||
918 | err_out: | ||
919 | clear_bit(ATC_IS_CYCLIC, &atchan->status); | ||
920 | return NULL; | ||
921 | } | ||
922 | |||
923 | |||
762 | static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 924 | static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
763 | unsigned long arg) | 925 | unsigned long arg) |
764 | { | 926 | { |
765 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 927 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
766 | struct at_dma *atdma = to_at_dma(chan->device); | 928 | struct at_dma *atdma = to_at_dma(chan->device); |
767 | struct at_desc *desc, *_desc; | 929 | int chan_id = atchan->chan_common.chan_id; |
930 | |||
768 | LIST_HEAD(list); | 931 | LIST_HEAD(list); |
769 | 932 | ||
770 | /* Only supports DMA_TERMINATE_ALL */ | 933 | dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); |
771 | if (cmd != DMA_TERMINATE_ALL) | ||
772 | return -ENXIO; | ||
773 | 934 | ||
774 | /* | 935 | if (cmd == DMA_PAUSE) { |
775 | * This is only called when something went wrong elsewhere, so | 936 | spin_lock_bh(&atchan->lock); |
776 | * we don't really care about the data. Just disable the | ||
777 | * channel. We still have to poll the channel enable bit due | ||
778 | * to AHB/HSB limitations. | ||
779 | */ | ||
780 | spin_lock_bh(&atchan->lock); | ||
781 | 937 | ||
782 | dma_writel(atdma, CHDR, atchan->mask); | 938 | dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); |
939 | set_bit(ATC_IS_PAUSED, &atchan->status); | ||
783 | 940 | ||
784 | /* confirm that this channel is disabled */ | 941 | spin_unlock_bh(&atchan->lock); |
785 | while (dma_readl(atdma, CHSR) & atchan->mask) | 942 | } else if (cmd == DMA_RESUME) { |
786 | cpu_relax(); | 943 | if (!test_bit(ATC_IS_PAUSED, &atchan->status)) |
944 | return 0; | ||
787 | 945 | ||
788 | /* active_list entries will end up before queued entries */ | 946 | spin_lock_bh(&atchan->lock); |
789 | list_splice_init(&atchan->queue, &list); | ||
790 | list_splice_init(&atchan->active_list, &list); | ||
791 | 947 | ||
792 | /* Flush all pending and queued descriptors */ | 948 | dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); |
793 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | 949 | clear_bit(ATC_IS_PAUSED, &atchan->status); |
794 | atc_chain_complete(atchan, desc); | ||
795 | 950 | ||
796 | spin_unlock_bh(&atchan->lock); | 951 | spin_unlock_bh(&atchan->lock); |
952 | } else if (cmd == DMA_TERMINATE_ALL) { | ||
953 | struct at_desc *desc, *_desc; | ||
954 | /* | ||
955 | * This is only called when something went wrong elsewhere, so | ||
956 | * we don't really care about the data. Just disable the | ||
957 | * channel. We still have to poll the channel enable bit due | ||
958 | * to AHB/HSB limitations. | ||
959 | */ | ||
960 | spin_lock_bh(&atchan->lock); | ||
961 | |||
962 | /* disabling channel: must also remove suspend state */ | ||
963 | dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); | ||
964 | |||
965 | /* confirm that this channel is disabled */ | ||
966 | while (dma_readl(atdma, CHSR) & atchan->mask) | ||
967 | cpu_relax(); | ||
968 | |||
969 | /* active_list entries will end up before queued entries */ | ||
970 | list_splice_init(&atchan->queue, &list); | ||
971 | list_splice_init(&atchan->active_list, &list); | ||
972 | |||
973 | /* Flush all pending and queued descriptors */ | ||
974 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | ||
975 | atc_chain_complete(atchan, desc); | ||
976 | |||
977 | clear_bit(ATC_IS_PAUSED, &atchan->status); | ||
978 | /* if channel dedicated to cyclic operations, free it */ | ||
979 | clear_bit(ATC_IS_CYCLIC, &atchan->status); | ||
980 | |||
981 | spin_unlock_bh(&atchan->lock); | ||
982 | } else { | ||
983 | return -ENXIO; | ||
984 | } | ||
797 | 985 | ||
798 | return 0; | 986 | return 0; |
799 | } | 987 | } |
@@ -835,9 +1023,17 @@ atc_tx_status(struct dma_chan *chan, | |||
835 | 1023 | ||
836 | spin_unlock_bh(&atchan->lock); | 1024 | spin_unlock_bh(&atchan->lock); |
837 | 1025 | ||
838 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 1026 | if (ret != DMA_SUCCESS) |
839 | dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n", | 1027 | dma_set_tx_state(txstate, last_complete, last_used, |
840 | cookie, last_complete ? last_complete : 0, | 1028 | atc_first_active(atchan)->len); |
1029 | else | ||
1030 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
1031 | |||
1032 | if (test_bit(ATC_IS_PAUSED, &atchan->status)) | ||
1033 | ret = DMA_PAUSED; | ||
1034 | |||
1035 | dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", | ||
1036 | ret, cookie, last_complete ? last_complete : 0, | ||
841 | last_used ? last_used : 0); | 1037 | last_used ? last_used : 0); |
842 | 1038 | ||
843 | return ret; | 1039 | return ret; |
@@ -853,6 +1049,10 @@ static void atc_issue_pending(struct dma_chan *chan) | |||
853 | 1049 | ||
854 | dev_vdbg(chan2dev(chan), "issue_pending\n"); | 1050 | dev_vdbg(chan2dev(chan), "issue_pending\n"); |
855 | 1051 | ||
1052 | /* Not needed for cyclic transfers */ | ||
1053 | if (test_bit(ATC_IS_CYCLIC, &atchan->status)) | ||
1054 | return; | ||
1055 | |||
856 | spin_lock_bh(&atchan->lock); | 1056 | spin_lock_bh(&atchan->lock); |
857 | if (!atc_chan_is_enabled(atchan)) { | 1057 | if (!atc_chan_is_enabled(atchan)) { |
858 | atc_advance_work(atchan); | 1058 | atc_advance_work(atchan); |
@@ -959,6 +1159,7 @@ static void atc_free_chan_resources(struct dma_chan *chan) | |||
959 | } | 1159 | } |
960 | list_splice_init(&atchan->free_list, &list); | 1160 | list_splice_init(&atchan->free_list, &list); |
961 | atchan->descs_allocated = 0; | 1161 | atchan->descs_allocated = 0; |
1162 | atchan->status = 0; | ||
962 | 1163 | ||
963 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); | 1164 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); |
964 | } | 1165 | } |
@@ -1092,10 +1293,15 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1092 | if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) | 1293 | if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) |
1093 | atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; | 1294 | atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; |
1094 | 1295 | ||
1095 | if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { | 1296 | if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) |
1096 | atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; | 1297 | atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; |
1298 | |||
1299 | if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) | ||
1300 | atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; | ||
1301 | |||
1302 | if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) || | ||
1303 | dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) | ||
1097 | atdma->dma_common.device_control = atc_control; | 1304 | atdma->dma_common.device_control = atc_control; |
1098 | } | ||
1099 | 1305 | ||
1100 | dma_writel(atdma, EN, AT_DMA_ENABLE); | 1306 | dma_writel(atdma, EN, AT_DMA_ENABLE); |
1101 | 1307 | ||
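Before the header changes below, a client-side sketch of the two features the at_hdmac diff above adds: cyclic transfers and pause/resume. It assumes an already-allocated slave channel (with this driver's at_dma_slave parameters still passed via chan->private, per TODO item 1) and the generic dmaengine control wrappers; foo_period_cb and the buffer arguments are illustrative.

	#include <linux/dmaengine.h>

	/* invoked from the driver's tasklet at the end of every period
	 * (atc_handle_cyclic() above); refill/consume the finished period here */
	static void foo_period_cb(void *param)
	{
		/* ... */
	}

	/* start an endless ring of buf_len/period_len periods towards the device */
	static int foo_start_ring(struct dma_chan *chan, dma_addr_t buf,
				  size_t buf_len, size_t period_len, void *ctx)
	{
		struct dma_async_tx_descriptor *desc;

		desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
							    period_len, DMA_TO_DEVICE);
		if (!desc)
			return -ENOMEM;

		desc->callback = foo_period_cb;
		desc->callback_param = ctx;
		desc->tx_submit(desc);	/* starts the ring when the channel is idle */
		return 0;
	}

	/* DMA_PAUSE suspends the channel (atc_tx_status() then reports
	 * DMA_PAUSED); DMA_RESUME lets the ring continue where it stopped */
	static void foo_pause_resume(struct dma_chan *chan)
	{
		dmaengine_pause(chan);
		/* ... e.g. reconfigure the peripheral ... */
		dmaengine_resume(chan);
	}

	/* a cyclic transfer only ends when terminated; this also clears
	 * ATC_IS_CYCLIC so the channel can be reused */
	static int foo_stop_ring(struct dma_chan *chan)
	{
		return dmaengine_terminate_all(chan);
	}

Note that atc_issue_pending() above deliberately ignores cyclic channels, so no issue_pending call appears here; the ring runs from submission until DMA_TERMINATE_ALL.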
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 495457e3dc4b..087dbf1dd39c 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -103,6 +103,10 @@ | |||
103 | /* Bitfields in CTRLB */ | 103 | /* Bitfields in CTRLB */ |
104 | #define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */ | 104 | #define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */ |
105 | #define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */ | 105 | #define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */ |
106 | /* Specify AHB interfaces */ | ||
107 | #define AT_DMA_MEM_IF 0 /* interface 0 as memory interface */ | ||
108 | #define AT_DMA_PER_IF 1 /* interface 1 as peripheral interface */ | ||
109 | |||
106 | #define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */ | 110 | #define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */ |
107 | #define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */ | 111 | #define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */ |
108 | #define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */ | 112 | #define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */ |
@@ -181,12 +185,23 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd) | |||
181 | /*-- Channels --------------------------------------------------------*/ | 185 | /*-- Channels --------------------------------------------------------*/ |
182 | 186 | ||
183 | /** | 187 | /** |
188 | * atc_status - information bits stored in channel status flag | ||
189 | * | ||
190 | * Manipulated with atomic operations. | ||
191 | */ | ||
192 | enum atc_status { | ||
193 | ATC_IS_ERROR = 0, | ||
194 | ATC_IS_PAUSED = 1, | ||
195 | ATC_IS_CYCLIC = 24, | ||
196 | }; | ||
197 | |||
198 | /** | ||
184 | * struct at_dma_chan - internal representation of an Atmel HDMAC channel | 199 | * struct at_dma_chan - internal representation of an Atmel HDMAC channel |
185 | * @chan_common: common dmaengine channel object members | 200 | * @chan_common: common dmaengine channel object members |
186 | * @device: parent device | 201 | * @device: parent device |
187 | * @ch_regs: memory mapped register base | 202 | * @ch_regs: memory mapped register base |
188 | * @mask: channel index in a mask | 203 | * @mask: channel index in a mask |
189 | * @error_status: transmit error status information from irq handler | 204 | * @status: transmit status information from irq/prep* functions |
190 | * to tasklet (use atomic operations) | 205 | * to tasklet (use atomic operations) |
191 | * @tasklet: bottom half to finish transaction work | 206 | * @tasklet: bottom half to finish transaction work |
192 | * @lock: serializes enqueue/dequeue operations to descriptors lists | 207 | * @lock: serializes enqueue/dequeue operations to descriptors lists |
@@ -201,7 +216,7 @@ struct at_dma_chan { | |||
201 | struct at_dma *device; | 216 | struct at_dma *device; |
202 | void __iomem *ch_regs; | 217 | void __iomem *ch_regs; |
203 | u8 mask; | 218 | u8 mask; |
204 | unsigned long error_status; | 219 | unsigned long status; |
205 | struct tasklet_struct tasklet; | 220 | struct tasklet_struct tasklet; |
206 | 221 | ||
207 | spinlock_t lock; | 222 | spinlock_t lock; |
@@ -309,8 +324,8 @@ static void atc_setup_irq(struct at_dma_chan *atchan, int on) | |||
309 | struct at_dma *atdma = to_at_dma(atchan->chan_common.device); | 324 | struct at_dma *atdma = to_at_dma(atchan->chan_common.device); |
310 | u32 ebci; | 325 | u32 ebci; |
311 | 326 | ||
312 | /* enable interrupts on buffer chain completion & error */ | 327 | /* enable interrupts on buffer transfer completion & error */ |
313 | ebci = AT_DMA_CBTC(atchan->chan_common.chan_id) | 328 | ebci = AT_DMA_BTC(atchan->chan_common.chan_id) |
314 | | AT_DMA_ERR(atchan->chan_common.chan_id); | 329 | | AT_DMA_ERR(atchan->chan_common.chan_id); |
315 | if (on) | 330 | if (on) |
316 | dma_writel(atdma, EBCIER, ebci); | 331 | dma_writel(atdma, EBCIER, ebci); |
@@ -347,7 +362,12 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan) | |||
347 | */ | 362 | */ |
348 | static void set_desc_eol(struct at_desc *desc) | 363 | static void set_desc_eol(struct at_desc *desc) |
349 | { | 364 | { |
350 | desc->lli.ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS; | 365 | u32 ctrlb = desc->lli.ctrlb; |
366 | |||
367 | ctrlb &= ~ATC_IEN; | ||
368 | ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS; | ||
369 | |||
370 | desc->lli.ctrlb = ctrlb; | ||
351 | desc->lli.dscr = 0; | 371 | desc->lli.dscr = 0; |
352 | } | 372 | } |
353 | 373 | ||
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index f48e54006518..af8c0b5ed70f 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1610,7 +1610,7 @@ int __init coh901318_init(void) | |||
1610 | { | 1610 | { |
1611 | return platform_driver_probe(&coh901318_driver, coh901318_probe); | 1611 | return platform_driver_probe(&coh901318_driver, coh901318_probe); |
1612 | } | 1612 | } |
1613 | arch_initcall(coh901318_init); | 1613 | subsys_initcall(coh901318_init); |
1614 | 1614 | ||
1615 | void __exit coh901318_exit(void) | 1615 | void __exit coh901318_exit(void) |
1616 | { | 1616 | { |
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 2a2e2fa00e91..4d180ca9a1d8 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -3,6 +3,7 @@ | |||
3 | * AVR32 systems.) | 3 | * AVR32 systems.) |
4 | * | 4 | * |
5 | * Copyright (C) 2007-2008 Atmel Corporation | 5 | * Copyright (C) 2007-2008 Atmel Corporation |
6 | * Copyright (C) 2010-2011 ST Microelectronics | ||
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
@@ -93,8 +94,9 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) | |||
93 | struct dw_desc *desc, *_desc; | 94 | struct dw_desc *desc, *_desc; |
94 | struct dw_desc *ret = NULL; | 95 | struct dw_desc *ret = NULL; |
95 | unsigned int i = 0; | 96 | unsigned int i = 0; |
97 | unsigned long flags; | ||
96 | 98 | ||
97 | spin_lock_bh(&dwc->lock); | 99 | spin_lock_irqsave(&dwc->lock, flags); |
98 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { | 100 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { |
99 | if (async_tx_test_ack(&desc->txd)) { | 101 | if (async_tx_test_ack(&desc->txd)) { |
100 | list_del(&desc->desc_node); | 102 | list_del(&desc->desc_node); |
@@ -104,7 +106,7 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) | |||
104 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); | 106 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); |
105 | i++; | 107 | i++; |
106 | } | 108 | } |
107 | spin_unlock_bh(&dwc->lock); | 109 | spin_unlock_irqrestore(&dwc->lock, flags); |
108 | 110 | ||
109 | dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); | 111 | dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); |
110 | 112 | ||
@@ -130,12 +132,14 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
130 | */ | 132 | */ |
131 | static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | 133 | static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) |
132 | { | 134 | { |
135 | unsigned long flags; | ||
136 | |||
133 | if (desc) { | 137 | if (desc) { |
134 | struct dw_desc *child; | 138 | struct dw_desc *child; |
135 | 139 | ||
136 | dwc_sync_desc_for_cpu(dwc, desc); | 140 | dwc_sync_desc_for_cpu(dwc, desc); |
137 | 141 | ||
138 | spin_lock_bh(&dwc->lock); | 142 | spin_lock_irqsave(&dwc->lock, flags); |
139 | list_for_each_entry(child, &desc->tx_list, desc_node) | 143 | list_for_each_entry(child, &desc->tx_list, desc_node) |
140 | dev_vdbg(chan2dev(&dwc->chan), | 144 | dev_vdbg(chan2dev(&dwc->chan), |
141 | "moving child desc %p to freelist\n", | 145 | "moving child desc %p to freelist\n", |
@@ -143,7 +147,7 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
143 | list_splice_init(&desc->tx_list, &dwc->free_list); | 147 | list_splice_init(&desc->tx_list, &dwc->free_list); |
144 | dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); | 148 | dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); |
145 | list_add(&desc->desc_node, &dwc->free_list); | 149 | list_add(&desc->desc_node, &dwc->free_list); |
146 | spin_unlock_bh(&dwc->lock); | 150 | spin_unlock_irqrestore(&dwc->lock, flags); |
147 | } | 151 | } |
148 | } | 152 | } |
149 | 153 | ||
@@ -195,18 +199,23 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |||
195 | /*----------------------------------------------------------------------*/ | 199 | /*----------------------------------------------------------------------*/ |
196 | 200 | ||
197 | static void | 201 | static void |
198 | dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) | 202 | dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, |
203 | bool callback_required) | ||
199 | { | 204 | { |
200 | dma_async_tx_callback callback; | 205 | dma_async_tx_callback callback = NULL; |
201 | void *param; | 206 | void *param = NULL; |
202 | struct dma_async_tx_descriptor *txd = &desc->txd; | 207 | struct dma_async_tx_descriptor *txd = &desc->txd; |
203 | struct dw_desc *child; | 208 | struct dw_desc *child; |
209 | unsigned long flags; | ||
204 | 210 | ||
205 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); | 211 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); |
206 | 212 | ||
213 | spin_lock_irqsave(&dwc->lock, flags); | ||
207 | dwc->completed = txd->cookie; | 214 | dwc->completed = txd->cookie; |
208 | callback = txd->callback; | 215 | if (callback_required) { |
209 | param = txd->callback_param; | 216 | callback = txd->callback; |
217 | param = txd->callback_param; | ||
218 | } | ||
210 | 219 | ||
211 | dwc_sync_desc_for_cpu(dwc, desc); | 220 | dwc_sync_desc_for_cpu(dwc, desc); |
212 | 221 | ||
@@ -238,11 +247,9 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
238 | } | 247 | } |
239 | } | 248 | } |
240 | 249 | ||
241 | /* | 250 | spin_unlock_irqrestore(&dwc->lock, flags); |
242 | * The API requires that no submissions are done from a | 251 | |
243 | * callback, so we don't need to drop the lock here | 252 | if (callback_required && callback) |
244 | */ | ||
245 | if (callback) | ||
246 | callback(param); | 253 | callback(param); |
247 | } | 254 | } |
248 | 255 | ||
@@ -250,7 +257,9 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
250 | { | 257 | { |
251 | struct dw_desc *desc, *_desc; | 258 | struct dw_desc *desc, *_desc; |
252 | LIST_HEAD(list); | 259 | LIST_HEAD(list); |
260 | unsigned long flags; | ||
253 | 261 | ||
262 | spin_lock_irqsave(&dwc->lock, flags); | ||
254 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 263 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
255 | dev_err(chan2dev(&dwc->chan), | 264 | dev_err(chan2dev(&dwc->chan), |
256 | "BUG: XFER bit set, but channel not idle!\n"); | 265 | "BUG: XFER bit set, but channel not idle!\n"); |
@@ -271,8 +280,10 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
271 | dwc_dostart(dwc, dwc_first_active(dwc)); | 280 | dwc_dostart(dwc, dwc_first_active(dwc)); |
272 | } | 281 | } |
273 | 282 | ||
283 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
284 | |||
274 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | 285 | list_for_each_entry_safe(desc, _desc, &list, desc_node) |
275 | dwc_descriptor_complete(dwc, desc); | 286 | dwc_descriptor_complete(dwc, desc, true); |
276 | } | 287 | } |
277 | 288 | ||
278 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | 289 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) |
@@ -281,7 +292,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
281 | struct dw_desc *desc, *_desc; | 292 | struct dw_desc *desc, *_desc; |
282 | struct dw_desc *child; | 293 | struct dw_desc *child; |
283 | u32 status_xfer; | 294 | u32 status_xfer; |
295 | unsigned long flags; | ||
284 | 296 | ||
297 | spin_lock_irqsave(&dwc->lock, flags); | ||
285 | /* | 298 | /* |
286 | * Clear block interrupt flag before scanning so that we don't | 299 | * Clear block interrupt flag before scanning so that we don't |
287 | * miss any, and read LLP before RAW_XFER to ensure it is | 300 | * miss any, and read LLP before RAW_XFER to ensure it is |
@@ -294,30 +307,47 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
294 | if (status_xfer & dwc->mask) { | 307 | if (status_xfer & dwc->mask) { |
295 | /* Everything we've submitted is done */ | 308 | /* Everything we've submitted is done */ |
296 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 309 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
310 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
311 | |||
297 | dwc_complete_all(dw, dwc); | 312 | dwc_complete_all(dw, dwc); |
298 | return; | 313 | return; |
299 | } | 314 | } |
300 | 315 | ||
301 | if (list_empty(&dwc->active_list)) | 316 | if (list_empty(&dwc->active_list)) { |
317 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
302 | return; | 318 | return; |
319 | } | ||
303 | 320 | ||
304 | dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); | 321 | dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); |
305 | 322 | ||
306 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | 323 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { |
307 | if (desc->lli.llp == llp) | 324 | /* check first descriptors addr */ |
325 | if (desc->txd.phys == llp) { | ||
326 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
327 | return; | ||
328 | } | ||
329 | |||
330 | /* check first descriptors llp */ | ||
331 | if (desc->lli.llp == llp) { | ||
308 | /* This one is currently in progress */ | 332 | /* This one is currently in progress */ |
333 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
309 | return; | 334 | return; |
335 | } | ||
310 | 336 | ||
311 | list_for_each_entry(child, &desc->tx_list, desc_node) | 337 | list_for_each_entry(child, &desc->tx_list, desc_node) |
312 | if (child->lli.llp == llp) | 338 | if (child->lli.llp == llp) { |
313 | /* Currently in progress */ | 339 | /* Currently in progress */ |
340 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
314 | return; | 341 | return; |
342 | } | ||
315 | 343 | ||
316 | /* | 344 | /* |
317 | * No descriptors so far seem to be in progress, i.e. | 345 | * No descriptors so far seem to be in progress, i.e. |
318 | * this one must be done. | 346 | * this one must be done. |
319 | */ | 347 | */ |
320 | dwc_descriptor_complete(dwc, desc); | 348 | spin_unlock_irqrestore(&dwc->lock, flags); |
349 | dwc_descriptor_complete(dwc, desc, true); | ||
350 | spin_lock_irqsave(&dwc->lock, flags); | ||
321 | } | 351 | } |
322 | 352 | ||
323 | dev_err(chan2dev(&dwc->chan), | 353 | dev_err(chan2dev(&dwc->chan), |
@@ -332,6 +362,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
332 | list_move(dwc->queue.next, &dwc->active_list); | 362 | list_move(dwc->queue.next, &dwc->active_list); |
333 | dwc_dostart(dwc, dwc_first_active(dwc)); | 363 | dwc_dostart(dwc, dwc_first_active(dwc)); |
334 | } | 364 | } |
365 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
335 | } | 366 | } |
336 | 367 | ||
337 | static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) | 368 | static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) |
@@ -346,9 +377,12 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
346 | { | 377 | { |
347 | struct dw_desc *bad_desc; | 378 | struct dw_desc *bad_desc; |
348 | struct dw_desc *child; | 379 | struct dw_desc *child; |
380 | unsigned long flags; | ||
349 | 381 | ||
350 | dwc_scan_descriptors(dw, dwc); | 382 | dwc_scan_descriptors(dw, dwc); |
351 | 383 | ||
384 | spin_lock_irqsave(&dwc->lock, flags); | ||
385 | |||
352 | /* | 386 | /* |
353 | * The descriptor currently at the head of the active list is | 387 | * The descriptor currently at the head of the active list is |
354 | * borked. Since we don't have any way to report errors, we'll | 388 | * borked. Since we don't have any way to report errors, we'll |
@@ -378,8 +412,10 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
378 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) | 412 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
379 | dwc_dump_lli(dwc, &child->lli); | 413 | dwc_dump_lli(dwc, &child->lli); |
380 | 414 | ||
415 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
416 | |||
381 | /* Pretend the descriptor completed successfully */ | 417 | /* Pretend the descriptor completed successfully */ |
382 | dwc_descriptor_complete(dwc, bad_desc); | 418 | dwc_descriptor_complete(dwc, bad_desc, true); |
383 | } | 419 | } |
384 | 420 | ||
385 | /* --------------------- Cyclic DMA API extensions -------------------- */ | 421 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
@@ -402,6 +438,8 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr); | |||
402 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | 438 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, |
403 | u32 status_block, u32 status_err, u32 status_xfer) | 439 | u32 status_block, u32 status_err, u32 status_xfer) |
404 | { | 440 | { |
441 | unsigned long flags; | ||
442 | |||
405 | if (status_block & dwc->mask) { | 443 | if (status_block & dwc->mask) { |
406 | void (*callback)(void *param); | 444 | void (*callback)(void *param); |
407 | void *callback_param; | 445 | void *callback_param; |
@@ -412,11 +450,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |||
412 | 450 | ||
413 | callback = dwc->cdesc->period_callback; | 451 | callback = dwc->cdesc->period_callback; |
414 | callback_param = dwc->cdesc->period_callback_param; | 452 | callback_param = dwc->cdesc->period_callback_param; |
415 | if (callback) { | 453 | |
416 | spin_unlock(&dwc->lock); | 454 | if (callback) |
417 | callback(callback_param); | 455 | callback(callback_param); |
418 | spin_lock(&dwc->lock); | ||
419 | } | ||
420 | } | 456 | } |
421 | 457 | ||
422 | /* | 458 | /* |
@@ -430,6 +466,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |||
430 | dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " | 466 | dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " |
431 | "interrupt, stopping DMA transfer\n", | 467 | "interrupt, stopping DMA transfer\n", |
432 | status_xfer ? "xfer" : "error"); | 468 | status_xfer ? "xfer" : "error"); |
469 | |||
470 | spin_lock_irqsave(&dwc->lock, flags); | ||
471 | |||
433 | dev_err(chan2dev(&dwc->chan), | 472 | dev_err(chan2dev(&dwc->chan), |
434 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | 473 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", |
435 | channel_readl(dwc, SAR), | 474 | channel_readl(dwc, SAR), |
@@ -453,6 +492,8 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |||
453 | 492 | ||
454 | for (i = 0; i < dwc->cdesc->periods; i++) | 493 | for (i = 0; i < dwc->cdesc->periods; i++) |
455 | dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); | 494 | dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); |
495 | |||
496 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
456 | } | 497 | } |
457 | } | 498 | } |
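
A note on the locking rework in the two handlers above: the channel lock is now taken inside dwc_handle_error() and dwc_handle_cyclic() with the irqsave variants, covering only register and list manipulation, and it is always dropped before a user callback runs, so the callback may legally submit the next transfer. A minimal sketch of the pattern in this driver's terms (the helper name is hypothetical):

    static void handle_period(struct dw_dma_chan *dwc)
    {
            void (*callback)(void *param);
            void *callback_param;
            unsigned long flags;

            spin_lock_irqsave(&dwc->lock, flags);
            /* ... touch registers and descriptor lists ... */
            callback = dwc->cdesc->period_callback;
            callback_param = dwc->cdesc->period_callback_param;
            spin_unlock_irqrestore(&dwc->lock, flags);

            /* lock dropped: the callback may re-enter the driver */
            if (callback)
                    callback(callback_param);
    }
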
458 | 499 | ||
@@ -476,7 +517,6 @@ static void dw_dma_tasklet(unsigned long data) | |||
476 | 517 | ||
477 | for (i = 0; i < dw->dma.chancnt; i++) { | 518 | for (i = 0; i < dw->dma.chancnt; i++) { |
478 | dwc = &dw->chan[i]; | 519 | dwc = &dw->chan[i]; |
479 | spin_lock(&dwc->lock); | ||
480 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) | 520 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) |
481 | dwc_handle_cyclic(dw, dwc, status_block, status_err, | 521 | dwc_handle_cyclic(dw, dwc, status_block, status_err, |
482 | status_xfer); | 522 | status_xfer); |
@@ -484,7 +524,6 @@ static void dw_dma_tasklet(unsigned long data) | |||
484 | dwc_handle_error(dw, dwc); | 524 | dwc_handle_error(dw, dwc); |
485 | else if ((status_block | status_xfer) & (1 << i)) | 525 | else if ((status_block | status_xfer) & (1 << i)) |
486 | dwc_scan_descriptors(dw, dwc); | 526 | dwc_scan_descriptors(dw, dwc); |
487 | spin_unlock(&dwc->lock); | ||
488 | } | 527 | } |
489 | 528 | ||
490 | /* | 529 | /* |
@@ -539,8 +578,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
539 | struct dw_desc *desc = txd_to_dw_desc(tx); | 578 | struct dw_desc *desc = txd_to_dw_desc(tx); |
540 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); | 579 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); |
541 | dma_cookie_t cookie; | 580 | dma_cookie_t cookie; |
581 | unsigned long flags; | ||
542 | 582 | ||
543 | spin_lock_bh(&dwc->lock); | 583 | spin_lock_irqsave(&dwc->lock, flags); |
544 | cookie = dwc_assign_cookie(dwc, desc); | 584 | cookie = dwc_assign_cookie(dwc, desc); |
545 | 585 | ||
546 | /* | 586 | /* |
@@ -560,7 +600,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
560 | list_add_tail(&desc->desc_node, &dwc->queue); | 600 | list_add_tail(&desc->desc_node, &dwc->queue); |
561 | } | 601 | } |
562 | 602 | ||
563 | spin_unlock_bh(&dwc->lock); | 603 | spin_unlock_irqrestore(&dwc->lock, flags); |
564 | 604 | ||
565 | return cookie; | 605 | return cookie; |
566 | } | 606 | } |
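
The spin_lock_bh() to spin_lock_irqsave() conversion running through this file is what makes "submission from callback" safe: the _bh variants only block softirqs and become wrong once the same lock can be taken from hard-irq context, whereas the irqsave variants disable local interrupts and restore the saved state on unlock. The idiom as used in dwc_tx_submit() above:

    unsigned long flags;

    spin_lock_irqsave(&dwc->lock, flags);   /* valid in any context */
    cookie = dwc_assign_cookie(dwc, desc);
    list_add_tail(&desc->desc_node, &dwc->queue);
    spin_unlock_irqrestore(&dwc->lock, flags);
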
@@ -689,9 +729,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
689 | reg = dws->tx_reg; | 729 | reg = dws->tx_reg; |
690 | for_each_sg(sgl, sg, sg_len, i) { | 730 | for_each_sg(sgl, sg, sg_len, i) { |
691 | struct dw_desc *desc; | 731 | struct dw_desc *desc; |
692 | u32 len; | 732 | u32 len, dlen, mem; |
693 | u32 mem; | 733 | |
734 | mem = sg_phys(sg); | ||
735 | len = sg_dma_len(sg); | ||
736 | mem_width = 2; | ||
737 | if (unlikely(mem & 3 || len & 3)) | ||
738 | mem_width = 0; | ||
694 | 739 | ||
740 | slave_sg_todev_fill_desc: | ||
695 | desc = dwc_desc_get(dwc); | 741 | desc = dwc_desc_get(dwc); |
696 | if (!desc) { | 742 | if (!desc) { |
697 | dev_err(chan2dev(chan), | 743 | dev_err(chan2dev(chan), |
@@ -699,16 +745,19 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
699 | goto err_desc_get; | 745 | goto err_desc_get; |
700 | } | 746 | } |
701 | 747 | ||
702 | mem = sg_phys(sg); | ||
703 | len = sg_dma_len(sg); | ||
704 | mem_width = 2; | ||
705 | if (unlikely(mem & 3 || len & 3)) | ||
706 | mem_width = 0; | ||
707 | |||
708 | desc->lli.sar = mem; | 748 | desc->lli.sar = mem; |
709 | desc->lli.dar = reg; | 749 | desc->lli.dar = reg; |
710 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); | 750 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); |
711 | desc->lli.ctlhi = len >> mem_width; | 751 | if ((len >> mem_width) > DWC_MAX_COUNT) { |
752 | dlen = DWC_MAX_COUNT << mem_width; | ||
753 | mem += dlen; | ||
754 | len -= dlen; | ||
755 | } else { | ||
756 | dlen = len; | ||
757 | len = 0; | ||
758 | } | ||
759 | |||
760 | desc->lli.ctlhi = dlen >> mem_width; | ||
712 | 761 | ||
713 | if (!first) { | 762 | if (!first) { |
714 | first = desc; | 763 | first = desc; |
@@ -722,7 +771,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
722 | &first->tx_list); | 771 | &first->tx_list); |
723 | } | 772 | } |
724 | prev = desc; | 773 | prev = desc; |
725 | total_len += len; | 774 | total_len += dlen; |
775 | |||
776 | if (len) | ||
777 | goto slave_sg_todev_fill_desc; | ||
726 | } | 778 | } |
727 | break; | 779 | break; |
728 | case DMA_FROM_DEVICE: | 780 | case DMA_FROM_DEVICE: |
@@ -735,15 +787,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
735 | reg = dws->rx_reg; | 787 | reg = dws->rx_reg; |
736 | for_each_sg(sgl, sg, sg_len, i) { | 788 | for_each_sg(sgl, sg, sg_len, i) { |
737 | struct dw_desc *desc; | 789 | struct dw_desc *desc; |
738 | u32 len; | 790 | u32 len, dlen, mem; |
739 | u32 mem; | ||
740 | |||
741 | desc = dwc_desc_get(dwc); | ||
742 | if (!desc) { | ||
743 | dev_err(chan2dev(chan), | ||
744 | "not enough descriptors available\n"); | ||
745 | goto err_desc_get; | ||
746 | } | ||
747 | 791 | ||
748 | mem = sg_phys(sg); | 792 | mem = sg_phys(sg); |
749 | len = sg_dma_len(sg); | 793 | len = sg_dma_len(sg); |
@@ -751,10 +795,26 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
751 | if (unlikely(mem & 3 || len & 3)) | 795 | if (unlikely(mem & 3 || len & 3)) |
752 | mem_width = 0; | 796 | mem_width = 0; |
753 | 797 | ||
798 | slave_sg_fromdev_fill_desc: | ||
799 | desc = dwc_desc_get(dwc); | ||
800 | if (!desc) { | ||
801 | dev_err(chan2dev(chan), | ||
802 | "not enough descriptors available\n"); | ||
803 | goto err_desc_get; | ||
804 | } | ||
805 | |||
754 | desc->lli.sar = reg; | 806 | desc->lli.sar = reg; |
755 | desc->lli.dar = mem; | 807 | desc->lli.dar = mem; |
756 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); | 808 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); |
757 | desc->lli.ctlhi = len >> reg_width; | 809 | if ((len >> reg_width) > DWC_MAX_COUNT) { |
810 | dlen = DWC_MAX_COUNT << reg_width; | ||
811 | mem += dlen; | ||
812 | len -= dlen; | ||
813 | } else { | ||
814 | dlen = len; | ||
815 | len = 0; | ||
816 | } | ||
817 | desc->lli.ctlhi = dlen >> reg_width; | ||
758 | 818 | ||
759 | if (!first) { | 819 | if (!first) { |
760 | first = desc; | 820 | first = desc; |
@@ -768,7 +828,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
768 | &first->tx_list); | 828 | &first->tx_list); |
769 | } | 829 | } |
770 | prev = desc; | 830 | prev = desc; |
771 | total_len += len; | 831 | total_len += dlen; |
832 | |||
833 | if (len) | ||
834 | goto slave_sg_fromdev_fill_desc; | ||
772 | } | 835 | } |
773 | break; | 836 | break; |
774 | default: | 837 | default: |
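
Both scatterlist loops above now bound each hardware block by DWC_MAX_COUNT transfer units: an entry whose length exceeds the controller's block-size field is cut into several chained descriptors (the slave_sg_*_fill_desc labels), and total_len accumulates dlen per descriptor rather than the raw sg length; the DMA_FROM_DEVICE branch applies the same split with reg_width in place of mem_width. A standalone sketch of the split arithmetic; the DWC_MAX_COUNT value is assumed and emit_desc() is a hypothetical stand-in for filling one LLI:

    #include <stdint.h>
    #include <stdio.h>

    #define DWC_MAX_COUNT 2048u  /* assumed limit of the BLOCK_TS field */

    static void emit_desc(uint32_t addr, uint32_t dlen)
    {
            printf("desc: addr=0x%08x len=%u\n", addr, dlen);
    }

    /* width is the log2 transfer width in use (2 if address and
     * length are word aligned, else 0, as in the driver). */
    static void fill_descs(uint32_t mem, uint32_t len, unsigned width)
    {
            while (len) {
                    uint32_t dlen;

                    if ((len >> width) > DWC_MAX_COUNT)
                            dlen = DWC_MAX_COUNT << width;
                    else
                            dlen = len;

                    emit_desc(mem, dlen);
                    mem += dlen;
                    len -= dlen;
            }
    }

    int main(void)
    {
            /* 24584 bytes at width 2 -> three full blocks plus a tail */
            fill_descs(0x80000000u, 3 * (2048u << 2) + 8, 2);
            return 0;
    }
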
@@ -799,34 +862,51 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
799 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 862 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
800 | struct dw_dma *dw = to_dw_dma(chan->device); | 863 | struct dw_dma *dw = to_dw_dma(chan->device); |
801 | struct dw_desc *desc, *_desc; | 864 | struct dw_desc *desc, *_desc; |
865 | unsigned long flags; | ||
866 | u32 cfglo; | ||
802 | LIST_HEAD(list); | 867 | LIST_HEAD(list); |
803 | 868 | ||
804 | /* Only supports DMA_TERMINATE_ALL */ | 869 | if (cmd == DMA_PAUSE) { |
805 | if (cmd != DMA_TERMINATE_ALL) | 870 | spin_lock_irqsave(&dwc->lock, flags); |
806 | return -ENXIO; | ||
807 | 871 | ||
808 | /* | 872 | cfglo = channel_readl(dwc, CFG_LO); |
809 | * This is only called when something went wrong elsewhere, so | 873 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); |
810 | * we don't really care about the data. Just disable the | 874 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY)) |
811 | * channel. We still have to poll the channel enable bit due | 875 | cpu_relax(); |
812 | * to AHB/HSB limitations. | ||
813 | */ | ||
814 | spin_lock_bh(&dwc->lock); | ||
815 | 876 | ||
816 | channel_clear_bit(dw, CH_EN, dwc->mask); | 877 | dwc->paused = true; |
878 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
879 | } else if (cmd == DMA_RESUME) { | ||
880 | if (!dwc->paused) | ||
881 | return 0; | ||
817 | 882 | ||
818 | while (dma_readl(dw, CH_EN) & dwc->mask) | 883 | spin_lock_irqsave(&dwc->lock, flags); |
819 | cpu_relax(); | ||
820 | 884 | ||
821 | /* active_list entries will end up before queued entries */ | 885 | cfglo = channel_readl(dwc, CFG_LO); |
822 | list_splice_init(&dwc->queue, &list); | 886 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); |
823 | list_splice_init(&dwc->active_list, &list); | 887 | dwc->paused = false; |
824 | 888 | ||
825 | spin_unlock_bh(&dwc->lock); | 889 | spin_unlock_irqrestore(&dwc->lock, flags); |
890 | } else if (cmd == DMA_TERMINATE_ALL) { | ||
891 | spin_lock_irqsave(&dwc->lock, flags); | ||
826 | 892 | ||
827 | /* Flush all pending and queued descriptors */ | 893 | channel_clear_bit(dw, CH_EN, dwc->mask); |
828 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | 894 | while (dma_readl(dw, CH_EN) & dwc->mask) |
829 | dwc_descriptor_complete(dwc, desc); | 895 | cpu_relax(); |
896 | |||
897 | dwc->paused = false; | ||
898 | |||
899 | /* active_list entries will end up before queued entries */ | ||
900 | list_splice_init(&dwc->queue, &list); | ||
901 | list_splice_init(&dwc->active_list, &list); | ||
902 | |||
903 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
904 | |||
905 | /* Flush all pending and queued descriptors */ | ||
906 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | ||
907 | dwc_descriptor_complete(dwc, desc, false); | ||
908 | } else | ||
909 | return -ENXIO; | ||
830 | 910 | ||
831 | return 0; | 911 | return 0; |
832 | } | 912 | } |
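
dwc_control() now implements three commands rather than failing everything but termination: DMA_PAUSE sets CH_SUSP and spins until the channel FIFO drains, DMA_RESUME clears the suspend bit, and DMA_TERMINATE_ALL additionally clears the paused flag before flushing the active and queued lists. Client drivers reach these through the dmaengine wrappers, which in kernels of this vintage are thin inlines over device_control(); a usage sketch:

    /* Pause an in-flight slave transfer, then let it continue. */
    if (dmaengine_pause(chan))
            dev_warn(dev, "pause not supported\n");
    /* ... */
    dmaengine_resume(chan);

    /* On error or teardown, abort everything queued and active. */
    dmaengine_terminate_all(chan);
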
@@ -846,9 +926,7 @@ dwc_tx_status(struct dma_chan *chan, | |||
846 | 926 | ||
847 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 927 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
848 | if (ret != DMA_SUCCESS) { | 928 | if (ret != DMA_SUCCESS) { |
849 | spin_lock_bh(&dwc->lock); | ||
850 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | 929 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
851 | spin_unlock_bh(&dwc->lock); | ||
852 | 930 | ||
853 | last_complete = dwc->completed; | 931 | last_complete = dwc->completed; |
854 | last_used = chan->cookie; | 932 | last_used = chan->cookie; |
@@ -856,7 +934,14 @@ dwc_tx_status(struct dma_chan *chan, | |||
856 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 934 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
857 | } | 935 | } |
858 | 936 | ||
859 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 937 | if (ret != DMA_SUCCESS) |
938 | dma_set_tx_state(txstate, last_complete, last_used, | ||
939 | dwc_first_active(dwc)->len); | ||
940 | else | ||
941 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
942 | |||
943 | if (dwc->paused) | ||
944 | return DMA_PAUSED; | ||
860 | 945 | ||
861 | return ret; | 946 | return ret; |
862 | } | 947 | } |
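
The tx_status change gives clients a coarse residue: while the cookie is still in flight, the reported residue is the total length of the first active descriptor (not a byte-accurate hardware count), and a suspended channel now reports DMA_PAUSED. A client reads it through device_tx_status(), as was idiomatic at the time; a sketch:

    struct dma_tx_state state;
    enum dma_status status;

    status = chan->device->device_tx_status(chan, cookie, &state);
    if (status == DMA_PAUSED)
            dev_dbg(dev, "channel paused\n");
    else if (status != DMA_SUCCESS)
            dev_dbg(dev, "in flight, residue ~%u bytes\n", state.residue);
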
@@ -865,10 +950,8 @@ static void dwc_issue_pending(struct dma_chan *chan) | |||
865 | { | 950 | { |
866 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 951 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
867 | 952 | ||
868 | spin_lock_bh(&dwc->lock); | ||
869 | if (!list_empty(&dwc->queue)) | 953 | if (!list_empty(&dwc->queue)) |
870 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | 954 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
871 | spin_unlock_bh(&dwc->lock); | ||
872 | } | 955 | } |
873 | 956 | ||
874 | static int dwc_alloc_chan_resources(struct dma_chan *chan) | 957 | static int dwc_alloc_chan_resources(struct dma_chan *chan) |
@@ -880,6 +963,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
880 | int i; | 963 | int i; |
881 | u32 cfghi; | 964 | u32 cfghi; |
882 | u32 cfglo; | 965 | u32 cfglo; |
966 | unsigned long flags; | ||
883 | 967 | ||
884 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); | 968 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); |
885 | 969 | ||
@@ -917,16 +1001,16 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
917 | * doesn't mean what you think it means), and status writeback. | 1001 | * doesn't mean what you think it means), and status writeback. |
918 | */ | 1002 | */ |
919 | 1003 | ||
920 | spin_lock_bh(&dwc->lock); | 1004 | spin_lock_irqsave(&dwc->lock, flags); |
921 | i = dwc->descs_allocated; | 1005 | i = dwc->descs_allocated; |
922 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { | 1006 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { |
923 | spin_unlock_bh(&dwc->lock); | 1007 | spin_unlock_irqrestore(&dwc->lock, flags); |
924 | 1008 | ||
925 | desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); | 1009 | desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); |
926 | if (!desc) { | 1010 | if (!desc) { |
927 | dev_info(chan2dev(chan), | 1011 | dev_info(chan2dev(chan), |
928 | "only allocated %d descriptors\n", i); | 1012 | "only allocated %d descriptors\n", i); |
929 | spin_lock_bh(&dwc->lock); | 1013 | spin_lock_irqsave(&dwc->lock, flags); |
930 | break; | 1014 | break; |
931 | } | 1015 | } |
932 | 1016 | ||
@@ -938,7 +1022,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
938 | sizeof(desc->lli), DMA_TO_DEVICE); | 1022 | sizeof(desc->lli), DMA_TO_DEVICE); |
939 | dwc_desc_put(dwc, desc); | 1023 | dwc_desc_put(dwc, desc); |
940 | 1024 | ||
941 | spin_lock_bh(&dwc->lock); | 1025 | spin_lock_irqsave(&dwc->lock, flags); |
942 | i = ++dwc->descs_allocated; | 1026 | i = ++dwc->descs_allocated; |
943 | } | 1027 | } |
944 | 1028 | ||
@@ -947,7 +1031,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
947 | channel_set_bit(dw, MASK.BLOCK, dwc->mask); | 1031 | channel_set_bit(dw, MASK.BLOCK, dwc->mask); |
948 | channel_set_bit(dw, MASK.ERROR, dwc->mask); | 1032 | channel_set_bit(dw, MASK.ERROR, dwc->mask); |
949 | 1033 | ||
950 | spin_unlock_bh(&dwc->lock); | 1034 | spin_unlock_irqrestore(&dwc->lock, flags); |
951 | 1035 | ||
952 | dev_dbg(chan2dev(chan), | 1036 | dev_dbg(chan2dev(chan), |
953 | "alloc_chan_resources allocated %d descriptors\n", i); | 1037 | "alloc_chan_resources allocated %d descriptors\n", i); |
@@ -960,6 +1044,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
960 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1044 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
961 | struct dw_dma *dw = to_dw_dma(chan->device); | 1045 | struct dw_dma *dw = to_dw_dma(chan->device); |
962 | struct dw_desc *desc, *_desc; | 1046 | struct dw_desc *desc, *_desc; |
1047 | unsigned long flags; | ||
963 | LIST_HEAD(list); | 1048 | LIST_HEAD(list); |
964 | 1049 | ||
965 | dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", | 1050 | dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", |
@@ -970,7 +1055,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
970 | BUG_ON(!list_empty(&dwc->queue)); | 1055 | BUG_ON(!list_empty(&dwc->queue)); |
971 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); | 1056 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); |
972 | 1057 | ||
973 | spin_lock_bh(&dwc->lock); | 1058 | spin_lock_irqsave(&dwc->lock, flags); |
974 | list_splice_init(&dwc->free_list, &list); | 1059 | list_splice_init(&dwc->free_list, &list); |
975 | dwc->descs_allocated = 0; | 1060 | dwc->descs_allocated = 0; |
976 | 1061 | ||
@@ -979,7 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
979 | channel_clear_bit(dw, MASK.BLOCK, dwc->mask); | 1064 | channel_clear_bit(dw, MASK.BLOCK, dwc->mask); |
980 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); | 1065 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); |
981 | 1066 | ||
982 | spin_unlock_bh(&dwc->lock); | 1067 | spin_unlock_irqrestore(&dwc->lock, flags); |
983 | 1068 | ||
984 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { | 1069 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { |
985 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); | 1070 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); |
@@ -1004,13 +1089,14 @@ int dw_dma_cyclic_start(struct dma_chan *chan) | |||
1004 | { | 1089 | { |
1005 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1090 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1006 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 1091 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
1092 | unsigned long flags; | ||
1007 | 1093 | ||
1008 | if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { | 1094 | if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { |
1009 | dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); | 1095 | dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); |
1010 | return -ENODEV; | 1096 | return -ENODEV; |
1011 | } | 1097 | } |
1012 | 1098 | ||
1013 | spin_lock(&dwc->lock); | 1099 | spin_lock_irqsave(&dwc->lock, flags); |
1014 | 1100 | ||
1015 | /* assert channel is idle */ | 1101 | /* assert channel is idle */ |
1016 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 1102 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
@@ -1023,7 +1109,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan) | |||
1023 | channel_readl(dwc, LLP), | 1109 | channel_readl(dwc, LLP), |
1024 | channel_readl(dwc, CTL_HI), | 1110 | channel_readl(dwc, CTL_HI), |
1025 | channel_readl(dwc, CTL_LO)); | 1111 | channel_readl(dwc, CTL_LO)); |
1026 | spin_unlock(&dwc->lock); | 1112 | spin_unlock_irqrestore(&dwc->lock, flags); |
1027 | return -EBUSY; | 1113 | return -EBUSY; |
1028 | } | 1114 | } |
1029 | 1115 | ||
@@ -1038,7 +1124,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan) | |||
1038 | 1124 | ||
1039 | channel_set_bit(dw, CH_EN, dwc->mask); | 1125 | channel_set_bit(dw, CH_EN, dwc->mask); |
1040 | 1126 | ||
1041 | spin_unlock(&dwc->lock); | 1127 | spin_unlock_irqrestore(&dwc->lock, flags); |
1042 | 1128 | ||
1043 | return 0; | 1129 | return 0; |
1044 | } | 1130 | } |
@@ -1054,14 +1140,15 @@ void dw_dma_cyclic_stop(struct dma_chan *chan) | |||
1054 | { | 1140 | { |
1055 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1141 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1056 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 1142 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
1143 | unsigned long flags; | ||
1057 | 1144 | ||
1058 | spin_lock(&dwc->lock); | 1145 | spin_lock_irqsave(&dwc->lock, flags); |
1059 | 1146 | ||
1060 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1147 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1061 | while (dma_readl(dw, CH_EN) & dwc->mask) | 1148 | while (dma_readl(dw, CH_EN) & dwc->mask) |
1062 | cpu_relax(); | 1149 | cpu_relax(); |
1063 | 1150 | ||
1064 | spin_unlock(&dwc->lock); | 1151 | spin_unlock_irqrestore(&dwc->lock, flags); |
1065 | } | 1152 | } |
1066 | EXPORT_SYMBOL(dw_dma_cyclic_stop); | 1153 | EXPORT_SYMBOL(dw_dma_cyclic_stop); |
1067 | 1154 | ||
@@ -1090,17 +1177,18 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1090 | unsigned int reg_width; | 1177 | unsigned int reg_width; |
1091 | unsigned int periods; | 1178 | unsigned int periods; |
1092 | unsigned int i; | 1179 | unsigned int i; |
1180 | unsigned long flags; | ||
1093 | 1181 | ||
1094 | spin_lock_bh(&dwc->lock); | 1182 | spin_lock_irqsave(&dwc->lock, flags); |
1095 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { | 1183 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { |
1096 | spin_unlock_bh(&dwc->lock); | 1184 | spin_unlock_irqrestore(&dwc->lock, flags); |
1097 | dev_dbg(chan2dev(&dwc->chan), | 1185 | dev_dbg(chan2dev(&dwc->chan), |
1098 | "queue and/or active list are not empty\n"); | 1186 | "queue and/or active list are not empty\n"); |
1099 | return ERR_PTR(-EBUSY); | 1187 | return ERR_PTR(-EBUSY); |
1100 | } | 1188 | } |
1101 | 1189 | ||
1102 | was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | 1190 | was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); |
1103 | spin_unlock_bh(&dwc->lock); | 1191 | spin_unlock_irqrestore(&dwc->lock, flags); |
1104 | if (was_cyclic) { | 1192 | if (was_cyclic) { |
1105 | dev_dbg(chan2dev(&dwc->chan), | 1193 | dev_dbg(chan2dev(&dwc->chan), |
1106 | "channel already prepared for cyclic DMA\n"); | 1194 | "channel already prepared for cyclic DMA\n"); |
@@ -1214,13 +1302,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan) | |||
1214 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 1302 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
1215 | struct dw_cyclic_desc *cdesc = dwc->cdesc; | 1303 | struct dw_cyclic_desc *cdesc = dwc->cdesc; |
1216 | int i; | 1304 | int i; |
1305 | unsigned long flags; | ||
1217 | 1306 | ||
1218 | dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); | 1307 | dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); |
1219 | 1308 | ||
1220 | if (!cdesc) | 1309 | if (!cdesc) |
1221 | return; | 1310 | return; |
1222 | 1311 | ||
1223 | spin_lock_bh(&dwc->lock); | 1312 | spin_lock_irqsave(&dwc->lock, flags); |
1224 | 1313 | ||
1225 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1314 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1226 | while (dma_readl(dw, CH_EN) & dwc->mask) | 1315 | while (dma_readl(dw, CH_EN) & dwc->mask) |
@@ -1230,7 +1319,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan) | |||
1230 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 1319 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1231 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 1320 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
1232 | 1321 | ||
1233 | spin_unlock_bh(&dwc->lock); | 1322 | spin_unlock_irqrestore(&dwc->lock, flags); |
1234 | 1323 | ||
1235 | for (i = 0; i < cdesc->periods; i++) | 1324 | for (i = 0; i < cdesc->periods; i++) |
1236 | dwc_desc_put(dwc, cdesc->desc[i]); | 1325 | dwc_desc_put(dwc, cdesc->desc[i]); |
@@ -1487,3 +1576,4 @@ module_exit(dw_exit); | |||
1487 | MODULE_LICENSE("GPL v2"); | 1576 | MODULE_LICENSE("GPL v2"); |
1488 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); | 1577 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); |
1489 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); | 1578 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
1579 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); | ||
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 720f821527f8..c3419518d701 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
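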
@@ -2,6 +2,7 @@ | |||
2 | * Driver for the Synopsys DesignWare AHB DMA Controller | 2 | * Driver for the Synopsys DesignWare AHB DMA Controller |
3 | * | 3 | * |
4 | * Copyright (C) 2005-2007 Atmel Corporation | 4 | * Copyright (C) 2005-2007 Atmel Corporation |
5 | * Copyright (C) 2010-2011 ST Microelectronics | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
@@ -138,6 +139,7 @@ struct dw_dma_chan { | |||
138 | void __iomem *ch_regs; | 139 | void __iomem *ch_regs; |
139 | u8 mask; | 140 | u8 mask; |
140 | u8 priority; | 141 | u8 priority; |
142 | bool paused; | ||
141 | 143 | ||
142 | spinlock_t lock; | 144 | spinlock_t lock; |
143 | 145 | ||
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 3d4ec38b9b62..f653517ef744 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1292,8 +1292,7 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, | |||
1292 | if (err) | 1292 | if (err) |
1293 | goto err_dma; | 1293 | goto err_dma; |
1294 | 1294 | ||
1295 | pm_runtime_set_active(&pdev->dev); | 1295 | pm_runtime_put_noidle(&pdev->dev); |
1296 | pm_runtime_enable(&pdev->dev); | ||
1297 | pm_runtime_allow(&pdev->dev); | 1296 | pm_runtime_allow(&pdev->dev); |
1298 | return 0; | 1297 | return 0; |
1299 | 1298 | ||
@@ -1322,6 +1321,9 @@ err_enable_device: | |||
1322 | static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) | 1321 | static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) |
1323 | { | 1322 | { |
1324 | struct middma_device *device = pci_get_drvdata(pdev); | 1323 | struct middma_device *device = pci_get_drvdata(pdev); |
1324 | |||
1325 | pm_runtime_get_noresume(&pdev->dev); | ||
1326 | pm_runtime_forbid(&pdev->dev); | ||
1325 | middma_shutdown(pdev); | 1327 | middma_shutdown(pdev); |
1326 | pci_dev_put(pdev); | 1328 | pci_dev_put(pdev); |
1327 | kfree(device); | 1329 | kfree(device); |
@@ -1385,13 +1387,20 @@ int dma_resume(struct pci_dev *pci) | |||
1385 | static int dma_runtime_suspend(struct device *dev) | 1387 | static int dma_runtime_suspend(struct device *dev) |
1386 | { | 1388 | { |
1387 | struct pci_dev *pci_dev = to_pci_dev(dev); | 1389 | struct pci_dev *pci_dev = to_pci_dev(dev); |
1388 | return dma_suspend(pci_dev, PMSG_SUSPEND); | 1390 | struct middma_device *device = pci_get_drvdata(pci_dev); |
1391 | |||
1392 | device->state = SUSPENDED; | ||
1393 | return 0; | ||
1389 | } | 1394 | } |
1390 | 1395 | ||
1391 | static int dma_runtime_resume(struct device *dev) | 1396 | static int dma_runtime_resume(struct device *dev) |
1392 | { | 1397 | { |
1393 | struct pci_dev *pci_dev = to_pci_dev(dev); | 1398 | struct pci_dev *pci_dev = to_pci_dev(dev); |
1394 | return dma_resume(pci_dev); | 1399 | struct middma_device *device = pci_get_drvdata(pci_dev); |
1400 | |||
1401 | device->state = RUNNING; | ||
1402 | iowrite32(REG_BIT0, device->dma_base + DMA_CFG); | ||
1403 | return 0; | ||
1395 | } | 1404 | } |
1396 | 1405 | ||
1397 | static int dma_runtime_idle(struct device *dev) | 1406 | static int dma_runtime_idle(struct device *dev) |
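
The intel_mid_dma hunks rebalance the runtime-PM usage count rather than re-arming runtime PM: assuming the PCI core's convention of handing a device to probe already runtime-PM-enabled and with an elevated usage count, the old pm_runtime_set_active()/pm_runtime_enable() pair was redundant, and remove() must take the reference back before shutting the device down. The calls pair up like so:

    /* probe(): drop the core's usage reference, permit autosuspend */
    pm_runtime_put_noidle(&pdev->dev);
    pm_runtime_allow(&pdev->dev);

    /* remove(): the mirror image, blocking runtime PM before teardown */
    pm_runtime_get_noresume(&pdev->dev);
    pm_runtime_forbid(&pdev->dev);

The runtime suspend/resume callbacks likewise become lightweight: they only record the device state and, on resume, re-enable the controller with a single register write, instead of routing through the full system suspend/resume paths.
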
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index f4a51d4d0349..5d65f8377971 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -508,6 +508,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) | |||
508 | struct ioat_ring_ent **ring; | 508 | struct ioat_ring_ent **ring; |
509 | u64 status; | 509 | u64 status; |
510 | int order; | 510 | int order; |
511 | int i = 0; | ||
511 | 512 | ||
512 | /* have we already been set up? */ | 513 | /* have we already been set up? */ |
513 | if (ioat->ring) | 514 | if (ioat->ring) |
@@ -548,8 +549,11 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) | |||
548 | ioat2_start_null_desc(ioat); | 549 | ioat2_start_null_desc(ioat); |
549 | 550 | ||
550 | /* check that we got off the ground */ | 551 | /* check that we got off the ground */ |
551 | udelay(5); | 552 | do { |
552 | status = ioat_chansts(chan); | 553 | udelay(1); |
554 | status = ioat_chansts(chan); | ||
555 | } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status)); | ||
556 | |||
553 | if (is_ioat_active(status) || is_ioat_idle(status)) { | 557 | if (is_ioat_active(status) || is_ioat_idle(status)) { |
554 | set_bit(IOAT_RUN, &chan->state); | 558 | set_bit(IOAT_RUN, &chan->state); |
555 | return 1 << ioat->alloc_order; | 559 | return 1 << ioat->alloc_order; |
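
The I/OAT change swaps a single udelay(5) for a bounded poll: slower parts may need more than 5us before the channel reports active or idle, while fast parts should not pay the full delay. The generic shape of such a poll, as a standalone sketch with hypothetical names:

    #include <stdbool.h>

    /* Retry a readback up to max_tries times with one delay unit
     * between attempts, succeeding as soon as the condition holds. */
    static bool poll_until(bool (*done)(void), void (*delay_one)(void),
                           int max_tries)
    {
            int i;

            for (i = 0; i < max_tries; i++) {
                    delay_one();
                    if (done())
                            return true;
            }
            return false;
    }
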
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c6b01f535b29..e03f811a83dd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -619,7 +619,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, | |||
619 | 619 | ||
620 | if (unlikely(!len)) | 620 | if (unlikely(!len)) |
621 | return NULL; | 621 | return NULL; |
622 | BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); | 622 | BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT); |
623 | 623 | ||
624 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", | 624 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", |
625 | __func__, len); | 625 | __func__, len); |
@@ -652,7 +652,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest, | |||
652 | 652 | ||
653 | if (unlikely(!len)) | 653 | if (unlikely(!len)) |
654 | return NULL; | 654 | return NULL; |
655 | BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); | 655 | BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT); |
656 | 656 | ||
657 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", | 657 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", |
658 | __func__, len); | 658 | __func__, len); |
@@ -686,7 +686,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, | |||
686 | 686 | ||
687 | if (unlikely(!len)) | 687 | if (unlikely(!len)) |
688 | return NULL; | 688 | return NULL; |
689 | BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT)); | 689 | BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); |
690 | 690 | ||
691 | dev_dbg(iop_chan->device->common.dev, | 691 | dev_dbg(iop_chan->device->common.dev, |
692 | "%s src_cnt: %d len: %u flags: %lx\n", | 692 | "%s src_cnt: %d len: %u flags: %lx\n", |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index a25f5f61e0e0..954e334e01bb 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -671,7 +671,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
671 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | 671 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) |
672 | return NULL; | 672 | return NULL; |
673 | 673 | ||
674 | BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); | 674 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); |
675 | 675 | ||
676 | spin_lock_bh(&mv_chan->lock); | 676 | spin_lock_bh(&mv_chan->lock); |
677 | slot_cnt = mv_chan_memcpy_slot_count(len); | 677 | slot_cnt = mv_chan_memcpy_slot_count(len); |
@@ -710,7 +710,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, | |||
710 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | 710 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) |
711 | return NULL; | 711 | return NULL; |
712 | 712 | ||
713 | BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); | 713 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); |
714 | 714 | ||
715 | spin_lock_bh(&mv_chan->lock); | 715 | spin_lock_bh(&mv_chan->lock); |
716 | slot_cnt = mv_chan_memset_slot_count(len); | 716 | slot_cnt = mv_chan_memset_slot_count(len); |
@@ -744,7 +744,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | |||
744 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | 744 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) |
745 | return NULL; | 745 | return NULL; |
746 | 746 | ||
747 | BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); | 747 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); |
748 | 748 | ||
749 | dev_dbg(mv_chan->device->common.dev, | 749 | dev_dbg(mv_chan->device->common.dev, |
750 | "%s src_cnt: %d len: dest %x %u flags: %ld\n", | 750 | "%s src_cnt: %d len: dest %x %u flags: %ld\n", |
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 8d8fef1480a9..ff5b38f9d45b 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -77,10 +77,10 @@ struct pch_dma_regs { | |||
77 | u32 dma_ctl0; | 77 | u32 dma_ctl0; |
78 | u32 dma_ctl1; | 78 | u32 dma_ctl1; |
79 | u32 dma_ctl2; | 79 | u32 dma_ctl2; |
80 | u32 reserved1; | 80 | u32 dma_ctl3; |
81 | u32 dma_sts0; | 81 | u32 dma_sts0; |
82 | u32 dma_sts1; | 82 | u32 dma_sts1; |
83 | u32 reserved2; | 83 | u32 dma_sts2; |
84 | u32 reserved3; | 84 | u32 reserved3; |
85 | struct pch_dma_desc_regs desc[MAX_CHAN_NR]; | 85 | struct pch_dma_desc_regs desc[MAX_CHAN_NR]; |
86 | }; | 86 | }; |
@@ -130,6 +130,7 @@ struct pch_dma { | |||
130 | #define PCH_DMA_CTL0 0x00 | 130 | #define PCH_DMA_CTL0 0x00 |
131 | #define PCH_DMA_CTL1 0x04 | 131 | #define PCH_DMA_CTL1 0x04 |
132 | #define PCH_DMA_CTL2 0x08 | 132 | #define PCH_DMA_CTL2 0x08 |
133 | #define PCH_DMA_CTL3 0x0C | ||
133 | #define PCH_DMA_STS0 0x10 | 134 | #define PCH_DMA_STS0 0x10 |
134 | #define PCH_DMA_STS1 0x14 | 135 | #define PCH_DMA_STS1 0x14 |
135 | 136 | ||
@@ -138,7 +139,8 @@ struct pch_dma { | |||
138 | #define dma_writel(pd, name, val) \ | 139 | #define dma_writel(pd, name, val) \ |
139 | writel((val), (pd)->membase + PCH_DMA_##name) | 140 | writel((val), (pd)->membase + PCH_DMA_##name) |
140 | 141 | ||
141 | static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd) | 142 | static inline |
143 | struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd) | ||
142 | { | 144 | { |
143 | return container_of(txd, struct pch_dma_desc, txd); | 145 | return container_of(txd, struct pch_dma_desc, txd); |
144 | } | 146 | } |
@@ -163,13 +165,15 @@ static inline struct device *chan2parent(struct dma_chan *chan) | |||
163 | return chan->dev->device.parent; | 165 | return chan->dev->device.parent; |
164 | } | 166 | } |
165 | 167 | ||
166 | static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan) | 168 | static inline |
169 | struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan) | ||
167 | { | 170 | { |
168 | return list_first_entry(&pd_chan->active_list, | 171 | return list_first_entry(&pd_chan->active_list, |
169 | struct pch_dma_desc, desc_node); | 172 | struct pch_dma_desc, desc_node); |
170 | } | 173 | } |
171 | 174 | ||
172 | static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan) | 175 | static inline |
176 | struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan) | ||
173 | { | 177 | { |
174 | return list_first_entry(&pd_chan->queue, | 178 | return list_first_entry(&pd_chan->queue, |
175 | struct pch_dma_desc, desc_node); | 179 | struct pch_dma_desc, desc_node); |
@@ -199,16 +203,30 @@ static void pdc_set_dir(struct dma_chan *chan) | |||
199 | struct pch_dma *pd = to_pd(chan->device); | 203 | struct pch_dma *pd = to_pd(chan->device); |
200 | u32 val; | 204 | u32 val; |
201 | 205 | ||
202 | val = dma_readl(pd, CTL0); | 206 | if (chan->chan_id < 8) { |
207 | val = dma_readl(pd, CTL0); | ||
203 | 208 | ||
204 | if (pd_chan->dir == DMA_TO_DEVICE) | 209 | if (pd_chan->dir == DMA_TO_DEVICE) |
205 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + | 210 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + |
206 | DMA_CTL0_DIR_SHIFT_BITS); | 211 | DMA_CTL0_DIR_SHIFT_BITS); |
207 | else | 212 | else |
208 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + | 213 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + |
209 | DMA_CTL0_DIR_SHIFT_BITS)); | 214 | DMA_CTL0_DIR_SHIFT_BITS)); |
215 | |||
216 | dma_writel(pd, CTL0, val); | ||
217 | } else { | ||
218 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ | ||
219 | val = dma_readl(pd, CTL3); | ||
210 | 220 | ||
211 | dma_writel(pd, CTL0, val); | 221 | if (pd_chan->dir == DMA_TO_DEVICE) |
222 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + | ||
223 | DMA_CTL0_DIR_SHIFT_BITS); | ||
224 | else | ||
225 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch + | ||
226 | DMA_CTL0_DIR_SHIFT_BITS)); | ||
227 | |||
228 | dma_writel(pd, CTL3, val); | ||
229 | } | ||
212 | 230 | ||
213 | dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n", | 231 | dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n", |
214 | chan->chan_id, val); | 232 | chan->chan_id, val); |
@@ -219,13 +237,26 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode) | |||
219 | struct pch_dma *pd = to_pd(chan->device); | 237 | struct pch_dma *pd = to_pd(chan->device); |
220 | u32 val; | 238 | u32 val; |
221 | 239 | ||
222 | val = dma_readl(pd, CTL0); | 240 | if (chan->chan_id < 8) { |
241 | val = dma_readl(pd, CTL0); | ||
242 | |||
243 | val &= ~(DMA_CTL0_MODE_MASK_BITS << | ||
244 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); | ||
245 | val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); | ||
223 | 246 | ||
224 | val &= ~(DMA_CTL0_MODE_MASK_BITS << | 247 | dma_writel(pd, CTL0, val); |
225 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); | 248 | } else { |
226 | val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); | 249 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ |
250 | |||
251 | val = dma_readl(pd, CTL3); | ||
252 | |||
253 | val &= ~(DMA_CTL0_MODE_MASK_BITS << | ||
254 | (DMA_CTL0_BITS_PER_CH * ch)); | ||
255 | val |= mode << (DMA_CTL0_BITS_PER_CH * ch); | ||
227 | 256 | ||
228 | dma_writel(pd, CTL0, val); | 257 | dma_writel(pd, CTL3, val); |
258 | |||
259 | } | ||
229 | 260 | ||
230 | dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", | 261 | dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", |
231 | chan->chan_id, val); | 262 | chan->chan_id, val); |
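
pdc_set_dir() and pdc_set_mode() now branch on the channel id because the ML7213's twelve channels no longer fit the eight per-channel slots of CTL0: channels 8-11 reuse the same bit layout in the new CTL3 register. The selection the two functions open-code could be factored as in this sketch:

    /* Map a channel id to its control register and bit offset.
     * Channels 0-7 live in CTL0, channels 8-11 in CTL3. */
    static u32 pdc_ctl_reg(int chan_id, int *shift)
    {
            if (chan_id < 8) {
                    *shift = DMA_CTL0_BITS_PER_CH * chan_id;
                    return PCH_DMA_CTL0;
            }
            *shift = DMA_CTL0_BITS_PER_CH * (chan_id - 8);
            return PCH_DMA_CTL3;
    }
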
@@ -251,9 +282,6 @@ static bool pdc_is_idle(struct pch_dma_chan *pd_chan) | |||
251 | 282 | ||
252 | static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) | 283 | static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) |
253 | { | 284 | { |
254 | struct pch_dma *pd = to_pd(pd_chan->chan.device); | ||
255 | u32 val; | ||
256 | |||
257 | if (!pdc_is_idle(pd_chan)) { | 285 | if (!pdc_is_idle(pd_chan)) { |
258 | dev_err(chan2dev(&pd_chan->chan), | 286 | dev_err(chan2dev(&pd_chan->chan), |
259 | "BUG: Attempt to start non-idle channel\n"); | 287 | "BUG: Attempt to start non-idle channel\n"); |
@@ -279,10 +307,6 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) | |||
279 | channel_writel(pd_chan, NEXT, desc->txd.phys); | 307 | channel_writel(pd_chan, NEXT, desc->txd.phys); |
280 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG); | 308 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG); |
281 | } | 309 | } |
282 | |||
283 | val = dma_readl(pd, CTL2); | ||
284 | val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id); | ||
285 | dma_writel(pd, CTL2, val); | ||
286 | } | 310 | } |
287 | 311 | ||
288 | static void pdc_chain_complete(struct pch_dma_chan *pd_chan, | 312 | static void pdc_chain_complete(struct pch_dma_chan *pd_chan, |
@@ -403,7 +427,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) | |||
403 | { | 427 | { |
404 | struct pch_dma_desc *desc, *_d; | 428 | struct pch_dma_desc *desc, *_d; |
405 | struct pch_dma_desc *ret = NULL; | 429 | struct pch_dma_desc *ret = NULL; |
406 | int i; | 430 | int i = 0; |
407 | 431 | ||
408 | spin_lock(&pd_chan->lock); | 432 | spin_lock(&pd_chan->lock); |
409 | list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { | 433 | list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { |
@@ -478,7 +502,6 @@ static int pd_alloc_chan_resources(struct dma_chan *chan) | |||
478 | spin_unlock_bh(&pd_chan->lock); | 502 | spin_unlock_bh(&pd_chan->lock); |
479 | 503 | ||
480 | pdc_enable_irq(chan, 1); | 504 | pdc_enable_irq(chan, 1); |
481 | pdc_set_dir(chan); | ||
482 | 505 | ||
483 | return pd_chan->descs_allocated; | 506 | return pd_chan->descs_allocated; |
484 | } | 507 | } |
@@ -561,6 +584,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, | |||
561 | else | 584 | else |
562 | return NULL; | 585 | return NULL; |
563 | 586 | ||
587 | pd_chan->dir = direction; | ||
588 | pdc_set_dir(chan); | ||
589 | |||
564 | for_each_sg(sgl, sg, sg_len, i) { | 590 | for_each_sg(sgl, sg, sg_len, i) { |
565 | desc = pdc_desc_get(pd_chan); | 591 | desc = pdc_desc_get(pd_chan); |
566 | 592 | ||
@@ -703,6 +729,7 @@ static void pch_dma_save_regs(struct pch_dma *pd) | |||
703 | pd->regs.dma_ctl0 = dma_readl(pd, CTL0); | 729 | pd->regs.dma_ctl0 = dma_readl(pd, CTL0); |
704 | pd->regs.dma_ctl1 = dma_readl(pd, CTL1); | 730 | pd->regs.dma_ctl1 = dma_readl(pd, CTL1); |
705 | pd->regs.dma_ctl2 = dma_readl(pd, CTL2); | 731 | pd->regs.dma_ctl2 = dma_readl(pd, CTL2); |
732 | pd->regs.dma_ctl3 = dma_readl(pd, CTL3); | ||
706 | 733 | ||
707 | list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { | 734 | list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { |
708 | pd_chan = to_pd_chan(chan); | 735 | pd_chan = to_pd_chan(chan); |
@@ -725,6 +752,7 @@ static void pch_dma_restore_regs(struct pch_dma *pd) | |||
725 | dma_writel(pd, CTL0, pd->regs.dma_ctl0); | 752 | dma_writel(pd, CTL0, pd->regs.dma_ctl0); |
726 | dma_writel(pd, CTL1, pd->regs.dma_ctl1); | 753 | dma_writel(pd, CTL1, pd->regs.dma_ctl1); |
727 | dma_writel(pd, CTL2, pd->regs.dma_ctl2); | 754 | dma_writel(pd, CTL2, pd->regs.dma_ctl2); |
755 | dma_writel(pd, CTL3, pd->regs.dma_ctl3); | ||
728 | 756 | ||
729 | list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { | 757 | list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { |
730 | pd_chan = to_pd_chan(chan); | 758 | pd_chan = to_pd_chan(chan); |
@@ -850,8 +878,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev, | |||
850 | 878 | ||
851 | pd_chan->membase = ®s->desc[i]; | 879 | pd_chan->membase = ®s->desc[i]; |
852 | 880 | ||
853 | pd_chan->dir = (i % 2) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | ||
854 | |||
855 | spin_lock_init(&pd_chan->lock); | 881 | spin_lock_init(&pd_chan->lock); |
856 | 882 | ||
857 | INIT_LIST_HEAD(&pd_chan->active_list); | 883 | INIT_LIST_HEAD(&pd_chan->active_list); |
@@ -929,13 +955,23 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev) | |||
929 | #define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026 | 955 | #define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026 |
930 | #define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B | 956 | #define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B |
931 | #define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034 | 957 | #define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034 |
958 | #define PCI_DEVICE_ID_ML7213_DMA4_12CH 0x8032 | ||
959 | #define PCI_DEVICE_ID_ML7223_DMA1_4CH 0x800B | ||
960 | #define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E | ||
961 | #define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017 | ||
962 | #define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B | ||
932 | 963 | ||
933 | static const struct pci_device_id pch_dma_id_table[] = { | 964 | DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { |
934 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, | 965 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, |
935 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 }, | 966 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 }, |
936 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */ | 967 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */ |
937 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */ | 968 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */ |
938 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */ | 969 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */ |
970 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */ | ||
971 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */ | ||
972 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */ | ||
973 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */ | ||
974 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */ | ||
939 | { 0, }, | 975 | { 0, }, |
940 | }; | 976 | }; |
941 | 977 | ||
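
The table gains the ML7213 I2S function and four ML7223 IOH functions, and moves to the DEFINE_PCI_DEVICE_TABLE() macro of that era. The trailing number in each PCI_VDEVICE() entry initialises .driver_data, which the probe routine reads back as the channel count, roughly:

    static int __devinit pch_dma_probe(struct pci_dev *pdev,
                                       const struct pci_device_id *id)
    {
            int nr_channels = id->driver_data;      /* 4, 8 or 12 */
            /* ... size the per-channel arrays from nr_channels ... */
    }
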
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 3b0247e74cc4..fc457a7e8832 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -2313,7 +2313,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy( | |||
2313 | if (unlikely(!len)) | 2313 | if (unlikely(!len)) |
2314 | return NULL; | 2314 | return NULL; |
2315 | 2315 | ||
2316 | BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT)); | 2316 | BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT); |
2317 | 2317 | ||
2318 | spin_lock_bh(&ppc440spe_chan->lock); | 2318 | spin_lock_bh(&ppc440spe_chan->lock); |
2319 | 2319 | ||
@@ -2354,7 +2354,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset( | |||
2354 | if (unlikely(!len)) | 2354 | if (unlikely(!len)) |
2355 | return NULL; | 2355 | return NULL; |
2356 | 2356 | ||
2357 | BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT)); | 2357 | BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT); |
2358 | 2358 | ||
2359 | spin_lock_bh(&ppc440spe_chan->lock); | 2359 | spin_lock_bh(&ppc440spe_chan->lock); |
2360 | 2360 | ||
@@ -2397,7 +2397,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor( | |||
2397 | dma_dest, dma_src, src_cnt)); | 2397 | dma_dest, dma_src, src_cnt)); |
2398 | if (unlikely(!len)) | 2398 | if (unlikely(!len)) |
2399 | return NULL; | 2399 | return NULL; |
2400 | BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT)); | 2400 | BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT); |
2401 | 2401 | ||
2402 | dev_dbg(ppc440spe_chan->device->common.dev, | 2402 | dev_dbg(ppc440spe_chan->device->common.dev, |
2403 | "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n", | 2403 | "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n", |
@@ -2887,7 +2887,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq( | |||
2887 | ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id, | 2887 | ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id, |
2888 | dst, src, src_cnt)); | 2888 | dst, src, src_cnt)); |
2889 | BUG_ON(!len); | 2889 | BUG_ON(!len); |
2890 | BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT)); | 2890 | BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT); |
2891 | BUG_ON(!src_cnt); | 2891 | BUG_ON(!src_cnt); |
2892 | 2892 | ||
2893 | if (src_cnt == 1 && dst[1] == src[0]) { | 2893 | if (src_cnt == 1 && dst[1] == src[0]) { |
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 94ee15dd3aed..8f222d4db7de 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1829,7 +1829,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) | |||
1829 | { | 1829 | { |
1830 | struct stedma40_platform_data *plat = chan->base->plat_data; | 1830 | struct stedma40_platform_data *plat = chan->base->plat_data; |
1831 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | 1831 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
1832 | dma_addr_t addr; | 1832 | dma_addr_t addr = 0; |
1833 | 1833 | ||
1834 | if (chan->runtime_addr) | 1834 | if (chan->runtime_addr) |
1835 | return chan->runtime_addr; | 1835 | return chan->runtime_addr; |
@@ -2962,4 +2962,4 @@ static int __init stedma40_init(void) | |||
2962 | { | 2962 | { |
2963 | return platform_driver_probe(&d40_driver, d40_probe); | 2963 | return platform_driver_probe(&d40_driver, d40_probe); |
2964 | } | 2964 | } |
2965 | arch_initcall(stedma40_init); | 2965 | subsys_initcall(stedma40_init); |
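
Finally, ste_dma40 registers at subsys_initcall() instead of arch_initcall(). Together with the dmaengine link-order change elsewhere in this merge, this keeps the DMA provider registered ahead of the device-level initcalls its clients use, without running at the very early arch level. For reference, the relevant ordering from include/linux/init.h:

    /* initcall levels (abridged):
     *   arch_initcall      3   -- old placement
     *   subsys_initcall    4   -- new placement
     *   device_initcall    6   -- module_init() of client drivers
     */
    subsys_initcall(stedma40_init);
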
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index 6998d9376ef9..4bfe0a2f7d50 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -3,6 +3,7 @@ | |||
3 | * AVR32 systems.) | 3 | * AVR32 systems.) |
4 | * | 4 | * |
5 | * Copyright (C) 2007 Atmel Corporation | 5 | * Copyright (C) 2007 Atmel Corporation |
6 | * Copyright (C) 2010-2011 ST Microelectronics | ||
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |