author     Linus Torvalds <torvalds@linux-foundation.org>   2013-07-07 14:11:43 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-07-07 14:11:43 -0400
commit     d2b4a646717153a1a180b64d4a8464054dbd700e (patch)
tree       a019907da37389f59ddb429c7d10de178514af1e /drivers
parent     8dce5f3dee21bf976193ddb06426b9727cf5d1a2 (diff)
parent     67eacc1583909d0588c8d5d80c16298c899a6382 (diff)
Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
"Once you have some time from extended weekend celebrations please
consider pulling the following to get:
- Various fixes and PCI driver for dw_dmac by Andy
- DT binding for imx-dma by Markus & imx-sdma by Shawn
- DT fixes for dmaengine by Lars
- jz4740 dmac driver by Lars
- and various fixes across the drivers"
What "extended weekend celebrations"? I'm in the merge window, who has
time for extended celebrations..
* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (40 commits)
DMA: shdma: add DT support
DMA: shdma: shdma_chan_filter() has to be in shdma-base.h
DMA: shdma: (cosmetic) don't re-calculate a pointer
dmaengine: at_hdmac: prepare clk before calling enable
dmaengine/trivial: at_hdmac: add curly brackets to if/else expressions
dmaengine: at_hdmac: remove unsuded atc_cleanup_descriptors()
dmaengine: at_hdmac: add FIFO configuration parameter to DMA DT binding
ARM: at91: dt: add header to define at_hdmac configuration
MIPS: jz4740: Correct clock gate bit for DMA controller
MIPS: jz4740: Remove custom DMA API
MIPS: jz4740: Register jz4740 DMA device
dma: Add a jz4740 dmaengine driver
MIPS: jz4740: Acquire and enable DMA controller clock
dma: mmp_tdma: disable irq when disabling dma channel
dmaengine: PL08x: Avoid collisions with get_signal() macro
dmaengine: dw: select DW_DMAC_BIG_ENDIAN_IO automagically
dma: dw: add PCI part of the driver
dma: dw: split driver to library part and platform code
dma: move dw_dmac driver to an own directory
dw_dmac: don't check resource with devm_ioremap_resource
...
Diffstat (limited to 'drivers')
28 files changed, 1633 insertions, 433 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 3215a3cb3de8..6825957c97fb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -79,25 +79,7 @@ config INTEL_IOP_ADMA | |||
79 | help | 79 | help |
80 | Enable support for the Intel(R) IOP Series RAID engines. | 80 | Enable support for the Intel(R) IOP Series RAID engines. |
81 | 81 | ||
82 | config DW_DMAC | 82 | source "drivers/dma/dw/Kconfig" |
83 | tristate "Synopsys DesignWare AHB DMA support" | ||
84 | depends on GENERIC_HARDIRQS | ||
85 | select DMA_ENGINE | ||
86 | default y if CPU_AT32AP7000 | ||
87 | help | ||
88 | Support the Synopsys DesignWare AHB DMA controller. This | ||
89 | can be integrated in chips such as the Atmel AT32ap7000. | ||
90 | |||
91 | config DW_DMAC_BIG_ENDIAN_IO | ||
92 | bool "Use big endian I/O register access" | ||
93 | default y if AVR32 | ||
94 | depends on DW_DMAC | ||
95 | help | ||
96 | Say yes here to use big endian I/O access when reading and writing | ||
97 | to the DMA controller registers. This is needed on some platforms, | ||
98 | like the Atmel AVR32 architecture. | ||
99 | |||
100 | If unsure, use the default setting. | ||
101 | 83 | ||
102 | config AT_HDMAC | 84 | config AT_HDMAC |
103 | tristate "Atmel AHB DMA support" | 85 | tristate "Atmel AHB DMA support" |
@@ -312,6 +294,12 @@ config MMP_PDMA | |||
312 | help | 294 | help |
313 | Support the MMP PDMA engine for PXA and MMP platfrom. | 295 | Support the MMP PDMA engine for PXA and MMP platfrom. |
314 | 296 | ||
297 | config DMA_JZ4740 | ||
298 | tristate "JZ4740 DMA support" | ||
299 | depends on MACH_JZ4740 | ||
300 | select DMA_ENGINE | ||
301 | select DMA_VIRTUAL_CHANNELS | ||
302 | |||
315 | config DMA_ENGINE | 303 | config DMA_ENGINE |
316 | bool | 304 | bool |
317 | 305 | ||
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a2b0df591f95..5e0f2ef85614 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -15,7 +15,7 @@ obj-$(CONFIG_FSL_DMA) += fsldma.o | |||
15 | obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o | 15 | obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o |
16 | obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ | 16 | obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ |
17 | obj-$(CONFIG_MV_XOR) += mv_xor.o | 17 | obj-$(CONFIG_MV_XOR) += mv_xor.o |
18 | obj-$(CONFIG_DW_DMAC) += dw_dmac.o | 18 | obj-$(CONFIG_DW_DMAC_CORE) += dw/ |
19 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o | 19 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o |
20 | obj-$(CONFIG_MX3_IPU) += ipu/ | 20 | obj-$(CONFIG_MX3_IPU) += ipu/ |
21 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o | 21 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o |
@@ -38,3 +38,4 @@ obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o | |||
38 | obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o | 38 | obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o |
39 | obj-$(CONFIG_DMA_OMAP) += omap-dma.o | 39 | obj-$(CONFIG_DMA_OMAP) += omap-dma.o |
40 | obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o | 40 | obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o |
41 | obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o | ||
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 8bad254a498d..06fe45c74de5 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -299,8 +299,8 @@ static int pl08x_request_mux(struct pl08x_dma_chan *plchan) | |||
299 | const struct pl08x_platform_data *pd = plchan->host->pd; | 299 | const struct pl08x_platform_data *pd = plchan->host->pd; |
300 | int ret; | 300 | int ret; |
301 | 301 | ||
302 | if (plchan->mux_use++ == 0 && pd->get_signal) { | 302 | if (plchan->mux_use++ == 0 && pd->get_xfer_signal) { |
303 | ret = pd->get_signal(plchan->cd); | 303 | ret = pd->get_xfer_signal(plchan->cd); |
304 | if (ret < 0) { | 304 | if (ret < 0) { |
305 | plchan->mux_use = 0; | 305 | plchan->mux_use = 0; |
306 | return ret; | 306 | return ret; |
@@ -318,8 +318,8 @@ static void pl08x_release_mux(struct pl08x_dma_chan *plchan) | |||
318 | if (plchan->signal >= 0) { | 318 | if (plchan->signal >= 0) { |
319 | WARN_ON(plchan->mux_use == 0); | 319 | WARN_ON(plchan->mux_use == 0); |
320 | 320 | ||
321 | if (--plchan->mux_use == 0 && pd->put_signal) { | 321 | if (--plchan->mux_use == 0 && pd->put_xfer_signal) { |
322 | pd->put_signal(plchan->cd, plchan->signal); | 322 | pd->put_xfer_signal(plchan->cd, plchan->signal); |
323 | plchan->signal = -1; | 323 | plchan->signal = -1; |
324 | } | 324 | } |
325 | } | 325 | } |
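The hunk above is a pure rename: get_signal()/put_signal() become get_xfer_signal()/put_xfer_signal() so the platform-data callbacks no longer collide with the get_signal() macro from <linux/signal.h> (the "Avoid collisions with get_signal() macro" entry in the shortlog). A minimal sketch of what a board file supplies after the rename; the helper names and bodies are hypothetical, and the prototypes simply mirror the call sites shown above (check include/linux/amba/pl08x.h for the authoritative declarations):

```c
#include <linux/amba/pl08x.h>

/* Hypothetical board-level request-line bookkeeping for this sketch. */
static int board_get_xfer_signal(const struct pl08x_channel_data *cd)
{
	/* Claim a DMA request line for this channel and return its number,
	 * or a negative errno if none is available. */
	return 0;
}

static void board_put_xfer_signal(const struct pl08x_channel_data *cd, int signal)
{
	/* Release the request line claimed above. */
}

static struct pl08x_platform_data board_pl08x_pd = {
	/* These two fields were .get_signal / .put_signal before this series. */
	.get_xfer_signal = board_get_xfer_signal,
	.put_xfer_signal = board_put_xfer_signal,
	/* ... slave channel table, memcpy channel data, bus settings ... */
};
```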
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index e923cda930f9..c787f38a186a 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -14,6 +14,7 @@ | |||
14 | * found on AT91SAM9263. | 14 | * found on AT91SAM9263. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <dt-bindings/dma/at91.h> | ||
17 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
18 | #include <linux/dmaengine.h> | 19 | #include <linux/dmaengine.h> |
19 | #include <linux/dma-mapping.h> | 20 | #include <linux/dma-mapping.h> |
@@ -54,6 +55,7 @@ MODULE_PARM_DESC(init_nr_desc_per_channel, | |||
54 | 55 | ||
55 | /* prototypes */ | 56 | /* prototypes */ |
56 | static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx); | 57 | static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx); |
58 | static void atc_issue_pending(struct dma_chan *chan); | ||
57 | 59 | ||
58 | 60 | ||
59 | /*----------------------------------------------------------------------*/ | 61 | /*----------------------------------------------------------------------*/ |
@@ -230,6 +232,95 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) | |||
230 | vdbg_dump_regs(atchan); | 232 | vdbg_dump_regs(atchan); |
231 | } | 233 | } |
232 | 234 | ||
235 | /* | ||
236 | * atc_get_current_descriptors - | ||
237 | * locate the descriptor which equal to physical address in DSCR | ||
238 | * @atchan: the channel we want to start | ||
239 | * @dscr_addr: physical descriptor address in DSCR | ||
240 | */ | ||
241 | static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan, | ||
242 | u32 dscr_addr) | ||
243 | { | ||
244 | struct at_desc *desc, *_desc, *child, *desc_cur = NULL; | ||
245 | |||
246 | list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { | ||
247 | if (desc->lli.dscr == dscr_addr) { | ||
248 | desc_cur = desc; | ||
249 | break; | ||
250 | } | ||
251 | |||
252 | list_for_each_entry(child, &desc->tx_list, desc_node) { | ||
253 | if (child->lli.dscr == dscr_addr) { | ||
254 | desc_cur = child; | ||
255 | break; | ||
256 | } | ||
257 | } | ||
258 | } | ||
259 | |||
260 | return desc_cur; | ||
261 | } | ||
262 | |||
263 | /* | ||
264 | * atc_get_bytes_left - | ||
265 | * Get the number of bytes residue in dma buffer, | ||
266 | * @chan: the channel we want to start | ||
267 | */ | ||
268 | static int atc_get_bytes_left(struct dma_chan *chan) | ||
269 | { | ||
270 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | ||
271 | struct at_dma *atdma = to_at_dma(chan->device); | ||
272 | int chan_id = atchan->chan_common.chan_id; | ||
273 | struct at_desc *desc_first = atc_first_active(atchan); | ||
274 | struct at_desc *desc_cur; | ||
275 | int ret = 0, count = 0; | ||
276 | |||
277 | /* | ||
278 | * Initialize necessary values in the first time. | ||
279 | * remain_desc record remain desc length. | ||
280 | */ | ||
281 | if (atchan->remain_desc == 0) | ||
282 | /* First descriptor embedds the transaction length */ | ||
283 | atchan->remain_desc = desc_first->len; | ||
284 | |||
285 | /* | ||
286 | * This happens when current descriptor transfer complete. | ||
287 | * The residual buffer size should reduce current descriptor length. | ||
288 | */ | ||
289 | if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) { | ||
290 | clear_bit(ATC_IS_BTC, &atchan->status); | ||
291 | desc_cur = atc_get_current_descriptors(atchan, | ||
292 | channel_readl(atchan, DSCR)); | ||
293 | if (!desc_cur) { | ||
294 | ret = -EINVAL; | ||
295 | goto out; | ||
296 | } | ||
297 | atchan->remain_desc -= (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) | ||
298 | << (desc_first->tx_width); | ||
299 | if (atchan->remain_desc < 0) { | ||
300 | ret = -EINVAL; | ||
301 | goto out; | ||
302 | } else { | ||
303 | ret = atchan->remain_desc; | ||
304 | } | ||
305 | } else { | ||
306 | /* | ||
307 | * Get residual bytes when current | ||
308 | * descriptor transfer in progress. | ||
309 | */ | ||
310 | count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX) | ||
311 | << (desc_first->tx_width); | ||
312 | ret = atchan->remain_desc - count; | ||
313 | } | ||
314 | /* | ||
315 | * Check fifo empty. | ||
316 | */ | ||
317 | if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id))) | ||
318 | atc_issue_pending(chan); | ||
319 | |||
320 | out: | ||
321 | return ret; | ||
322 | } | ||
323 | |||
233 | /** | 324 | /** |
234 | * atc_chain_complete - finish work for one transaction chain | 325 | * atc_chain_complete - finish work for one transaction chain |
235 | * @atchan: channel we work on | 326 | * @atchan: channel we work on |
@@ -327,37 +418,6 @@ static void atc_complete_all(struct at_dma_chan *atchan) | |||
327 | } | 418 | } |
328 | 419 | ||
329 | /** | 420 | /** |
330 | * atc_cleanup_descriptors - cleanup up finished descriptors in active_list | ||
331 | * @atchan: channel to be cleaned up | ||
332 | * | ||
333 | * Called with atchan->lock held and bh disabled | ||
334 | */ | ||
335 | static void atc_cleanup_descriptors(struct at_dma_chan *atchan) | ||
336 | { | ||
337 | struct at_desc *desc, *_desc; | ||
338 | struct at_desc *child; | ||
339 | |||
340 | dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n"); | ||
341 | |||
342 | list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { | ||
343 | if (!(desc->lli.ctrla & ATC_DONE)) | ||
344 | /* This one is currently in progress */ | ||
345 | return; | ||
346 | |||
347 | list_for_each_entry(child, &desc->tx_list, desc_node) | ||
348 | if (!(child->lli.ctrla & ATC_DONE)) | ||
349 | /* Currently in progress */ | ||
350 | return; | ||
351 | |||
352 | /* | ||
353 | * No descriptors so far seem to be in progress, i.e. | ||
354 | * this chain must be done. | ||
355 | */ | ||
356 | atc_chain_complete(atchan, desc); | ||
357 | } | ||
358 | } | ||
359 | |||
360 | /** | ||
361 | * atc_advance_work - at the end of a transaction, move forward | 421 | * atc_advance_work - at the end of a transaction, move forward |
362 | * @atchan: channel where the transaction ended | 422 | * @atchan: channel where the transaction ended |
363 | * | 423 | * |
@@ -496,6 +556,8 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id) | |||
496 | /* Give information to tasklet */ | 556 | /* Give information to tasklet */ |
497 | set_bit(ATC_IS_ERROR, &atchan->status); | 557 | set_bit(ATC_IS_ERROR, &atchan->status); |
498 | } | 558 | } |
559 | if (pending & AT_DMA_BTC(i)) | ||
560 | set_bit(ATC_IS_BTC, &atchan->status); | ||
499 | tasklet_schedule(&atchan->tasklet); | 561 | tasklet_schedule(&atchan->tasklet); |
500 | ret = IRQ_HANDLED; | 562 | ret = IRQ_HANDLED; |
501 | } | 563 | } |
@@ -615,6 +677,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
615 | /* First descriptor of the chain embedds additional information */ | 677 | /* First descriptor of the chain embedds additional information */ |
616 | first->txd.cookie = -EBUSY; | 678 | first->txd.cookie = -EBUSY; |
617 | first->len = len; | 679 | first->len = len; |
680 | first->tx_width = src_width; | ||
618 | 681 | ||
619 | /* set end-of-link to the last link descriptor of list*/ | 682 | /* set end-of-link to the last link descriptor of list*/ |
620 | set_desc_eol(desc); | 683 | set_desc_eol(desc); |
@@ -761,6 +824,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
761 | /* First descriptor of the chain embedds additional information */ | 824 | /* First descriptor of the chain embedds additional information */ |
762 | first->txd.cookie = -EBUSY; | 825 | first->txd.cookie = -EBUSY; |
763 | first->len = total_len; | 826 | first->len = total_len; |
827 | first->tx_width = reg_width; | ||
764 | 828 | ||
765 | /* first link descriptor of list is responsible of flags */ | 829 | /* first link descriptor of list is responsible of flags */ |
766 | first->txd.flags = flags; /* client is in control of this ack */ | 830 | first->txd.flags = flags; /* client is in control of this ack */ |
@@ -919,6 +983,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | |||
919 | /* First descriptor of the chain embedds additional information */ | 983 | /* First descriptor of the chain embedds additional information */ |
920 | first->txd.cookie = -EBUSY; | 984 | first->txd.cookie = -EBUSY; |
921 | first->len = buf_len; | 985 | first->len = buf_len; |
986 | first->tx_width = reg_width; | ||
922 | 987 | ||
923 | return &first->txd; | 988 | return &first->txd; |
924 | 989 | ||
@@ -1032,34 +1097,36 @@ atc_tx_status(struct dma_chan *chan, | |||
1032 | struct dma_tx_state *txstate) | 1097 | struct dma_tx_state *txstate) |
1033 | { | 1098 | { |
1034 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 1099 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1035 | dma_cookie_t last_used; | ||
1036 | dma_cookie_t last_complete; | ||
1037 | unsigned long flags; | 1100 | unsigned long flags; |
1038 | enum dma_status ret; | 1101 | enum dma_status ret; |
1039 | 1102 | int bytes = 0; | |
1040 | spin_lock_irqsave(&atchan->lock, flags); | ||
1041 | 1103 | ||
1042 | ret = dma_cookie_status(chan, cookie, txstate); | 1104 | ret = dma_cookie_status(chan, cookie, txstate); |
1043 | if (ret != DMA_SUCCESS) { | 1105 | if (ret == DMA_SUCCESS) |
1044 | atc_cleanup_descriptors(atchan); | 1106 | return ret; |
1107 | /* | ||
1108 | * There's no point calculating the residue if there's | ||
1109 | * no txstate to store the value. | ||
1110 | */ | ||
1111 | if (!txstate) | ||
1112 | return DMA_ERROR; | ||
1045 | 1113 | ||
1046 | ret = dma_cookie_status(chan, cookie, txstate); | 1114 | spin_lock_irqsave(&atchan->lock, flags); |
1047 | } | ||
1048 | 1115 | ||
1049 | last_complete = chan->completed_cookie; | 1116 | /* Get number of bytes left in the active transactions */ |
1050 | last_used = chan->cookie; | 1117 | bytes = atc_get_bytes_left(chan); |
1051 | 1118 | ||
1052 | spin_unlock_irqrestore(&atchan->lock, flags); | 1119 | spin_unlock_irqrestore(&atchan->lock, flags); |
1053 | 1120 | ||
1054 | if (ret != DMA_SUCCESS) | 1121 | if (unlikely(bytes < 0)) { |
1055 | dma_set_residue(txstate, atc_first_active(atchan)->len); | 1122 | dev_vdbg(chan2dev(chan), "get residual bytes error\n"); |
1056 | 1123 | return DMA_ERROR; | |
1057 | if (atc_chan_is_paused(atchan)) | 1124 | } else { |
1058 | ret = DMA_PAUSED; | 1125 | dma_set_residue(txstate, bytes); |
1126 | } | ||
1059 | 1127 | ||
1060 | dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", | 1128 | dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n", |
1061 | ret, cookie, last_complete ? last_complete : 0, | 1129 | ret, cookie, bytes); |
1062 | last_used ? last_used : 0); | ||
1063 | 1130 | ||
1064 | return ret; | 1131 | return ret; |
1065 | } | 1132 | } |
@@ -1120,7 +1187,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) | |||
1120 | */ | 1187 | */ |
1121 | BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); | 1188 | BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); |
1122 | 1189 | ||
1123 | /* if cfg configuration specified take it instad of default */ | 1190 | /* if cfg configuration specified take it instead of default */ |
1124 | if (atslave->cfg) | 1191 | if (atslave->cfg) |
1125 | cfg = atslave->cfg; | 1192 | cfg = atslave->cfg; |
1126 | } | 1193 | } |
@@ -1143,6 +1210,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) | |||
1143 | 1210 | ||
1144 | spin_lock_irqsave(&atchan->lock, flags); | 1211 | spin_lock_irqsave(&atchan->lock, flags); |
1145 | atchan->descs_allocated = i; | 1212 | atchan->descs_allocated = i; |
1213 | atchan->remain_desc = 0; | ||
1146 | list_splice(&tmp_list, &atchan->free_list); | 1214 | list_splice(&tmp_list, &atchan->free_list); |
1147 | dma_cookie_init(chan); | 1215 | dma_cookie_init(chan); |
1148 | spin_unlock_irqrestore(&atchan->lock, flags); | 1216 | spin_unlock_irqrestore(&atchan->lock, flags); |
@@ -1185,6 +1253,7 @@ static void atc_free_chan_resources(struct dma_chan *chan) | |||
1185 | list_splice_init(&atchan->free_list, &list); | 1253 | list_splice_init(&atchan->free_list, &list); |
1186 | atchan->descs_allocated = 0; | 1254 | atchan->descs_allocated = 0; |
1187 | atchan->status = 0; | 1255 | atchan->status = 0; |
1256 | atchan->remain_desc = 0; | ||
1188 | 1257 | ||
1189 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); | 1258 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); |
1190 | } | 1259 | } |
@@ -1223,14 +1292,31 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, | |||
1223 | atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); | 1292 | atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); |
1224 | if (!atslave) | 1293 | if (!atslave) |
1225 | return NULL; | 1294 | return NULL; |
1295 | |||
1296 | atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW; | ||
1226 | /* | 1297 | /* |
1227 | * We can fill both SRC_PER and DST_PER, one of these fields will be | 1298 | * We can fill both SRC_PER and DST_PER, one of these fields will be |
1228 | * ignored depending on DMA transfer direction. | 1299 | * ignored depending on DMA transfer direction. |
1229 | */ | 1300 | */ |
1230 | per_id = dma_spec->args[1]; | 1301 | per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK; |
1231 | atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW | 1302 | atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id) |
1232 | | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id) | 1303 | | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id); |
1233 | | ATC_SRC_PER(per_id); | 1304 | /* |
1305 | * We have to translate the value we get from the device tree since | ||
1306 | * the half FIFO configuration value had to be 0 to keep backward | ||
1307 | * compatibility. | ||
1308 | */ | ||
1309 | switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) { | ||
1310 | case AT91_DMA_CFG_FIFOCFG_ALAP: | ||
1311 | atslave->cfg |= ATC_FIFOCFG_LARGESTBURST; | ||
1312 | break; | ||
1313 | case AT91_DMA_CFG_FIFOCFG_ASAP: | ||
1314 | atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE; | ||
1315 | break; | ||
1316 | case AT91_DMA_CFG_FIFOCFG_HALF: | ||
1317 | default: | ||
1318 | atslave->cfg |= ATC_FIFOCFG_HALFFIFO; | ||
1319 | } | ||
1234 | atslave->dma_dev = &dmac_pdev->dev; | 1320 | atslave->dma_dev = &dmac_pdev->dev; |
1235 | 1321 | ||
1236 | chan = dma_request_channel(mask, at_dma_filter, atslave); | 1322 | chan = dma_request_channel(mask, at_dma_filter, atslave); |
@@ -1374,7 +1460,9 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1374 | err = PTR_ERR(atdma->clk); | 1460 | err = PTR_ERR(atdma->clk); |
1375 | goto err_clk; | 1461 | goto err_clk; |
1376 | } | 1462 | } |
1377 | clk_enable(atdma->clk); | 1463 | err = clk_prepare_enable(atdma->clk); |
1464 | if (err) | ||
1465 | goto err_clk_prepare; | ||
1378 | 1466 | ||
1379 | /* force dma off, just in case */ | 1467 | /* force dma off, just in case */ |
1380 | at_dma_off(atdma); | 1468 | at_dma_off(atdma); |
@@ -1472,10 +1560,10 @@ err_of_dma_controller_register: | |||
1472 | dma_async_device_unregister(&atdma->dma_common); | 1560 | dma_async_device_unregister(&atdma->dma_common); |
1473 | dma_pool_destroy(atdma->dma_desc_pool); | 1561 | dma_pool_destroy(atdma->dma_desc_pool); |
1474 | err_pool_create: | 1562 | err_pool_create: |
1475 | platform_set_drvdata(pdev, NULL); | ||
1476 | free_irq(platform_get_irq(pdev, 0), atdma); | 1563 | free_irq(platform_get_irq(pdev, 0), atdma); |
1477 | err_irq: | 1564 | err_irq: |
1478 | clk_disable(atdma->clk); | 1565 | clk_disable_unprepare(atdma->clk); |
1566 | err_clk_prepare: | ||
1479 | clk_put(atdma->clk); | 1567 | clk_put(atdma->clk); |
1480 | err_clk: | 1568 | err_clk: |
1481 | iounmap(atdma->regs); | 1569 | iounmap(atdma->regs); |
@@ -1497,7 +1585,6 @@ static int at_dma_remove(struct platform_device *pdev) | |||
1497 | dma_async_device_unregister(&atdma->dma_common); | 1585 | dma_async_device_unregister(&atdma->dma_common); |
1498 | 1586 | ||
1499 | dma_pool_destroy(atdma->dma_desc_pool); | 1587 | dma_pool_destroy(atdma->dma_desc_pool); |
1500 | platform_set_drvdata(pdev, NULL); | ||
1501 | free_irq(platform_get_irq(pdev, 0), atdma); | 1588 | free_irq(platform_get_irq(pdev, 0), atdma); |
1502 | 1589 | ||
1503 | list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, | 1590 | list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, |
@@ -1512,7 +1599,7 @@ static int at_dma_remove(struct platform_device *pdev) | |||
1512 | list_del(&chan->device_node); | 1599 | list_del(&chan->device_node); |
1513 | } | 1600 | } |
1514 | 1601 | ||
1515 | clk_disable(atdma->clk); | 1602 | clk_disable_unprepare(atdma->clk); |
1516 | clk_put(atdma->clk); | 1603 | clk_put(atdma->clk); |
1517 | 1604 | ||
1518 | iounmap(atdma->regs); | 1605 | iounmap(atdma->regs); |
@@ -1531,7 +1618,7 @@ static void at_dma_shutdown(struct platform_device *pdev) | |||
1531 | struct at_dma *atdma = platform_get_drvdata(pdev); | 1618 | struct at_dma *atdma = platform_get_drvdata(pdev); |
1532 | 1619 | ||
1533 | at_dma_off(platform_get_drvdata(pdev)); | 1620 | at_dma_off(platform_get_drvdata(pdev)); |
1534 | clk_disable(atdma->clk); | 1621 | clk_disable_unprepare(atdma->clk); |
1535 | } | 1622 | } |
1536 | 1623 | ||
1537 | static int at_dma_prepare(struct device *dev) | 1624 | static int at_dma_prepare(struct device *dev) |
@@ -1588,7 +1675,7 @@ static int at_dma_suspend_noirq(struct device *dev) | |||
1588 | 1675 | ||
1589 | /* disable DMA controller */ | 1676 | /* disable DMA controller */ |
1590 | at_dma_off(atdma); | 1677 | at_dma_off(atdma); |
1591 | clk_disable(atdma->clk); | 1678 | clk_disable_unprepare(atdma->clk); |
1592 | return 0; | 1679 | return 0; |
1593 | } | 1680 | } |
1594 | 1681 | ||
@@ -1618,7 +1705,7 @@ static int at_dma_resume_noirq(struct device *dev) | |||
1618 | struct dma_chan *chan, *_chan; | 1705 | struct dma_chan *chan, *_chan; |
1619 | 1706 | ||
1620 | /* bring back DMA controller */ | 1707 | /* bring back DMA controller */ |
1621 | clk_enable(atdma->clk); | 1708 | clk_prepare_enable(atdma->clk); |
1622 | dma_writel(atdma, EN, AT_DMA_ENABLE); | 1709 | dma_writel(atdma, EN, AT_DMA_ENABLE); |
1623 | 1710 | ||
1624 | /* clear any pending interrupt */ | 1711 | /* clear any pending interrupt */ |
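With the atc_tx_status() rework above, at_hdmac now reports a real per-transfer residue (derived from CTRLA/DSCR and the saved tx_width) instead of always returning the whole first-descriptor length. A minimal sketch, assuming the channel and cookie come from the usual prep/submit path, of how a dmaengine client reads that residue:

```c
#include <linux/dmaengine.h>

/* Assumes 'chan' was obtained via dma_request_slave_channel() or similar,
 * and 'cookie' came from dmaengine_submit() on a previously prepared
 * descriptor. Error handling is trimmed for brevity. */
static size_t query_residue(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_ERROR)
		return 0;	/* at_hdmac returns DMA_ERROR when it cannot compute it */

	/* For in-flight transfers this is now the remaining byte count of the
	 * active transaction, not the total transaction length. */
	return state.residue;
}
```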
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index c604d26fd4d3..f31d647acdfa 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -182,6 +182,7 @@ struct at_lli { | |||
182 | * @txd: support for the async_tx api | 182 | * @txd: support for the async_tx api |
183 | * @desc_node: node on the channed descriptors list | 183 | * @desc_node: node on the channed descriptors list |
184 | * @len: total transaction bytecount | 184 | * @len: total transaction bytecount |
185 | * @tx_width: transfer width | ||
185 | */ | 186 | */ |
186 | struct at_desc { | 187 | struct at_desc { |
187 | /* FIRST values the hardware uses */ | 188 | /* FIRST values the hardware uses */ |
@@ -192,6 +193,7 @@ struct at_desc { | |||
192 | struct dma_async_tx_descriptor txd; | 193 | struct dma_async_tx_descriptor txd; |
193 | struct list_head desc_node; | 194 | struct list_head desc_node; |
194 | size_t len; | 195 | size_t len; |
196 | u32 tx_width; | ||
195 | }; | 197 | }; |
196 | 198 | ||
197 | static inline struct at_desc * | 199 | static inline struct at_desc * |
@@ -211,6 +213,7 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd) | |||
211 | enum atc_status { | 213 | enum atc_status { |
212 | ATC_IS_ERROR = 0, | 214 | ATC_IS_ERROR = 0, |
213 | ATC_IS_PAUSED = 1, | 215 | ATC_IS_PAUSED = 1, |
216 | ATC_IS_BTC = 2, | ||
214 | ATC_IS_CYCLIC = 24, | 217 | ATC_IS_CYCLIC = 24, |
215 | }; | 218 | }; |
216 | 219 | ||
@@ -228,6 +231,7 @@ enum atc_status { | |||
228 | * @save_cfg: configuration register that is saved on suspend/resume cycle | 231 | * @save_cfg: configuration register that is saved on suspend/resume cycle |
229 | * @save_dscr: for cyclic operations, preserve next descriptor address in | 232 | * @save_dscr: for cyclic operations, preserve next descriptor address in |
230 | * the cyclic list on suspend/resume cycle | 233 | * the cyclic list on suspend/resume cycle |
234 | * @remain_desc: to save remain desc length | ||
231 | * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG | 235 | * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG |
232 | * @lock: serializes enqueue/dequeue operations to descriptors lists | 236 | * @lock: serializes enqueue/dequeue operations to descriptors lists |
233 | * @active_list: list of descriptors dmaengine is being running on | 237 | * @active_list: list of descriptors dmaengine is being running on |
@@ -246,6 +250,7 @@ struct at_dma_chan { | |||
246 | struct tasklet_struct tasklet; | 250 | struct tasklet_struct tasklet; |
247 | u32 save_cfg; | 251 | u32 save_cfg; |
248 | u32 save_dscr; | 252 | u32 save_dscr; |
253 | u32 remain_desc; | ||
249 | struct dma_slave_config dma_sconfig; | 254 | struct dma_slave_config dma_sconfig; |
250 | 255 | ||
251 | spinlock_t lock; | 256 | spinlock_t lock; |
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
new file mode 100644
index 000000000000..b0c0c8268d42
--- /dev/null
+++ b/drivers/dma/dma-jz4740.c
@@ -0,0 +1,617 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de> | ||
3 | * JZ4740 DMAC support | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License as published by the | ||
7 | * Free Software Foundation; either version 2 of the License, or (at your | ||
8 | * option) any later version. | ||
9 | * | ||
10 | * You should have received a copy of the GNU General Public License along | ||
11 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
12 | * 675 Mass Ave, Cambridge, MA 02139, USA. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/dmaengine.h> | ||
17 | #include <linux/dma-mapping.h> | ||
18 | #include <linux/err.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/list.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <linux/clk.h> | ||
27 | |||
28 | #include <asm/mach-jz4740/dma.h> | ||
29 | |||
30 | #include "virt-dma.h" | ||
31 | |||
32 | #define JZ_DMA_NR_CHANS 6 | ||
33 | |||
34 | #define JZ_REG_DMA_SRC_ADDR(x) (0x00 + (x) * 0x20) | ||
35 | #define JZ_REG_DMA_DST_ADDR(x) (0x04 + (x) * 0x20) | ||
36 | #define JZ_REG_DMA_TRANSFER_COUNT(x) (0x08 + (x) * 0x20) | ||
37 | #define JZ_REG_DMA_REQ_TYPE(x) (0x0C + (x) * 0x20) | ||
38 | #define JZ_REG_DMA_STATUS_CTRL(x) (0x10 + (x) * 0x20) | ||
39 | #define JZ_REG_DMA_CMD(x) (0x14 + (x) * 0x20) | ||
40 | #define JZ_REG_DMA_DESC_ADDR(x) (0x18 + (x) * 0x20) | ||
41 | |||
42 | #define JZ_REG_DMA_CTRL 0x300 | ||
43 | #define JZ_REG_DMA_IRQ 0x304 | ||
44 | #define JZ_REG_DMA_DOORBELL 0x308 | ||
45 | #define JZ_REG_DMA_DOORBELL_SET 0x30C | ||
46 | |||
47 | #define JZ_DMA_STATUS_CTRL_NO_DESC BIT(31) | ||
48 | #define JZ_DMA_STATUS_CTRL_DESC_INV BIT(6) | ||
49 | #define JZ_DMA_STATUS_CTRL_ADDR_ERR BIT(4) | ||
50 | #define JZ_DMA_STATUS_CTRL_TRANSFER_DONE BIT(3) | ||
51 | #define JZ_DMA_STATUS_CTRL_HALT BIT(2) | ||
52 | #define JZ_DMA_STATUS_CTRL_COUNT_TERMINATE BIT(1) | ||
53 | #define JZ_DMA_STATUS_CTRL_ENABLE BIT(0) | ||
54 | |||
55 | #define JZ_DMA_CMD_SRC_INC BIT(23) | ||
56 | #define JZ_DMA_CMD_DST_INC BIT(22) | ||
57 | #define JZ_DMA_CMD_RDIL_MASK (0xf << 16) | ||
58 | #define JZ_DMA_CMD_SRC_WIDTH_MASK (0x3 << 14) | ||
59 | #define JZ_DMA_CMD_DST_WIDTH_MASK (0x3 << 12) | ||
60 | #define JZ_DMA_CMD_INTERVAL_LENGTH_MASK (0x7 << 8) | ||
61 | #define JZ_DMA_CMD_BLOCK_MODE BIT(7) | ||
62 | #define JZ_DMA_CMD_DESC_VALID BIT(4) | ||
63 | #define JZ_DMA_CMD_DESC_VALID_MODE BIT(3) | ||
64 | #define JZ_DMA_CMD_VALID_IRQ_ENABLE BIT(2) | ||
65 | #define JZ_DMA_CMD_TRANSFER_IRQ_ENABLE BIT(1) | ||
66 | #define JZ_DMA_CMD_LINK_ENABLE BIT(0) | ||
67 | |||
68 | #define JZ_DMA_CMD_FLAGS_OFFSET 22 | ||
69 | #define JZ_DMA_CMD_RDIL_OFFSET 16 | ||
70 | #define JZ_DMA_CMD_SRC_WIDTH_OFFSET 14 | ||
71 | #define JZ_DMA_CMD_DST_WIDTH_OFFSET 12 | ||
72 | #define JZ_DMA_CMD_TRANSFER_SIZE_OFFSET 8 | ||
73 | #define JZ_DMA_CMD_MODE_OFFSET 7 | ||
74 | |||
75 | #define JZ_DMA_CTRL_PRIORITY_MASK (0x3 << 8) | ||
76 | #define JZ_DMA_CTRL_HALT BIT(3) | ||
77 | #define JZ_DMA_CTRL_ADDRESS_ERROR BIT(2) | ||
78 | #define JZ_DMA_CTRL_ENABLE BIT(0) | ||
79 | |||
80 | enum jz4740_dma_width { | ||
81 | JZ4740_DMA_WIDTH_32BIT = 0, | ||
82 | JZ4740_DMA_WIDTH_8BIT = 1, | ||
83 | JZ4740_DMA_WIDTH_16BIT = 2, | ||
84 | }; | ||
85 | |||
86 | enum jz4740_dma_transfer_size { | ||
87 | JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0, | ||
88 | JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1, | ||
89 | JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2, | ||
90 | JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3, | ||
91 | JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4, | ||
92 | }; | ||
93 | |||
94 | enum jz4740_dma_flags { | ||
95 | JZ4740_DMA_SRC_AUTOINC = 0x2, | ||
96 | JZ4740_DMA_DST_AUTOINC = 0x1, | ||
97 | }; | ||
98 | |||
99 | enum jz4740_dma_mode { | ||
100 | JZ4740_DMA_MODE_SINGLE = 0, | ||
101 | JZ4740_DMA_MODE_BLOCK = 1, | ||
102 | }; | ||
103 | |||
104 | struct jz4740_dma_sg { | ||
105 | dma_addr_t addr; | ||
106 | unsigned int len; | ||
107 | }; | ||
108 | |||
109 | struct jz4740_dma_desc { | ||
110 | struct virt_dma_desc vdesc; | ||
111 | |||
112 | enum dma_transfer_direction direction; | ||
113 | bool cyclic; | ||
114 | |||
115 | unsigned int num_sgs; | ||
116 | struct jz4740_dma_sg sg[]; | ||
117 | }; | ||
118 | |||
119 | struct jz4740_dmaengine_chan { | ||
120 | struct virt_dma_chan vchan; | ||
121 | unsigned int id; | ||
122 | |||
123 | dma_addr_t fifo_addr; | ||
124 | unsigned int transfer_shift; | ||
125 | |||
126 | struct jz4740_dma_desc *desc; | ||
127 | unsigned int next_sg; | ||
128 | }; | ||
129 | |||
130 | struct jz4740_dma_dev { | ||
131 | struct dma_device ddev; | ||
132 | void __iomem *base; | ||
133 | struct clk *clk; | ||
134 | |||
135 | struct jz4740_dmaengine_chan chan[JZ_DMA_NR_CHANS]; | ||
136 | }; | ||
137 | |||
138 | static struct jz4740_dma_dev *jz4740_dma_chan_get_dev( | ||
139 | struct jz4740_dmaengine_chan *chan) | ||
140 | { | ||
141 | return container_of(chan->vchan.chan.device, struct jz4740_dma_dev, | ||
142 | ddev); | ||
143 | } | ||
144 | |||
145 | static struct jz4740_dmaengine_chan *to_jz4740_dma_chan(struct dma_chan *c) | ||
146 | { | ||
147 | return container_of(c, struct jz4740_dmaengine_chan, vchan.chan); | ||
148 | } | ||
149 | |||
150 | static struct jz4740_dma_desc *to_jz4740_dma_desc(struct virt_dma_desc *vdesc) | ||
151 | { | ||
152 | return container_of(vdesc, struct jz4740_dma_desc, vdesc); | ||
153 | } | ||
154 | |||
155 | static inline uint32_t jz4740_dma_read(struct jz4740_dma_dev *dmadev, | ||
156 | unsigned int reg) | ||
157 | { | ||
158 | return readl(dmadev->base + reg); | ||
159 | } | ||
160 | |||
161 | static inline void jz4740_dma_write(struct jz4740_dma_dev *dmadev, | ||
162 | unsigned reg, uint32_t val) | ||
163 | { | ||
164 | writel(val, dmadev->base + reg); | ||
165 | } | ||
166 | |||
167 | static inline void jz4740_dma_write_mask(struct jz4740_dma_dev *dmadev, | ||
168 | unsigned int reg, uint32_t val, uint32_t mask) | ||
169 | { | ||
170 | uint32_t tmp; | ||
171 | |||
172 | tmp = jz4740_dma_read(dmadev, reg); | ||
173 | tmp &= ~mask; | ||
174 | tmp |= val; | ||
175 | jz4740_dma_write(dmadev, reg, tmp); | ||
176 | } | ||
177 | |||
178 | static struct jz4740_dma_desc *jz4740_dma_alloc_desc(unsigned int num_sgs) | ||
179 | { | ||
180 | return kzalloc(sizeof(struct jz4740_dma_desc) + | ||
181 | sizeof(struct jz4740_dma_sg) * num_sgs, GFP_ATOMIC); | ||
182 | } | ||
183 | |||
184 | static enum jz4740_dma_width jz4740_dma_width(enum dma_slave_buswidth width) | ||
185 | { | ||
186 | switch (width) { | ||
187 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
188 | return JZ4740_DMA_WIDTH_8BIT; | ||
189 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
190 | return JZ4740_DMA_WIDTH_16BIT; | ||
191 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
192 | return JZ4740_DMA_WIDTH_32BIT; | ||
193 | default: | ||
194 | return JZ4740_DMA_WIDTH_32BIT; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst) | ||
199 | { | ||
200 | if (maxburst <= 1) | ||
201 | return JZ4740_DMA_TRANSFER_SIZE_1BYTE; | ||
202 | else if (maxburst <= 3) | ||
203 | return JZ4740_DMA_TRANSFER_SIZE_2BYTE; | ||
204 | else if (maxburst <= 15) | ||
205 | return JZ4740_DMA_TRANSFER_SIZE_4BYTE; | ||
206 | else if (maxburst <= 31) | ||
207 | return JZ4740_DMA_TRANSFER_SIZE_16BYTE; | ||
208 | |||
209 | return JZ4740_DMA_TRANSFER_SIZE_32BYTE; | ||
210 | } | ||
211 | |||
212 | static int jz4740_dma_slave_config(struct dma_chan *c, | ||
213 | const struct dma_slave_config *config) | ||
214 | { | ||
215 | struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); | ||
216 | struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); | ||
217 | enum jz4740_dma_width src_width; | ||
218 | enum jz4740_dma_width dst_width; | ||
219 | enum jz4740_dma_transfer_size transfer_size; | ||
220 | enum jz4740_dma_flags flags; | ||
221 | uint32_t cmd; | ||
222 | |||
223 | switch (config->direction) { | ||
224 | case DMA_MEM_TO_DEV: | ||
225 | flags = JZ4740_DMA_SRC_AUTOINC; | ||
226 | transfer_size = jz4740_dma_maxburst(config->dst_maxburst); | ||
227 | chan->fifo_addr = config->dst_addr; | ||
228 | break; | ||
229 | case DMA_DEV_TO_MEM: | ||
230 | flags = JZ4740_DMA_DST_AUTOINC; | ||
231 | transfer_size = jz4740_dma_maxburst(config->src_maxburst); | ||
232 | chan->fifo_addr = config->src_addr; | ||
233 | break; | ||
234 | default: | ||
235 | return -EINVAL; | ||
236 | } | ||
237 | |||
238 | src_width = jz4740_dma_width(config->src_addr_width); | ||
239 | dst_width = jz4740_dma_width(config->dst_addr_width); | ||
240 | |||
241 | switch (transfer_size) { | ||
242 | case JZ4740_DMA_TRANSFER_SIZE_2BYTE: | ||
243 | chan->transfer_shift = 1; | ||
244 | break; | ||
245 | case JZ4740_DMA_TRANSFER_SIZE_4BYTE: | ||
246 | chan->transfer_shift = 2; | ||
247 | break; | ||
248 | case JZ4740_DMA_TRANSFER_SIZE_16BYTE: | ||
249 | chan->transfer_shift = 4; | ||
250 | break; | ||
251 | case JZ4740_DMA_TRANSFER_SIZE_32BYTE: | ||
252 | chan->transfer_shift = 5; | ||
253 | break; | ||
254 | default: | ||
255 | chan->transfer_shift = 0; | ||
256 | break; | ||
257 | } | ||
258 | |||
259 | cmd = flags << JZ_DMA_CMD_FLAGS_OFFSET; | ||
260 | cmd |= src_width << JZ_DMA_CMD_SRC_WIDTH_OFFSET; | ||
261 | cmd |= dst_width << JZ_DMA_CMD_DST_WIDTH_OFFSET; | ||
262 | cmd |= transfer_size << JZ_DMA_CMD_TRANSFER_SIZE_OFFSET; | ||
263 | cmd |= JZ4740_DMA_MODE_SINGLE << JZ_DMA_CMD_MODE_OFFSET; | ||
264 | cmd |= JZ_DMA_CMD_TRANSFER_IRQ_ENABLE; | ||
265 | |||
266 | jz4740_dma_write(dmadev, JZ_REG_DMA_CMD(chan->id), cmd); | ||
267 | jz4740_dma_write(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0); | ||
268 | jz4740_dma_write(dmadev, JZ_REG_DMA_REQ_TYPE(chan->id), | ||
269 | config->slave_id); | ||
270 | |||
271 | return 0; | ||
272 | } | ||
273 | |||
274 | static int jz4740_dma_terminate_all(struct dma_chan *c) | ||
275 | { | ||
276 | struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); | ||
277 | struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); | ||
278 | unsigned long flags; | ||
279 | LIST_HEAD(head); | ||
280 | |||
281 | spin_lock_irqsave(&chan->vchan.lock, flags); | ||
282 | jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0, | ||
283 | JZ_DMA_STATUS_CTRL_ENABLE); | ||
284 | chan->desc = NULL; | ||
285 | vchan_get_all_descriptors(&chan->vchan, &head); | ||
286 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | ||
287 | |||
288 | vchan_dma_desc_free_list(&chan->vchan, &head); | ||
289 | |||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
294 | unsigned long arg) | ||
295 | { | ||
296 | struct dma_slave_config *config = (struct dma_slave_config *)arg; | ||
297 | |||
298 | switch (cmd) { | ||
299 | case DMA_SLAVE_CONFIG: | ||
300 | return jz4740_dma_slave_config(chan, config); | ||
301 | case DMA_TERMINATE_ALL: | ||
302 | return jz4740_dma_terminate_all(chan); | ||
303 | default: | ||
304 | return -ENOSYS; | ||
305 | } | ||
306 | } | ||
307 | |||
308 | static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan) | ||
309 | { | ||
310 | struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); | ||
311 | dma_addr_t src_addr, dst_addr; | ||
312 | struct virt_dma_desc *vdesc; | ||
313 | struct jz4740_dma_sg *sg; | ||
314 | |||
315 | jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0, | ||
316 | JZ_DMA_STATUS_CTRL_ENABLE); | ||
317 | |||
318 | if (!chan->desc) { | ||
319 | vdesc = vchan_next_desc(&chan->vchan); | ||
320 | if (!vdesc) | ||
321 | return 0; | ||
322 | chan->desc = to_jz4740_dma_desc(vdesc); | ||
323 | chan->next_sg = 0; | ||
324 | } | ||
325 | |||
326 | if (chan->next_sg == chan->desc->num_sgs) | ||
327 | chan->next_sg = 0; | ||
328 | |||
329 | sg = &chan->desc->sg[chan->next_sg]; | ||
330 | |||
331 | if (chan->desc->direction == DMA_MEM_TO_DEV) { | ||
332 | src_addr = sg->addr; | ||
333 | dst_addr = chan->fifo_addr; | ||
334 | } else { | ||
335 | src_addr = chan->fifo_addr; | ||
336 | dst_addr = sg->addr; | ||
337 | } | ||
338 | jz4740_dma_write(dmadev, JZ_REG_DMA_SRC_ADDR(chan->id), src_addr); | ||
339 | jz4740_dma_write(dmadev, JZ_REG_DMA_DST_ADDR(chan->id), dst_addr); | ||
340 | jz4740_dma_write(dmadev, JZ_REG_DMA_TRANSFER_COUNT(chan->id), | ||
341 | sg->len >> chan->transfer_shift); | ||
342 | |||
343 | chan->next_sg++; | ||
344 | |||
345 | jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), | ||
346 | JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE, | ||
347 | JZ_DMA_STATUS_CTRL_HALT | JZ_DMA_STATUS_CTRL_NO_DESC | | ||
348 | JZ_DMA_STATUS_CTRL_ENABLE); | ||
349 | |||
350 | jz4740_dma_write_mask(dmadev, JZ_REG_DMA_CTRL, | ||
351 | JZ_DMA_CTRL_ENABLE, | ||
352 | JZ_DMA_CTRL_HALT | JZ_DMA_CTRL_ENABLE); | ||
353 | |||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan) | ||
358 | { | ||
359 | spin_lock(&chan->vchan.lock); | ||
360 | if (chan->desc) { | ||
361 | if (chan->desc && chan->desc->cyclic) { | ||
362 | vchan_cyclic_callback(&chan->desc->vdesc); | ||
363 | } else { | ||
364 | if (chan->next_sg == chan->desc->num_sgs) { | ||
365 | chan->desc = NULL; | ||
366 | vchan_cookie_complete(&chan->desc->vdesc); | ||
367 | } | ||
368 | } | ||
369 | } | ||
370 | jz4740_dma_start_transfer(chan); | ||
371 | spin_unlock(&chan->vchan.lock); | ||
372 | } | ||
373 | |||
374 | static irqreturn_t jz4740_dma_irq(int irq, void *devid) | ||
375 | { | ||
376 | struct jz4740_dma_dev *dmadev = devid; | ||
377 | uint32_t irq_status; | ||
378 | unsigned int i; | ||
379 | |||
380 | irq_status = readl(dmadev->base + JZ_REG_DMA_IRQ); | ||
381 | |||
382 | for (i = 0; i < 6; ++i) { | ||
383 | if (irq_status & (1 << i)) { | ||
384 | jz4740_dma_write_mask(dmadev, | ||
385 | JZ_REG_DMA_STATUS_CTRL(i), 0, | ||
386 | JZ_DMA_STATUS_CTRL_ENABLE | | ||
387 | JZ_DMA_STATUS_CTRL_TRANSFER_DONE); | ||
388 | |||
389 | jz4740_dma_chan_irq(&dmadev->chan[i]); | ||
390 | } | ||
391 | } | ||
392 | |||
393 | return IRQ_HANDLED; | ||
394 | } | ||
395 | |||
396 | static void jz4740_dma_issue_pending(struct dma_chan *c) | ||
397 | { | ||
398 | struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); | ||
399 | unsigned long flags; | ||
400 | |||
401 | spin_lock_irqsave(&chan->vchan.lock, flags); | ||
402 | if (vchan_issue_pending(&chan->vchan) && !chan->desc) | ||
403 | jz4740_dma_start_transfer(chan); | ||
404 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | ||
405 | } | ||
406 | |||
407 | static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg( | ||
408 | struct dma_chan *c, struct scatterlist *sgl, | ||
409 | unsigned int sg_len, enum dma_transfer_direction direction, | ||
410 | unsigned long flags, void *context) | ||
411 | { | ||
412 | struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); | ||
413 | struct jz4740_dma_desc *desc; | ||
414 | struct scatterlist *sg; | ||
415 | unsigned int i; | ||
416 | |||
417 | desc = jz4740_dma_alloc_desc(sg_len); | ||
418 | if (!desc) | ||
419 | return NULL; | ||
420 | |||
421 | for_each_sg(sgl, sg, sg_len, i) { | ||
422 | desc->sg[i].addr = sg_dma_address(sg); | ||
423 | desc->sg[i].len = sg_dma_len(sg); | ||
424 | } | ||
425 | |||
426 | desc->num_sgs = sg_len; | ||
427 | desc->direction = direction; | ||
428 | desc->cyclic = false; | ||
429 | |||
430 | return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); | ||
431 | } | ||
432 | |||
433 | static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic( | ||
434 | struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len, | ||
435 | size_t period_len, enum dma_transfer_direction direction, | ||
436 | unsigned long flags, void *context) | ||
437 | { | ||
438 | struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); | ||
439 | struct jz4740_dma_desc *desc; | ||
440 | unsigned int num_periods, i; | ||
441 | |||
442 | if (buf_len % period_len) | ||
443 | return NULL; | ||
444 | |||
445 | num_periods = buf_len / period_len; | ||
446 | |||
447 | desc = jz4740_dma_alloc_desc(num_periods); | ||
448 | if (!desc) | ||
449 | return NULL; | ||
450 | |||
451 | for (i = 0; i < num_periods; i++) { | ||
452 | desc->sg[i].addr = buf_addr; | ||
453 | desc->sg[i].len = period_len; | ||
454 | buf_addr += period_len; | ||
455 | } | ||
456 | |||
457 | desc->num_sgs = num_periods; | ||
458 | desc->direction = direction; | ||
459 | desc->cyclic = true; | ||
460 | |||
461 | return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); | ||
462 | } | ||
463 | |||
464 | static size_t jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan, | ||
465 | struct jz4740_dma_desc *desc, unsigned int next_sg) | ||
466 | { | ||
467 | struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); | ||
468 | unsigned int residue, count; | ||
469 | unsigned int i; | ||
470 | |||
471 | residue = 0; | ||
472 | |||
473 | for (i = next_sg; i < desc->num_sgs; i++) | ||
474 | residue += desc->sg[i].len; | ||
475 | |||
476 | if (next_sg != 0) { | ||
477 | count = jz4740_dma_read(dmadev, | ||
478 | JZ_REG_DMA_TRANSFER_COUNT(chan->id)); | ||
479 | residue += count << chan->transfer_shift; | ||
480 | } | ||
481 | |||
482 | return residue; | ||
483 | } | ||
484 | |||
485 | static enum dma_status jz4740_dma_tx_status(struct dma_chan *c, | ||
486 | dma_cookie_t cookie, struct dma_tx_state *state) | ||
487 | { | ||
488 | struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); | ||
489 | struct virt_dma_desc *vdesc; | ||
490 | enum dma_status status; | ||
491 | unsigned long flags; | ||
492 | |||
493 | status = dma_cookie_status(c, cookie, state); | ||
494 | if (status == DMA_SUCCESS || !state) | ||
495 | return status; | ||
496 | |||
497 | spin_lock_irqsave(&chan->vchan.lock, flags); | ||
498 | vdesc = vchan_find_desc(&chan->vchan, cookie); | ||
499 | if (cookie == chan->desc->vdesc.tx.cookie) { | ||
500 | state->residue = jz4740_dma_desc_residue(chan, chan->desc, | ||
501 | chan->next_sg); | ||
502 | } else if (vdesc) { | ||
503 | state->residue = jz4740_dma_desc_residue(chan, | ||
504 | to_jz4740_dma_desc(vdesc), 0); | ||
505 | } else { | ||
506 | state->residue = 0; | ||
507 | } | ||
508 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | ||
509 | |||
510 | return status; | ||
511 | } | ||
512 | |||
513 | static int jz4740_dma_alloc_chan_resources(struct dma_chan *c) | ||
514 | { | ||
515 | return 0; | ||
516 | } | ||
517 | |||
518 | static void jz4740_dma_free_chan_resources(struct dma_chan *c) | ||
519 | { | ||
520 | vchan_free_chan_resources(to_virt_chan(c)); | ||
521 | } | ||
522 | |||
523 | static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc) | ||
524 | { | ||
525 | kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc)); | ||
526 | } | ||
527 | |||
528 | static int jz4740_dma_probe(struct platform_device *pdev) | ||
529 | { | ||
530 | struct jz4740_dmaengine_chan *chan; | ||
531 | struct jz4740_dma_dev *dmadev; | ||
532 | struct dma_device *dd; | ||
533 | unsigned int i; | ||
534 | struct resource *res; | ||
535 | int ret; | ||
536 | int irq; | ||
537 | |||
538 | dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL); | ||
539 | if (!dmadev) | ||
540 | return -EINVAL; | ||
541 | |||
542 | dd = &dmadev->ddev; | ||
543 | |||
544 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
545 | dmadev->base = devm_ioremap_resource(&pdev->dev, res); | ||
546 | if (IS_ERR(dmadev->base)) | ||
547 | return PTR_ERR(dmadev->base); | ||
548 | |||
549 | dmadev->clk = clk_get(&pdev->dev, "dma"); | ||
550 | if (IS_ERR(dmadev->clk)) | ||
551 | return PTR_ERR(dmadev->clk); | ||
552 | |||
553 | clk_prepare_enable(dmadev->clk); | ||
554 | |||
555 | dma_cap_set(DMA_SLAVE, dd->cap_mask); | ||
556 | dma_cap_set(DMA_CYCLIC, dd->cap_mask); | ||
557 | dd->device_alloc_chan_resources = jz4740_dma_alloc_chan_resources; | ||
558 | dd->device_free_chan_resources = jz4740_dma_free_chan_resources; | ||
559 | dd->device_tx_status = jz4740_dma_tx_status; | ||
560 | dd->device_issue_pending = jz4740_dma_issue_pending; | ||
561 | dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg; | ||
562 | dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; | ||
563 | dd->device_control = jz4740_dma_control; | ||
564 | dd->dev = &pdev->dev; | ||
565 | dd->chancnt = JZ_DMA_NR_CHANS; | ||
566 | INIT_LIST_HEAD(&dd->channels); | ||
567 | |||
568 | for (i = 0; i < dd->chancnt; i++) { | ||
569 | chan = &dmadev->chan[i]; | ||
570 | chan->id = i; | ||
571 | chan->vchan.desc_free = jz4740_dma_desc_free; | ||
572 | vchan_init(&chan->vchan, dd); | ||
573 | } | ||
574 | |||
575 | ret = dma_async_device_register(dd); | ||
576 | if (ret) | ||
577 | return ret; | ||
578 | |||
579 | irq = platform_get_irq(pdev, 0); | ||
580 | ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev); | ||
581 | if (ret) | ||
582 | goto err_unregister; | ||
583 | |||
584 | platform_set_drvdata(pdev, dmadev); | ||
585 | |||
586 | return 0; | ||
587 | |||
588 | err_unregister: | ||
589 | dma_async_device_unregister(dd); | ||
590 | return ret; | ||
591 | } | ||
592 | |||
593 | static int jz4740_dma_remove(struct platform_device *pdev) | ||
594 | { | ||
595 | struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev); | ||
596 | int irq = platform_get_irq(pdev, 0); | ||
597 | |||
598 | free_irq(irq, dmadev); | ||
599 | dma_async_device_unregister(&dmadev->ddev); | ||
600 | clk_disable_unprepare(dmadev->clk); | ||
601 | |||
602 | return 0; | ||
603 | } | ||
604 | |||
605 | static struct platform_driver jz4740_dma_driver = { | ||
606 | .probe = jz4740_dma_probe, | ||
607 | .remove = jz4740_dma_remove, | ||
608 | .driver = { | ||
609 | .name = "jz4740-dma", | ||
610 | .owner = THIS_MODULE, | ||
611 | }, | ||
612 | }; | ||
613 | module_platform_driver(jz4740_dma_driver); | ||
614 | |||
615 | MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); | ||
616 | MODULE_DESCRIPTION("JZ4740 DMA driver"); | ||
617 | MODULE_LICENSE("GPLv2"); | ||
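Since the custom MIPS DMA API is removed elsewhere in this series, clients of the new jz4740 driver go through the generic slave-dmaengine calls. A hedged sketch of a device-to-memory transfer; the request-type constant, burst size and buffer setup are placeholders rather than values taken from an in-tree user:

```c
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

#define EXAMPLE_REQ_TYPE	0	/* hypothetical; ends up in JZ_REG_DMA_REQ_TYPE via slave_id */

static int example_start_rx(dma_addr_t fifo_phys, struct scatterlist *sgl,
			    unsigned int sg_len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_phys,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,
		.slave_id	= EXAMPLE_REQ_TYPE,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* No DT/xlate support here yet, so grab any slave-capable channel. */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
```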
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
new file mode 100644
index 000000000000..dde13248b681
--- /dev/null
+++ b/drivers/dma/dw/Kconfig
@@ -0,0 +1,29 @@ | |||
1 | # | ||
2 | # DMA engine configuration for dw | ||
3 | # | ||
4 | |||
5 | config DW_DMAC_CORE | ||
6 | tristate "Synopsys DesignWare AHB DMA support" | ||
7 | depends on GENERIC_HARDIRQS | ||
8 | select DMA_ENGINE | ||
9 | |||
10 | config DW_DMAC | ||
11 | tristate "Synopsys DesignWare AHB DMA platform driver" | ||
12 | select DW_DMAC_CORE | ||
13 | select DW_DMAC_BIG_ENDIAN_IO if AVR32 | ||
14 | default y if CPU_AT32AP7000 | ||
15 | help | ||
16 | Support the Synopsys DesignWare AHB DMA controller. This | ||
17 | can be integrated in chips such as the Atmel AT32ap7000. | ||
18 | |||
19 | config DW_DMAC_PCI | ||
20 | tristate "Synopsys DesignWare AHB DMA PCI driver" | ||
21 | depends on PCI | ||
22 | select DW_DMAC_CORE | ||
23 | help | ||
24 | Support the Synopsys DesignWare AHB DMA controller on the | ||
25 | platfroms that enumerate it as a PCI device. For example, | ||
26 | Intel Medfield has integrated this GPDMA controller. | ||
27 | |||
28 | config DW_DMAC_BIG_ENDIAN_IO | ||
29 | bool | ||
diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile
new file mode 100644
index 000000000000..3eebd1ce2c6b
--- /dev/null
+++ b/drivers/dma/dw/Makefile
@@ -0,0 +1,8 @@ | |||
1 | obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o | ||
2 | dw_dmac_core-objs := core.o | ||
3 | |||
4 | obj-$(CONFIG_DW_DMAC) += dw_dmac.o | ||
5 | dw_dmac-objs := platform.o | ||
6 | |||
7 | obj-$(CONFIG_DW_DMAC_PCI) += dw_dmac_pci.o | ||
8 | dw_dmac_pci-objs := pci.o | ||
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw/core.c
index 2e5deaa82b60..eea479c12173 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw/core.c
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2007-2008 Atmel Corporation | 4 | * Copyright (C) 2007-2008 Atmel Corporation |
5 | * Copyright (C) 2010-2011 ST Microelectronics | 5 | * Copyright (C) 2010-2011 ST Microelectronics |
6 | * Copyright (C) 2013 Intel Corporation | ||
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
@@ -19,17 +20,12 @@ | |||
19 | #include <linux/init.h> | 20 | #include <linux/init.h> |
20 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
21 | #include <linux/io.h> | 22 | #include <linux/io.h> |
22 | #include <linux/of.h> | ||
23 | #include <linux/of_dma.h> | ||
24 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
25 | #include <linux/module.h> | 24 | #include <linux/module.h> |
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
28 | #include <linux/acpi.h> | ||
29 | #include <linux/acpi_dma.h> | ||
30 | 26 | ||
31 | #include "dw_dmac_regs.h" | 27 | #include "../dmaengine.h" |
32 | #include "dmaengine.h" | 28 | #include "internal.h" |
33 | 29 | ||
34 | /* | 30 | /* |
35 | * This supports the Synopsys "DesignWare AHB Central DMA Controller", | 31 | * This supports the Synopsys "DesignWare AHB Central DMA Controller", |
@@ -41,16 +37,6 @@ | |||
41 | * which does not support descriptor writeback. | 37 | * which does not support descriptor writeback. |
42 | */ | 38 | */ |
43 | 39 | ||
44 | static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave) | ||
45 | { | ||
46 | return slave ? slave->dst_master : 0; | ||
47 | } | ||
48 | |||
49 | static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) | ||
50 | { | ||
51 | return slave ? slave->src_master : 1; | ||
52 | } | ||
53 | |||
54 | static inline void dwc_set_masters(struct dw_dma_chan *dwc) | 40 | static inline void dwc_set_masters(struct dw_dma_chan *dwc) |
55 | { | 41 | { |
56 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 42 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
@@ -556,14 +542,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
556 | 542 | ||
557 | /* --------------------- Cyclic DMA API extensions -------------------- */ | 543 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
558 | 544 | ||
559 | inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) | 545 | dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) |
560 | { | 546 | { |
561 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 547 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
562 | return channel_readl(dwc, SAR); | 548 | return channel_readl(dwc, SAR); |
563 | } | 549 | } |
564 | EXPORT_SYMBOL(dw_dma_get_src_addr); | 550 | EXPORT_SYMBOL(dw_dma_get_src_addr); |
565 | 551 | ||
566 | inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) | 552 | dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) |
567 | { | 553 | { |
568 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 554 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
569 | return channel_readl(dwc, DAR); | 555 | return channel_readl(dwc, DAR); |
@@ -1225,99 +1211,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1225 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); | 1211 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); |
1226 | } | 1212 | } |
1227 | 1213 | ||
1228 | /*----------------------------------------------------------------------*/ | ||
1229 | |||
1230 | struct dw_dma_of_filter_args { | ||
1231 | struct dw_dma *dw; | ||
1232 | unsigned int req; | ||
1233 | unsigned int src; | ||
1234 | unsigned int dst; | ||
1235 | }; | ||
1236 | |||
1237 | static bool dw_dma_of_filter(struct dma_chan *chan, void *param) | ||
1238 | { | ||
1239 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
1240 | struct dw_dma_of_filter_args *fargs = param; | ||
1241 | |||
1242 | /* Ensure the device matches our channel */ | ||
1243 | if (chan->device != &fargs->dw->dma) | ||
1244 | return false; | ||
1245 | |||
1246 | dwc->request_line = fargs->req; | ||
1247 | dwc->src_master = fargs->src; | ||
1248 | dwc->dst_master = fargs->dst; | ||
1249 | |||
1250 | return true; | ||
1251 | } | ||
1252 | |||
1253 | static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, | ||
1254 | struct of_dma *ofdma) | ||
1255 | { | ||
1256 | struct dw_dma *dw = ofdma->of_dma_data; | ||
1257 | struct dw_dma_of_filter_args fargs = { | ||
1258 | .dw = dw, | ||
1259 | }; | ||
1260 | dma_cap_mask_t cap; | ||
1261 | |||
1262 | if (dma_spec->args_count != 3) | ||
1263 | return NULL; | ||
1264 | |||
1265 | fargs.req = dma_spec->args[0]; | ||
1266 | fargs.src = dma_spec->args[1]; | ||
1267 | fargs.dst = dma_spec->args[2]; | ||
1268 | |||
1269 | if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS || | ||
1270 | fargs.src >= dw->nr_masters || | ||
1271 | fargs.dst >= dw->nr_masters)) | ||
1272 | return NULL; | ||
1273 | |||
1274 | dma_cap_zero(cap); | ||
1275 | dma_cap_set(DMA_SLAVE, cap); | ||
1276 | |||
1277 | /* TODO: there should be a simpler way to do this */ | ||
1278 | return dma_request_channel(cap, dw_dma_of_filter, &fargs); | ||
1279 | } | ||
1280 | |||
1281 | #ifdef CONFIG_ACPI | ||
1282 | static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) | ||
1283 | { | ||
1284 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
1285 | struct acpi_dma_spec *dma_spec = param; | ||
1286 | |||
1287 | if (chan->device->dev != dma_spec->dev || | ||
1288 | chan->chan_id != dma_spec->chan_id) | ||
1289 | return false; | ||
1290 | |||
1291 | dwc->request_line = dma_spec->slave_id; | ||
1292 | dwc->src_master = dwc_get_sms(NULL); | ||
1293 | dwc->dst_master = dwc_get_dms(NULL); | ||
1294 | |||
1295 | return true; | ||
1296 | } | ||
1297 | |||
1298 | static void dw_dma_acpi_controller_register(struct dw_dma *dw) | ||
1299 | { | ||
1300 | struct device *dev = dw->dma.dev; | ||
1301 | struct acpi_dma_filter_info *info; | ||
1302 | int ret; | ||
1303 | |||
1304 | info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); | ||
1305 | if (!info) | ||
1306 | return; | ||
1307 | |||
1308 | dma_cap_zero(info->dma_cap); | ||
1309 | dma_cap_set(DMA_SLAVE, info->dma_cap); | ||
1310 | info->filter_fn = dw_dma_acpi_filter; | ||
1311 | |||
1312 | ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate, | ||
1313 | info); | ||
1314 | if (ret) | ||
1315 | dev_err(dev, "could not register acpi_dma_controller\n"); | ||
1316 | } | ||
1317 | #else /* !CONFIG_ACPI */ | ||
1318 | static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {} | ||
1319 | #endif /* !CONFIG_ACPI */ | ||
1320 | |||
1321 | /* --------------------- Cyclic DMA API extensions -------------------- */ | 1214 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
1322 | 1215 | ||
1323 | /** | 1216 | /** |
@@ -1598,104 +1491,24 @@ static void dw_dma_off(struct dw_dma *dw) | |||
1598 | dw->chan[i].initialized = false; | 1491 | dw->chan[i].initialized = false; |
1599 | } | 1492 | } |
1600 | 1493 | ||
1601 | #ifdef CONFIG_OF | 1494 | int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) |
1602 | static struct dw_dma_platform_data * | ||
1603 | dw_dma_parse_dt(struct platform_device *pdev) | ||
1604 | { | 1495 | { |
1605 | struct device_node *np = pdev->dev.of_node; | ||
1606 | struct dw_dma_platform_data *pdata; | ||
1607 | u32 tmp, arr[4]; | ||
1608 | |||
1609 | if (!np) { | ||
1610 | dev_err(&pdev->dev, "Missing DT data\n"); | ||
1611 | return NULL; | ||
1612 | } | ||
1613 | |||
1614 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | ||
1615 | if (!pdata) | ||
1616 | return NULL; | ||
1617 | |||
1618 | if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels)) | ||
1619 | return NULL; | ||
1620 | |||
1621 | if (of_property_read_bool(np, "is_private")) | ||
1622 | pdata->is_private = true; | ||
1623 | |||
1624 | if (!of_property_read_u32(np, "chan_allocation_order", &tmp)) | ||
1625 | pdata->chan_allocation_order = (unsigned char)tmp; | ||
1626 | |||
1627 | if (!of_property_read_u32(np, "chan_priority", &tmp)) | ||
1628 | pdata->chan_priority = tmp; | ||
1629 | |||
1630 | if (!of_property_read_u32(np, "block_size", &tmp)) | ||
1631 | pdata->block_size = tmp; | ||
1632 | |||
1633 | if (!of_property_read_u32(np, "dma-masters", &tmp)) { | ||
1634 | if (tmp > 4) | ||
1635 | return NULL; | ||
1636 | |||
1637 | pdata->nr_masters = tmp; | ||
1638 | } | ||
1639 | |||
1640 | if (!of_property_read_u32_array(np, "data_width", arr, | ||
1641 | pdata->nr_masters)) | ||
1642 | for (tmp = 0; tmp < pdata->nr_masters; tmp++) | ||
1643 | pdata->data_width[tmp] = arr[tmp]; | ||
1644 | |||
1645 | return pdata; | ||
1646 | } | ||
1647 | #else | ||
1648 | static inline struct dw_dma_platform_data * | ||
1649 | dw_dma_parse_dt(struct platform_device *pdev) | ||
1650 | { | ||
1651 | return NULL; | ||
1652 | } | ||
1653 | #endif | ||
1654 | |||
1655 | static int dw_probe(struct platform_device *pdev) | ||
1656 | { | ||
1657 | struct dw_dma_platform_data *pdata; | ||
1658 | struct resource *io; | ||
1659 | struct dw_dma *dw; | 1496 | struct dw_dma *dw; |
1660 | size_t size; | 1497 | size_t size; |
1661 | void __iomem *regs; | ||
1662 | bool autocfg; | 1498 | bool autocfg; |
1663 | unsigned int dw_params; | 1499 | unsigned int dw_params; |
1664 | unsigned int nr_channels; | 1500 | unsigned int nr_channels; |
1665 | unsigned int max_blk_size = 0; | 1501 | unsigned int max_blk_size = 0; |
1666 | int irq; | ||
1667 | int err; | 1502 | int err; |
1668 | int i; | 1503 | int i; |
1669 | 1504 | ||
1670 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1505 | dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); |
1671 | if (!io) | ||
1672 | return -EINVAL; | ||
1673 | |||
1674 | irq = platform_get_irq(pdev, 0); | ||
1675 | if (irq < 0) | ||
1676 | return irq; | ||
1677 | |||
1678 | regs = devm_ioremap_resource(&pdev->dev, io); | ||
1679 | if (IS_ERR(regs)) | ||
1680 | return PTR_ERR(regs); | ||
1681 | |||
1682 | /* Apply default dma_mask if needed */ | ||
1683 | if (!pdev->dev.dma_mask) { | ||
1684 | pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; | ||
1685 | pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
1686 | } | ||
1687 | |||
1688 | dw_params = dma_read_byaddr(regs, DW_PARAMS); | ||
1689 | autocfg = dw_params >> DW_PARAMS_EN & 0x1; | 1506 | autocfg = dw_params >> DW_PARAMS_EN & 0x1; |
1690 | 1507 | ||
1691 | dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params); | 1508 | dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); |
1692 | |||
1693 | pdata = dev_get_platdata(&pdev->dev); | ||
1694 | if (!pdata) | ||
1695 | pdata = dw_dma_parse_dt(pdev); | ||
1696 | 1509 | ||
1697 | if (!pdata && autocfg) { | 1510 | if (!pdata && autocfg) { |
1698 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | 1511 | pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); |
1699 | if (!pdata) | 1512 | if (!pdata) |
1700 | return -ENOMEM; | 1513 | return -ENOMEM; |
1701 | 1514 | ||
@@ -1712,16 +1525,17 @@ static int dw_probe(struct platform_device *pdev) | |||
1712 | nr_channels = pdata->nr_channels; | 1525 | nr_channels = pdata->nr_channels; |
1713 | 1526 | ||
1714 | size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan); | 1527 | size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan); |
1715 | dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); | 1528 | dw = devm_kzalloc(chip->dev, size, GFP_KERNEL); |
1716 | if (!dw) | 1529 | if (!dw) |
1717 | return -ENOMEM; | 1530 | return -ENOMEM; |
1718 | 1531 | ||
1719 | dw->clk = devm_clk_get(&pdev->dev, "hclk"); | 1532 | dw->clk = devm_clk_get(chip->dev, "hclk"); |
1720 | if (IS_ERR(dw->clk)) | 1533 | if (IS_ERR(dw->clk)) |
1721 | return PTR_ERR(dw->clk); | 1534 | return PTR_ERR(dw->clk); |
1722 | clk_prepare_enable(dw->clk); | 1535 | clk_prepare_enable(dw->clk); |
1723 | 1536 | ||
1724 | dw->regs = regs; | 1537 | dw->regs = chip->regs; |
1538 | chip->dw = dw; | ||
1725 | 1539 | ||
1726 | /* Get hardware configuration parameters */ | 1540 | /* Get hardware configuration parameters */ |
1727 | if (autocfg) { | 1541 | if (autocfg) { |
@@ -1746,18 +1560,16 @@ static int dw_probe(struct platform_device *pdev) | |||
1746 | /* Disable BLOCK interrupts as well */ | 1560 | /* Disable BLOCK interrupts as well */ |
1747 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | 1561 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); |
1748 | 1562 | ||
1749 | err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0, | 1563 | err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, 0, |
1750 | "dw_dmac", dw); | 1564 | "dw_dmac", dw); |
1751 | if (err) | 1565 | if (err) |
1752 | return err; | 1566 | return err; |
1753 | 1567 | ||
1754 | platform_set_drvdata(pdev, dw); | ||
1755 | |||
1756 | /* Create a pool of consistent memory blocks for hardware descriptors */ | 1568 | /* Create a pool of consistent memory blocks for hardware descriptors */ |
1757 | dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev, | 1569 | dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, |
1758 | sizeof(struct dw_desc), 4, 0); | 1570 | sizeof(struct dw_desc), 4, 0); |
1759 | if (!dw->desc_pool) { | 1571 | if (!dw->desc_pool) { |
1760 | dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); | 1572 | dev_err(chip->dev, "No memory for descriptors dma pool\n"); |
1761 | return -ENOMEM; | 1573 | return -ENOMEM; |
1762 | } | 1574 | } |
1763 | 1575 | ||
@@ -1798,12 +1610,12 @@ static int dw_probe(struct platform_device *pdev) | |||
1798 | /* Hardware configuration */ | 1610 | /* Hardware configuration */ |
1799 | if (autocfg) { | 1611 | if (autocfg) { |
1800 | unsigned int dwc_params; | 1612 | unsigned int dwc_params; |
1613 | void __iomem *addr = chip->regs + r * sizeof(u32); | ||
1801 | 1614 | ||
1802 | dwc_params = dma_read_byaddr(regs + r * sizeof(u32), | 1615 | dwc_params = dma_read_byaddr(addr, DWC_PARAMS); |
1803 | DWC_PARAMS); | ||
1804 | 1616 | ||
1805 | dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, | 1617 | dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, |
1806 | dwc_params); | 1618 | dwc_params); |
1807 | 1619 | ||
1808 | /* Decode maximum block size for given channel. The | 1620 | /* Decode maximum block size for given channel. The |
1809 | * stored 4 bit value represents blocks from 0x00 for 3 | 1621 | * stored 4 bit value represents blocks from 0x00 for 3 |
@@ -1834,7 +1646,7 @@ static int dw_probe(struct platform_device *pdev) | |||
1834 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); | 1646 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); |
1835 | if (pdata->is_private) | 1647 | if (pdata->is_private) |
1836 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); | 1648 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); |
1837 | dw->dma.dev = &pdev->dev; | 1649 | dw->dma.dev = chip->dev; |
1838 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; | 1650 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; |
1839 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; | 1651 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; |
1840 | 1652 | ||
@@ -1848,32 +1660,20 @@ static int dw_probe(struct platform_device *pdev) | |||
1848 | 1660 | ||
1849 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | 1661 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
1850 | 1662 | ||
1851 | dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n", | 1663 | dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", |
1852 | nr_channels); | 1664 | nr_channels); |
1853 | 1665 | ||
1854 | dma_async_device_register(&dw->dma); | 1666 | dma_async_device_register(&dw->dma); |
1855 | 1667 | ||
1856 | if (pdev->dev.of_node) { | ||
1857 | err = of_dma_controller_register(pdev->dev.of_node, | ||
1858 | dw_dma_of_xlate, dw); | ||
1859 | if (err) | ||
1860 | dev_err(&pdev->dev, | ||
1861 | "could not register of_dma_controller\n"); | ||
1862 | } | ||
1863 | |||
1864 | if (ACPI_HANDLE(&pdev->dev)) | ||
1865 | dw_dma_acpi_controller_register(dw); | ||
1866 | |||
1867 | return 0; | 1668 | return 0; |
1868 | } | 1669 | } |
1670 | EXPORT_SYMBOL_GPL(dw_dma_probe); | ||
1869 | 1671 | ||
1870 | static int dw_remove(struct platform_device *pdev) | 1672 | int dw_dma_remove(struct dw_dma_chip *chip) |
1871 | { | 1673 | { |
1872 | struct dw_dma *dw = platform_get_drvdata(pdev); | 1674 | struct dw_dma *dw = chip->dw; |
1873 | struct dw_dma_chan *dwc, *_dwc; | 1675 | struct dw_dma_chan *dwc, *_dwc; |
1874 | 1676 | ||
1875 | if (pdev->dev.of_node) | ||
1876 | of_dma_controller_free(pdev->dev.of_node); | ||
1877 | dw_dma_off(dw); | 1677 | dw_dma_off(dw); |
1878 | dma_async_device_unregister(&dw->dma); | 1678 | dma_async_device_unregister(&dw->dma); |
1879 | 1679 | ||
@@ -1887,86 +1687,44 @@ static int dw_remove(struct platform_device *pdev) | |||
1887 | 1687 | ||
1888 | return 0; | 1688 | return 0; |
1889 | } | 1689 | } |
1690 | EXPORT_SYMBOL_GPL(dw_dma_remove); | ||
1890 | 1691 | ||
1891 | static void dw_shutdown(struct platform_device *pdev) | 1692 | void dw_dma_shutdown(struct dw_dma_chip *chip) |
1892 | { | 1693 | { |
1893 | struct dw_dma *dw = platform_get_drvdata(pdev); | 1694 | struct dw_dma *dw = chip->dw; |
1894 | 1695 | ||
1895 | dw_dma_off(dw); | 1696 | dw_dma_off(dw); |
1896 | clk_disable_unprepare(dw->clk); | 1697 | clk_disable_unprepare(dw->clk); |
1897 | } | 1698 | } |
1699 | EXPORT_SYMBOL_GPL(dw_dma_shutdown); | ||
1700 | |||
1701 | #ifdef CONFIG_PM_SLEEP | ||
1898 | 1702 | ||
1899 | static int dw_suspend_noirq(struct device *dev) | 1703 | int dw_dma_suspend(struct dw_dma_chip *chip) |
1900 | { | 1704 | { |
1901 | struct platform_device *pdev = to_platform_device(dev); | 1705 | struct dw_dma *dw = chip->dw; |
1902 | struct dw_dma *dw = platform_get_drvdata(pdev); | ||
1903 | 1706 | ||
1904 | dw_dma_off(dw); | 1707 | dw_dma_off(dw); |
1905 | clk_disable_unprepare(dw->clk); | 1708 | clk_disable_unprepare(dw->clk); |
1906 | 1709 | ||
1907 | return 0; | 1710 | return 0; |
1908 | } | 1711 | } |
1712 | EXPORT_SYMBOL_GPL(dw_dma_suspend); | ||
1909 | 1713 | ||
1910 | static int dw_resume_noirq(struct device *dev) | 1714 | int dw_dma_resume(struct dw_dma_chip *chip) |
1911 | { | 1715 | { |
1912 | struct platform_device *pdev = to_platform_device(dev); | 1716 | struct dw_dma *dw = chip->dw; |
1913 | struct dw_dma *dw = platform_get_drvdata(pdev); | ||
1914 | 1717 | ||
1915 | clk_prepare_enable(dw->clk); | 1718 | clk_prepare_enable(dw->clk); |
1916 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | 1719 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
1917 | 1720 | ||
1918 | return 0; | 1721 | return 0; |
1919 | } | 1722 | } |
1723 | EXPORT_SYMBOL_GPL(dw_dma_resume); | ||
1920 | 1724 | ||
1921 | static const struct dev_pm_ops dw_dev_pm_ops = { | 1725 | #endif /* CONFIG_PM_SLEEP */ |
1922 | .suspend_noirq = dw_suspend_noirq, | ||
1923 | .resume_noirq = dw_resume_noirq, | ||
1924 | .freeze_noirq = dw_suspend_noirq, | ||
1925 | .thaw_noirq = dw_resume_noirq, | ||
1926 | .restore_noirq = dw_resume_noirq, | ||
1927 | .poweroff_noirq = dw_suspend_noirq, | ||
1928 | }; | ||
1929 | |||
1930 | #ifdef CONFIG_OF | ||
1931 | static const struct of_device_id dw_dma_of_id_table[] = { | ||
1932 | { .compatible = "snps,dma-spear1340" }, | ||
1933 | {} | ||
1934 | }; | ||
1935 | MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); | ||
1936 | #endif | ||
1937 | |||
1938 | #ifdef CONFIG_ACPI | ||
1939 | static const struct acpi_device_id dw_dma_acpi_id_table[] = { | ||
1940 | { "INTL9C60", 0 }, | ||
1941 | { } | ||
1942 | }; | ||
1943 | #endif | ||
1944 | |||
1945 | static struct platform_driver dw_driver = { | ||
1946 | .probe = dw_probe, | ||
1947 | .remove = dw_remove, | ||
1948 | .shutdown = dw_shutdown, | ||
1949 | .driver = { | ||
1950 | .name = "dw_dmac", | ||
1951 | .pm = &dw_dev_pm_ops, | ||
1952 | .of_match_table = of_match_ptr(dw_dma_of_id_table), | ||
1953 | .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), | ||
1954 | }, | ||
1955 | }; | ||
1956 | |||
1957 | static int __init dw_init(void) | ||
1958 | { | ||
1959 | return platform_driver_register(&dw_driver); | ||
1960 | } | ||
1961 | subsys_initcall(dw_init); | ||
1962 | |||
1963 | static void __exit dw_exit(void) | ||
1964 | { | ||
1965 | platform_driver_unregister(&dw_driver); | ||
1966 | } | ||
1967 | module_exit(dw_exit); | ||
1968 | 1726 | ||
1969 | MODULE_LICENSE("GPL v2"); | 1727 | MODULE_LICENSE("GPL v2"); |
1970 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); | 1728 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); |
1971 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); | 1729 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
1972 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); | 1730 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); |
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h new file mode 100644 index 000000000000..32667f9e0dda --- /dev/null +++ b/drivers/dma/dw/internal.h | |||
@@ -0,0 +1,70 @@ | |||
1 | /* | ||
2 | * Driver for the Synopsys DesignWare DMA Controller | ||
3 | * | ||
4 | * Copyright (C) 2013 Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #ifndef _DW_DMAC_INTERNAL_H | ||
12 | #define _DW_DMAC_INTERNAL_H | ||
13 | |||
14 | #include <linux/device.h> | ||
15 | #include <linux/dw_dmac.h> | ||
16 | |||
17 | #include "regs.h" | ||
18 | |||
19 | /** | ||
20 | * struct dw_dma_chip - representation of DesignWare DMA controller hardware | ||
21 | * @dev: struct device of the DMA controller | ||
22 | * @irq: irq line | ||
23 | * @regs: memory mapped I/O space | ||
24 | * @dw: struct dw_dma that is filled by dw_dma_probe() | ||
25 | */ | ||
26 | struct dw_dma_chip { | ||
27 | struct device *dev; | ||
28 | int irq; | ||
29 | void __iomem *regs; | ||
30 | struct dw_dma *dw; | ||
31 | }; | ||
32 | |||
33 | /* Export to the platform drivers */ | ||
34 | int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata); | ||
35 | int dw_dma_remove(struct dw_dma_chip *chip); | ||
36 | |||
37 | void dw_dma_shutdown(struct dw_dma_chip *chip); | ||
38 | |||
39 | #ifdef CONFIG_PM_SLEEP | ||
40 | |||
41 | int dw_dma_suspend(struct dw_dma_chip *chip); | ||
42 | int dw_dma_resume(struct dw_dma_chip *chip); | ||
43 | |||
44 | #endif /* CONFIG_PM_SLEEP */ | ||
45 | |||
46 | /** | ||
47 | * dwc_get_dms - get destination master | ||
48 | * @slave: pointer to the custom slave configuration | ||
49 | * | ||
50 | * Returns destination master in the custom slave configuration if defined, or | ||
51 | * default value otherwise. | ||
52 | */ | ||
53 | static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave) | ||
54 | { | ||
55 | return slave ? slave->dst_master : 0; | ||
56 | } | ||
57 | |||
58 | /** | ||
59 | * dwc_get_sms - get source master | ||
60 | * @slave: pointer to the custom slave configuration | ||
61 | * | ||
62 | * Returns source master in the custom slave configuration if defined, or | ||
63 | * default value otherwise. | ||
64 | */ | ||
65 | static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) | ||
66 | { | ||
67 | return slave ? slave->src_master : 1; | ||
68 | } | ||
69 | |||
70 | #endif /* _DW_DMAC_INTERNAL_H */ | ||
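The header above is all a bus-glue driver needs: it fills a struct dw_dma_chip with its device, IRQ line and register base, hands the chip to dw_dma_probe(), and later tears it down with dw_dma_remove(). A minimal sketch of that calling convention, not part of the patch (my_glue_attach() is an illustrative name, and passing a NULL pdata to fall back on the controller's hardware autoconfiguration is an assumption about how a glue driver might choose to use the API):

    #include <linux/device.h>
    #include <linux/slab.h>

    #include "internal.h"

    /* Hypothetical glue helper -- shows the intended use of the new core API. */
    static int my_glue_attach(struct device *dev, int irq, void __iomem *regs)
    {
            struct dw_dma_chip *chip;
            int ret;

            chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
            if (!chip)
                    return -ENOMEM;

            /* Describe this controller instance to the core driver */
            chip->dev  = dev;
            chip->irq  = irq;
            chip->regs = regs;

            /* NULL pdata: rely on the controller's autoconfiguration path */
            ret = dw_dma_probe(chip, NULL);
            if (ret)
                    return ret;

            dev_set_drvdata(dev, chip);
            return 0;
    }

The PCI and platform glue files that follow are the two in-tree instances of exactly this pattern.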
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c new file mode 100644 index 000000000000..e89fc24b8293 --- /dev/null +++ b/drivers/dma/dw/pci.c | |||
@@ -0,0 +1,101 @@ | |||
1 | /* | ||
2 | * PCI driver for the Synopsys DesignWare DMA Controller | ||
3 | * | ||
4 | * Copyright (C) 2013 Intel Corporation | ||
5 | * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <linux/device.h> | ||
15 | |||
16 | #include "internal.h" | ||
17 | |||
18 | static struct dw_dma_platform_data dw_pci_pdata = { | ||
19 | .is_private = 1, | ||
20 | .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, | ||
21 | .chan_priority = CHAN_PRIORITY_ASCENDING, | ||
22 | }; | ||
23 | |||
24 | static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) | ||
25 | { | ||
26 | struct dw_dma_chip *chip; | ||
27 | struct dw_dma_platform_data *pdata = (void *)pid->driver_data; | ||
28 | int ret; | ||
29 | |||
30 | ret = pcim_enable_device(pdev); | ||
31 | if (ret) | ||
32 | return ret; | ||
33 | |||
34 | ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); | ||
35 | if (ret) { | ||
36 | dev_err(&pdev->dev, "I/O memory remapping failed\n"); | ||
37 | return ret; | ||
38 | } | ||
39 | |||
40 | pci_set_master(pdev); | ||
41 | pci_try_set_mwi(pdev); | ||
42 | |||
43 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
44 | if (ret) | ||
45 | return ret; | ||
46 | |||
47 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
48 | if (ret) | ||
49 | return ret; | ||
50 | |||
51 | chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); | ||
52 | if (!chip) | ||
53 | return -ENOMEM; | ||
54 | |||
55 | chip->dev = &pdev->dev; | ||
56 | chip->regs = pcim_iomap_table(pdev)[0]; | ||
57 | chip->irq = pdev->irq; | ||
58 | |||
59 | ret = dw_dma_probe(chip, pdata); | ||
60 | if (ret) | ||
61 | return ret; | ||
62 | |||
63 | pci_set_drvdata(pdev, chip); | ||
64 | |||
65 | return 0; | ||
66 | } | ||
67 | |||
68 | static void dw_pci_remove(struct pci_dev *pdev) | ||
69 | { | ||
70 | struct dw_dma_chip *chip = pci_get_drvdata(pdev); | ||
71 | int ret; | ||
72 | |||
73 | ret = dw_dma_remove(chip); | ||
74 | if (ret) | ||
75 | dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret); | ||
76 | } | ||
77 | |||
78 | static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = { | ||
79 | /* Medfield */ | ||
80 | { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata }, | ||
81 | { PCI_VDEVICE(INTEL, 0x0830), (kernel_ulong_t)&dw_pci_pdata }, | ||
82 | |||
83 | /* BayTrail */ | ||
84 | { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata }, | ||
85 | { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata }, | ||
86 | { } | ||
87 | }; | ||
88 | MODULE_DEVICE_TABLE(pci, dw_pci_id_table); | ||
89 | |||
90 | static struct pci_driver dw_pci_driver = { | ||
91 | .name = "dw_dmac_pci", | ||
92 | .id_table = dw_pci_id_table, | ||
93 | .probe = dw_pci_probe, | ||
94 | .remove = dw_pci_remove, | ||
95 | }; | ||
96 | |||
97 | module_pci_driver(dw_pci_driver); | ||
98 | |||
99 | MODULE_LICENSE("GPL v2"); | ||
100 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller PCI driver"); | ||
101 | MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); | ||
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c new file mode 100644 index 000000000000..6c9449cffae8 --- /dev/null +++ b/drivers/dma/dw/platform.c | |||
@@ -0,0 +1,317 @@ | |||
1 | /* | ||
2 | * Platform driver for the Synopsys DesignWare DMA Controller | ||
3 | * | ||
4 | * Copyright (C) 2007-2008 Atmel Corporation | ||
5 | * Copyright (C) 2010-2011 ST Microelectronics | ||
6 | * Copyright (C) 2013 Intel Corporation | ||
7 | * | ||
8 | * Some parts of this driver are derived from the original dw_dmac. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/device.h> | ||
17 | #include <linux/clk.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/dmaengine.h> | ||
20 | #include <linux/dma-mapping.h> | ||
21 | #include <linux/of.h> | ||
22 | #include <linux/of_dma.h> | ||
23 | #include <linux/acpi.h> | ||
24 | #include <linux/acpi_dma.h> | ||
25 | |||
26 | #include "internal.h" | ||
27 | |||
28 | struct dw_dma_of_filter_args { | ||
29 | struct dw_dma *dw; | ||
30 | unsigned int req; | ||
31 | unsigned int src; | ||
32 | unsigned int dst; | ||
33 | }; | ||
34 | |||
35 | static bool dw_dma_of_filter(struct dma_chan *chan, void *param) | ||
36 | { | ||
37 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
38 | struct dw_dma_of_filter_args *fargs = param; | ||
39 | |||
40 | /* Ensure the device matches our channel */ | ||
41 | if (chan->device != &fargs->dw->dma) | ||
42 | return false; | ||
43 | |||
44 | dwc->request_line = fargs->req; | ||
45 | dwc->src_master = fargs->src; | ||
46 | dwc->dst_master = fargs->dst; | ||
47 | |||
48 | return true; | ||
49 | } | ||
50 | |||
51 | static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, | ||
52 | struct of_dma *ofdma) | ||
53 | { | ||
54 | struct dw_dma *dw = ofdma->of_dma_data; | ||
55 | struct dw_dma_of_filter_args fargs = { | ||
56 | .dw = dw, | ||
57 | }; | ||
58 | dma_cap_mask_t cap; | ||
59 | |||
60 | if (dma_spec->args_count != 3) | ||
61 | return NULL; | ||
62 | |||
63 | fargs.req = dma_spec->args[0]; | ||
64 | fargs.src = dma_spec->args[1]; | ||
65 | fargs.dst = dma_spec->args[2]; | ||
66 | |||
67 | if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS || | ||
68 | fargs.src >= dw->nr_masters || | ||
69 | fargs.dst >= dw->nr_masters)) | ||
70 | return NULL; | ||
71 | |||
72 | dma_cap_zero(cap); | ||
73 | dma_cap_set(DMA_SLAVE, cap); | ||
74 | |||
75 | /* TODO: there should be a simpler way to do this */ | ||
76 | return dma_request_channel(cap, dw_dma_of_filter, &fargs); | ||
77 | } | ||
78 | |||
79 | #ifdef CONFIG_ACPI | ||
80 | static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) | ||
81 | { | ||
82 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
83 | struct acpi_dma_spec *dma_spec = param; | ||
84 | |||
85 | if (chan->device->dev != dma_spec->dev || | ||
86 | chan->chan_id != dma_spec->chan_id) | ||
87 | return false; | ||
88 | |||
89 | dwc->request_line = dma_spec->slave_id; | ||
90 | dwc->src_master = dwc_get_sms(NULL); | ||
91 | dwc->dst_master = dwc_get_dms(NULL); | ||
92 | |||
93 | return true; | ||
94 | } | ||
95 | |||
96 | static void dw_dma_acpi_controller_register(struct dw_dma *dw) | ||
97 | { | ||
98 | struct device *dev = dw->dma.dev; | ||
99 | struct acpi_dma_filter_info *info; | ||
100 | int ret; | ||
101 | |||
102 | info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); | ||
103 | if (!info) | ||
104 | return; | ||
105 | |||
106 | dma_cap_zero(info->dma_cap); | ||
107 | dma_cap_set(DMA_SLAVE, info->dma_cap); | ||
108 | info->filter_fn = dw_dma_acpi_filter; | ||
109 | |||
110 | ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate, | ||
111 | info); | ||
112 | if (ret) | ||
113 | dev_err(dev, "could not register acpi_dma_controller\n"); | ||
114 | } | ||
115 | #else /* !CONFIG_ACPI */ | ||
116 | static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {} | ||
117 | #endif /* !CONFIG_ACPI */ | ||
118 | |||
119 | #ifdef CONFIG_OF | ||
120 | static struct dw_dma_platform_data * | ||
121 | dw_dma_parse_dt(struct platform_device *pdev) | ||
122 | { | ||
123 | struct device_node *np = pdev->dev.of_node; | ||
124 | struct dw_dma_platform_data *pdata; | ||
125 | u32 tmp, arr[4]; | ||
126 | |||
127 | if (!np) { | ||
128 | dev_err(&pdev->dev, "Missing DT data\n"); | ||
129 | return NULL; | ||
130 | } | ||
131 | |||
132 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | ||
133 | if (!pdata) | ||
134 | return NULL; | ||
135 | |||
136 | if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels)) | ||
137 | return NULL; | ||
138 | |||
139 | if (of_property_read_bool(np, "is_private")) | ||
140 | pdata->is_private = true; | ||
141 | |||
142 | if (!of_property_read_u32(np, "chan_allocation_order", &tmp)) | ||
143 | pdata->chan_allocation_order = (unsigned char)tmp; | ||
144 | |||
145 | if (!of_property_read_u32(np, "chan_priority", &tmp)) | ||
146 | pdata->chan_priority = tmp; | ||
147 | |||
148 | if (!of_property_read_u32(np, "block_size", &tmp)) | ||
149 | pdata->block_size = tmp; | ||
150 | |||
151 | if (!of_property_read_u32(np, "dma-masters", &tmp)) { | ||
152 | if (tmp > 4) | ||
153 | return NULL; | ||
154 | |||
155 | pdata->nr_masters = tmp; | ||
156 | } | ||
157 | |||
158 | if (!of_property_read_u32_array(np, "data_width", arr, | ||
159 | pdata->nr_masters)) | ||
160 | for (tmp = 0; tmp < pdata->nr_masters; tmp++) | ||
161 | pdata->data_width[tmp] = arr[tmp]; | ||
162 | |||
163 | return pdata; | ||
164 | } | ||
165 | #else | ||
166 | static inline struct dw_dma_platform_data * | ||
167 | dw_dma_parse_dt(struct platform_device *pdev) | ||
168 | { | ||
169 | return NULL; | ||
170 | } | ||
171 | #endif | ||
172 | |||
173 | static int dw_probe(struct platform_device *pdev) | ||
174 | { | ||
175 | struct dw_dma_chip *chip; | ||
176 | struct device *dev = &pdev->dev; | ||
177 | struct resource *mem; | ||
178 | struct dw_dma_platform_data *pdata; | ||
179 | int err; | ||
180 | |||
181 | chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); | ||
182 | if (!chip) | ||
183 | return -ENOMEM; | ||
184 | |||
185 | chip->irq = platform_get_irq(pdev, 0); | ||
186 | if (chip->irq < 0) | ||
187 | return chip->irq; | ||
188 | |||
189 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
190 | chip->regs = devm_ioremap_resource(dev, mem); | ||
191 | if (IS_ERR(chip->regs)) | ||
192 | return PTR_ERR(chip->regs); | ||
193 | |||
194 | /* Apply default dma_mask if needed */ | ||
195 | if (!dev->dma_mask) { | ||
196 | dev->dma_mask = &dev->coherent_dma_mask; | ||
197 | dev->coherent_dma_mask = DMA_BIT_MASK(32); | ||
198 | } | ||
199 | |||
200 | pdata = dev_get_platdata(dev); | ||
201 | if (!pdata) | ||
202 | pdata = dw_dma_parse_dt(pdev); | ||
203 | |||
204 | chip->dev = dev; | ||
205 | |||
206 | err = dw_dma_probe(chip, pdata); | ||
207 | if (err) | ||
208 | return err; | ||
209 | |||
210 | platform_set_drvdata(pdev, chip); | ||
211 | |||
212 | if (pdev->dev.of_node) { | ||
213 | err = of_dma_controller_register(pdev->dev.of_node, | ||
214 | dw_dma_of_xlate, chip->dw); | ||
215 | if (err) | ||
216 | dev_err(&pdev->dev, | ||
217 | "could not register of_dma_controller\n"); | ||
218 | } | ||
219 | |||
220 | if (ACPI_HANDLE(&pdev->dev)) | ||
221 | dw_dma_acpi_controller_register(chip->dw); | ||
222 | |||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | static int dw_remove(struct platform_device *pdev) | ||
227 | { | ||
228 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); | ||
229 | |||
230 | if (pdev->dev.of_node) | ||
231 | of_dma_controller_free(pdev->dev.of_node); | ||
232 | |||
233 | return dw_dma_remove(chip); | ||
234 | } | ||
235 | |||
236 | static void dw_shutdown(struct platform_device *pdev) | ||
237 | { | ||
238 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); | ||
239 | |||
240 | dw_dma_shutdown(chip); | ||
241 | } | ||
242 | |||
243 | #ifdef CONFIG_OF | ||
244 | static const struct of_device_id dw_dma_of_id_table[] = { | ||
245 | { .compatible = "snps,dma-spear1340" }, | ||
246 | {} | ||
247 | }; | ||
248 | MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); | ||
249 | #endif | ||
250 | |||
251 | #ifdef CONFIG_ACPI | ||
252 | static const struct acpi_device_id dw_dma_acpi_id_table[] = { | ||
253 | { "INTL9C60", 0 }, | ||
254 | { } | ||
255 | }; | ||
256 | #endif | ||
257 | |||
258 | #ifdef CONFIG_PM_SLEEP | ||
259 | |||
260 | static int dw_suspend_noirq(struct device *dev) | ||
261 | { | ||
262 | struct platform_device *pdev = to_platform_device(dev); | ||
263 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); | ||
264 | |||
265 | return dw_dma_suspend(chip); | ||
266 | } | ||
267 | |||
268 | static int dw_resume_noirq(struct device *dev) | ||
269 | { | ||
270 | struct platform_device *pdev = to_platform_device(dev); | ||
271 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); | ||
272 | |||
273 | return dw_dma_resume(chip); | ||
274 | } | ||
275 | |||
276 | #else /* !CONFIG_PM_SLEEP */ | ||
277 | |||
278 | #define dw_suspend_noirq NULL | ||
279 | #define dw_resume_noirq NULL | ||
280 | |||
281 | #endif /* !CONFIG_PM_SLEEP */ | ||
282 | |||
283 | static const struct dev_pm_ops dw_dev_pm_ops = { | ||
284 | .suspend_noirq = dw_suspend_noirq, | ||
285 | .resume_noirq = dw_resume_noirq, | ||
286 | .freeze_noirq = dw_suspend_noirq, | ||
287 | .thaw_noirq = dw_resume_noirq, | ||
288 | .restore_noirq = dw_resume_noirq, | ||
289 | .poweroff_noirq = dw_suspend_noirq, | ||
290 | }; | ||
291 | |||
292 | static struct platform_driver dw_driver = { | ||
293 | .probe = dw_probe, | ||
294 | .remove = dw_remove, | ||
295 | .shutdown = dw_shutdown, | ||
296 | .driver = { | ||
297 | .name = "dw_dmac", | ||
298 | .pm = &dw_dev_pm_ops, | ||
299 | .of_match_table = of_match_ptr(dw_dma_of_id_table), | ||
300 | .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), | ||
301 | }, | ||
302 | }; | ||
303 | |||
304 | static int __init dw_init(void) | ||
305 | { | ||
306 | return platform_driver_register(&dw_driver); | ||
307 | } | ||
308 | subsys_initcall(dw_init); | ||
309 | |||
310 | static void __exit dw_exit(void) | ||
311 | { | ||
312 | platform_driver_unregister(&dw_driver); | ||
313 | } | ||
314 | module_exit(dw_exit); | ||
315 | |||
316 | MODULE_LICENSE("GPL v2"); | ||
317 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver"); | ||
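As dw_dma_of_xlate() in the platform glue above shows, a DT consumer addresses this controller with a three-cell specifier: request line, source master, destination master. A client driver whose node carries a matching dmas/dma-names property can then obtain a channel through the generic dmaengine helper; a hedged sketch (the "tx" channel name and the fall-back-to-PIO policy are assumptions, not taken from the patch):

    #include <linux/dmaengine.h>

    /* Illustrative only: assumes the client node has something like
     * dmas = <&dwdma 1 0 0>; dma-names = "tx"; in its device tree. */
    static struct dma_chan *my_client_get_tx_chan(struct device *dev)
    {
            struct dma_chan *chan;

            chan = dma_request_slave_channel(dev, "tx");
            if (!chan)
                    dev_warn(dev, "no TX DMA channel, falling back to PIO\n");

            return chan;
    }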
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw/regs.h index 9d417200bd57..deb4274f80f4 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw/regs.h | |||
@@ -9,6 +9,7 @@ | |||
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/interrupt.h> | ||
12 | #include <linux/dmaengine.h> | 13 | #include <linux/dmaengine.h> |
13 | #include <linux/dw_dmac.h> | 14 | #include <linux/dw_dmac.h> |
14 | 15 | ||
@@ -100,6 +101,12 @@ struct dw_dma_regs { | |||
100 | u32 DW_PARAMS; | 101 | u32 DW_PARAMS; |
101 | }; | 102 | }; |
102 | 103 | ||
104 | /* | ||
105 | * Big endian I/O access when reading and writing to the DMA controller | ||
106 | * registers. This is needed on some platforms, like the Atmel AVR32 | ||
107 | * architecture. | ||
108 | */ | ||
109 | |||
103 | #ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO | 110 | #ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO |
104 | #define dma_readl_native ioread32be | 111 | #define dma_readl_native ioread32be |
105 | #define dma_writel_native iowrite32be | 112 | #define dma_writel_native iowrite32be |
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 4fc2980556ad..49e8fbdb8983 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -1368,7 +1368,7 @@ static int fsldma_of_probe(struct platform_device *op) | |||
1368 | 1368 | ||
1369 | dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); | 1369 | dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); |
1370 | 1370 | ||
1371 | dev_set_drvdata(&op->dev, fdev); | 1371 | platform_set_drvdata(op, fdev); |
1372 | 1372 | ||
1373 | /* | 1373 | /* |
1374 | * We cannot use of_platform_bus_probe() because there is no | 1374 | * We cannot use of_platform_bus_probe() because there is no |
@@ -1417,7 +1417,7 @@ static int fsldma_of_remove(struct platform_device *op) | |||
1417 | struct fsldma_device *fdev; | 1417 | struct fsldma_device *fdev; |
1418 | unsigned int i; | 1418 | unsigned int i; |
1419 | 1419 | ||
1420 | fdev = dev_get_drvdata(&op->dev); | 1420 | fdev = platform_get_drvdata(op); |
1421 | dma_async_device_unregister(&fdev->common); | 1421 | dma_async_device_unregister(&fdev->common); |
1422 | 1422 | ||
1423 | fsldma_free_irqs(fdev); | 1423 | fsldma_free_irqs(fdev); |
@@ -1428,7 +1428,6 @@ static int fsldma_of_remove(struct platform_device *op) | |||
1428 | } | 1428 | } |
1429 | 1429 | ||
1430 | iounmap(fdev->regs); | 1430 | iounmap(fdev->regs); |
1431 | dev_set_drvdata(&op->dev, NULL); | ||
1432 | kfree(fdev); | 1431 | kfree(fdev); |
1433 | 1432 | ||
1434 | return 0; | 1433 | return 0; |
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index f28583370d00..ff2aab973b45 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -27,6 +27,8 @@ | |||
27 | #include <linux/clk.h> | 27 | #include <linux/clk.h> |
28 | #include <linux/dmaengine.h> | 28 | #include <linux/dmaengine.h> |
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/of_device.h> | ||
31 | #include <linux/of_dma.h> | ||
30 | 32 | ||
31 | #include <asm/irq.h> | 33 | #include <asm/irq.h> |
32 | #include <linux/platform_data/dma-imx.h> | 34 | #include <linux/platform_data/dma-imx.h> |
@@ -186,6 +188,11 @@ struct imxdma_engine { | |||
186 | enum imx_dma_type devtype; | 188 | enum imx_dma_type devtype; |
187 | }; | 189 | }; |
188 | 190 | ||
191 | struct imxdma_filter_data { | ||
192 | struct imxdma_engine *imxdma; | ||
193 | int request; | ||
194 | }; | ||
195 | |||
189 | static struct platform_device_id imx_dma_devtype[] = { | 196 | static struct platform_device_id imx_dma_devtype[] = { |
190 | { | 197 | { |
191 | .name = "imx1-dma", | 198 | .name = "imx1-dma", |
@@ -202,6 +209,22 @@ static struct platform_device_id imx_dma_devtype[] = { | |||
202 | }; | 209 | }; |
203 | MODULE_DEVICE_TABLE(platform, imx_dma_devtype); | 210 | MODULE_DEVICE_TABLE(platform, imx_dma_devtype); |
204 | 211 | ||
212 | static const struct of_device_id imx_dma_of_dev_id[] = { | ||
213 | { | ||
214 | .compatible = "fsl,imx1-dma", | ||
215 | .data = &imx_dma_devtype[IMX1_DMA], | ||
216 | }, { | ||
217 | .compatible = "fsl,imx21-dma", | ||
218 | .data = &imx_dma_devtype[IMX21_DMA], | ||
219 | }, { | ||
220 | .compatible = "fsl,imx27-dma", | ||
221 | .data = &imx_dma_devtype[IMX27_DMA], | ||
222 | }, { | ||
223 | /* sentinel */ | ||
224 | } | ||
225 | }; | ||
226 | MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id); | ||
227 | |||
205 | static inline int is_imx1_dma(struct imxdma_engine *imxdma) | 228 | static inline int is_imx1_dma(struct imxdma_engine *imxdma) |
206 | { | 229 | { |
207 | return imxdma->devtype == IMX1_DMA; | 230 | return imxdma->devtype == IMX1_DMA; |
@@ -996,17 +1019,55 @@ static void imxdma_issue_pending(struct dma_chan *chan) | |||
996 | spin_unlock_irqrestore(&imxdma->lock, flags); | 1019 | spin_unlock_irqrestore(&imxdma->lock, flags); |
997 | } | 1020 | } |
998 | 1021 | ||
1022 | static bool imxdma_filter_fn(struct dma_chan *chan, void *param) | ||
1023 | { | ||
1024 | struct imxdma_filter_data *fdata = param; | ||
1025 | struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan); | ||
1026 | |||
1027 | if (chan->device->dev != fdata->imxdma->dev) | ||
1028 | return false; | ||
1029 | |||
1030 | imxdma_chan->dma_request = fdata->request; | ||
1031 | chan->private = NULL; | ||
1032 | |||
1033 | return true; | ||
1034 | } | ||
1035 | |||
1036 | static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec, | ||
1037 | struct of_dma *ofdma) | ||
1038 | { | ||
1039 | int count = dma_spec->args_count; | ||
1040 | struct imxdma_engine *imxdma = ofdma->of_dma_data; | ||
1041 | struct imxdma_filter_data fdata = { | ||
1042 | .imxdma = imxdma, | ||
1043 | }; | ||
1044 | |||
1045 | if (count != 1) | ||
1046 | return NULL; | ||
1047 | |||
1048 | fdata.request = dma_spec->args[0]; | ||
1049 | |||
1050 | return dma_request_channel(imxdma->dma_device.cap_mask, | ||
1051 | imxdma_filter_fn, &fdata); | ||
1052 | } | ||
1053 | |||
999 | static int __init imxdma_probe(struct platform_device *pdev) | 1054 | static int __init imxdma_probe(struct platform_device *pdev) |
1000 | { | 1055 | { |
1001 | struct imxdma_engine *imxdma; | 1056 | struct imxdma_engine *imxdma; |
1002 | struct resource *res; | 1057 | struct resource *res; |
1058 | const struct of_device_id *of_id; | ||
1003 | int ret, i; | 1059 | int ret, i; |
1004 | int irq, irq_err; | 1060 | int irq, irq_err; |
1005 | 1061 | ||
1062 | of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev); | ||
1063 | if (of_id) | ||
1064 | pdev->id_entry = of_id->data; | ||
1065 | |||
1006 | imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL); | 1066 | imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL); |
1007 | if (!imxdma) | 1067 | if (!imxdma) |
1008 | return -ENOMEM; | 1068 | return -ENOMEM; |
1009 | 1069 | ||
1070 | imxdma->dev = &pdev->dev; | ||
1010 | imxdma->devtype = pdev->id_entry->driver_data; | 1071 | imxdma->devtype = pdev->id_entry->driver_data; |
1011 | 1072 | ||
1012 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1073 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -1111,7 +1172,6 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
1111 | &imxdma->dma_device.channels); | 1172 | &imxdma->dma_device.channels); |
1112 | } | 1173 | } |
1113 | 1174 | ||
1114 | imxdma->dev = &pdev->dev; | ||
1115 | imxdma->dma_device.dev = &pdev->dev; | 1175 | imxdma->dma_device.dev = &pdev->dev; |
1116 | 1176 | ||
1117 | imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources; | 1177 | imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources; |
@@ -1136,8 +1196,19 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
1136 | goto err; | 1196 | goto err; |
1137 | } | 1197 | } |
1138 | 1198 | ||
1199 | if (pdev->dev.of_node) { | ||
1200 | ret = of_dma_controller_register(pdev->dev.of_node, | ||
1201 | imxdma_xlate, imxdma); | ||
1202 | if (ret) { | ||
1203 | dev_err(&pdev->dev, "unable to register of_dma_controller\n"); | ||
1204 | goto err_of_dma_controller; | ||
1205 | } | ||
1206 | } | ||
1207 | |||
1139 | return 0; | 1208 | return 0; |
1140 | 1209 | ||
1210 | err_of_dma_controller: | ||
1211 | dma_async_device_unregister(&imxdma->dma_device); | ||
1141 | err: | 1212 | err: |
1142 | clk_disable_unprepare(imxdma->dma_ipg); | 1213 | clk_disable_unprepare(imxdma->dma_ipg); |
1143 | clk_disable_unprepare(imxdma->dma_ahb); | 1214 | clk_disable_unprepare(imxdma->dma_ahb); |
@@ -1150,6 +1221,9 @@ static int imxdma_remove(struct platform_device *pdev) | |||
1150 | 1221 | ||
1151 | dma_async_device_unregister(&imxdma->dma_device); | 1222 | dma_async_device_unregister(&imxdma->dma_device); |
1152 | 1223 | ||
1224 | if (pdev->dev.of_node) | ||
1225 | of_dma_controller_free(pdev->dev.of_node); | ||
1226 | |||
1153 | clk_disable_unprepare(imxdma->dma_ipg); | 1227 | clk_disable_unprepare(imxdma->dma_ipg); |
1154 | clk_disable_unprepare(imxdma->dma_ahb); | 1228 | clk_disable_unprepare(imxdma->dma_ahb); |
1155 | 1229 | ||
@@ -1159,6 +1233,7 @@ static int imxdma_remove(struct platform_device *pdev) | |||
1159 | static struct platform_driver imxdma_driver = { | 1233 | static struct platform_driver imxdma_driver = { |
1160 | .driver = { | 1234 | .driver = { |
1161 | .name = "imx-dma", | 1235 | .name = "imx-dma", |
1236 | .of_match_table = imx_dma_of_dev_id, | ||
1162 | }, | 1237 | }, |
1163 | .id_table = imx_dma_devtype, | 1238 | .id_table = imx_dma_devtype, |
1164 | .remove = imxdma_remove, | 1239 | .remove = imxdma_remove, |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 092867bf795c..1e44b8cf95da 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/dmaengine.h> | 36 | #include <linux/dmaengine.h> |
37 | #include <linux/of.h> | 37 | #include <linux/of.h> |
38 | #include <linux/of_device.h> | 38 | #include <linux/of_device.h> |
39 | #include <linux/of_dma.h> | ||
39 | 40 | ||
40 | #include <asm/irq.h> | 41 | #include <asm/irq.h> |
41 | #include <linux/platform_data/dma-imx-sdma.h> | 42 | #include <linux/platform_data/dma-imx-sdma.h> |
@@ -1296,6 +1297,35 @@ err_dma_alloc: | |||
1296 | return ret; | 1297 | return ret; |
1297 | } | 1298 | } |
1298 | 1299 | ||
1300 | static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) | ||
1301 | { | ||
1302 | struct imx_dma_data *data = fn_param; | ||
1303 | |||
1304 | if (!imx_dma_is_general_purpose(chan)) | ||
1305 | return false; | ||
1306 | |||
1307 | chan->private = data; | ||
1308 | |||
1309 | return true; | ||
1310 | } | ||
1311 | |||
1312 | static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec, | ||
1313 | struct of_dma *ofdma) | ||
1314 | { | ||
1315 | struct sdma_engine *sdma = ofdma->of_dma_data; | ||
1316 | dma_cap_mask_t mask = sdma->dma_device.cap_mask; | ||
1317 | struct imx_dma_data data; | ||
1318 | |||
1319 | if (dma_spec->args_count != 3) | ||
1320 | return NULL; | ||
1321 | |||
1322 | data.dma_request = dma_spec->args[0]; | ||
1323 | data.peripheral_type = dma_spec->args[1]; | ||
1324 | data.priority = dma_spec->args[2]; | ||
1325 | |||
1326 | return dma_request_channel(mask, sdma_filter_fn, &data); | ||
1327 | } | ||
1328 | |||
1299 | static int __init sdma_probe(struct platform_device *pdev) | 1329 | static int __init sdma_probe(struct platform_device *pdev) |
1300 | { | 1330 | { |
1301 | const struct of_device_id *of_id = | 1331 | const struct of_device_id *of_id = |
@@ -1443,10 +1473,20 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1443 | goto err_init; | 1473 | goto err_init; |
1444 | } | 1474 | } |
1445 | 1475 | ||
1476 | if (np) { | ||
1477 | ret = of_dma_controller_register(np, sdma_xlate, sdma); | ||
1478 | if (ret) { | ||
1479 | dev_err(&pdev->dev, "failed to register controller\n"); | ||
1480 | goto err_register; | ||
1481 | } | ||
1482 | } | ||
1483 | |||
1446 | dev_info(sdma->dev, "initialized\n"); | 1484 | dev_info(sdma->dev, "initialized\n"); |
1447 | 1485 | ||
1448 | return 0; | 1486 | return 0; |
1449 | 1487 | ||
1488 | err_register: | ||
1489 | dma_async_device_unregister(&sdma->dma_device); | ||
1450 | err_init: | 1490 | err_init: |
1451 | kfree(sdma->script_addrs); | 1491 | kfree(sdma->script_addrs); |
1452 | err_alloc: | 1492 | err_alloc: |
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 43d5a6c33297..9b9366537d73 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c | |||
@@ -154,6 +154,10 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) | |||
154 | { | 154 | { |
155 | writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, | 155 | writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, |
156 | tdmac->reg_base + TDCR); | 156 | tdmac->reg_base + TDCR); |
157 | |||
158 | /* disable irq */ | ||
159 | writel(0, tdmac->reg_base + TDIMR); | ||
160 | |||
157 | tdmac->status = DMA_SUCCESS; | 161 | tdmac->status = DMA_SUCCESS; |
158 | } | 162 | } |
159 | 163 | ||
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index b48a79c28845..719593002ab7 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -693,7 +693,7 @@ static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param) | |||
693 | return true; | 693 | return true; |
694 | } | 694 | } |
695 | 695 | ||
696 | struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec, | 696 | static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec, |
697 | struct of_dma *ofdma) | 697 | struct of_dma *ofdma) |
698 | { | 698 | { |
699 | struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data; | 699 | struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data; |
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 7aa0864cd487..75334bdd2c56 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c | |||
@@ -35,8 +35,7 @@ static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec) | |||
35 | struct of_dma *ofdma; | 35 | struct of_dma *ofdma; |
36 | 36 | ||
37 | list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) | 37 | list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) |
38 | if ((ofdma->of_node == dma_spec->np) && | 38 | if (ofdma->of_node == dma_spec->np) |
39 | (ofdma->of_dma_nbcells == dma_spec->args_count)) | ||
40 | return ofdma; | 39 | return ofdma; |
41 | 40 | ||
42 | pr_debug("%s: can't find DMA controller %s\n", __func__, | 41 | pr_debug("%s: can't find DMA controller %s\n", __func__, |
@@ -64,8 +63,6 @@ int of_dma_controller_register(struct device_node *np, | |||
64 | void *data) | 63 | void *data) |
65 | { | 64 | { |
66 | struct of_dma *ofdma; | 65 | struct of_dma *ofdma; |
67 | int nbcells; | ||
68 | const __be32 *prop; | ||
69 | 66 | ||
70 | if (!np || !of_dma_xlate) { | 67 | if (!np || !of_dma_xlate) { |
71 | pr_err("%s: not enough information provided\n", __func__); | 68 | pr_err("%s: not enough information provided\n", __func__); |
@@ -76,19 +73,7 @@ int of_dma_controller_register(struct device_node *np, | |||
76 | if (!ofdma) | 73 | if (!ofdma) |
77 | return -ENOMEM; | 74 | return -ENOMEM; |
78 | 75 | ||
79 | prop = of_get_property(np, "#dma-cells", NULL); | ||
80 | if (prop) | ||
81 | nbcells = be32_to_cpup(prop); | ||
82 | |||
83 | if (!prop || !nbcells) { | ||
84 | pr_err("%s: #dma-cells property is missing or invalid\n", | ||
85 | __func__); | ||
86 | kfree(ofdma); | ||
87 | return -EINVAL; | ||
88 | } | ||
89 | |||
90 | ofdma->of_node = np; | 76 | ofdma->of_node = np; |
91 | ofdma->of_dma_nbcells = nbcells; | ||
92 | ofdma->of_dma_xlate = of_dma_xlate; | 77 | ofdma->of_dma_xlate = of_dma_xlate; |
93 | ofdma->of_dma_data = data; | 78 | ofdma->of_dma_data = data; |
94 | 79 | ||
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 7ec82f0667eb..593827b3fdd4 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -157,7 +157,6 @@ enum pl330_reqtype { | |||
157 | #define PERIPH_REV_R0P0 0 | 157 | #define PERIPH_REV_R0P0 0 |
158 | #define PERIPH_REV_R1P0 1 | 158 | #define PERIPH_REV_R1P0 1 |
159 | #define PERIPH_REV_R1P1 2 | 159 | #define PERIPH_REV_R1P1 2 |
160 | #define PCELL_ID 0xff0 | ||
161 | 160 | ||
162 | #define CR0_PERIPH_REQ_SET (1 << 0) | 161 | #define CR0_PERIPH_REQ_SET (1 << 0) |
163 | #define CR0_BOOT_EN_SET (1 << 1) | 162 | #define CR0_BOOT_EN_SET (1 << 1) |
@@ -193,8 +192,6 @@ enum pl330_reqtype { | |||
193 | #define INTEG_CFG 0x0 | 192 | #define INTEG_CFG 0x0 |
194 | #define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12)) | 193 | #define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12)) |
195 | 194 | ||
196 | #define PCELL_ID_VAL 0xb105f00d | ||
197 | |||
198 | #define PL330_STATE_STOPPED (1 << 0) | 195 | #define PL330_STATE_STOPPED (1 << 0) |
199 | #define PL330_STATE_EXECUTING (1 << 1) | 196 | #define PL330_STATE_EXECUTING (1 << 1) |
200 | #define PL330_STATE_WFE (1 << 2) | 197 | #define PL330_STATE_WFE (1 << 2) |
@@ -292,7 +289,6 @@ static unsigned cmd_line; | |||
292 | /* Populated by the PL330 core driver for DMA API driver's info */ | 289 | /* Populated by the PL330 core driver for DMA API driver's info */ |
293 | struct pl330_config { | 290 | struct pl330_config { |
294 | u32 periph_id; | 291 | u32 periph_id; |
295 | u32 pcell_id; | ||
296 | #define DMAC_MODE_NS (1 << 0) | 292 | #define DMAC_MODE_NS (1 << 0) |
297 | unsigned int mode; | 293 | unsigned int mode; |
298 | unsigned int data_bus_width:10; /* In number of bits */ | 294 | unsigned int data_bus_width:10; /* In number of bits */ |
@@ -505,7 +501,7 @@ struct pl330_dmac { | |||
505 | /* Maximum possible events/irqs */ | 501 | /* Maximum possible events/irqs */ |
506 | int events[32]; | 502 | int events[32]; |
507 | /* BUS address of MicroCode buffer */ | 503 | /* BUS address of MicroCode buffer */ |
508 | u32 mcode_bus; | 504 | dma_addr_t mcode_bus; |
509 | /* CPU address of MicroCode buffer */ | 505 | /* CPU address of MicroCode buffer */ |
510 | void *mcode_cpu; | 506 | void *mcode_cpu; |
511 | /* List of all Channel threads */ | 507 | /* List of all Channel threads */ |
@@ -650,19 +646,6 @@ static inline bool _manager_ns(struct pl330_thread *thrd) | |||
650 | return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false; | 646 | return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false; |
651 | } | 647 | } |
652 | 648 | ||
653 | static inline u32 get_id(struct pl330_info *pi, u32 off) | ||
654 | { | ||
655 | void __iomem *regs = pi->base; | ||
656 | u32 id = 0; | ||
657 | |||
658 | id |= (readb(regs + off + 0x0) << 0); | ||
659 | id |= (readb(regs + off + 0x4) << 8); | ||
660 | id |= (readb(regs + off + 0x8) << 16); | ||
661 | id |= (readb(regs + off + 0xc) << 24); | ||
662 | |||
663 | return id; | ||
664 | } | ||
665 | |||
666 | static inline u32 get_revision(u32 periph_id) | 649 | static inline u32 get_revision(u32 periph_id) |
667 | { | 650 | { |
668 | return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK; | 651 | return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK; |
@@ -1986,9 +1969,6 @@ static void read_dmac_config(struct pl330_info *pi) | |||
1986 | pi->pcfg.num_events = val; | 1969 | pi->pcfg.num_events = val; |
1987 | 1970 | ||
1988 | pi->pcfg.irq_ns = readl(regs + CR3); | 1971 | pi->pcfg.irq_ns = readl(regs + CR3); |
1989 | |||
1990 | pi->pcfg.periph_id = get_id(pi, PERIPH_ID); | ||
1991 | pi->pcfg.pcell_id = get_id(pi, PCELL_ID); | ||
1992 | } | 1972 | } |
1993 | 1973 | ||
1994 | static inline void _reset_thread(struct pl330_thread *thrd) | 1974 | static inline void _reset_thread(struct pl330_thread *thrd) |
@@ -2098,10 +2078,8 @@ static int pl330_add(struct pl330_info *pi) | |||
2098 | regs = pi->base; | 2078 | regs = pi->base; |
2099 | 2079 | ||
2100 | /* Check if we can handle this DMAC */ | 2080 | /* Check if we can handle this DMAC */ |
2101 | if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL | 2081 | if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) { |
2102 | || get_id(pi, PCELL_ID) != PCELL_ID_VAL) { | 2082 | dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id); |
2103 | dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n", | ||
2104 | get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID)); | ||
2105 | return -EINVAL; | 2083 | return -EINVAL; |
2106 | } | 2084 | } |
2107 | 2085 | ||
@@ -2916,6 +2894,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2916 | if (ret) | 2894 | if (ret) |
2917 | return ret; | 2895 | return ret; |
2918 | 2896 | ||
2897 | pi->pcfg.periph_id = adev->periphid; | ||
2919 | ret = pl330_add(pi); | 2898 | ret = pl330_add(pi); |
2920 | if (ret) | 2899 | if (ret) |
2921 | goto probe_err1; | 2900 | goto probe_err1; |
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 1e220f8dfd8c..370ff8265630 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
@@ -4434,7 +4434,7 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev) | |||
4434 | adev->dev = &ofdev->dev; | 4434 | adev->dev = &ofdev->dev; |
4435 | adev->common.dev = &ofdev->dev; | 4435 | adev->common.dev = &ofdev->dev; |
4436 | INIT_LIST_HEAD(&adev->common.channels); | 4436 | INIT_LIST_HEAD(&adev->common.channels); |
4437 | dev_set_drvdata(&ofdev->dev, adev); | 4437 | platform_set_drvdata(ofdev, adev); |
4438 | 4438 | ||
4439 | /* create a channel */ | 4439 | /* create a channel */ |
4440 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); | 4440 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); |
@@ -4547,14 +4547,13 @@ out: | |||
4547 | */ | 4547 | */ |
4548 | static int ppc440spe_adma_remove(struct platform_device *ofdev) | 4548 | static int ppc440spe_adma_remove(struct platform_device *ofdev) |
4549 | { | 4549 | { |
4550 | struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev); | 4550 | struct ppc440spe_adma_device *adev = platform_get_drvdata(ofdev); |
4551 | struct device_node *np = ofdev->dev.of_node; | 4551 | struct device_node *np = ofdev->dev.of_node; |
4552 | struct resource res; | 4552 | struct resource res; |
4553 | struct dma_chan *chan, *_chan; | 4553 | struct dma_chan *chan, *_chan; |
4554 | struct ppc_dma_chan_ref *ref, *_ref; | 4554 | struct ppc_dma_chan_ref *ref, *_ref; |
4555 | struct ppc440spe_adma_chan *ppc440spe_chan; | 4555 | struct ppc440spe_adma_chan *ppc440spe_chan; |
4556 | 4556 | ||
4557 | dev_set_drvdata(&ofdev->dev, NULL); | ||
4558 | if (adev->id < PPC440SPE_ADMA_ENGINES_NUM) | 4557 | if (adev->id < PPC440SPE_ADMA_ENGINES_NUM) |
4559 | ppc440spe_adma_devices[adev->id] = -1; | 4558 | ppc440spe_adma_devices[adev->id] = -1; |
4560 | 4559 | ||
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index c07ca4612e46..c962138dde96 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile | |||
@@ -1,3 +1,3 @@ | |||
1 | obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o | 1 | obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o |
2 | obj-$(CONFIG_SH_DMAE) += shdma.o | 2 | obj-$(CONFIG_SH_DMAE) += shdma.o |
3 | obj-$(CONFIG_SUDMAC) += sudmac.o | 3 | obj-$(CONFIG_SUDMAC) += sudmac.o |
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 4acb85a10250..28ca36121631 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c | |||
@@ -175,7 +175,18 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id) | |||
175 | { | 175 | { |
176 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | 176 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); |
177 | const struct shdma_ops *ops = sdev->ops; | 177 | const struct shdma_ops *ops = sdev->ops; |
178 | int ret; | 178 | int ret, match; |
179 | |||
180 | if (schan->dev->of_node) { | ||
181 | match = schan->hw_req; | ||
182 | ret = ops->set_slave(schan, match, true); | ||
183 | if (ret < 0) | ||
184 | return ret; | ||
185 | |||
186 | slave_id = schan->slave_id; | ||
187 | } else { | ||
188 | match = slave_id; | ||
189 | } | ||
179 | 190 | ||
180 | if (slave_id < 0 || slave_id >= slave_num) | 191 | if (slave_id < 0 || slave_id >= slave_num) |
181 | return -EINVAL; | 192 | return -EINVAL; |
@@ -183,7 +194,7 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id) | |||
183 | if (test_and_set_bit(slave_id, shdma_slave_used)) | 194 | if (test_and_set_bit(slave_id, shdma_slave_used)) |
184 | return -EBUSY; | 195 | return -EBUSY; |
185 | 196 | ||
186 | ret = ops->set_slave(schan, slave_id, false); | 197 | ret = ops->set_slave(schan, match, false); |
187 | if (ret < 0) { | 198 | if (ret < 0) { |
188 | clear_bit(slave_id, shdma_slave_used); | 199 | clear_bit(slave_id, shdma_slave_used); |
189 | return ret; | 200 | return ret; |
@@ -206,23 +217,26 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id) | |||
206 | * services would have to provide their own filters, which first would check | 217 | * services would have to provide their own filters, which first would check |
207 | * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do | 218 | * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do |
208 | * this, and only then, in case of a match, call this common filter. | 219 | * this, and only then, in case of a match, call this common filter. |
220 | * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate(). | ||
221 | * In that case the MID-RID value is used for slave channel filtering and is | ||
222 | * passed to this function in the "arg" parameter. | ||
209 | */ | 223 | */ |
210 | bool shdma_chan_filter(struct dma_chan *chan, void *arg) | 224 | bool shdma_chan_filter(struct dma_chan *chan, void *arg) |
211 | { | 225 | { |
212 | struct shdma_chan *schan = to_shdma_chan(chan); | 226 | struct shdma_chan *schan = to_shdma_chan(chan); |
213 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | 227 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); |
214 | const struct shdma_ops *ops = sdev->ops; | 228 | const struct shdma_ops *ops = sdev->ops; |
215 | int slave_id = (int)arg; | 229 | int match = (int)arg; |
216 | int ret; | 230 | int ret; |
217 | 231 | ||
218 | if (slave_id < 0) | 232 | if (match < 0) |
219 | /* No slave requested - arbitrary channel */ | 233 | /* No slave requested - arbitrary channel */ |
220 | return true; | 234 | return true; |
221 | 235 | ||
222 | if (slave_id >= slave_num) | 236 | if (!schan->dev->of_node && match >= slave_num) |
223 | return false; | 237 | return false; |
224 | 238 | ||
225 | ret = ops->set_slave(schan, slave_id, true); | 239 | ret = ops->set_slave(schan, match, true); |
226 | if (ret < 0) | 240 | if (ret < 0) |
227 | return false; | 241 | return false; |
228 | 242 | ||
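The shdma-base.c changes above let shdma_chan_filter() accept either a slave ID (platform-data case) or a MID/RID value stored in schan->hw_req (DT case, via shdma_of_xlate()). A hedged sketch of how a legacy, non-DT client would request a channel through the exported filter; MY_TX_SLAVE_ID and my_request_tx_chan() are hypothetical, and the real slave ID is board/SoC specific:

#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

/* Hypothetical slave ID taken from the platform's slave config table */
#define MY_TX_SLAVE_ID  0x21

static struct dma_chan *my_request_tx_chan(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /*
         * shdma_chan_filter() interprets the argument as a slave ID in the
         * platform-data case, or as a MID/RID value when the controller was
         * instantiated from the device tree.
         */
        return dma_request_channel(mask, shdma_chan_filter,
                                   (void *)(unsigned long)MY_TX_SLAVE_ID);
}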
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c new file mode 100644 index 000000000000..11bcb05cd79c --- /dev/null +++ b/drivers/dma/sh/shdma-of.c | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * SHDMA Device Tree glue | ||
3 | * | ||
4 | * Copyright (C) 2013 Renesas Electronics Inc. | ||
5 | * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
6 | * | ||
7 | * This is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of version 2 of the GNU General Public License as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/dmaengine.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/of.h> | ||
15 | #include <linux/of_dma.h> | ||
16 | #include <linux/of_platform.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/shdma-base.h> | ||
19 | |||
20 | #define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan) | ||
21 | |||
22 | static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec, | ||
23 | struct of_dma *ofdma) | ||
24 | { | ||
25 | u32 id = dma_spec->args[0]; | ||
26 | dma_cap_mask_t mask; | ||
27 | struct dma_chan *chan; | ||
28 | |||
29 | if (dma_spec->args_count != 1) | ||
30 | return NULL; | ||
31 | |||
32 | dma_cap_zero(mask); | ||
33 | /* Only slave DMA channels can be allocated via DT */ | ||
34 | dma_cap_set(DMA_SLAVE, mask); | ||
35 | |||
36 | chan = dma_request_channel(mask, shdma_chan_filter, (void *)id); | ||
37 | if (chan) | ||
38 | to_shdma_chan(chan)->hw_req = id; | ||
39 | |||
40 | return chan; | ||
41 | } | ||
42 | |||
43 | static int shdma_of_probe(struct platform_device *pdev) | ||
44 | { | ||
45 | const struct of_dev_auxdata *lookup = pdev->dev.platform_data; | ||
46 | int ret; | ||
47 | |||
48 | if (!lookup) | ||
49 | return -EINVAL; | ||
50 | |||
51 | ret = of_dma_controller_register(pdev->dev.of_node, | ||
52 | shdma_of_xlate, pdev); | ||
53 | if (ret < 0) | ||
54 | return ret; | ||
55 | |||
56 | ret = of_platform_populate(pdev->dev.of_node, NULL, lookup, &pdev->dev); | ||
57 | if (ret < 0) | ||
58 | of_dma_controller_free(pdev->dev.of_node); | ||
59 | |||
60 | return ret; | ||
61 | } | ||
62 | |||
63 | static const struct of_device_id shdma_of_match[] = { | ||
64 | { .compatible = "renesas,shdma-mux", }, | ||
65 | { } | ||
66 | }; | ||
67 | MODULE_DEVICE_TABLE(of, shdma_of_match); | ||
68 | |||
69 | static struct platform_driver shdma_of = { | ||
70 | .driver = { | ||
71 | .owner = THIS_MODULE, | ||
72 | .name = "shdma-of", | ||
73 | .of_match_table = shdma_of_match, | ||
74 | }, | ||
75 | .probe = shdma_of_probe, | ||
76 | }; | ||
77 | |||
78 | module_platform_driver(shdma_of); | ||
79 | |||
80 | MODULE_LICENSE("GPL v2"); | ||
81 | MODULE_DESCRIPTION("SH-DMA driver DT glue"); | ||
82 | MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); | ||
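shdma-of.c registers shdma_of_xlate() with the OF DMA core, so DT consumers can reference a channel through a single MID/RID cell. A sketch of the consumer side under that assumption; my_client_probe() and the "tx" name are illustrative, and the dmas/dma-names binding shown in the comment is an assumption about the client node, not taken from this patch:

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

/*
 * Assumed client node properties:
 *         dmas = <&dmac 0x21>;
 *         dma-names = "tx";
 * The single cell is the MID/RID value that shdma_of_xlate() passes on
 * to shdma_chan_filter().
 */
static int my_client_probe(struct platform_device *pdev)
{
        struct dma_chan *chan;

        chan = dma_request_slave_channel(&pdev->dev, "tx");
        if (!chan)
                return -EPROBE_DEFER;   /* or fall back to PIO */

        platform_set_drvdata(pdev, chan);
        return 0;
}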
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c index b70709b030d8..b67f45f5c271 100644 --- a/drivers/dma/sh/shdma.c +++ b/drivers/dma/sh/shdma.c | |||
@@ -301,20 +301,32 @@ static void sh_dmae_setup_xfer(struct shdma_chan *schan, | |||
301 | } | 301 | } |
302 | } | 302 | } |
303 | 303 | ||
304 | /* | ||
305 | * Find a slave channel configuration from the controller list by either a slave | ||
306 | * ID in the non-DT case, or by a MID/RID value in the DT case | ||
307 | */ | ||
304 | static const struct sh_dmae_slave_config *dmae_find_slave( | 308 | static const struct sh_dmae_slave_config *dmae_find_slave( |
305 | struct sh_dmae_chan *sh_chan, int slave_id) | 309 | struct sh_dmae_chan *sh_chan, int match) |
306 | { | 310 | { |
307 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | 311 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); |
308 | struct sh_dmae_pdata *pdata = shdev->pdata; | 312 | struct sh_dmae_pdata *pdata = shdev->pdata; |
309 | const struct sh_dmae_slave_config *cfg; | 313 | const struct sh_dmae_slave_config *cfg; |
310 | int i; | 314 | int i; |
311 | 315 | ||
312 | if (slave_id >= SH_DMA_SLAVE_NUMBER) | 316 | if (!sh_chan->shdma_chan.dev->of_node) { |
313 | return NULL; | 317 | if (match >= SH_DMA_SLAVE_NUMBER) |
318 | return NULL; | ||
314 | 319 | ||
315 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) | 320 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) |
316 | if (cfg->slave_id == slave_id) | 321 | if (cfg->slave_id == match) |
317 | return cfg; | 322 | return cfg; |
323 | } else { | ||
324 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) | ||
325 | if (cfg->mid_rid == match) { | ||
326 | sh_chan->shdma_chan.slave_id = cfg->slave_id; | ||
327 | return cfg; | ||
328 | } | ||
329 | } | ||
318 | 330 | ||
319 | return NULL; | 331 | return NULL; |
320 | } | 332 | } |
@@ -729,7 +741,7 @@ static int sh_dmae_probe(struct platform_device *pdev) | |||
729 | goto eshdma; | 741 | goto eshdma; |
730 | 742 | ||
731 | /* platform data */ | 743 | /* platform data */ |
732 | shdev->pdata = pdev->dev.platform_data; | 744 | shdev->pdata = pdata; |
733 | 745 | ||
734 | if (pdata->chcr_offset) | 746 | if (pdata->chcr_offset) |
735 | shdev->chcr_offset = pdata->chcr_offset; | 747 | shdev->chcr_offset = pdata->chcr_offset; |
@@ -920,11 +932,18 @@ static int sh_dmae_remove(struct platform_device *pdev) | |||
920 | return 0; | 932 | return 0; |
921 | } | 933 | } |
922 | 934 | ||
935 | static const struct of_device_id sh_dmae_of_match[] = { | ||
936 | { .compatible = "renesas,shdma", }, | ||
937 | { } | ||
938 | }; | ||
939 | MODULE_DEVICE_TABLE(of, sh_dmae_of_match); | ||
940 | |||
923 | static struct platform_driver sh_dmae_driver = { | 941 | static struct platform_driver sh_dmae_driver = { |
924 | .driver = { | 942 | .driver = { |
925 | .owner = THIS_MODULE, | 943 | .owner = THIS_MODULE, |
926 | .pm = &sh_dmae_pm, | 944 | .pm = &sh_dmae_pm, |
927 | .name = SH_DMAE_DRV_NAME, | 945 | .name = SH_DMAE_DRV_NAME, |
946 | .of_match_table = sh_dmae_of_match, | ||
928 | }, | 947 | }, |
929 | .remove = sh_dmae_remove, | 948 | .remove = sh_dmae_remove, |
930 | .shutdown = sh_dmae_shutdown, | 949 | .shutdown = sh_dmae_shutdown, |
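The shdma.c hunks let one driver serve both instantiation paths: dmae_find_slave() branches on dev->of_node to match either a slave ID or a MID/RID value, and the new of_device_id table plus .of_match_table enable DT probing alongside the platform-data path. A generic sketch of that dual-path pattern; my_dmac_*, the "vendor,my-dmac" compatible and the log messages are hypothetical:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int my_dmac_probe(struct platform_device *pdev)
{
        if (pdev->dev.of_node) {
                /* DT case: configuration comes from properties / xlate args */
                dev_info(&pdev->dev, "probed from device tree\n");
        } else {
                /* Legacy case: configuration comes from platform data */
                if (!pdev->dev.platform_data)
                        return -ENODEV;
                dev_info(&pdev->dev, "probed from platform data\n");
        }
        return 0;
}

static const struct of_device_id my_dmac_of_match[] = {
        { .compatible = "vendor,my-dmac", },    /* hypothetical compatible */
        { }
};
MODULE_DEVICE_TABLE(of, my_dmac_of_match);

static struct platform_driver my_dmac_driver = {
        .driver = {
                .name = "my-dmac",
                .of_match_table = my_dmac_of_match,
        },
        .probe = my_dmac_probe,
};
module_platform_driver(my_dmac_driver);

MODULE_LICENSE("GPL");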
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 1765a0a2736d..716b23e4f327 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c | |||
@@ -466,12 +466,29 @@ static enum dma_status | |||
466 | sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | 466 | sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, |
467 | struct dma_tx_state *txstate) | 467 | struct dma_tx_state *txstate) |
468 | { | 468 | { |
469 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); | ||
469 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | 470 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); |
470 | unsigned long flags; | 471 | unsigned long flags; |
471 | enum dma_status ret; | 472 | enum dma_status ret; |
473 | struct sirfsoc_dma_desc *sdesc; | ||
474 | int cid = schan->chan.chan_id; | ||
475 | unsigned long dma_pos; | ||
476 | unsigned long dma_request_bytes; | ||
477 | unsigned long residue; | ||
472 | 478 | ||
473 | spin_lock_irqsave(&schan->lock, flags); | 479 | spin_lock_irqsave(&schan->lock, flags); |
480 | |||
481 | sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, | ||
482 | node); | ||
483 | dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) * | ||
484 | (sdesc->width * SIRFSOC_DMA_WORD_LEN); | ||
485 | |||
474 | ret = dma_cookie_status(chan, cookie, txstate); | 486 | ret = dma_cookie_status(chan, cookie, txstate); |
487 | dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) | ||
488 | << 2; | ||
489 | residue = dma_request_bytes - (dma_pos - sdesc->addr); | ||
490 | dma_set_residue(txstate, residue); | ||
491 | |||
475 | spin_unlock_irqrestore(&schan->lock, flags); | 492 | spin_unlock_irqrestore(&schan->lock, flags); |
476 | 493 | ||
477 | return ret; | 494 | return ret; |
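The sirf-dma.c hunk teaches sirfsoc_dma_tx_status() to report a residue: it computes the total request size from the active descriptor, reads the channel's current address register, and publishes the difference with dma_set_residue(). A hedged sketch of the same pattern; struct my_chan, my_tx_status() and read_hw_position() are placeholders, with read_hw_position() standing in for whatever register read the real hardware needs:

#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include "dmaengine.h"          /* drivers/dma private cookie/residue helpers */

struct my_chan {
        struct dma_chan chan;
        spinlock_t lock;
        dma_addr_t desc_start;  /* start address of the active descriptor */
        size_t desc_bytes;      /* total bytes requested by it */
};

static dma_addr_t read_hw_position(struct my_chan *mchan)
{
        /*
         * Placeholder: a real driver reads the channel's current address
         * register here. Returning the start address makes this sketch
         * report the full request size as residue.
         */
        return mchan->desc_start;
}

static enum dma_status my_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                                    struct dma_tx_state *txstate)
{
        struct my_chan *mchan = container_of(chan, struct my_chan, chan);
        enum dma_status ret;
        unsigned long flags;
        size_t residue;

        spin_lock_irqsave(&mchan->lock, flags);

        ret = dma_cookie_status(chan, cookie, txstate);

        /* bytes still to go = total requested - bytes already transferred */
        residue = mchan->desc_bytes -
                  (read_hw_position(mchan) - mchan->desc_start);
        dma_set_residue(txstate, residue);

        spin_unlock_irqrestore(&mchan->lock, flags);

        return ret;
}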
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 33f59ecd256e..f137914d7b16 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
@@ -1191,6 +1191,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) | |||
1191 | list_splice_init(&tdc->free_dma_desc, &dma_desc_list); | 1191 | list_splice_init(&tdc->free_dma_desc, &dma_desc_list); |
1192 | INIT_LIST_HEAD(&tdc->cb_desc); | 1192 | INIT_LIST_HEAD(&tdc->cb_desc); |
1193 | tdc->config_init = false; | 1193 | tdc->config_init = false; |
1194 | tdc->isr_handler = NULL; | ||
1194 | spin_unlock_irqrestore(&tdc->lock, flags); | 1195 | spin_unlock_irqrestore(&tdc->lock, flags); |
1195 | 1196 | ||
1196 | while (!list_empty(&dma_desc_list)) { | 1197 | while (!list_empty(&dma_desc_list)) { |
@@ -1334,7 +1335,7 @@ static int tegra_dma_probe(struct platform_device *pdev) | |||
1334 | if (ret) { | 1335 | if (ret) { |
1335 | dev_err(&pdev->dev, | 1336 | dev_err(&pdev->dev, |
1336 | "request_irq failed with err %d channel %d\n", | 1337 | "request_irq failed with err %d channel %d\n", |
1337 | i, ret); | 1338 | ret, i); |
1338 | goto err_irq; | 1339 | goto err_irq; |
1339 | } | 1340 | } |
1340 | 1341 | ||
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 26107ba6edb3..0ef43c136aa7 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
@@ -811,8 +811,6 @@ static int td_remove(struct platform_device *pdev) | |||
811 | kfree(td); | 811 | kfree(td); |
812 | release_mem_region(iomem->start, resource_size(iomem)); | 812 | release_mem_region(iomem->start, resource_size(iomem)); |
813 | 813 | ||
814 | platform_set_drvdata(pdev, NULL); | ||
815 | |||
816 | dev_dbg(&pdev->dev, "Removed...\n"); | 814 | dev_dbg(&pdev->dev, "Removed...\n"); |
817 | return 0; | 815 | return 0; |
818 | } | 816 | } |