author     Linus Torvalds <torvalds@linux-foundation.org>  2013-09-10 16:37:36 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-10 16:37:36 -0400
commit     ec5b103ecfde929004b691f29183255aeeadecd5 (patch)
tree       3b16d0654c074b5b36d06e56110c7218a8685655 /drivers/dma
parent     d0048f0b91ee35ab940ec6cbdfdd238c55b12a14 (diff)
parent     5622ff1a4dd7dcb1c09953d8066a4e7c4c350b2d (diff)
Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
"This pull brings:
- Andy's DW driver updates
- Guennadi's sh driver updates
- Pl08x driver fixes from Tomasz & Alban
- Improvements to mmp_pdma by Daniel
- TI EDMA fixes by Joel
- New drivers:
- Hisilicon k3dma driver
- Renesas rcar dma driver
- New API for publishing slave driver capabilities
- Various fixes across the subsystem by Andy, Jingoo, Sachin etc..."
* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (94 commits)
dma: edma: Remove limits on number of slots
dma: edma: Leave linked to Null slot instead of DUMMY slot
dma: edma: Find missed events and issue them
ARM: edma: Add function to manually trigger an EDMA channel
dma: edma: Write out and handle MAX_NR_SG at a given time
dma: edma: Setup parameters to DMA MAX_NR_SG at a time
dmaengine: pl330: use dma_set_max_seg_size to set the sg limit
dmaengine: dma_slave_caps: remove sg entries
dma: replace devm_request_and_ioremap by devm_ioremap_resource
dma: ste_dma40: Fix potential null pointer dereference
dma: ste_dma40: Remove duplicate const
dma: imx-dma: Remove redundant NULL check
dma: dmagengine: fix function names in comments
dma: add driver for R-Car HPB-DMAC
dma: k3dma: use devm_ioremap_resource() instead of devm_request_and_ioremap()
dma: imx-sdma: Staticize sdma_driver_data structures
pch_dma: Add MODULE_DEVICE_TABLE
dmaengine: PL08x: Add cyclic transfer support
dmaengine: PL08x: Fix reading the byte count in cctl
dmaengine: PL08x: Add support for different maximum transfer size
...
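The "new API for publishing slave driver capabilities" mentioned in the pull message lets a client query what a slave channel supports before configuring it. A minimal consumer-side sketch follows (illustrative only; it assumes the dma_get_slave_caps()/struct dma_slave_caps interface from this cycle's dmaengine header, and the exact field names are assumptions, not part of this merge text):

	#include <linux/dmaengine.h>

	/* Illustrative sketch: check a slave channel's advertised capabilities. */
	static int check_chan_caps(struct dma_chan *chan)
	{
		struct dma_slave_caps caps;
		int ret;

		ret = dma_get_slave_caps(chan, &caps);
		if (ret)
			return ret;	/* driver does not publish capabilities */

		/* Only proceed if dev-to-mem transfers and pause are supported */
		if (!(caps.directions & BIT(DMA_DEV_TO_MEM)) || !caps.cmd_pause)
			return -EINVAL;

		return 0;
	}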
Diffstat (limited to 'drivers/dma')
38 files changed, 2941 insertions, 636 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index daa4da281e5e..526ec77c7ba0 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -308,6 +308,15 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config K3_DMA
+	tristate "Hisilicon K3 DMA support"
+	depends on ARCH_HI3xxx
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the DMA engine for Hisilicon K3 platform
+	  devices.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 6d62ec30c4bc..db89035b3626 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -40,3 +40,4 @@ obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
+obj-$(CONFIG_K3_DMA) += k3dma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 5a18f82f732a..e69b03c0fa50 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -43,7 +43,6 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
 	struct list_head resource_list;
 	struct resource_list_entry *rentry;
 	resource_size_t mem = 0, irq = 0;
-	u32 vendor_id;
 	int ret;
 
 	if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
@@ -73,9 +72,8 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
 	if (si->mmio_base_low != mem || si->gsi_interrupt != irq)
 		return 0;
 
-	vendor_id = le32_to_cpu(grp->vendor_id);
 	dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
-		(char *)&vendor_id, grp->device_id, grp->revision);
+		(char *)&grp->vendor_id, grp->device_id, grp->revision);
 
 	/* Check if the request line range is available */
 	if (si->base_request_line == 0 && si->num_handshake_signals == 0)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index bff41d4848e5..fce46c5bf1c7 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -24,6 +24,7 @@
  *
  * Documentation: ARM DDI 0196G == PL080
  * Documentation: ARM DDI 0218E == PL081
+ * Documentation: S3C6410 User's Manual == PL080S
  *
  * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
  * channel.
@@ -36,6 +37,14 @@
  *
  * The PL080 has a dual bus master, PL081 has a single master.
  *
+ * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
+ * It differs in following aspects:
+ * - CH_CONFIG register at different offset,
+ * - separate CH_CONTROL2 register for transfer size,
+ * - bigger maximum transfer size,
+ * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
+ * - no support for peripheral flow control.
+ *
  * Memory to peripheral transfer may be visualized as
  *	Get data from memory to DMAC
  *	Until no data left
@@ -64,10 +73,7 @@
  * - Peripheral flow control: the transfer size is ignored (and should be
  *   zero).  The data is transferred from the current LLI entry, until
  *   after the final transfer signalled by LBREQ or LSREQ.  The DMAC
- *   will then move to the next LLI entry.
- *
- * Global TODO:
- * - Break out common code from arch/arm/mach-s3c64xx and share
+ *   will then move to the next LLI entry. Unsupported by PL080S.
  */
 #include <linux/amba/bus.h>
 #include <linux/amba/pl08x.h>
@@ -100,24 +106,16 @@ struct pl08x_driver_data;
 * @nomadik: whether the channels have Nomadik security extension bits
 *	that need to be checked for permission before use and some registers are
 *	missing
+ * @pl080s: whether this version is a PL080S, which has separate register and
+ *	LLI word for transfer size.
 */
 struct vendor_data {
+	u8 config_offset;
 	u8 channels;
 	bool dualmaster;
 	bool nomadik;
-};
-
-/*
- * PL08X private data structures
- * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit,
- * start & end do not - their bus bit info is in cctl. Also note that these
- * are fixed 32-bit quantities.
- */
-struct pl08x_lli {
-	u32 src;
-	u32 dst;
-	u32 lli;
-	u32 cctl;
+	bool pl080s;
+	u32 max_transfer_size;
 };
 
 /**
@@ -147,6 +145,7 @@ struct pl08x_bus_data {
 struct pl08x_phy_chan {
 	unsigned int id;
 	void __iomem *base;
+	void __iomem *reg_config;
 	spinlock_t lock;
 	struct pl08x_dma_chan *serving;
 	bool locked;
@@ -176,12 +175,13 @@ struct pl08x_sg {
 * @ccfg: config reg values for current txd
 * @done: this marks completed descriptors, which should not have their
 *	mux released.
+ * @cyclic: indicate cyclic transfers
 */
 struct pl08x_txd {
 	struct virt_dma_desc vd;
 	struct list_head dsg_list;
 	dma_addr_t llis_bus;
-	struct pl08x_lli *llis_va;
+	u32 *llis_va;
 	/* Default cctl value for LLIs */
 	u32 cctl;
 	/*
@@ -190,6 +190,7 @@ struct pl08x_txd {
 	 */
 	u32 ccfg;
 	bool done;
+	bool cyclic;
 };
 
 /**
@@ -265,17 +266,29 @@ struct pl08x_driver_data {
 	struct dma_pool *pool;
 	u8 lli_buses;
 	u8 mem_buses;
+	u8 lli_words;
 };
 
 /*
 * PL08X specific defines
 */
 
-/* Size (bytes) of each LLI buffer allocated for one transfer */
-# define PL08X_LLI_TSFR_SIZE 0x2000
+/* The order of words in an LLI. */
+#define PL080_LLI_SRC 0
+#define PL080_LLI_DST 1
+#define PL080_LLI_LLI 2
+#define PL080_LLI_CCTL 3
+#define PL080S_LLI_CCTL2 4
+
+/* Total words in an LLI. */
+#define PL080_LLI_WORDS 4
+#define PL080S_LLI_WORDS 8
 
-/* Maximum times we call dma_pool_alloc on this pool without freeing */
-#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
+/*
+ * Number of LLIs in each LLI buffer allocated for one transfer
+ * (maximum times we call dma_pool_alloc on this pool without freeing)
+ */
+#define MAX_NUM_TSFR_LLIS 512
 #define PL08X_ALIGN 8
 
 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@ -336,10 +349,39 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
 {
 	unsigned int val;
 
-	val = readl(ch->base + PL080_CH_CONFIG);
+	val = readl(ch->reg_config);
 	return val & PL080_CONFIG_ACTIVE;
 }
 
+static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
+		struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
+{
+	if (pl08x->vd->pl080s)
+		dev_vdbg(&pl08x->adev->dev,
+			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+			"clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
+			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
+			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
+			lli[PL080S_LLI_CCTL2], ccfg);
+	else
+		dev_vdbg(&pl08x->adev->dev,
+			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+			"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
+			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
+			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);
+
+	writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
+	writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
+	writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
+	writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);
+
+	if (pl08x->vd->pl080s)
+		writel_relaxed(lli[PL080S_LLI_CCTL2],
+				phychan->base + PL080S_CH_CONTROL2);
+
+	writel(ccfg, phychan->reg_config);
+}
+
 /*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
@@ -352,7 +394,6 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
 	struct pl08x_phy_chan *phychan = plchan->phychan;
 	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
 	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
-	struct pl08x_lli *lli;
 	u32 val;
 
 	list_del(&txd->vd.node);
@@ -363,19 +404,7 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
 	while (pl08x_phy_channel_busy(phychan))
 		cpu_relax();
 
-	lli = &txd->llis_va[0];
-
-	dev_vdbg(&pl08x->adev->dev,
-		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
-		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
-		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
-		txd->ccfg);
-
-	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
-	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
-	writel(lli->lli, phychan->base + PL080_CH_LLI);
-	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
-	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
+	pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);
 
 	/* Enable the DMA channel */
 	/* Do not access config register until channel shows as disabled */
@@ -383,11 +412,11 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
 		cpu_relax();
 
 	/* Do not access config register until channel shows as inactive */
-	val = readl(phychan->base + PL080_CH_CONFIG);
+	val = readl(phychan->reg_config);
 	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
-		val = readl(phychan->base + PL080_CH_CONFIG);
+		val = readl(phychan->reg_config);
 
-	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
+	writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
 }
 
 /*
@@ -406,9 +435,9 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 	int timeout;
 
 	/* Set the HALT bit and wait for the FIFO to drain */
-	val = readl(ch->base + PL080_CH_CONFIG);
+	val = readl(ch->reg_config);
 	val |= PL080_CONFIG_HALT;
-	writel(val, ch->base + PL080_CH_CONFIG);
+	writel(val, ch->reg_config);
 
 	/* Wait for channel inactive */
 	for (timeout = 1000; timeout; timeout--) {
@@ -425,9 +454,9 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
 	u32 val;
 
 	/* Clear the HALT bit */
-	val = readl(ch->base + PL080_CH_CONFIG);
+	val = readl(ch->reg_config);
 	val &= ~PL080_CONFIG_HALT;
-	writel(val, ch->base + PL080_CH_CONFIG);
+	writel(val, ch->reg_config);
 }
 
 /*
@@ -439,12 +468,12 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
 static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
 	struct pl08x_phy_chan *ch)
 {
-	u32 val = readl(ch->base + PL080_CH_CONFIG);
+	u32 val = readl(ch->reg_config);
 
 	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
 		 PL080_CONFIG_TC_IRQ_MASK);
 
-	writel(val, ch->base + PL080_CH_CONFIG);
+	writel(val, ch->reg_config);
 
 	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
 	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
@@ -455,6 +484,28 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
 	/* The source width defines the number of bytes */
 	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
 
+	cctl &= PL080_CONTROL_SWIDTH_MASK;
+
+	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
+	case PL080_WIDTH_8BIT:
+		break;
+	case PL080_WIDTH_16BIT:
+		bytes *= 2;
+		break;
+	case PL080_WIDTH_32BIT:
+		bytes *= 4;
+		break;
+	}
+	return bytes;
+}
+
+static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
+{
+	/* The source width defines the number of bytes */
+	u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;
+
+	cctl &= PL080_CONTROL_SWIDTH_MASK;
+
 	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
 	case PL080_WIDTH_8BIT:
 		break;
@@ -471,47 +522,66 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
 /* The channel should be paused when calling this */
 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 {
+	struct pl08x_driver_data *pl08x = plchan->host;
+	const u32 *llis_va, *llis_va_limit;
 	struct pl08x_phy_chan *ch;
+	dma_addr_t llis_bus;
 	struct pl08x_txd *txd;
-	size_t bytes = 0;
+	u32 llis_max_words;
+	size_t bytes;
+	u32 clli;
 
 	ch = plchan->phychan;
 	txd = plchan->at;
 
+	if (!ch || !txd)
+		return 0;
+
 	/*
 	 * Follow the LLIs to get the number of remaining
 	 * bytes in the currently active transaction.
 	 */
-	if (ch && txd) {
-		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
+	clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
 
 	/* First get the remaining bytes in the active transfer */
+	if (pl08x->vd->pl080s)
+		bytes = get_bytes_in_cctl_pl080s(
+				readl(ch->base + PL080_CH_CONTROL),
+				readl(ch->base + PL080S_CH_CONTROL2));
+	else
 		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
 
-	if (clli) {
-		struct pl08x_lli *llis_va = txd->llis_va;
-		dma_addr_t llis_bus = txd->llis_bus;
-		int index;
+	if (!clli)
+		return bytes;
 
-		BUG_ON(clli < llis_bus || clli >= llis_bus +
-			sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
+	llis_va = txd->llis_va;
+	llis_bus = txd->llis_bus;
 
-		/*
-		 * Locate the next LLI - as this is an array,
-		 * it's simple maths to find.
-		 */
-		index = (clli - llis_bus) / sizeof(struct pl08x_lli);
+	llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
+	BUG_ON(clli < llis_bus || clli >= llis_bus +
+			sizeof(u32) * llis_max_words);
 
-		for (; index < MAX_NUM_TSFR_LLIS; index++) {
-			bytes += get_bytes_in_cctl(llis_va[index].cctl);
+	/*
+	 * Locate the next LLI - as this is an array,
+	 * it's simple maths to find.
+	 */
+	llis_va += (clli - llis_bus) / sizeof(u32);
 
-			/*
-			 * A LLI pointer of 0 terminates the LLI list
-			 */
-			if (!llis_va[index].lli)
-				break;
-		}
-	}
+	llis_va_limit = llis_va + llis_max_words;
+
+	for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
+		if (pl08x->vd->pl080s)
+			bytes += get_bytes_in_cctl_pl080s(
+				llis_va[PL080_LLI_CCTL],
+				llis_va[PL080S_LLI_CCTL2]);
+		else
+			bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);
+
+		/*
+		 * A LLI pointer going backward terminates the LLI list
+		 */
+		if (llis_va[PL080_LLI_LLI] <= clli)
+			break;
 	}
 
 	return bytes;
@@ -722,6 +792,7 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
 		break;
 	}
 
+	tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
 	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
 	return retbits;
 }
@@ -766,20 +837,26 @@ static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
 /*
 * Fills in one LLI for a certain transfer descriptor and advance the counter
 */
-static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
-	int num_llis, int len, u32 cctl)
+static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
+	struct pl08x_lli_build_data *bd,
+	int num_llis, int len, u32 cctl, u32 cctl2)
 {
-	struct pl08x_lli *llis_va = bd->txd->llis_va;
+	u32 offset = num_llis * pl08x->lli_words;
+	u32 *llis_va = bd->txd->llis_va + offset;
 	dma_addr_t llis_bus = bd->txd->llis_bus;
 
 	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
 
-	llis_va[num_llis].cctl = cctl;
-	llis_va[num_llis].src = bd->srcbus.addr;
-	llis_va[num_llis].dst = bd->dstbus.addr;
-	llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
-		sizeof(struct pl08x_lli);
-	llis_va[num_llis].lli |= bd->lli_bus;
+	/* Advance the offset to next LLI. */
+	offset += pl08x->lli_words;
+
+	llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
+	llis_va[PL080_LLI_DST] = bd->dstbus.addr;
+	llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
+	llis_va[PL080_LLI_LLI] |= bd->lli_bus;
+	llis_va[PL080_LLI_CCTL] = cctl;
+	if (pl08x->vd->pl080s)
+		llis_va[PL080S_LLI_CCTL2] = cctl2;
 
 	if (cctl & PL080_CONTROL_SRC_INCR)
 		bd->srcbus.addr += len;
@@ -791,14 +868,53 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
 	bd->remainder -= len;
 }
 
-static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
-	u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
+static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
+	struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
+	int num_llis, size_t *total_bytes)
 {
 	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
-	pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
+	pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
 	(*total_bytes) += len;
 }
 
+#ifdef VERBOSE_DEBUG
+static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
+			   const u32 *llis_va, int num_llis)
+{
+	int i;
+
+	if (pl08x->vd->pl080s) {
+		dev_vdbg(&pl08x->adev->dev,
+			"%-3s %-9s %-10s %-10s %-10s %-10s %s\n",
+			"lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
+		for (i = 0; i < num_llis; i++) {
+			dev_vdbg(&pl08x->adev->dev,
+				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				i, llis_va, llis_va[PL080_LLI_SRC],
+				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
+				llis_va[PL080_LLI_CCTL],
+				llis_va[PL080S_LLI_CCTL2]);
+			llis_va += pl08x->lli_words;
+		}
+	} else {
+		dev_vdbg(&pl08x->adev->dev,
+			"%-3s %-9s %-10s %-10s %-10s %s\n",
+			"lli", "", "csrc", "cdst", "clli", "cctl");
+		for (i = 0; i < num_llis; i++) {
+			dev_vdbg(&pl08x->adev->dev,
+				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				i, llis_va, llis_va[PL080_LLI_SRC],
+				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
+				llis_va[PL080_LLI_CCTL]);
+			llis_va += pl08x->lli_words;
+		}
+	}
+}
+#else
+static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
+				  const u32 *llis_va, int num_llis) {}
+#endif
+
 /*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
@@ -812,7 +928,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 	int num_llis = 0;
 	u32 cctl, early_bytes = 0;
 	size_t max_bytes_per_lli, total_bytes;
-	struct pl08x_lli *llis_va;
+	u32 *llis_va, *last_lli;
 	struct pl08x_sg *dsg;
 
 	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
@@ -902,7 +1018,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
 			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
 				bd.dstbus.buswidth, 0);
-			pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
+			pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
+					0, cctl, 0);
 			break;
 		}
 
@@ -924,8 +1041,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		dev_vdbg(&pl08x->adev->dev,
 			"%s byte width LLIs (remain 0x%08x)\n",
 			__func__, bd.remainder);
-		prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
-			&total_bytes);
+		prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
+			num_llis++, &total_bytes);
 	}
 
 	if (bd.remainder) {
@@ -946,7 +1063,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		 * MIN(buswidths)
 		 */
 		max_bytes_per_lli = bd.srcbus.buswidth *
-			PL080_CONTROL_TRANSFER_SIZE_MASK;
+			pl08x->vd->max_transfer_size;
 		dev_vdbg(&pl08x->adev->dev,
 			"%s max bytes per lli = %zu\n",
 			__func__, max_bytes_per_lli);
@@ -981,8 +1098,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
 				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
 					bd.dstbus.buswidth, tsize);
-				pl08x_fill_lli_for_desc(&bd, num_llis++,
-						lli_len, cctl);
+				pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
+						lli_len, cctl, tsize);
 				total_bytes += lli_len;
 			}
 
@@ -993,8 +1110,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 			dev_vdbg(&pl08x->adev->dev,
 				"%s align with boundary, send odd bytes (remain %zu)\n",
 				__func__, bd.remainder);
-			prep_byte_width_lli(&bd, &cctl, bd.remainder,
-				num_llis++, &total_bytes);
+			prep_byte_width_lli(pl08x, &bd, &cctl,
+				bd.remainder, num_llis++, &total_bytes);
 		}
 	}
 
@@ -1008,33 +1125,25 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		if (num_llis >= MAX_NUM_TSFR_LLIS) {
 			dev_err(&pl08x->adev->dev,
 				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
-				__func__, (u32) MAX_NUM_TSFR_LLIS);
+				__func__, MAX_NUM_TSFR_LLIS);
 			return 0;
 		}
 	}
 
 	llis_va = txd->llis_va;
-	/* The final LLI terminates the LLI. */
-	llis_va[num_llis - 1].lli = 0;
-	/* The final LLI element shall also fire an interrupt. */
-	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
-
-#ifdef VERBOSE_DEBUG
-	{
-		int i;
+	last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;
 
-		dev_vdbg(&pl08x->adev->dev,
-			"%-3s %-9s %-10s %-10s %-10s %s\n",
-			"lli", "", "csrc", "cdst", "clli", "cctl");
-		for (i = 0; i < num_llis; i++) {
-			dev_vdbg(&pl08x->adev->dev,
-				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
-				i, &llis_va[i], llis_va[i].src,
-				llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
-				);
-		}
+	if (txd->cyclic) {
+		/* Link back to the first LLI. */
+		last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
+	} else {
+		/* The final LLI terminates the LLI. */
+		last_lli[PL080_LLI_LLI] = 0;
+		/* The final LLI element shall also fire an interrupt. */
+		last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
 	}
-#endif
+
+	pl08x_dump_lli(pl08x, llis_va, num_llis);
 
 	return num_llis;
 }
@@ -1310,6 +1419,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 			struct dma_slave_config *config)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	struct pl08x_driver_data *pl08x = plchan->host;
 
 	if (!plchan->slave)
 		return -EINVAL;
@@ -1319,6 +1429,13 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
 		return -EINVAL;
 
+	if (config->device_fc && pl08x->vd->pl080s) {
+		dev_err(&pl08x->adev->dev,
+			"%s: PL080S does not support peripheral flow control\n",
+			__func__);
+		return -EINVAL;
+	}
+
 	plchan->cfg = *config;
 
 	return 0;
@@ -1409,25 +1526,19 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }
 
-static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
-		struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context)
+static struct pl08x_txd *pl08x_init_txd(
+		struct dma_chan *chan,
+		enum dma_transfer_direction direction,
+		dma_addr_t *slave_addr)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_txd *txd;
-	struct pl08x_sg *dsg;
-	struct scatterlist *sg;
 	enum dma_slave_buswidth addr_width;
-	dma_addr_t slave_addr;
 	int ret, tmp;
 	u8 src_buses, dst_buses;
 	u32 maxburst, cctl;
 
-	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
-			__func__, sg_dma_len(sgl), plchan->name);
-
 	txd = pl08x_get_txd(plchan);
 	if (!txd) {
 		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
@@ -1441,14 +1552,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	 */
 	if (direction == DMA_MEM_TO_DEV) {
 		cctl = PL080_CONTROL_SRC_INCR;
-		slave_addr = plchan->cfg.dst_addr;
+		*slave_addr = plchan->cfg.dst_addr;
 		addr_width = plchan->cfg.dst_addr_width;
 		maxburst = plchan->cfg.dst_maxburst;
 		src_buses = pl08x->mem_buses;
 		dst_buses = plchan->cd->periph_buses;
 	} else if (direction == DMA_DEV_TO_MEM) {
 		cctl = PL080_CONTROL_DST_INCR;
-		slave_addr = plchan->cfg.src_addr;
+		*slave_addr = plchan->cfg.src_addr;
 		addr_width = plchan->cfg.src_addr_width;
 		maxburst = plchan->cfg.src_maxburst;
 		src_buses = plchan->cd->periph_buses;
@@ -1497,24 +1608,107 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	else
 		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
 
+	return txd;
+}
+
+static int pl08x_tx_add_sg(struct pl08x_txd *txd,
+			   enum dma_transfer_direction direction,
+			   dma_addr_t slave_addr,
+			   dma_addr_t buf_addr,
+			   unsigned int len)
+{
+	struct pl08x_sg *dsg;
+
+	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+	if (!dsg)
+		return -ENOMEM;
+
+	list_add_tail(&dsg->node, &txd->dsg_list);
+
+	dsg->len = len;
+	if (direction == DMA_MEM_TO_DEV) {
+		dsg->src_addr = buf_addr;
+		dsg->dst_addr = slave_addr;
+	} else {
+		dsg->src_addr = slave_addr;
+		dsg->dst_addr = buf_addr;
+	}
+
+	return 0;
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	struct pl08x_driver_data *pl08x = plchan->host;
+	struct pl08x_txd *txd;
+	struct scatterlist *sg;
+	int ret, tmp;
+	dma_addr_t slave_addr;
+
+	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
+			__func__, sg_dma_len(sgl), plchan->name);
+
+	txd = pl08x_init_txd(chan, direction, &slave_addr);
+	if (!txd)
+		return NULL;
+
 	for_each_sg(sgl, sg, sg_len, tmp) {
-		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
-		if (!dsg) {
+		ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+				      sg_dma_address(sg),
+				      sg_dma_len(sg));
+		if (ret) {
 			pl08x_release_mux(plchan);
 			pl08x_free_txd(pl08x, txd);
 			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
 					__func__);
 			return NULL;
 		}
-		list_add_tail(&dsg->node, &txd->dsg_list);
+	}
 
-		dsg->len = sg_dma_len(sg);
-		if (direction == DMA_MEM_TO_DEV) {
-			dsg->src_addr = sg_dma_address(sg);
-			dsg->dst_addr = slave_addr;
-		} else {
-			dsg->src_addr = slave_addr;
-			dsg->dst_addr = sg_dma_address(sg);
+	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+	if (!ret) {
+		pl08x_release_mux(plchan);
+		pl08x_free_txd(pl08x, txd);
+		return NULL;
+	}
+
+	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+		size_t period_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	struct pl08x_driver_data *pl08x = plchan->host;
+	struct pl08x_txd *txd;
+	int ret, tmp;
+	dma_addr_t slave_addr;
+
+	dev_dbg(&pl08x->adev->dev,
+		"%s prepare cyclic transaction of %d/%d bytes %s %s\n",
+		__func__, period_len, buf_len,
+		direction == DMA_MEM_TO_DEV ? "to" : "from",
+		plchan->name);
+
+	txd = pl08x_init_txd(chan, direction, &slave_addr);
+	if (!txd)
+		return NULL;
+
+	txd->cyclic = true;
+	txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
+	for (tmp = 0; tmp < buf_len; tmp += period_len) {
+		ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+				      buf_addr + tmp, period_len);
+		if (ret) {
+			pl08x_release_mux(plchan);
+			pl08x_free_txd(pl08x, txd);
+			return NULL;
 		}
 	}
 
@@ -1657,7 +1851,9 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 
 		spin_lock(&plchan->vc.lock);
 		tx = plchan->at;
-		if (tx) {
+		if (tx && tx->cyclic) {
+			vchan_cyclic_callback(&tx->vd);
+		} else if (tx) {
 			plchan->at = NULL;
 			/*
 			 * This descriptor is done, release its mux
@@ -1851,6 +2047,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 {
 	struct pl08x_driver_data *pl08x;
 	const struct vendor_data *vd = id->data;
+	u32 tsfr_size;
 	int ret = 0;
 	int i;
 
@@ -1878,6 +2075,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 
 	/* Initialize slave engine */
 	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
+	dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
 	pl08x->slave.dev = &adev->dev;
 	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
 	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
@@ -1885,6 +2083,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
 	pl08x->slave.device_issue_pending = pl08x_issue_pending;
 	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
+	pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
 	pl08x->slave.device_control = pl08x_control;
 
 	/* Get the platform data */
@@ -1907,9 +2106,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 		pl08x->mem_buses = pl08x->pd->mem_buses;
 	}
 
+	if (vd->pl080s)
+		pl08x->lli_words = PL080S_LLI_WORDS;
+	else
+		pl08x->lli_words = PL080_LLI_WORDS;
+	tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);
+
 	/* A DMA memory pool for LLIs, align on 1-byte boundary */
 	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
-			PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
+			tsfr_size, PL08X_ALIGN, 0);
 	if (!pl08x->pool) {
 		ret = -ENOMEM;
 		goto out_no_lli_pool;
@@ -1952,6 +2157,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 
 		ch->id = i;
 		ch->base = pl08x->base + PL080_Cx_BASE(i);
+		ch->reg_config = ch->base + vd->config_offset;
 		spin_lock_init(&ch->lock);
 
 		/*
@@ -1962,7 +2168,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 		if (vd->nomadik) {
 			u32 val;
 
-			val = readl(ch->base + PL080_CH_CONFIG);
+			val = readl(ch->reg_config);
 			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
 				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
 				ch->locked = true;
@@ -2013,8 +2219,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 
 	amba_set_drvdata(adev, pl08x);
 	init_pl08x_debugfs(pl08x);
-	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
-		 amba_part(adev), amba_rev(adev),
+	dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
+		 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
 		 (unsigned long long)adev->res.start, adev->irq[0]);
 
 	return 0;
@@ -2043,22 +2249,41 @@ out_no_pl08x:
 
 /* PL080 has 8 channels and the PL080 have just 2 */
 static struct vendor_data vendor_pl080 = {
+	.config_offset = PL080_CH_CONFIG,
 	.channels = 8,
 	.dualmaster = true,
+	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
 };
 
 static struct vendor_data vendor_nomadik = {
+	.config_offset = PL080_CH_CONFIG,
 	.channels = 8,
 	.dualmaster = true,
 	.nomadik = true,
+	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
+};
+
+static struct vendor_data vendor_pl080s = {
+	.config_offset = PL080S_CH_CONFIG,
+	.channels = 8,
+	.pl080s = true,
+	.max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
 };
 
 static struct vendor_data vendor_pl081 = {
+	.config_offset = PL080_CH_CONFIG,
 	.channels = 2,
 	.dualmaster = false,
+	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
 };
 
 static struct amba_id pl08x_ids[] = {
+	/* Samsung PL080S variant */
+	{
+		.id = 0x0a141080,
+		.mask = 0xffffffff,
+		.data = &vendor_pl080s,
+	},
 	/* PL080 */
 	{
 		.id = 0x00041080,
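With device_prep_dma_cyclic wired up above, a client (an audio or UART driver, say) can now run a circular buffer on a PL08x channel through the generic dmaengine helpers. A rough consumer-side sketch, not part of the commit, using the standard dmaengine_prep_dma_cyclic() wrapper:

	/* Hypothetical consumer: start a cyclic RX transfer split into periods. */
	static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
				   size_t buf_len, size_t period_len,
				   dma_async_tx_callback period_cb, void *cb_data)
	{
		struct dma_async_tx_descriptor *desc;

		desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
						 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
		if (!desc)
			return -ENOMEM;

		/* vchan_cyclic_callback() in the IRQ path invokes this per period */
		desc->callback = period_cb;
		desc->callback_param = cb_data;

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}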
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index eee16b01fa89..9162ac80c18f 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -509,7 +509,33 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 }
 
 /**
- * dma_request_channel - try to allocate an exclusive channel
+ * dma_request_slave_channel - try to get specific channel exclusively
+ * @chan: target channel
+ */
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
+{
+	int err = -EBUSY;
+
+	/* lock against __dma_request_channel */
+	mutex_lock(&dma_list_mutex);
+
+	if (chan->client_count == 0) {
+		err = dma_chan_get(chan);
+		if (err)
+			pr_debug("%s: failed to get %s: (%d)\n",
+				__func__, dma_chan_name(chan), err);
+	} else
+		chan = NULL;
+
+	mutex_unlock(&dma_list_mutex);
+
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_channel);
+
+/**
+ * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
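The dma_get_slave_channel() helper added above is what DMA controller drivers in this same series (k3dma, mmp_pdma) use to hand out one specific physical channel from their of_dma translate callback. A schematic example; the driver-specific names are made up for illustration:

	static struct dma_chan *foo_of_dma_xlate(struct of_phandle_args *dma_spec,
						 struct of_dma *ofdma)
	{
		struct foo_dma_dev *d = ofdma->of_dma_data;
		unsigned int request = dma_spec->args[0];

		if (request >= d->dma_requests)
			return NULL;

		/* Hand out the channel bound to this request line, if still free */
		return dma_get_slave_channel(&d->chans[request].vc.chan);
	}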
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index eea479c12173..89eb89f22284 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -37,16 +37,22 @@
 * which does not support descriptor writeback.
 */
 
+static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
+{
+	return dwc->request_line == (typeof(dwc->request_line))~0;
+}
+
 static inline void dwc_set_masters(struct dw_dma_chan *dwc)
 {
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
 	struct dw_dma_slave *dws = dwc->chan.private;
 	unsigned char mmax = dw->nr_masters - 1;
 
-	if (dwc->request_line == ~0) {
-		dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
-		dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
-	}
+	if (!is_request_line_unset(dwc))
+		return;
+
+	dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
+	dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
 }
 
 #define DWC_DEFAULT_CTLLO(_chan) ({ \
@@ -644,10 +650,13 @@ static void dw_dma_tasklet(unsigned long data)
 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 {
 	struct dw_dma *dw = dev_id;
-	u32 status;
+	u32 status = dma_readl(dw, STATUS_INT);
+
+	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
 
-	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
-			dma_readl(dw, STATUS_INT));
+	/* Check if we have any interrupt from the DMAC */
+	if (!status)
+		return IRQ_NONE;
 
 	/*
 	 * Just disable the interrupts. We'll turn them back on in the
@@ -984,7 +993,7 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 	dwc->direction = sconfig->direction;
 
 	/* Take the request line from slave_id member */
-	if (dwc->request_line == ~0)
+	if (is_request_line_unset(dwc))
 		dwc->request_line = sconfig->slave_id;
 
 	convert_burst(&dwc->dma_sconfig.src_maxburst);
@@ -1089,16 +1098,16 @@ dwc_tx_status(struct dma_chan *chan,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret != DMA_SUCCESS) {
-		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+	if (ret == DMA_SUCCESS)
+		return ret;
 
-		ret = dma_cookie_status(chan, cookie, txstate);
-	}
+	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
 
+	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret != DMA_SUCCESS)
 		dma_set_residue(txstate, dwc_get_residue(dwc));
 
-	if (dwc->paused)
+	if (dwc->paused && ret == DMA_IN_PROGRESS)
 		return DMA_PAUSED;
 
 	return ret;
@@ -1560,8 +1569,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	/* Disable BLOCK interrupts as well */
 	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 
-	err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, 0,
-			       "dw_dmac", dw);
+	err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
+			       IRQF_SHARED, "dw_dmac", dw);
 	if (err)
 		return err;
 
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 6c9449cffae8..e35d97590311 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -253,6 +253,7 @@ static const struct acpi_device_id dw_dma_acpi_id_table[] = {
 	{ "INTL9C60", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
 #endif
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 5f3e532436ee..ff50ff4c6a57 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -56,6 +56,7 @@ struct edma_desc {
 	struct list_head node;
 	int absync;
 	int pset_nr;
+	int processed;
 	struct edmacc_param pset[0];
 };
 
@@ -69,6 +70,7 @@ struct edma_chan {
 	int ch_num;
 	bool alloced;
 	int slot[EDMA_MAX_SLOTS];
+	int missed;
 	struct dma_slave_config cfg;
 };
 
@@ -104,22 +106,34 @@ static void edma_desc_free(struct virt_dma_desc *vdesc)
 /* Dispatch a queued descriptor to the controller (caller holds lock) */
 static void edma_execute(struct edma_chan *echan)
 {
-	struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan);
+	struct virt_dma_desc *vdesc;
 	struct edma_desc *edesc;
-	int i;
-
-	if (!vdesc) {
-		echan->edesc = NULL;
-		return;
+	struct device *dev = echan->vchan.chan.device->dev;
+	int i, j, left, nslots;
+
+	/* If either we processed all psets or we're still not started */
+	if (!echan->edesc ||
+	    echan->edesc->pset_nr == echan->edesc->processed) {
+		/* Get next vdesc */
+		vdesc = vchan_next_desc(&echan->vchan);
+		if (!vdesc) {
+			echan->edesc = NULL;
+			return;
+		}
+		list_del(&vdesc->node);
+		echan->edesc = to_edma_desc(&vdesc->tx);
 	}
 
-	list_del(&vdesc->node);
+	edesc = echan->edesc;
 
-	echan->edesc = edesc = to_edma_desc(&vdesc->tx);
+	/* Find out how many left */
+	left = edesc->pset_nr - edesc->processed;
+	nslots = min(MAX_NR_SG, left);
 
 	/* Write descriptor PaRAM set(s) */
-	for (i = 0; i < edesc->pset_nr; i++) {
-		edma_write_slot(echan->slot[i], &edesc->pset[i]);
+	for (i = 0; i < nslots; i++) {
+		j = i + edesc->processed;
+		edma_write_slot(echan->slot[i], &edesc->pset[j]);
 		dev_dbg(echan->vchan.chan.device->dev,
 			"\n pset[%d]:\n"
 			" chnum\t%d\n"
@@ -132,24 +146,50 @@ static void edma_execute(struct edma_chan *echan)
 			" bidx\t%08x\n"
 			" cidx\t%08x\n"
 			" lkrld\t%08x\n",
-			i, echan->ch_num, echan->slot[i],
-			edesc->pset[i].opt,
-			edesc->pset[i].src,
-			edesc->pset[i].dst,
-			edesc->pset[i].a_b_cnt,
-			edesc->pset[i].ccnt,
-			edesc->pset[i].src_dst_bidx,
-			edesc->pset[i].src_dst_cidx,
-			edesc->pset[i].link_bcntrld);
+			j, echan->ch_num, echan->slot[i],
+			edesc->pset[j].opt,
+			edesc->pset[j].src,
+			edesc->pset[j].dst,
+			edesc->pset[j].a_b_cnt,
+			edesc->pset[j].ccnt,
+			edesc->pset[j].src_dst_bidx,
+			edesc->pset[j].src_dst_cidx,
+			edesc->pset[j].link_bcntrld);
 		/* Link to the previous slot if not the last set */
-		if (i != (edesc->pset_nr - 1))
+		if (i != (nslots - 1))
 			edma_link(echan->slot[i], echan->slot[i+1]);
147 | /* Final pset links to the dummy pset */ | ||
148 | else | ||
149 | edma_link(echan->slot[i], echan->ecc->dummy_slot); | ||
150 | } | 161 | } |
151 | 162 | ||
152 | edma_start(echan->ch_num); | 163 | edesc->processed += nslots; |
164 | |||
165 | /* | ||
166 | * If this is the last set in a series of SG-list transactions, | ||
167 | * set up a link to the dummy slot; this causes all future | ||
168 | * events to be absorbed, which is OK because we're done | ||
169 | */ | ||
170 | if (edesc->processed == edesc->pset_nr) | ||
171 | edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot); | ||
172 | |||
173 | edma_resume(echan->ch_num); | ||
174 | |||
175 | if (edesc->processed <= MAX_NR_SG) { | ||
176 | dev_dbg(dev, "first transfer starting %d\n", echan->ch_num); | ||
177 | edma_start(echan->ch_num); | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * Events can be missed during the setup time between intermediate | ||
182 | * transfers when a long SG list has to be broken up into transfers | ||
183 | * of MAX_NR_SG sets at a time | ||
184 | */ | ||
185 | if (echan->missed) { | ||
186 | dev_dbg(dev, "missed event in execute detected\n"); | ||
187 | edma_clean_channel(echan->ch_num); | ||
188 | edma_stop(echan->ch_num); | ||
189 | edma_start(echan->ch_num); | ||
190 | edma_trigger_channel(echan->ch_num); | ||
191 | echan->missed = 0; | ||
192 | } | ||
153 | } | 193 | } |
154 | 194 | ||
155 | static int edma_terminate_all(struct edma_chan *echan) | 195 | static int edma_terminate_all(struct edma_chan *echan) |
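
The rewritten edma_execute() above dispatches at most MAX_NR_SG PaRAM sets per pass and tracks progress in edesc->processed. A self-contained plain-C sketch of that bookkeeping (the MAX_NR_SG value and the set count are assumed for illustration):

    #include <stdio.h>

    #define MAX_NR_SG 16              /* assumed value, for illustration only */

    int main(void)
    {
            int pset_nr = 45;         /* hypothetical SG list length */
            int processed = 0;
            int pass = 0;

            while (processed < pset_nr) {
                    int left = pset_nr - processed;
                    int nslots = left < MAX_NR_SG ? left : MAX_NR_SG;

                    /* slots 0..nslots-1 get psets processed..processed+nslots-1 */
                    pass++;
                    printf("pass %d: psets %d-%d in %d slots\n",
                           pass, processed, processed + nslots - 1, nslots);
                    processed += nslots;
            }
            printf("done after %d passes\n", pass);
            return 0;
    }
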
@@ -222,9 +262,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
222 | enum dma_slave_buswidth dev_width; | 262 | enum dma_slave_buswidth dev_width; |
223 | u32 burst; | 263 | u32 burst; |
224 | struct scatterlist *sg; | 264 | struct scatterlist *sg; |
225 | int i; | ||
226 | int acnt, bcnt, ccnt, src, dst, cidx; | 265 | int acnt, bcnt, ccnt, src, dst, cidx; |
227 | int src_bidx, dst_bidx, src_cidx, dst_cidx; | 266 | int src_bidx, dst_bidx, src_cidx, dst_cidx; |
267 | int i, nslots; | ||
228 | 268 | ||
229 | if (unlikely(!echan || !sgl || !sg_len)) | 269 | if (unlikely(!echan || !sgl || !sg_len)) |
230 | return NULL; | 270 | return NULL; |
@@ -247,12 +287,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
247 | return NULL; | 287 | return NULL; |
248 | } | 288 | } |
249 | 289 | ||
250 | if (sg_len > MAX_NR_SG) { | ||
251 | dev_err(dev, "Exceeded max SG segments %d > %d\n", | ||
252 | sg_len, MAX_NR_SG); | ||
253 | return NULL; | ||
254 | } | ||
255 | |||
256 | edesc = kzalloc(sizeof(*edesc) + sg_len * | 290 | edesc = kzalloc(sizeof(*edesc) + sg_len * |
257 | sizeof(edesc->pset[0]), GFP_ATOMIC); | 291 | sizeof(edesc->pset[0]), GFP_ATOMIC); |
258 | if (!edesc) { | 292 | if (!edesc) { |
@@ -262,8 +296,10 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
262 | 296 | ||
263 | edesc->pset_nr = sg_len; | 297 | edesc->pset_nr = sg_len; |
264 | 298 | ||
265 | for_each_sg(sgl, sg, sg_len, i) { | 299 | /* Allocate a PaRAM slot, if needed */ |
266 | /* Allocate a PaRAM slot, if needed */ | 300 | nslots = min_t(unsigned, MAX_NR_SG, sg_len); |
301 | |||
302 | for (i = 0; i < nslots; i++) { | ||
267 | if (echan->slot[i] < 0) { | 303 | if (echan->slot[i] < 0) { |
268 | echan->slot[i] = | 304 | echan->slot[i] = |
269 | edma_alloc_slot(EDMA_CTLR(echan->ch_num), | 305 | edma_alloc_slot(EDMA_CTLR(echan->ch_num), |
@@ -273,6 +309,10 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
273 | return NULL; | 309 | return NULL; |
274 | } | 310 | } |
275 | } | 311 | } |
312 | } | ||
313 | |||
314 | /* Configure PaRAM sets for each SG */ | ||
315 | for_each_sg(sgl, sg, sg_len, i) { | ||
276 | 316 | ||
277 | acnt = dev_width; | 317 | acnt = dev_width; |
278 | 318 | ||
@@ -330,6 +370,12 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
330 | /* Configure A or AB synchronized transfers */ | 370 | /* Configure A or AB synchronized transfers */ |
331 | if (edesc->absync) | 371 | if (edesc->absync) |
332 | edesc->pset[i].opt |= SYNCDIM; | 372 | edesc->pset[i].opt |= SYNCDIM; |
373 | |||
374 | /* If this is the last entry in the current SG set of transactions, | ||
375 | enable the interrupt so that the next set can be processed */ | ||
376 | if (!((i+1) % MAX_NR_SG)) | ||
377 | edesc->pset[i].opt |= TCINTEN; | ||
378 | |||
333 | /* If this is the last set, enable completion interrupt flag */ | 379 | /* If this is the last set, enable completion interrupt flag */ |
334 | if (i == sg_len - 1) | 380 | if (i == sg_len - 1) |
335 | edesc->pset[i].opt |= TCINTEN; | 381 | edesc->pset[i].opt |= TCINTEN; |
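
With the two TCINTEN cases above, an interrupt fires at every MAX_NR_SG boundary (to kick off the next batch) and once more on the final set. A self-contained sketch showing which psets raise an interrupt (values assumed for illustration):

    #include <stdio.h>

    #define MAX_NR_SG 16              /* assumed value, for illustration only */

    int main(void)
    {
            int sg_len = 40;          /* hypothetical SG list length */
            int i;

            for (i = 0; i < sg_len; i++) {
                    if (!((i + 1) % MAX_NR_SG))
                            printf("pset %2d: intermediate interrupt (end of batch)\n", i);
                    if (i == sg_len - 1)
                            printf("pset %2d: completion interrupt (last set)\n", i);
            }
            return 0;
    }
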
@@ -355,27 +401,65 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data) | |||
355 | struct device *dev = echan->vchan.chan.device->dev; | 401 | struct device *dev = echan->vchan.chan.device->dev; |
356 | struct edma_desc *edesc; | 402 | struct edma_desc *edesc; |
357 | unsigned long flags; | 403 | unsigned long flags; |
404 | struct edmacc_param p; | ||
358 | 405 | ||
359 | /* Stop the channel */ | 406 | /* Pause the channel */ |
360 | edma_stop(echan->ch_num); | 407 | edma_pause(echan->ch_num); |
361 | 408 | ||
362 | switch (ch_status) { | 409 | switch (ch_status) { |
363 | case DMA_COMPLETE: | 410 | case DMA_COMPLETE: |
364 | dev_dbg(dev, "transfer complete on channel %d\n", ch_num); | ||
365 | |||
366 | spin_lock_irqsave(&echan->vchan.lock, flags); | 411 | spin_lock_irqsave(&echan->vchan.lock, flags); |
367 | 412 | ||
368 | edesc = echan->edesc; | 413 | edesc = echan->edesc; |
369 | if (edesc) { | 414 | if (edesc) { |
415 | if (edesc->processed == edesc->pset_nr) { | ||
416 | dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); | ||
417 | edma_stop(echan->ch_num); | ||
418 | vchan_cookie_complete(&edesc->vdesc); | ||
419 | } else { | ||
420 | dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); | ||
421 | } | ||
422 | |||
370 | edma_execute(echan); | 423 | edma_execute(echan); |
371 | vchan_cookie_complete(&edesc->vdesc); | ||
372 | } | 424 | } |
373 | 425 | ||
374 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | 426 | spin_unlock_irqrestore(&echan->vchan.lock, flags); |
375 | 427 | ||
376 | break; | 428 | break; |
377 | case DMA_CC_ERROR: | 429 | case DMA_CC_ERROR: |
378 | dev_dbg(dev, "transfer error on channel %d\n", ch_num); | 430 | spin_lock_irqsave(&echan->vchan.lock, flags); |
431 | |||
432 | edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); | ||
433 | |||
434 | /* | ||
435 | * Defer the issue by setting the missed flag; it is guaranteed | ||
436 | * to be handled, because either: | ||
437 | * (1) we finished transmitting an intermediate slot and | ||
438 | * edma_execute is coming up, or | ||
439 | * (2) we finished the current transfer and issuing pending | ||
440 | * descriptors will call edma_execute. | ||
441 | * | ||
442 | * Important note: issuing can be dangerous here and | ||
443 | * lead to some nasty recursion when we are in a NULL | ||
444 | * slot. So we avoid doing so and set the missed flag. | ||
445 | */ | ||
446 | if (p.a_b_cnt == 0 && p.ccnt == 0) { | ||
447 | dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n"); | ||
448 | echan->missed = 1; | ||
449 | } else { | ||
450 | /* | ||
451 | * The slot is already programmed but the event got | ||
452 | * missed, so its safe to issue it here. | ||
453 | */ | ||
454 | dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n"); | ||
455 | edma_clean_channel(echan->ch_num); | ||
456 | edma_stop(echan->ch_num); | ||
457 | edma_start(echan->ch_num); | ||
458 | edma_trigger_channel(echan->ch_num); | ||
459 | } | ||
460 | |||
461 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
462 | |||
379 | break; | 463 | break; |
380 | default: | 464 | default: |
381 | break; | 465 | break; |
@@ -502,8 +586,6 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, | |||
502 | } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) { | 586 | } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) { |
503 | struct edma_desc *edesc = echan->edesc; | 587 | struct edma_desc *edesc = echan->edesc; |
504 | txstate->residue = edma_desc_size(edesc); | 588 | txstate->residue = edma_desc_size(edesc); |
505 | } else { | ||
506 | txstate->residue = 0; | ||
507 | } | 589 | } |
508 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | 590 | spin_unlock_irqrestore(&echan->vchan.lock, flags); |
509 | 591 | ||
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index f2bf8c0c4675..591cd8c63abb 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c | |||
@@ -1313,15 +1313,7 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan, | |||
1313 | dma_cookie_t cookie, | 1313 | dma_cookie_t cookie, |
1314 | struct dma_tx_state *state) | 1314 | struct dma_tx_state *state) |
1315 | { | 1315 | { |
1316 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | 1316 | return dma_cookie_status(chan, cookie, state); |
1317 | enum dma_status ret; | ||
1318 | unsigned long flags; | ||
1319 | |||
1320 | spin_lock_irqsave(&edmac->lock, flags); | ||
1321 | ret = dma_cookie_status(chan, cookie, state); | ||
1322 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
1323 | |||
1324 | return ret; | ||
1325 | } | 1317 | } |
1326 | 1318 | ||
1327 | /** | 1319 | /** |
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 49e8fbdb8983..b3f3e90054f2 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -979,15 +979,7 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, | |||
979 | dma_cookie_t cookie, | 979 | dma_cookie_t cookie, |
980 | struct dma_tx_state *txstate) | 980 | struct dma_tx_state *txstate) |
981 | { | 981 | { |
982 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 982 | return dma_cookie_status(dchan, cookie, txstate); |
983 | enum dma_status ret; | ||
984 | unsigned long flags; | ||
985 | |||
986 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
987 | ret = dma_cookie_status(dchan, cookie, txstate); | ||
988 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
989 | |||
990 | return ret; | ||
991 | } | 983 | } |
992 | 984 | ||
993 | /*----------------------------------------------------------------------------*/ | 985 | /*----------------------------------------------------------------------------*/ |
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index ff2aab973b45..78f8ca5fccee 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -805,10 +805,8 @@ static void imxdma_free_chan_resources(struct dma_chan *chan) | |||
805 | } | 805 | } |
806 | INIT_LIST_HEAD(&imxdmac->ld_free); | 806 | INIT_LIST_HEAD(&imxdmac->ld_free); |
807 | 807 | ||
808 | if (imxdmac->sg_list) { | 808 | kfree(imxdmac->sg_list); |
809 | kfree(imxdmac->sg_list); | 809 | imxdmac->sg_list = NULL; |
810 | imxdmac->sg_list = NULL; | ||
811 | } | ||
812 | } | 810 | } |
813 | 811 | ||
814 | static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | 812 | static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 1e44b8cf95da..fc43603cf0bb 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -243,7 +243,6 @@ struct sdma_engine; | |||
243 | * @event_id1 for channels that use 2 events | 243 | * @event_id1 for channels that use 2 events |
244 | * @word_size peripheral access size | 244 | * @word_size peripheral access size |
245 | * @buf_tail ID of the buffer that was processed | 245 | * @buf_tail ID of the buffer that was processed |
246 | * @done channel completion | ||
247 | * @num_bd max NUM_BD. number of descriptors currently handling | 246 | * @num_bd max NUM_BD. number of descriptors currently handling |
248 | */ | 247 | */ |
249 | struct sdma_channel { | 248 | struct sdma_channel { |
@@ -255,7 +254,6 @@ struct sdma_channel { | |||
255 | unsigned int event_id1; | 254 | unsigned int event_id1; |
256 | enum dma_slave_buswidth word_size; | 255 | enum dma_slave_buswidth word_size; |
257 | unsigned int buf_tail; | 256 | unsigned int buf_tail; |
258 | struct completion done; | ||
259 | unsigned int num_bd; | 257 | unsigned int num_bd; |
260 | struct sdma_buffer_descriptor *bd; | 258 | struct sdma_buffer_descriptor *bd; |
261 | dma_addr_t bd_phys; | 259 | dma_addr_t bd_phys; |
@@ -307,9 +305,10 @@ struct sdma_firmware_header { | |||
307 | u32 ram_code_size; | 305 | u32 ram_code_size; |
308 | }; | 306 | }; |
309 | 307 | ||
310 | enum sdma_devtype { | 308 | struct sdma_driver_data { |
311 | IMX31_SDMA, /* runs on i.mx31 */ | 309 | int chnenbl0; |
312 | IMX35_SDMA, /* runs on i.mx35 and later */ | 310 | int num_events; |
311 | struct sdma_script_start_addrs *script_addrs; | ||
313 | }; | 312 | }; |
314 | 313 | ||
315 | struct sdma_engine { | 314 | struct sdma_engine { |
@@ -318,8 +317,6 @@ struct sdma_engine { | |||
318 | struct sdma_channel channel[MAX_DMA_CHANNELS]; | 317 | struct sdma_channel channel[MAX_DMA_CHANNELS]; |
319 | struct sdma_channel_control *channel_control; | 318 | struct sdma_channel_control *channel_control; |
320 | void __iomem *regs; | 319 | void __iomem *regs; |
321 | enum sdma_devtype devtype; | ||
322 | unsigned int num_events; | ||
323 | struct sdma_context_data *context; | 320 | struct sdma_context_data *context; |
324 | dma_addr_t context_phys; | 321 | dma_addr_t context_phys; |
325 | struct dma_device dma_device; | 322 | struct dma_device dma_device; |
@@ -327,15 +324,118 @@ struct sdma_engine { | |||
327 | struct clk *clk_ahb; | 324 | struct clk *clk_ahb; |
328 | spinlock_t channel_0_lock; | 325 | spinlock_t channel_0_lock; |
329 | struct sdma_script_start_addrs *script_addrs; | 326 | struct sdma_script_start_addrs *script_addrs; |
327 | const struct sdma_driver_data *drvdata; | ||
328 | }; | ||
329 | |||
330 | static struct sdma_driver_data sdma_imx31 = { | ||
331 | .chnenbl0 = SDMA_CHNENBL0_IMX31, | ||
332 | .num_events = 32, | ||
333 | }; | ||
334 | |||
335 | static struct sdma_script_start_addrs sdma_script_imx25 = { | ||
336 | .ap_2_ap_addr = 729, | ||
337 | .uart_2_mcu_addr = 904, | ||
338 | .per_2_app_addr = 1255, | ||
339 | .mcu_2_app_addr = 834, | ||
340 | .uartsh_2_mcu_addr = 1120, | ||
341 | .per_2_shp_addr = 1329, | ||
342 | .mcu_2_shp_addr = 1048, | ||
343 | .ata_2_mcu_addr = 1560, | ||
344 | .mcu_2_ata_addr = 1479, | ||
345 | .app_2_per_addr = 1189, | ||
346 | .app_2_mcu_addr = 770, | ||
347 | .shp_2_per_addr = 1407, | ||
348 | .shp_2_mcu_addr = 979, | ||
349 | }; | ||
350 | |||
351 | static struct sdma_driver_data sdma_imx25 = { | ||
352 | .chnenbl0 = SDMA_CHNENBL0_IMX35, | ||
353 | .num_events = 48, | ||
354 | .script_addrs = &sdma_script_imx25, | ||
355 | }; | ||
356 | |||
357 | static struct sdma_driver_data sdma_imx35 = { | ||
358 | .chnenbl0 = SDMA_CHNENBL0_IMX35, | ||
359 | .num_events = 48, | ||
360 | }; | ||
361 | |||
362 | static struct sdma_script_start_addrs sdma_script_imx51 = { | ||
363 | .ap_2_ap_addr = 642, | ||
364 | .uart_2_mcu_addr = 817, | ||
365 | .mcu_2_app_addr = 747, | ||
366 | .mcu_2_shp_addr = 961, | ||
367 | .ata_2_mcu_addr = 1473, | ||
368 | .mcu_2_ata_addr = 1392, | ||
369 | .app_2_per_addr = 1033, | ||
370 | .app_2_mcu_addr = 683, | ||
371 | .shp_2_per_addr = 1251, | ||
372 | .shp_2_mcu_addr = 892, | ||
373 | }; | ||
374 | |||
375 | static struct sdma_driver_data sdma_imx51 = { | ||
376 | .chnenbl0 = SDMA_CHNENBL0_IMX35, | ||
377 | .num_events = 48, | ||
378 | .script_addrs = &sdma_script_imx51, | ||
379 | }; | ||
380 | |||
381 | static struct sdma_script_start_addrs sdma_script_imx53 = { | ||
382 | .ap_2_ap_addr = 642, | ||
383 | .app_2_mcu_addr = 683, | ||
384 | .mcu_2_app_addr = 747, | ||
385 | .uart_2_mcu_addr = 817, | ||
386 | .shp_2_mcu_addr = 891, | ||
387 | .mcu_2_shp_addr = 960, | ||
388 | .uartsh_2_mcu_addr = 1032, | ||
389 | .spdif_2_mcu_addr = 1100, | ||
390 | .mcu_2_spdif_addr = 1134, | ||
391 | .firi_2_mcu_addr = 1193, | ||
392 | .mcu_2_firi_addr = 1290, | ||
393 | }; | ||
394 | |||
395 | static struct sdma_driver_data sdma_imx53 = { | ||
396 | .chnenbl0 = SDMA_CHNENBL0_IMX35, | ||
397 | .num_events = 48, | ||
398 | .script_addrs = &sdma_script_imx53, | ||
399 | }; | ||
400 | |||
401 | static struct sdma_script_start_addrs sdma_script_imx6q = { | ||
402 | .ap_2_ap_addr = 642, | ||
403 | .uart_2_mcu_addr = 817, | ||
404 | .mcu_2_app_addr = 747, | ||
405 | .per_2_per_addr = 6331, | ||
406 | .uartsh_2_mcu_addr = 1032, | ||
407 | .mcu_2_shp_addr = 960, | ||
408 | .app_2_mcu_addr = 683, | ||
409 | .shp_2_mcu_addr = 891, | ||
410 | .spdif_2_mcu_addr = 1100, | ||
411 | .mcu_2_spdif_addr = 1134, | ||
412 | }; | ||
413 | |||
414 | static struct sdma_driver_data sdma_imx6q = { | ||
415 | .chnenbl0 = SDMA_CHNENBL0_IMX35, | ||
416 | .num_events = 48, | ||
417 | .script_addrs = &sdma_script_imx6q, | ||
330 | }; | 418 | }; |
331 | 419 | ||
332 | static struct platform_device_id sdma_devtypes[] = { | 420 | static struct platform_device_id sdma_devtypes[] = { |
333 | { | 421 | { |
422 | .name = "imx25-sdma", | ||
423 | .driver_data = (unsigned long)&sdma_imx25, | ||
424 | }, { | ||
334 | .name = "imx31-sdma", | 425 | .name = "imx31-sdma", |
335 | .driver_data = IMX31_SDMA, | 426 | .driver_data = (unsigned long)&sdma_imx31, |
336 | }, { | 427 | }, { |
337 | .name = "imx35-sdma", | 428 | .name = "imx35-sdma", |
338 | .driver_data = IMX35_SDMA, | 429 | .driver_data = (unsigned long)&sdma_imx35, |
430 | }, { | ||
431 | .name = "imx51-sdma", | ||
432 | .driver_data = (unsigned long)&sdma_imx51, | ||
433 | }, { | ||
434 | .name = "imx53-sdma", | ||
435 | .driver_data = (unsigned long)&sdma_imx53, | ||
436 | }, { | ||
437 | .name = "imx6q-sdma", | ||
438 | .driver_data = (unsigned long)&sdma_imx6q, | ||
339 | }, { | 439 | }, { |
340 | /* sentinel */ | 440 | /* sentinel */ |
341 | } | 441 | } |
@@ -343,8 +443,11 @@ static struct platform_device_id sdma_devtypes[] = { | |||
343 | MODULE_DEVICE_TABLE(platform, sdma_devtypes); | 443 | MODULE_DEVICE_TABLE(platform, sdma_devtypes); |
344 | 444 | ||
345 | static const struct of_device_id sdma_dt_ids[] = { | 445 | static const struct of_device_id sdma_dt_ids[] = { |
346 | { .compatible = "fsl,imx31-sdma", .data = &sdma_devtypes[IMX31_SDMA], }, | 446 | { .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, }, |
347 | { .compatible = "fsl,imx35-sdma", .data = &sdma_devtypes[IMX35_SDMA], }, | 447 | { .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, }, |
448 | { .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, }, | ||
449 | { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, }, | ||
450 | { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, }, | ||
348 | { /* sentinel */ } | 451 | { /* sentinel */ } |
349 | }; | 452 | }; |
350 | MODULE_DEVICE_TABLE(of, sdma_dt_ids); | 453 | MODULE_DEVICE_TABLE(of, sdma_dt_ids); |
@@ -356,8 +459,7 @@ MODULE_DEVICE_TABLE(of, sdma_dt_ids); | |||
356 | 459 | ||
357 | static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) | 460 | static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) |
358 | { | 461 | { |
359 | u32 chnenbl0 = (sdma->devtype == IMX31_SDMA ? SDMA_CHNENBL0_IMX31 : | 462 | u32 chnenbl0 = sdma->drvdata->chnenbl0; |
360 | SDMA_CHNENBL0_IMX35); | ||
361 | return chnenbl0 + event * 4; | 463 | return chnenbl0 + event * 4; |
362 | } | 464 | } |
363 | 465 | ||
@@ -547,8 +649,6 @@ static void sdma_tasklet(unsigned long data) | |||
547 | { | 649 | { |
548 | struct sdma_channel *sdmac = (struct sdma_channel *) data; | 650 | struct sdma_channel *sdmac = (struct sdma_channel *) data; |
549 | 651 | ||
550 | complete(&sdmac->done); | ||
551 | |||
552 | if (sdmac->flags & IMX_DMA_SG_LOOP) | 652 | if (sdmac->flags & IMX_DMA_SG_LOOP) |
553 | sdma_handle_channel_loop(sdmac); | 653 | sdma_handle_channel_loop(sdmac); |
554 | else | 654 | else |
@@ -733,7 +833,7 @@ static int sdma_config_channel(struct sdma_channel *sdmac) | |||
733 | sdmac->per_addr = 0; | 833 | sdmac->per_addr = 0; |
734 | 834 | ||
735 | if (sdmac->event_id0) { | 835 | if (sdmac->event_id0) { |
736 | if (sdmac->event_id0 >= sdmac->sdma->num_events) | 836 | if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) |
737 | return -EINVAL; | 837 | return -EINVAL; |
738 | sdma_event_enable(sdmac, sdmac->event_id0); | 838 | sdma_event_enable(sdmac, sdmac->event_id0); |
739 | } | 839 | } |
@@ -812,9 +912,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac) | |||
812 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; | 912 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; |
813 | 913 | ||
814 | sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); | 914 | sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); |
815 | |||
816 | init_completion(&sdmac->done); | ||
817 | |||
818 | return 0; | 915 | return 0; |
819 | out: | 916 | out: |
820 | 917 | ||
@@ -1120,15 +1217,12 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1120 | } | 1217 | } |
1121 | 1218 | ||
1122 | static enum dma_status sdma_tx_status(struct dma_chan *chan, | 1219 | static enum dma_status sdma_tx_status(struct dma_chan *chan, |
1123 | dma_cookie_t cookie, | 1220 | dma_cookie_t cookie, |
1124 | struct dma_tx_state *txstate) | 1221 | struct dma_tx_state *txstate) |
1125 | { | 1222 | { |
1126 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1223 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1127 | dma_cookie_t last_used; | ||
1128 | |||
1129 | last_used = chan->cookie; | ||
1130 | 1224 | ||
1131 | dma_set_tx_state(txstate, chan->completed_cookie, last_used, | 1225 | dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, |
1132 | sdmac->chn_count - sdmac->chn_real_count); | 1226 | sdmac->chn_count - sdmac->chn_real_count); |
1133 | 1227 | ||
1134 | return sdmac->status; | 1228 | return sdmac->status; |
@@ -1218,19 +1312,6 @@ static int __init sdma_init(struct sdma_engine *sdma) | |||
1218 | int i, ret; | 1312 | int i, ret; |
1219 | dma_addr_t ccb_phys; | 1313 | dma_addr_t ccb_phys; |
1220 | 1314 | ||
1221 | switch (sdma->devtype) { | ||
1222 | case IMX31_SDMA: | ||
1223 | sdma->num_events = 32; | ||
1224 | break; | ||
1225 | case IMX35_SDMA: | ||
1226 | sdma->num_events = 48; | ||
1227 | break; | ||
1228 | default: | ||
1229 | dev_err(sdma->dev, "Unknown sdma type %d. aborting\n", | ||
1230 | sdma->devtype); | ||
1231 | return -ENODEV; | ||
1232 | } | ||
1233 | |||
1234 | clk_enable(sdma->clk_ipg); | 1315 | clk_enable(sdma->clk_ipg); |
1235 | clk_enable(sdma->clk_ahb); | 1316 | clk_enable(sdma->clk_ahb); |
1236 | 1317 | ||
@@ -1257,7 +1338,7 @@ static int __init sdma_init(struct sdma_engine *sdma) | |||
1257 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control)); | 1338 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control)); |
1258 | 1339 | ||
1259 | /* disable all channels */ | 1340 | /* disable all channels */ |
1260 | for (i = 0; i < sdma->num_events; i++) | 1341 | for (i = 0; i < sdma->drvdata->num_events; i++) |
1261 | writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i)); | 1342 | writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i)); |
1262 | 1343 | ||
1263 | /* All channels have priority 0 */ | 1344 | /* All channels have priority 0 */ |
@@ -1335,10 +1416,21 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1335 | int ret; | 1416 | int ret; |
1336 | int irq; | 1417 | int irq; |
1337 | struct resource *iores; | 1418 | struct resource *iores; |
1338 | struct sdma_platform_data *pdata = pdev->dev.platform_data; | 1419 | struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev); |
1339 | int i; | 1420 | int i; |
1340 | struct sdma_engine *sdma; | 1421 | struct sdma_engine *sdma; |
1341 | s32 *saddr_arr; | 1422 | s32 *saddr_arr; |
1423 | const struct sdma_driver_data *drvdata = NULL; | ||
1424 | |||
1425 | if (of_id) | ||
1426 | drvdata = of_id->data; | ||
1427 | else if (pdev->id_entry) | ||
1428 | drvdata = (void *)pdev->id_entry->driver_data; | ||
1429 | |||
1430 | if (!drvdata) { | ||
1431 | dev_err(&pdev->dev, "unable to find driver data\n"); | ||
1432 | return -EINVAL; | ||
1433 | } | ||
1342 | 1434 | ||
1343 | sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); | 1435 | sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); |
1344 | if (!sdma) | 1436 | if (!sdma) |
@@ -1347,6 +1439,7 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1347 | spin_lock_init(&sdma->channel_0_lock); | 1439 | spin_lock_init(&sdma->channel_0_lock); |
1348 | 1440 | ||
1349 | sdma->dev = &pdev->dev; | 1441 | sdma->dev = &pdev->dev; |
1442 | sdma->drvdata = drvdata; | ||
1350 | 1443 | ||
1351 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1444 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1352 | irq = platform_get_irq(pdev, 0); | 1445 | irq = platform_get_irq(pdev, 0); |
@@ -1396,10 +1489,6 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1396 | for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) | 1489 | for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) |
1397 | saddr_arr[i] = -EINVAL; | 1490 | saddr_arr[i] = -EINVAL; |
1398 | 1491 | ||
1399 | if (of_id) | ||
1400 | pdev->id_entry = of_id->data; | ||
1401 | sdma->devtype = pdev->id_entry->driver_data; | ||
1402 | |||
1403 | dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); | 1492 | dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); |
1404 | dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); | 1493 | dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); |
1405 | 1494 | ||
@@ -1431,6 +1520,8 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1431 | if (ret) | 1520 | if (ret) |
1432 | goto err_init; | 1521 | goto err_init; |
1433 | 1522 | ||
1523 | if (sdma->drvdata->script_addrs) | ||
1524 | sdma_add_scripts(sdma, sdma->drvdata->script_addrs); | ||
1434 | if (pdata && pdata->script_addrs) | 1525 | if (pdata && pdata->script_addrs) |
1435 | sdma_add_scripts(sdma, pdata->script_addrs); | 1526 | sdma_add_scripts(sdma, pdata->script_addrs); |
1436 | 1527 | ||
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index cc727ec78c4e..dd8b44a56e5d 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -518,7 +518,7 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan) | |||
518 | struct iop_adma_desc_slot *slot = NULL; | 518 | struct iop_adma_desc_slot *slot = NULL; |
519 | int init = iop_chan->slots_allocated ? 0 : 1; | 519 | int init = iop_chan->slots_allocated ? 0 : 1; |
520 | struct iop_adma_platform_data *plat_data = | 520 | struct iop_adma_platform_data *plat_data = |
521 | iop_chan->device->pdev->dev.platform_data; | 521 | dev_get_platdata(&iop_chan->device->pdev->dev); |
522 | int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE; | 522 | int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE; |
523 | 523 | ||
524 | /* Allocate descriptor slots */ | 524 | /* Allocate descriptor slots */ |
@@ -1351,7 +1351,7 @@ static int iop_adma_remove(struct platform_device *dev) | |||
1351 | struct iop_adma_device *device = platform_get_drvdata(dev); | 1351 | struct iop_adma_device *device = platform_get_drvdata(dev); |
1352 | struct dma_chan *chan, *_chan; | 1352 | struct dma_chan *chan, *_chan; |
1353 | struct iop_adma_chan *iop_chan; | 1353 | struct iop_adma_chan *iop_chan; |
1354 | struct iop_adma_platform_data *plat_data = dev->dev.platform_data; | 1354 | struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev); |
1355 | 1355 | ||
1356 | dma_async_device_unregister(&device->common); | 1356 | dma_async_device_unregister(&device->common); |
1357 | 1357 | ||
@@ -1376,7 +1376,7 @@ static int iop_adma_probe(struct platform_device *pdev) | |||
1376 | struct iop_adma_device *adev; | 1376 | struct iop_adma_device *adev; |
1377 | struct iop_adma_chan *iop_chan; | 1377 | struct iop_adma_chan *iop_chan; |
1378 | struct dma_device *dma_dev; | 1378 | struct dma_device *dma_dev; |
1379 | struct iop_adma_platform_data *plat_data = pdev->dev.platform_data; | 1379 | struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev); |
1380 | 1380 | ||
1381 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1381 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1382 | if (!res) | 1382 | if (!res) |
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index d39c2cd0795d..cb9c0bc317e8 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -1593,10 +1593,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan) | |||
1593 | static enum dma_status idmac_tx_status(struct dma_chan *chan, | 1593 | static enum dma_status idmac_tx_status(struct dma_chan *chan, |
1594 | dma_cookie_t cookie, struct dma_tx_state *txstate) | 1594 | dma_cookie_t cookie, struct dma_tx_state *txstate) |
1595 | { | 1595 | { |
1596 | dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0); | 1596 | return dma_cookie_status(chan, cookie, txstate); |
1597 | if (cookie != chan->cookie) | ||
1598 | return DMA_ERROR; | ||
1599 | return DMA_SUCCESS; | ||
1600 | } | 1597 | } |
1601 | 1598 | ||
1602 | static int __init ipu_idmac_init(struct ipu *ipu) | 1599 | static int __init ipu_idmac_init(struct ipu *ipu) |
@@ -1767,7 +1764,6 @@ static int ipu_remove(struct platform_device *pdev) | |||
1767 | iounmap(ipu->reg_ic); | 1764 | iounmap(ipu->reg_ic); |
1768 | iounmap(ipu->reg_ipu); | 1765 | iounmap(ipu->reg_ipu); |
1769 | tasklet_kill(&ipu->tasklet); | 1766 | tasklet_kill(&ipu->tasklet); |
1770 | platform_set_drvdata(pdev, NULL); | ||
1771 | 1767 | ||
1772 | return 0; | 1768 | return 0; |
1773 | } | 1769 | } |
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c new file mode 100644 index 000000000000..a2c330f5f952 --- /dev/null +++ b/drivers/dma/k3dma.c | |||
@@ -0,0 +1,837 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2013 Linaro Ltd. | ||
3 | * Copyright (c) 2013 Hisilicon Limited. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/device.h> | ||
11 | #include <linux/dmaengine.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/of_device.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/clk.h> | ||
22 | #include <linux/of_dma.h> | ||
23 | |||
24 | #include "virt-dma.h" | ||
25 | |||
26 | #define DRIVER_NAME "k3-dma" | ||
27 | #define DMA_ALIGN 3 | ||
28 | #define DMA_MAX_SIZE 0x1ffc | ||
29 | |||
30 | #define INT_STAT 0x00 | ||
31 | #define INT_TC1 0x04 | ||
32 | #define INT_ERR1 0x0c | ||
33 | #define INT_ERR2 0x10 | ||
34 | #define INT_TC1_MASK 0x18 | ||
35 | #define INT_ERR1_MASK 0x20 | ||
36 | #define INT_ERR2_MASK 0x24 | ||
37 | #define INT_TC1_RAW 0x600 | ||
38 | #define INT_ERR1_RAW 0x608 | ||
39 | #define INT_ERR2_RAW 0x610 | ||
40 | #define CH_PRI 0x688 | ||
41 | #define CH_STAT 0x690 | ||
42 | #define CX_CUR_CNT 0x704 | ||
43 | #define CX_LLI 0x800 | ||
44 | #define CX_CNT 0x810 | ||
45 | #define CX_SRC 0x814 | ||
46 | #define CX_DST 0x818 | ||
47 | #define CX_CFG 0x81c | ||
48 | #define AXI_CFG 0x820 | ||
49 | #define AXI_CFG_DEFAULT 0x201201 | ||
50 | |||
51 | #define CX_LLI_CHAIN_EN 0x2 | ||
52 | #define CX_CFG_EN 0x1 | ||
53 | #define CX_CFG_MEM2PER (0x1 << 2) | ||
54 | #define CX_CFG_PER2MEM (0x2 << 2) | ||
55 | #define CX_CFG_SRCINCR (0x1 << 31) | ||
56 | #define CX_CFG_DSTINCR (0x1 << 30) | ||
57 | |||
58 | struct k3_desc_hw { | ||
59 | u32 lli; | ||
60 | u32 reserved[3]; | ||
61 | u32 count; | ||
62 | u32 saddr; | ||
63 | u32 daddr; | ||
64 | u32 config; | ||
65 | } __aligned(32); | ||
66 | |||
67 | struct k3_dma_desc_sw { | ||
68 | struct virt_dma_desc vd; | ||
69 | dma_addr_t desc_hw_lli; | ||
70 | size_t desc_num; | ||
71 | size_t size; | ||
72 | struct k3_desc_hw desc_hw[0]; | ||
73 | }; | ||
74 | |||
75 | struct k3_dma_phy; | ||
76 | |||
77 | struct k3_dma_chan { | ||
78 | u32 ccfg; | ||
79 | struct virt_dma_chan vc; | ||
80 | struct k3_dma_phy *phy; | ||
81 | struct list_head node; | ||
82 | enum dma_transfer_direction dir; | ||
83 | dma_addr_t dev_addr; | ||
84 | enum dma_status status; | ||
85 | }; | ||
86 | |||
87 | struct k3_dma_phy { | ||
88 | u32 idx; | ||
89 | void __iomem *base; | ||
90 | struct k3_dma_chan *vchan; | ||
91 | struct k3_dma_desc_sw *ds_run; | ||
92 | struct k3_dma_desc_sw *ds_done; | ||
93 | }; | ||
94 | |||
95 | struct k3_dma_dev { | ||
96 | struct dma_device slave; | ||
97 | void __iomem *base; | ||
98 | struct tasklet_struct task; | ||
99 | spinlock_t lock; | ||
100 | struct list_head chan_pending; | ||
101 | struct k3_dma_phy *phy; | ||
102 | struct k3_dma_chan *chans; | ||
103 | struct clk *clk; | ||
104 | u32 dma_channels; | ||
105 | u32 dma_requests; | ||
106 | }; | ||
107 | |||
108 | #define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave) | ||
109 | |||
110 | static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan) | ||
111 | { | ||
112 | return container_of(chan, struct k3_dma_chan, vc.chan); | ||
113 | } | ||
114 | |||
115 | static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on) | ||
116 | { | ||
117 | u32 val = 0; | ||
118 | |||
119 | if (on) { | ||
120 | val = readl_relaxed(phy->base + CX_CFG); | ||
121 | val |= CX_CFG_EN; | ||
122 | writel_relaxed(val, phy->base + CX_CFG); | ||
123 | } else { | ||
124 | val = readl_relaxed(phy->base + CX_CFG); | ||
125 | val &= ~CX_CFG_EN; | ||
126 | writel_relaxed(val, phy->base + CX_CFG); | ||
127 | } | ||
128 | } | ||
129 | |||
130 | static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d) | ||
131 | { | ||
132 | u32 val = 0; | ||
133 | |||
134 | k3_dma_pause_dma(phy, false); | ||
135 | |||
136 | val = 0x1 << phy->idx; | ||
137 | writel_relaxed(val, d->base + INT_TC1_RAW); | ||
138 | writel_relaxed(val, d->base + INT_ERR1_RAW); | ||
139 | writel_relaxed(val, d->base + INT_ERR2_RAW); | ||
140 | } | ||
141 | |||
142 | static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw) | ||
143 | { | ||
144 | writel_relaxed(hw->lli, phy->base + CX_LLI); | ||
145 | writel_relaxed(hw->count, phy->base + CX_CNT); | ||
146 | writel_relaxed(hw->saddr, phy->base + CX_SRC); | ||
147 | writel_relaxed(hw->daddr, phy->base + CX_DST); | ||
148 | writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG); | ||
149 | writel_relaxed(hw->config, phy->base + CX_CFG); | ||
150 | } | ||
151 | |||
152 | static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy) | ||
153 | { | ||
154 | u32 cnt = 0; | ||
155 | |||
156 | cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10); | ||
157 | cnt &= 0xffff; | ||
158 | return cnt; | ||
159 | } | ||
160 | |||
161 | static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy) | ||
162 | { | ||
163 | return readl_relaxed(phy->base + CX_LLI); | ||
164 | } | ||
165 | |||
166 | static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d) | ||
167 | { | ||
168 | return readl_relaxed(d->base + CH_STAT); | ||
169 | } | ||
170 | |||
171 | static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on) | ||
172 | { | ||
173 | if (on) { | ||
174 | /* set same priority */ | ||
175 | writel_relaxed(0x0, d->base + CH_PRI); | ||
176 | |||
177 | /* unmask irq */ | ||
178 | writel_relaxed(0xffff, d->base + INT_TC1_MASK); | ||
179 | writel_relaxed(0xffff, d->base + INT_ERR1_MASK); | ||
180 | writel_relaxed(0xffff, d->base + INT_ERR2_MASK); | ||
181 | } else { | ||
182 | /* mask irq */ | ||
183 | writel_relaxed(0x0, d->base + INT_TC1_MASK); | ||
184 | writel_relaxed(0x0, d->base + INT_ERR1_MASK); | ||
185 | writel_relaxed(0x0, d->base + INT_ERR2_MASK); | ||
186 | } | ||
187 | } | ||
188 | |||
189 | static irqreturn_t k3_dma_int_handler(int irq, void *dev_id) | ||
190 | { | ||
191 | struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id; | ||
192 | struct k3_dma_phy *p; | ||
193 | struct k3_dma_chan *c; | ||
194 | u32 stat = readl_relaxed(d->base + INT_STAT); | ||
195 | u32 tc1 = readl_relaxed(d->base + INT_TC1); | ||
196 | u32 err1 = readl_relaxed(d->base + INT_ERR1); | ||
197 | u32 err2 = readl_relaxed(d->base + INT_ERR2); | ||
198 | u32 i, irq_chan = 0; | ||
199 | |||
200 | while (stat) { | ||
201 | i = __ffs(stat); | ||
202 | stat &= (stat - 1); | ||
203 | if (likely(tc1 & BIT(i))) { | ||
204 | p = &d->phy[i]; | ||
205 | c = p->vchan; | ||
206 | if (c) { | ||
207 | unsigned long flags; | ||
208 | |||
209 | spin_lock_irqsave(&c->vc.lock, flags); | ||
210 | vchan_cookie_complete(&p->ds_run->vd); | ||
211 | p->ds_done = p->ds_run; | ||
212 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
213 | } | ||
214 | irq_chan |= BIT(i); | ||
215 | } | ||
216 | if (unlikely((err1 & BIT(i)) || (err2 & BIT(i)))) | ||
217 | dev_warn(d->slave.dev, "DMA ERR\n"); | ||
218 | } | ||
219 | |||
220 | writel_relaxed(irq_chan, d->base + INT_TC1_RAW); | ||
221 | writel_relaxed(err1, d->base + INT_ERR1_RAW); | ||
222 | writel_relaxed(err2, d->base + INT_ERR2_RAW); | ||
223 | |||
224 | if (irq_chan) { | ||
225 | tasklet_schedule(&d->task); | ||
226 | return IRQ_HANDLED; | ||
227 | } else | ||
228 | return IRQ_NONE; | ||
229 | } | ||
230 | |||
231 | static int k3_dma_start_txd(struct k3_dma_chan *c) | ||
232 | { | ||
233 | struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device); | ||
234 | struct virt_dma_desc *vd = vchan_next_desc(&c->vc); | ||
235 | |||
236 | if (!c->phy) | ||
237 | return -EAGAIN; | ||
238 | |||
239 | if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d)) | ||
240 | return -EAGAIN; | ||
241 | |||
242 | if (vd) { | ||
243 | struct k3_dma_desc_sw *ds = | ||
244 | container_of(vd, struct k3_dma_desc_sw, vd); | ||
245 | /* | ||
246 | * fetch and remove request from vc->desc_issued | ||
247 | * so vc->desc_issued only contains desc pending | ||
248 | */ | ||
249 | list_del(&ds->vd.node); | ||
250 | c->phy->ds_run = ds; | ||
251 | c->phy->ds_done = NULL; | ||
252 | /* start dma */ | ||
253 | k3_dma_set_desc(c->phy, &ds->desc_hw[0]); | ||
254 | return 0; | ||
255 | } | ||
256 | c->phy->ds_done = NULL; | ||
257 | c->phy->ds_run = NULL; | ||
258 | return -EAGAIN; | ||
259 | } | ||
260 | |||
261 | static void k3_dma_tasklet(unsigned long arg) | ||
262 | { | ||
263 | struct k3_dma_dev *d = (struct k3_dma_dev *)arg; | ||
264 | struct k3_dma_phy *p; | ||
265 | struct k3_dma_chan *c, *cn; | ||
266 | unsigned pch, pch_alloc = 0; | ||
267 | |||
268 | /* check new dma request of running channel in vc->desc_issued */ | ||
269 | list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) { | ||
270 | spin_lock_irq(&c->vc.lock); | ||
271 | p = c->phy; | ||
272 | if (p && p->ds_done) { | ||
273 | if (k3_dma_start_txd(c)) { | ||
274 | /* No current txd associated with this channel */ | ||
275 | dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx); | ||
276 | /* Mark this channel free */ | ||
277 | c->phy = NULL; | ||
278 | p->vchan = NULL; | ||
279 | } | ||
280 | } | ||
281 | spin_unlock_irq(&c->vc.lock); | ||
282 | } | ||
283 | |||
284 | /* check new channel request in d->chan_pending */ | ||
285 | spin_lock_irq(&d->lock); | ||
286 | for (pch = 0; pch < d->dma_channels; pch++) { | ||
287 | p = &d->phy[pch]; | ||
288 | |||
289 | if (p->vchan == NULL && !list_empty(&d->chan_pending)) { | ||
290 | c = list_first_entry(&d->chan_pending, | ||
291 | struct k3_dma_chan, node); | ||
292 | /* remove from d->chan_pending */ | ||
293 | list_del_init(&c->node); | ||
294 | pch_alloc |= 1 << pch; | ||
295 | /* Mark this channel allocated */ | ||
296 | p->vchan = c; | ||
297 | c->phy = p; | ||
298 | dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); | ||
299 | } | ||
300 | } | ||
301 | spin_unlock_irq(&d->lock); | ||
302 | |||
303 | for (pch = 0; pch < d->dma_channels; pch++) { | ||
304 | if (pch_alloc & (1 << pch)) { | ||
305 | p = &d->phy[pch]; | ||
306 | c = p->vchan; | ||
307 | if (c) { | ||
308 | spin_lock_irq(&c->vc.lock); | ||
309 | k3_dma_start_txd(c); | ||
310 | spin_unlock_irq(&c->vc.lock); | ||
311 | } | ||
312 | } | ||
313 | } | ||
314 | } | ||
315 | |||
316 | static int k3_dma_alloc_chan_resources(struct dma_chan *chan) | ||
317 | { | ||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | static void k3_dma_free_chan_resources(struct dma_chan *chan) | ||
322 | { | ||
323 | struct k3_dma_chan *c = to_k3_chan(chan); | ||
324 | struct k3_dma_dev *d = to_k3_dma(chan->device); | ||
325 | unsigned long flags; | ||
326 | |||
327 | spin_lock_irqsave(&d->lock, flags); | ||
328 | list_del_init(&c->node); | ||
329 | spin_unlock_irqrestore(&d->lock, flags); | ||
330 | |||
331 | vchan_free_chan_resources(&c->vc); | ||
332 | c->ccfg = 0; | ||
333 | } | ||
334 | |||
335 | static enum dma_status k3_dma_tx_status(struct dma_chan *chan, | ||
336 | dma_cookie_t cookie, struct dma_tx_state *state) | ||
337 | { | ||
338 | struct k3_dma_chan *c = to_k3_chan(chan); | ||
339 | struct k3_dma_dev *d = to_k3_dma(chan->device); | ||
340 | struct k3_dma_phy *p; | ||
341 | struct virt_dma_desc *vd; | ||
342 | unsigned long flags; | ||
343 | enum dma_status ret; | ||
344 | size_t bytes = 0; | ||
345 | |||
346 | ret = dma_cookie_status(&c->vc.chan, cookie, state); | ||
347 | if (ret == DMA_SUCCESS) | ||
348 | return ret; | ||
349 | |||
350 | spin_lock_irqsave(&c->vc.lock, flags); | ||
351 | p = c->phy; | ||
352 | ret = c->status; | ||
353 | |||
354 | /* | ||
355 | * If the cookie is on our issue queue, then the residue is | ||
356 | * its total size. | ||
357 | */ | ||
358 | vd = vchan_find_desc(&c->vc, cookie); | ||
359 | if (vd) { | ||
360 | bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size; | ||
361 | } else if ((!p) || (!p->ds_run)) { | ||
362 | bytes = 0; | ||
363 | } else { | ||
364 | struct k3_dma_desc_sw *ds = p->ds_run; | ||
365 | u32 clli = 0, index = 0; | ||
366 | |||
367 | bytes = k3_dma_get_curr_cnt(d, p); | ||
368 | clli = k3_dma_get_curr_lli(p); | ||
369 | index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw); | ||
370 | for (; index < ds->desc_num; index++) { | ||
371 | bytes += ds->desc_hw[index].count; | ||
372 | /* end of lli */ | ||
373 | if (!ds->desc_hw[index].lli) | ||
374 | break; | ||
375 | } | ||
376 | } | ||
377 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
378 | dma_set_residue(state, bytes); | ||
379 | return ret; | ||
380 | } | ||
381 | |||
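
For the in-flight case in k3_dma_tx_status() above, the residue is the byte count still pending in the current link plus the counts of all LLI entries not yet reached; the loop stops at the link whose lli field is 0. A self-contained sketch with assumed register readouts and chain contents:

    #include <stdio.h>

    /* trimmed-down stand-in for struct k3_desc_hw: next-link pointer and byte count */
    struct desc_hw {
            unsigned int lli;
            unsigned int count;
    };

    int main(void)
    {
            /* hypothetical three-link chain; the last link has lli == 0 */
            struct desc_hw hw[3] = { { 1, 0x1ffc }, { 1, 0x1ffc }, { 0, 0x0800 } };
            unsigned int curr_cnt = 0x0400;   /* assumed CX_CUR_CNT readout */
            unsigned int index = 1;           /* assumed next link, derived from CX_LLI */
            unsigned int bytes = curr_cnt;
            unsigned int i;

            for (i = index; i < 3; i++) {
                    bytes += hw[i].count;
                    if (!hw[i].lli)           /* end of link */
                            break;
            }
            printf("residue: %u bytes\n", bytes);
            return 0;
    }
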
382 | static void k3_dma_issue_pending(struct dma_chan *chan) | ||
383 | { | ||
384 | struct k3_dma_chan *c = to_k3_chan(chan); | ||
385 | struct k3_dma_dev *d = to_k3_dma(chan->device); | ||
386 | unsigned long flags; | ||
387 | |||
388 | spin_lock_irqsave(&c->vc.lock, flags); | ||
389 | /* add request to vc->desc_issued */ | ||
390 | if (vchan_issue_pending(&c->vc)) { | ||
391 | spin_lock(&d->lock); | ||
392 | if (!c->phy) { | ||
393 | if (list_empty(&c->node)) { | ||
394 | /* if new channel, add chan_pending */ | ||
395 | list_add_tail(&c->node, &d->chan_pending); | ||
396 | /* check in tasklet */ | ||
397 | tasklet_schedule(&d->task); | ||
398 | dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); | ||
399 | } | ||
400 | } | ||
401 | spin_unlock(&d->lock); | ||
402 | } else | ||
403 | dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); | ||
404 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
405 | } | ||
406 | |||
407 | static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst, | ||
408 | dma_addr_t src, size_t len, u32 num, u32 ccfg) | ||
409 | { | ||
410 | if ((num + 1) < ds->desc_num) | ||
411 | ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) * | ||
412 | sizeof(struct k3_desc_hw); | ||
413 | ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN; | ||
414 | ds->desc_hw[num].count = len; | ||
415 | ds->desc_hw[num].saddr = src; | ||
416 | ds->desc_hw[num].daddr = dst; | ||
417 | ds->desc_hw[num].config = ccfg; | ||
418 | } | ||
419 | |||
420 | static struct dma_async_tx_descriptor *k3_dma_prep_memcpy( | ||
421 | struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | ||
422 | size_t len, unsigned long flags) | ||
423 | { | ||
424 | struct k3_dma_chan *c = to_k3_chan(chan); | ||
425 | struct k3_dma_desc_sw *ds; | ||
426 | size_t copy = 0; | ||
427 | int num = 0; | ||
428 | |||
429 | if (!len) | ||
430 | return NULL; | ||
431 | |||
432 | num = DIV_ROUND_UP(len, DMA_MAX_SIZE); | ||
433 | ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC); | ||
434 | if (!ds) { | ||
435 | dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc); | ||
436 | return NULL; | ||
437 | } | ||
438 | ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]); | ||
439 | ds->size = len; | ||
440 | ds->desc_num = num; | ||
441 | num = 0; | ||
442 | |||
443 | if (!c->ccfg) { | ||
444 | /* default is memtomem, without calling device_control */ | ||
445 | c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN; | ||
446 | c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */ | ||
447 | c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */ | ||
448 | } | ||
449 | |||
450 | do { | ||
451 | copy = min_t(size_t, len, DMA_MAX_SIZE); | ||
452 | k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg); | ||
453 | |||
454 | if (c->dir == DMA_MEM_TO_DEV) { | ||
455 | src += copy; | ||
456 | } else if (c->dir == DMA_DEV_TO_MEM) { | ||
457 | dst += copy; | ||
458 | } else { | ||
459 | src += copy; | ||
460 | dst += copy; | ||
461 | } | ||
462 | len -= copy; | ||
463 | } while (len); | ||
464 | |||
465 | ds->desc_hw[num-1].lli = 0; /* end of link */ | ||
466 | return vchan_tx_prep(&c->vc, &ds->vd, flags); | ||
467 | } | ||
468 | |||
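
k3_dma_prep_memcpy() above splits a copy into DMA_MAX_SIZE pieces, chains the hardware links, and zeroes the last link's lli to terminate the chain. A self-contained sketch of the same arithmetic (the copy length is an assumed value):

    #include <stdio.h>

    #define DMA_MAX_SIZE 0x1ffc           /* max bytes per hardware link, as above */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            size_t len = 0x5000;          /* hypothetical memcpy length */
            int num = (int)DIV_ROUND_UP(len, DMA_MAX_SIZE);
            int i = 0;

            printf("%d hardware links needed\n", num);
            do {
                    size_t copy = len < DMA_MAX_SIZE ? len : DMA_MAX_SIZE;
                    /* link i covers 'copy' bytes; its lli field points at link i+1,
                     * except for the last link, whose lli stays 0 (end of chain) */
                    printf("link %d: %#zx bytes%s\n", i, copy,
                           (i == num - 1) ? " (end of link)" : "");
                    len -= copy;
                    i++;
            } while (len);
            return 0;
    }
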
469 | static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg( | ||
470 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen, | ||
471 | enum dma_transfer_direction dir, unsigned long flags, void *context) | ||
472 | { | ||
473 | struct k3_dma_chan *c = to_k3_chan(chan); | ||
474 | struct k3_dma_desc_sw *ds; | ||
475 | size_t len, avail, total = 0; | ||
476 | struct scatterlist *sg; | ||
477 | dma_addr_t addr, src = 0, dst = 0; | ||
478 | int num = sglen, i; | ||
479 | |||
480 | if (sgl == 0) | ||
481 | return NULL; | ||
482 | |||
483 | for_each_sg(sgl, sg, sglen, i) { | ||
484 | avail = sg_dma_len(sg); | ||
485 | if (avail > DMA_MAX_SIZE) | ||
486 | num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1; | ||
487 | } | ||
488 | |||
489 | ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC); | ||
490 | if (!ds) { | ||
491 | dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc); | ||
492 | return NULL; | ||
493 | } | ||
494 | ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]); | ||
495 | ds->desc_num = num; | ||
496 | num = 0; | ||
497 | |||
498 | for_each_sg(sgl, sg, sglen, i) { | ||
499 | addr = sg_dma_address(sg); | ||
500 | avail = sg_dma_len(sg); | ||
501 | total += avail; | ||
502 | |||
503 | do { | ||
504 | len = min_t(size_t, avail, DMA_MAX_SIZE); | ||
505 | |||
506 | if (dir == DMA_MEM_TO_DEV) { | ||
507 | src = addr; | ||
508 | dst = c->dev_addr; | ||
509 | } else if (dir == DMA_DEV_TO_MEM) { | ||
510 | src = c->dev_addr; | ||
511 | dst = addr; | ||
512 | } | ||
513 | |||
514 | k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg); | ||
515 | |||
516 | addr += len; | ||
517 | avail -= len; | ||
518 | } while (avail); | ||
519 | } | ||
520 | |||
521 | ds->desc_hw[num-1].lli = 0; /* end of link */ | ||
522 | ds->size = total; | ||
523 | return vchan_tx_prep(&c->vc, &ds->vd, flags); | ||
524 | } | ||
525 | |||
526 | static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
527 | unsigned long arg) | ||
528 | { | ||
529 | struct k3_dma_chan *c = to_k3_chan(chan); | ||
530 | struct k3_dma_dev *d = to_k3_dma(chan->device); | ||
531 | struct dma_slave_config *cfg = (void *)arg; | ||
532 | struct k3_dma_phy *p = c->phy; | ||
533 | unsigned long flags; | ||
534 | u32 maxburst = 0, val = 0; | ||
535 | enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | ||
536 | LIST_HEAD(head); | ||
537 | |||
538 | switch (cmd) { | ||
539 | case DMA_SLAVE_CONFIG: | ||
540 | if (cfg == NULL) | ||
541 | return -EINVAL; | ||
542 | c->dir = cfg->direction; | ||
543 | if (c->dir == DMA_DEV_TO_MEM) { | ||
544 | c->ccfg = CX_CFG_DSTINCR; | ||
545 | c->dev_addr = cfg->src_addr; | ||
546 | maxburst = cfg->src_maxburst; | ||
547 | width = cfg->src_addr_width; | ||
548 | } else if (c->dir == DMA_MEM_TO_DEV) { | ||
549 | c->ccfg = CX_CFG_SRCINCR; | ||
550 | c->dev_addr = cfg->dst_addr; | ||
551 | maxburst = cfg->dst_maxburst; | ||
552 | width = cfg->dst_addr_width; | ||
553 | } | ||
554 | switch (width) { | ||
555 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
556 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
557 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
558 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | ||
559 | val = __ffs(width); | ||
560 | break; | ||
561 | default: | ||
562 | val = 3; | ||
563 | break; | ||
564 | } | ||
565 | c->ccfg |= (val << 12) | (val << 16); | ||
566 | |||
567 | if ((maxburst == 0) || (maxburst > 16)) | ||
568 | val = 16; | ||
569 | else | ||
570 | val = maxburst - 1; | ||
571 | c->ccfg |= (val << 20) | (val << 24); | ||
572 | c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN; | ||
573 | |||
574 | /* specific request line */ | ||
575 | c->ccfg |= c->vc.chan.chan_id << 4; | ||
576 | break; | ||
577 | |||
578 | case DMA_TERMINATE_ALL: | ||
579 | dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); | ||
580 | |||
581 | /* Prevent this channel being scheduled */ | ||
582 | spin_lock(&d->lock); | ||
583 | list_del_init(&c->node); | ||
584 | spin_unlock(&d->lock); | ||
585 | |||
586 | /* Clear the tx descriptor lists */ | ||
587 | spin_lock_irqsave(&c->vc.lock, flags); | ||
588 | vchan_get_all_descriptors(&c->vc, &head); | ||
589 | if (p) { | ||
590 | /* vchan is assigned to a pchan - stop the channel */ | ||
591 | k3_dma_terminate_chan(p, d); | ||
592 | c->phy = NULL; | ||
593 | p->vchan = NULL; | ||
594 | p->ds_run = p->ds_done = NULL; | ||
595 | } | ||
596 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
597 | vchan_dma_desc_free_list(&c->vc, &head); | ||
598 | break; | ||
599 | |||
600 | case DMA_PAUSE: | ||
601 | dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); | ||
602 | if (c->status == DMA_IN_PROGRESS) { | ||
603 | c->status = DMA_PAUSED; | ||
604 | if (p) { | ||
605 | k3_dma_pause_dma(p, false); | ||
606 | } else { | ||
607 | spin_lock(&d->lock); | ||
608 | list_del_init(&c->node); | ||
609 | spin_unlock(&d->lock); | ||
610 | } | ||
611 | } | ||
612 | break; | ||
613 | |||
614 | case DMA_RESUME: | ||
615 | dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); | ||
616 | spin_lock_irqsave(&c->vc.lock, flags); | ||
617 | if (c->status == DMA_PAUSED) { | ||
618 | c->status = DMA_IN_PROGRESS; | ||
619 | if (p) { | ||
620 | k3_dma_pause_dma(p, true); | ||
621 | } else if (!list_empty(&c->vc.desc_issued)) { | ||
622 | spin_lock(&d->lock); | ||
623 | list_add_tail(&c->node, &d->chan_pending); | ||
624 | spin_unlock(&d->lock); | ||
625 | } | ||
626 | } | ||
627 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
628 | break; | ||
629 | default: | ||
630 | return -ENXIO; | ||
631 | } | ||
632 | return 0; | ||
633 | } | ||
634 | |||
635 | static void k3_dma_free_desc(struct virt_dma_desc *vd) | ||
636 | { | ||
637 | struct k3_dma_desc_sw *ds = | ||
638 | container_of(vd, struct k3_dma_desc_sw, vd); | ||
639 | |||
640 | kfree(ds); | ||
641 | } | ||
642 | |||
643 | static struct of_device_id k3_pdma_dt_ids[] = { | ||
644 | { .compatible = "hisilicon,k3-dma-1.0", }, | ||
645 | {} | ||
646 | }; | ||
647 | MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids); | ||
648 | |||
649 | static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec, | ||
650 | struct of_dma *ofdma) | ||
651 | { | ||
652 | struct k3_dma_dev *d = ofdma->of_dma_data; | ||
653 | unsigned int request = dma_spec->args[0]; | ||
654 | |||
655 | if (request > d->dma_requests) | ||
656 | return NULL; | ||
657 | |||
658 | return dma_get_slave_channel(&(d->chans[request].vc.chan)); | ||
659 | } | ||
660 | |||
661 | static int k3_dma_probe(struct platform_device *op) | ||
662 | { | ||
663 | struct k3_dma_dev *d; | ||
664 | const struct of_device_id *of_id; | ||
665 | struct resource *iores; | ||
666 | int i, ret, irq = 0; | ||
667 | |||
668 | iores = platform_get_resource(op, IORESOURCE_MEM, 0); | ||
669 | if (!iores) | ||
670 | return -EINVAL; | ||
671 | |||
672 | d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL); | ||
673 | if (!d) | ||
674 | return -ENOMEM; | ||
675 | |||
676 | d->base = devm_ioremap_resource(&op->dev, iores); | ||
677 | if (IS_ERR(d->base)) | ||
678 | return PTR_ERR(d->base); | ||
679 | |||
680 | of_id = of_match_device(k3_pdma_dt_ids, &op->dev); | ||
681 | if (of_id) { | ||
682 | of_property_read_u32((&op->dev)->of_node, | ||
683 | "dma-channels", &d->dma_channels); | ||
684 | of_property_read_u32((&op->dev)->of_node, | ||
685 | "dma-requests", &d->dma_requests); | ||
686 | } | ||
687 | |||
688 | d->clk = devm_clk_get(&op->dev, NULL); | ||
689 | if (IS_ERR(d->clk)) { | ||
690 | dev_err(&op->dev, "no dma clk\n"); | ||
691 | return PTR_ERR(d->clk); | ||
692 | } | ||
693 | |||
694 | irq = platform_get_irq(op, 0); | ||
695 | ret = devm_request_irq(&op->dev, irq, | ||
696 | k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d); | ||
697 | if (ret) | ||
698 | return ret; | ||
699 | |||
700 | /* init phy channel */ | ||
701 | d->phy = devm_kzalloc(&op->dev, | ||
702 | d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL); | ||
703 | if (d->phy == NULL) | ||
704 | return -ENOMEM; | ||
705 | |||
706 | for (i = 0; i < d->dma_channels; i++) { | ||
707 | struct k3_dma_phy *p = &d->phy[i]; | ||
708 | |||
709 | p->idx = i; | ||
710 | p->base = d->base + i * 0x40; | ||
711 | } | ||
712 | |||
713 | INIT_LIST_HEAD(&d->slave.channels); | ||
714 | dma_cap_set(DMA_SLAVE, d->slave.cap_mask); | ||
715 | dma_cap_set(DMA_MEMCPY, d->slave.cap_mask); | ||
716 | d->slave.dev = &op->dev; | ||
717 | d->slave.device_alloc_chan_resources = k3_dma_alloc_chan_resources; | ||
718 | d->slave.device_free_chan_resources = k3_dma_free_chan_resources; | ||
719 | d->slave.device_tx_status = k3_dma_tx_status; | ||
720 | d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy; | ||
721 | d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg; | ||
722 | d->slave.device_issue_pending = k3_dma_issue_pending; | ||
723 | d->slave.device_control = k3_dma_control; | ||
724 | d->slave.copy_align = DMA_ALIGN; | ||
725 | d->slave.chancnt = d->dma_requests; | ||
726 | |||
727 | /* init virtual channel */ | ||
728 | d->chans = devm_kzalloc(&op->dev, | ||
729 | d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL); | ||
730 | if (d->chans == NULL) | ||
731 | return -ENOMEM; | ||
732 | |||
733 | for (i = 0; i < d->dma_requests; i++) { | ||
734 | struct k3_dma_chan *c = &d->chans[i]; | ||
735 | |||
736 | c->status = DMA_IN_PROGRESS; | ||
737 | INIT_LIST_HEAD(&c->node); | ||
738 | c->vc.desc_free = k3_dma_free_desc; | ||
739 | vchan_init(&c->vc, &d->slave); | ||
740 | } | ||
741 | |||
742 | /* Enable clock before accessing registers */ | ||
743 | ret = clk_prepare_enable(d->clk); | ||
744 | if (ret < 0) { | ||
745 | dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret); | ||
746 | return ret; | ||
747 | } | ||
748 | |||
749 | k3_dma_enable_dma(d, true); | ||
750 | |||
751 | ret = dma_async_device_register(&d->slave); | ||
752 | if (ret) | ||
753 | return ret; | ||
754 | |||
755 | ret = of_dma_controller_register((&op->dev)->of_node, | ||
756 | k3_of_dma_simple_xlate, d); | ||
757 | if (ret) | ||
758 | goto of_dma_register_fail; | ||
759 | |||
760 | spin_lock_init(&d->lock); | ||
761 | INIT_LIST_HEAD(&d->chan_pending); | ||
762 | tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d); | ||
763 | platform_set_drvdata(op, d); | ||
764 | dev_info(&op->dev, "initialized\n"); | ||
765 | |||
766 | return 0; | ||
767 | |||
768 | of_dma_register_fail: | ||
769 | dma_async_device_unregister(&d->slave); | ||
770 | return ret; | ||
771 | } | ||
772 | |||
773 | static int k3_dma_remove(struct platform_device *op) | ||
774 | { | ||
775 | struct k3_dma_chan *c, *cn; | ||
776 | struct k3_dma_dev *d = platform_get_drvdata(op); | ||
777 | |||
778 | dma_async_device_unregister(&d->slave); | ||
779 | of_dma_controller_free((&op->dev)->of_node); | ||
780 | |||
781 | list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) { | ||
782 | list_del(&c->vc.chan.device_node); | ||
783 | tasklet_kill(&c->vc.task); | ||
784 | } | ||
785 | tasklet_kill(&d->task); | ||
786 | clk_disable_unprepare(d->clk); | ||
787 | return 0; | ||
788 | } | ||
789 | |||
790 | static int k3_dma_suspend(struct device *dev) | ||
791 | { | ||
792 | struct k3_dma_dev *d = dev_get_drvdata(dev); | ||
793 | u32 stat = 0; | ||
794 | |||
795 | stat = k3_dma_get_chan_stat(d); | ||
796 | if (stat) { | ||
797 | dev_warn(d->slave.dev, | ||
798 | "chan %d is running, failed to suspend\n", stat); | ||
799 | return -1; | ||
800 | } | ||
801 | k3_dma_enable_dma(d, false); | ||
802 | clk_disable_unprepare(d->clk); | ||
803 | return 0; | ||
804 | } | ||
805 | |||
806 | static int k3_dma_resume(struct device *dev) | ||
807 | { | ||
808 | struct k3_dma_dev *d = dev_get_drvdata(dev); | ||
809 | int ret = 0; | ||
810 | |||
811 | ret = clk_prepare_enable(d->clk); | ||
812 | if (ret < 0) { | ||
813 | dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret); | ||
814 | return ret; | ||
815 | } | ||
816 | k3_dma_enable_dma(d, true); | ||
817 | return 0; | ||
818 | } | ||
819 | |||
820 | SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume); | ||
821 | |||
822 | static struct platform_driver k3_pdma_driver = { | ||
823 | .driver = { | ||
824 | .name = DRIVER_NAME, | ||
825 | .owner = THIS_MODULE, | ||
826 | .pm = &k3_dma_pmops, | ||
827 | .of_match_table = k3_pdma_dt_ids, | ||
828 | }, | ||
829 | .probe = k3_dma_probe, | ||
830 | .remove = k3_dma_remove, | ||
831 | }; | ||
832 | |||
833 | module_platform_driver(k3_pdma_driver); | ||
834 | |||
835 | MODULE_DESCRIPTION("Hisilicon k3 DMA Driver"); | ||
836 | MODULE_ALIAS("platform:k3dma"); | ||
837 | MODULE_LICENSE("GPL v2"); | ||
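The new k3dma driver registers k3_of_dma_simple_xlate() so clients can look up its channels through the generic device-tree DMA helpers. A minimal consumer sketch, not part of this patch; the "rx" request name, FIFO address, burst size and buffer are illustrative assumptions only:

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	static int example_start_rx(struct device *dev, dma_addr_t buf, size_t len)
	{
		struct dma_chan *chan;
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_MEM,
			.src_addr	= 0xf7000000,	/* assumed peripheral FIFO address */
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 8,
		};
		struct dma_async_tx_descriptor *desc;

		/* matched against the client's "dmas"/"dma-names" DT properties */
		chan = dma_request_slave_channel(dev, "rx");
		if (!chan)
			return -ENODEV;

		if (dmaengine_slave_config(chan, &cfg)) {
			dma_release_channel(chan);
			return -EINVAL;
		}

		desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
						   DMA_PREP_INTERRUPT);
		if (!desc) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}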
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index c26699f9c4df..ff8d7827f8cb 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c | |||
@@ -18,7 +18,9 @@ | |||
18 | #include <linux/platform_data/mmp_dma.h> | 18 | #include <linux/platform_data/mmp_dma.h> |
19 | #include <linux/dmapool.h> | 19 | #include <linux/dmapool.h> |
20 | #include <linux/of_device.h> | 20 | #include <linux/of_device.h> |
21 | #include <linux/of_dma.h> | ||
21 | #include <linux/of.h> | 22 | #include <linux/of.h> |
23 | #include <linux/dma/mmp-pdma.h> | ||
22 | 24 | ||
23 | #include "dmaengine.h" | 25 | #include "dmaengine.h" |
24 | 26 | ||
@@ -47,6 +49,8 @@ | |||
47 | #define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ | 49 | #define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ |
48 | #define DCSR_EORINTR (1 << 9) /* The end of Receive */ | 50 | #define DCSR_EORINTR (1 << 9) /* The end of Receive */ |
49 | 51 | ||
52 | #define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + \ | ||
53 | (((n) & 0x3f) << 2)) | ||
50 | #define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */ | 54 | #define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */ |
51 | #define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ | 55 | #define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ |
52 | 56 | ||
@@ -69,7 +73,7 @@ | |||
69 | #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ | 73 | #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ |
70 | 74 | ||
71 | #define PDMA_ALIGNMENT 3 | 75 | #define PDMA_ALIGNMENT 3 |
72 | #define PDMA_MAX_DESC_BYTES 0x1000 | 76 | #define PDMA_MAX_DESC_BYTES DCMD_LENGTH |
73 | 77 | ||
74 | struct mmp_pdma_desc_hw { | 78 | struct mmp_pdma_desc_hw { |
75 | u32 ddadr; /* Points to the next descriptor + flags */ | 79 | u32 ddadr; /* Points to the next descriptor + flags */ |
@@ -94,6 +98,9 @@ struct mmp_pdma_chan { | |||
94 | struct mmp_pdma_phy *phy; | 98 | struct mmp_pdma_phy *phy; |
95 | enum dma_transfer_direction dir; | 99 | enum dma_transfer_direction dir; |
96 | 100 | ||
101 | struct mmp_pdma_desc_sw *cyclic_first; /* first desc_sw if channel | ||
102 | * is in cyclic mode */ | ||
103 | |||
97 | /* channel's basic info */ | 104 | /* channel's basic info */ |
98 | struct tasklet_struct tasklet; | 105 | struct tasklet_struct tasklet; |
99 | u32 dcmd; | 106 | u32 dcmd; |
@@ -105,6 +112,7 @@ struct mmp_pdma_chan { | |||
105 | struct list_head chain_pending; /* Link descriptors queue for pending */ | 112 | struct list_head chain_pending; /* Link descriptors queue for pending */ |
106 | struct list_head chain_running; /* Link descriptors queue for running */ | 113 | struct list_head chain_running; /* Link descriptors queue for running */ |
107 | bool idle; /* channel state machine */ | 114 | bool idle; /* channel state machine */ |
115 | bool byte_align; | ||
108 | 116 | ||
109 | struct dma_pool *desc_pool; /* Descriptors pool */ | 117 | struct dma_pool *desc_pool; /* Descriptors pool */ |
110 | }; | 118 | }; |
@@ -121,6 +129,7 @@ struct mmp_pdma_device { | |||
121 | struct device *dev; | 129 | struct device *dev; |
122 | struct dma_device device; | 130 | struct dma_device device; |
123 | struct mmp_pdma_phy *phy; | 131 | struct mmp_pdma_phy *phy; |
132 | spinlock_t phy_lock; /* protect alloc/free phy channels */ | ||
124 | }; | 133 | }; |
125 | 134 | ||
126 | #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx) | 135 | #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx) |
@@ -137,15 +146,21 @@ static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) | |||
137 | 146 | ||
138 | static void enable_chan(struct mmp_pdma_phy *phy) | 147 | static void enable_chan(struct mmp_pdma_phy *phy) |
139 | { | 148 | { |
140 | u32 reg; | 149 | u32 reg, dalgn; |
141 | 150 | ||
142 | if (!phy->vchan) | 151 | if (!phy->vchan) |
143 | return; | 152 | return; |
144 | 153 | ||
145 | reg = phy->vchan->drcmr; | 154 | reg = DRCMR(phy->vchan->drcmr); |
146 | reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2); | ||
147 | writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); | 155 | writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); |
148 | 156 | ||
157 | dalgn = readl(phy->base + DALGN); | ||
158 | if (phy->vchan->byte_align) | ||
159 | dalgn |= 1 << phy->idx; | ||
160 | else | ||
161 | dalgn &= ~(1 << phy->idx); | ||
162 | writel(dalgn, phy->base + DALGN); | ||
163 | |||
149 | reg = (phy->idx << 2) + DCSR; | 164 | reg = (phy->idx << 2) + DCSR; |
150 | writel(readl(phy->base + reg) | DCSR_RUN, | 165 | writel(readl(phy->base + reg) | DCSR_RUN, |
151 | phy->base + reg); | 166 | phy->base + reg); |
@@ -218,7 +233,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan) | |||
218 | { | 233 | { |
219 | int prio, i; | 234 | int prio, i; |
220 | struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); | 235 | struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); |
221 | struct mmp_pdma_phy *phy; | 236 | struct mmp_pdma_phy *phy, *found = NULL; |
237 | unsigned long flags; | ||
222 | 238 | ||
223 | /* | 239 | /* |
224 | * dma channel priorities | 240 | * dma channel priorities |
@@ -227,6 +243,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan) | |||
227 | * ch 8 - 11, 24 - 27 <--> (2) | 243 | * ch 8 - 11, 24 - 27 <--> (2) |
228 | * ch 12 - 15, 28 - 31 <--> (3) | 244 | * ch 12 - 15, 28 - 31 <--> (3) |
229 | */ | 245 | */ |
246 | |||
247 | spin_lock_irqsave(&pdev->phy_lock, flags); | ||
230 | for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) { | 248 | for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) { |
231 | for (i = 0; i < pdev->dma_channels; i++) { | 249 | for (i = 0; i < pdev->dma_channels; i++) { |
232 | if (prio != ((i & 0xf) >> 2)) | 250 | if (prio != ((i & 0xf) >> 2)) |
@@ -234,31 +252,34 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan) | |||
234 | phy = &pdev->phy[i]; | 252 | phy = &pdev->phy[i]; |
235 | if (!phy->vchan) { | 253 | if (!phy->vchan) { |
236 | phy->vchan = pchan; | 254 | phy->vchan = pchan; |
237 | return phy; | 255 | found = phy; |
256 | goto out_unlock; | ||
238 | } | 257 | } |
239 | } | 258 | } |
240 | } | 259 | } |
241 | 260 | ||
242 | return NULL; | 261 | out_unlock: |
262 | spin_unlock_irqrestore(&pdev->phy_lock, flags); | ||
263 | return found; | ||
243 | } | 264 | } |
244 | 265 | ||
245 | /* desc->tx_list ==> pending list */ | 266 | static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan) |
246 | static void append_pending_queue(struct mmp_pdma_chan *chan, | ||
247 | struct mmp_pdma_desc_sw *desc) | ||
248 | { | 267 | { |
249 | struct mmp_pdma_desc_sw *tail = | 268 | struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); |
250 | to_mmp_pdma_desc(chan->chain_pending.prev); | 269 | unsigned long flags; |
270 | u32 reg; | ||
251 | 271 | ||
252 | if (list_empty(&chan->chain_pending)) | 272 | if (!pchan->phy) |
253 | goto out_splice; | 273 | return; |
254 | 274 | ||
255 | /* one irq per queue, even appended */ | 275 | /* clear the channel mapping in DRCMR */ |
256 | tail->desc.ddadr = desc->async_tx.phys; | 276 | reg = DRCMR(pchan->phy->vchan->drcmr); |
257 | tail->desc.dcmd &= ~DCMD_ENDIRQEN; | 277 | writel(0, pchan->phy->base + reg); |
258 | 278 | ||
259 | /* softly link to pending list */ | 279 | spin_lock_irqsave(&pdev->phy_lock, flags); |
260 | out_splice: | 280 | pchan->phy->vchan = NULL; |
261 | list_splice_tail_init(&desc->tx_list, &chan->chain_pending); | 281 | pchan->phy = NULL; |
282 | spin_unlock_irqrestore(&pdev->phy_lock, flags); | ||
262 | } | 283 | } |
263 | 284 | ||
264 | /** | 285 | /** |
@@ -277,10 +298,7 @@ static void start_pending_queue(struct mmp_pdma_chan *chan) | |||
277 | 298 | ||
278 | if (list_empty(&chan->chain_pending)) { | 299 | if (list_empty(&chan->chain_pending)) { |
279 | /* chance to re-fetch phy channel with higher prio */ | 300 | /* chance to re-fetch phy channel with higher prio */ |
280 | if (chan->phy) { | 301 | mmp_pdma_free_phy(chan); |
281 | chan->phy->vchan = NULL; | ||
282 | chan->phy = NULL; | ||
283 | } | ||
284 | dev_dbg(chan->dev, "no pending list\n"); | 302 | dev_dbg(chan->dev, "no pending list\n"); |
285 | return; | 303 | return; |
286 | } | 304 | } |
@@ -326,14 +344,16 @@ static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
326 | cookie = dma_cookie_assign(&child->async_tx); | 344 | cookie = dma_cookie_assign(&child->async_tx); |
327 | } | 345 | } |
328 | 346 | ||
329 | append_pending_queue(chan, desc); | 347 | /* softly link to pending list - desc->tx_list ==> pending list */ |
348 | list_splice_tail_init(&desc->tx_list, &chan->chain_pending); | ||
330 | 349 | ||
331 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 350 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
332 | 351 | ||
333 | return cookie; | 352 | return cookie; |
334 | } | 353 | } |
335 | 354 | ||
336 | struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan) | 355 | static struct mmp_pdma_desc_sw * |
356 | mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan) | ||
337 | { | 357 | { |
338 | struct mmp_pdma_desc_sw *desc; | 358 | struct mmp_pdma_desc_sw *desc; |
339 | dma_addr_t pdesc; | 359 | dma_addr_t pdesc; |
@@ -377,10 +397,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) | |||
377 | dev_err(chan->dev, "unable to allocate descriptor pool\n"); | 397 | dev_err(chan->dev, "unable to allocate descriptor pool\n"); |
378 | return -ENOMEM; | 398 | return -ENOMEM; |
379 | } | 399 | } |
380 | if (chan->phy) { | 400 | mmp_pdma_free_phy(chan); |
381 | chan->phy->vchan = NULL; | ||
382 | chan->phy = NULL; | ||
383 | } | ||
384 | chan->idle = true; | 401 | chan->idle = true; |
385 | chan->dev_addr = 0; | 402 | chan->dev_addr = 0; |
386 | return 1; | 403 | return 1; |
@@ -411,10 +428,7 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan) | |||
411 | chan->desc_pool = NULL; | 428 | chan->desc_pool = NULL; |
412 | chan->idle = true; | 429 | chan->idle = true; |
413 | chan->dev_addr = 0; | 430 | chan->dev_addr = 0; |
414 | if (chan->phy) { | 431 | mmp_pdma_free_phy(chan); |
415 | chan->phy->vchan = NULL; | ||
416 | chan->phy = NULL; | ||
417 | } | ||
418 | return; | 432 | return; |
419 | } | 433 | } |
420 | 434 | ||
@@ -434,6 +448,7 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan, | |||
434 | return NULL; | 448 | return NULL; |
435 | 449 | ||
436 | chan = to_mmp_pdma_chan(dchan); | 450 | chan = to_mmp_pdma_chan(dchan); |
451 | chan->byte_align = false; | ||
437 | 452 | ||
438 | if (!chan->dir) { | 453 | if (!chan->dir) { |
439 | chan->dir = DMA_MEM_TO_MEM; | 454 | chan->dir = DMA_MEM_TO_MEM; |
@@ -450,6 +465,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan, | |||
450 | } | 465 | } |
451 | 466 | ||
452 | copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES); | 467 | copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES); |
468 | if (dma_src & 0x7 || dma_dst & 0x7) | ||
469 | chan->byte_align = true; | ||
453 | 470 | ||
454 | new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); | 471 | new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); |
455 | new->desc.dsadr = dma_src; | 472 | new->desc.dsadr = dma_src; |
@@ -486,6 +503,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan, | |||
486 | new->desc.ddadr = DDADR_STOP; | 503 | new->desc.ddadr = DDADR_STOP; |
487 | new->desc.dcmd |= DCMD_ENDIRQEN; | 504 | new->desc.dcmd |= DCMD_ENDIRQEN; |
488 | 505 | ||
506 | chan->cyclic_first = NULL; | ||
507 | |||
489 | return &first->async_tx; | 508 | return &first->async_tx; |
490 | 509 | ||
491 | fail: | 510 | fail: |
@@ -509,12 +528,16 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | |||
509 | if ((sgl == NULL) || (sg_len == 0)) | 528 | if ((sgl == NULL) || (sg_len == 0)) |
510 | return NULL; | 529 | return NULL; |
511 | 530 | ||
531 | chan->byte_align = false; | ||
532 | |||
512 | for_each_sg(sgl, sg, sg_len, i) { | 533 | for_each_sg(sgl, sg, sg_len, i) { |
513 | addr = sg_dma_address(sg); | 534 | addr = sg_dma_address(sg); |
514 | avail = sg_dma_len(sgl); | 535 | avail = sg_dma_len(sgl); |
515 | 536 | ||
516 | do { | 537 | do { |
517 | len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES); | 538 | len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES); |
539 | if (addr & 0x7) | ||
540 | chan->byte_align = true; | ||
518 | 541 | ||
519 | /* allocate and populate the descriptor */ | 542 | /* allocate and populate the descriptor */ |
520 | new = mmp_pdma_alloc_descriptor(chan); | 543 | new = mmp_pdma_alloc_descriptor(chan); |
@@ -557,6 +580,94 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | |||
557 | new->desc.ddadr = DDADR_STOP; | 580 | new->desc.ddadr = DDADR_STOP; |
558 | new->desc.dcmd |= DCMD_ENDIRQEN; | 581 | new->desc.dcmd |= DCMD_ENDIRQEN; |
559 | 582 | ||
583 | chan->dir = dir; | ||
584 | chan->cyclic_first = NULL; | ||
585 | |||
586 | return &first->async_tx; | ||
587 | |||
588 | fail: | ||
589 | if (first) | ||
590 | mmp_pdma_free_desc_list(chan, &first->tx_list); | ||
591 | return NULL; | ||
592 | } | ||
593 | |||
594 | static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic( | ||
595 | struct dma_chan *dchan, dma_addr_t buf_addr, size_t len, | ||
596 | size_t period_len, enum dma_transfer_direction direction, | ||
597 | unsigned long flags, void *context) | ||
598 | { | ||
599 | struct mmp_pdma_chan *chan; | ||
600 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; | ||
601 | dma_addr_t dma_src, dma_dst; | ||
602 | |||
603 | if (!dchan || !len || !period_len) | ||
604 | return NULL; | ||
605 | |||
606 | /* the buffer length must be a multiple of period_len */ | ||
607 | if (len % period_len != 0) | ||
608 | return NULL; | ||
609 | |||
610 | if (period_len > PDMA_MAX_DESC_BYTES) | ||
611 | return NULL; | ||
612 | |||
613 | chan = to_mmp_pdma_chan(dchan); | ||
614 | |||
615 | switch (direction) { | ||
616 | case DMA_MEM_TO_DEV: | ||
617 | dma_src = buf_addr; | ||
618 | dma_dst = chan->dev_addr; | ||
619 | break; | ||
620 | case DMA_DEV_TO_MEM: | ||
621 | dma_dst = buf_addr; | ||
622 | dma_src = chan->dev_addr; | ||
623 | break; | ||
624 | default: | ||
625 | dev_err(chan->dev, "Unsupported direction for cyclic DMA\n"); | ||
626 | return NULL; | ||
627 | } | ||
628 | |||
629 | chan->dir = direction; | ||
630 | |||
631 | do { | ||
632 | /* Allocate the link descriptor from DMA pool */ | ||
633 | new = mmp_pdma_alloc_descriptor(chan); | ||
634 | if (!new) { | ||
635 | dev_err(chan->dev, "no memory for desc\n"); | ||
636 | goto fail; | ||
637 | } | ||
638 | |||
639 | new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN | | ||
640 | (DCMD_LENGTH & period_len); | ||
641 | new->desc.dsadr = dma_src; | ||
642 | new->desc.dtadr = dma_dst; | ||
643 | |||
644 | if (!first) | ||
645 | first = new; | ||
646 | else | ||
647 | prev->desc.ddadr = new->async_tx.phys; | ||
648 | |||
649 | new->async_tx.cookie = 0; | ||
650 | async_tx_ack(&new->async_tx); | ||
651 | |||
652 | prev = new; | ||
653 | len -= period_len; | ||
654 | |||
655 | if (chan->dir == DMA_MEM_TO_DEV) | ||
656 | dma_src += period_len; | ||
657 | else | ||
658 | dma_dst += period_len; | ||
659 | |||
660 | /* Insert the link descriptor to the LD ring */ | ||
661 | list_add_tail(&new->node, &first->tx_list); | ||
662 | } while (len); | ||
663 | |||
664 | first->async_tx.flags = flags; /* client is in control of this ack */ | ||
665 | first->async_tx.cookie = -EBUSY; | ||
666 | |||
667 | /* make the cyclic link */ | ||
668 | new->desc.ddadr = first->async_tx.phys; | ||
669 | chan->cyclic_first = first; | ||
670 | |||
560 | return &first->async_tx; | 671 | return &first->async_tx; |
561 | 672 | ||
562 | fail: | 673 | fail: |
@@ -581,10 +692,7 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | |||
581 | switch (cmd) { | 692 | switch (cmd) { |
582 | case DMA_TERMINATE_ALL: | 693 | case DMA_TERMINATE_ALL: |
583 | disable_chan(chan->phy); | 694 | disable_chan(chan->phy); |
584 | if (chan->phy) { | 695 | mmp_pdma_free_phy(chan); |
585 | chan->phy->vchan = NULL; | ||
586 | chan->phy = NULL; | ||
587 | } | ||
588 | spin_lock_irqsave(&chan->desc_lock, flags); | 696 | spin_lock_irqsave(&chan->desc_lock, flags); |
589 | mmp_pdma_free_desc_list(chan, &chan->chain_pending); | 697 | mmp_pdma_free_desc_list(chan, &chan->chain_pending); |
590 | mmp_pdma_free_desc_list(chan, &chan->chain_running); | 698 | mmp_pdma_free_desc_list(chan, &chan->chain_running); |
@@ -619,8 +727,13 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | |||
619 | chan->dcmd |= DCMD_BURST32; | 727 | chan->dcmd |= DCMD_BURST32; |
620 | 728 | ||
621 | chan->dir = cfg->direction; | 729 | chan->dir = cfg->direction; |
622 | chan->drcmr = cfg->slave_id; | ||
623 | chan->dev_addr = addr; | 730 | chan->dev_addr = addr; |
731 | /* FIXME: drivers should be ported over to use the filter | ||
732 | * function. Once that's done, the following two lines can | ||
733 | * be removed. | ||
734 | */ | ||
735 | if (cfg->slave_id) | ||
736 | chan->drcmr = cfg->slave_id; | ||
624 | break; | 737 | break; |
625 | default: | 738 | default: |
626 | return -ENOSYS; | 739 | return -ENOSYS; |
@@ -632,15 +745,7 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | |||
632 | static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, | 745 | static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, |
633 | dma_cookie_t cookie, struct dma_tx_state *txstate) | 746 | dma_cookie_t cookie, struct dma_tx_state *txstate) |
634 | { | 747 | { |
635 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | 748 | return dma_cookie_status(dchan, cookie, txstate); |
636 | enum dma_status ret; | ||
637 | unsigned long flags; | ||
638 | |||
639 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
640 | ret = dma_cookie_status(dchan, cookie, txstate); | ||
641 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
642 | |||
643 | return ret; | ||
644 | } | 749 | } |
645 | 750 | ||
646 | /** | 751 | /** |
@@ -669,29 +774,51 @@ static void dma_do_tasklet(unsigned long data) | |||
669 | LIST_HEAD(chain_cleanup); | 774 | LIST_HEAD(chain_cleanup); |
670 | unsigned long flags; | 775 | unsigned long flags; |
671 | 776 | ||
672 | /* submit pending list; callback for each desc; free desc */ | 777 | if (chan->cyclic_first) { |
778 | dma_async_tx_callback cb = NULL; | ||
779 | void *cb_data = NULL; | ||
673 | 780 | ||
674 | spin_lock_irqsave(&chan->desc_lock, flags); | 781 | spin_lock_irqsave(&chan->desc_lock, flags); |
782 | desc = chan->cyclic_first; | ||
783 | cb = desc->async_tx.callback; | ||
784 | cb_data = desc->async_tx.callback_param; | ||
785 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
786 | |||
787 | if (cb) | ||
788 | cb(cb_data); | ||
675 | 789 | ||
676 | /* update the cookie if we have some descriptors to cleanup */ | 790 | return; |
677 | if (!list_empty(&chan->chain_running)) { | 791 | } |
678 | dma_cookie_t cookie; | ||
679 | 792 | ||
680 | desc = to_mmp_pdma_desc(chan->chain_running.prev); | 793 | /* submit pending list; callback for each desc; free desc */ |
681 | cookie = desc->async_tx.cookie; | 794 | spin_lock_irqsave(&chan->desc_lock, flags); |
682 | dma_cookie_complete(&desc->async_tx); | ||
683 | 795 | ||
684 | dev_dbg(chan->dev, "completed_cookie=%d\n", cookie); | 796 | list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) { |
797 | /* | ||
798 | * move the descriptors to a temporary list so we can drop | ||
799 | * the lock during the entire cleanup operation | ||
800 | */ | ||
801 | list_del(&desc->node); | ||
802 | list_add(&desc->node, &chain_cleanup); | ||
803 | |||
804 | /* | ||
805 | * Look for the first list entry which has the ENDIRQEN flag | ||
806 | * set. That is the descriptor we got an interrupt for, so | ||
807 | * complete that transaction and its cookie. | ||
808 | */ | ||
809 | if (desc->desc.dcmd & DCMD_ENDIRQEN) { | ||
810 | dma_cookie_t cookie = desc->async_tx.cookie; | ||
811 | dma_cookie_complete(&desc->async_tx); | ||
812 | dev_dbg(chan->dev, "completed_cookie=%d\n", cookie); | ||
813 | break; | ||
814 | } | ||
685 | } | 815 | } |
686 | 816 | ||
687 | /* | 817 | /* |
688 | * move the descriptors to a temporary list so we can drop the lock | 818 | * The hardware is idle and ready for more when the |
689 | * during the entire cleanup operation | 819 | * chain_running list is empty. |
690 | */ | 820 | */ |
691 | list_splice_tail_init(&chan->chain_running, &chain_cleanup); | 821 | chan->idle = list_empty(&chan->chain_running); |
692 | |||
693 | /* the hardware is now idle and ready for more */ | ||
694 | chan->idle = true; | ||
695 | 822 | ||
696 | /* Start any pending transactions automatically */ | 823 | /* Start any pending transactions automatically */ |
697 | start_pending_queue(chan); | 824 | start_pending_queue(chan); |
@@ -763,6 +890,39 @@ static struct of_device_id mmp_pdma_dt_ids[] = { | |||
763 | }; | 890 | }; |
764 | MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids); | 891 | MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids); |
765 | 892 | ||
893 | static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec, | ||
894 | struct of_dma *ofdma) | ||
895 | { | ||
896 | struct mmp_pdma_device *d = ofdma->of_dma_data; | ||
897 | struct dma_chan *chan, *candidate; | ||
898 | |||
899 | retry: | ||
900 | candidate = NULL; | ||
901 | |||
902 | /* walk the list of channels registered with the current instance and | ||
903 | * find one that is currently unused */ | ||
904 | list_for_each_entry(chan, &d->device.channels, device_node) | ||
905 | if (chan->client_count == 0) { | ||
906 | candidate = chan; | ||
907 | break; | ||
908 | } | ||
909 | |||
910 | if (!candidate) | ||
911 | return NULL; | ||
912 | |||
913 | /* dma_get_slave_channel will return NULL if we lost a race between | ||
914 | * the lookup and the reservation */ | ||
915 | chan = dma_get_slave_channel(candidate); | ||
916 | |||
917 | if (chan) { | ||
918 | struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan); | ||
919 | c->drcmr = dma_spec->args[0]; | ||
920 | return chan; | ||
921 | } | ||
922 | |||
923 | goto retry; | ||
924 | } | ||
925 | |||
766 | static int mmp_pdma_probe(struct platform_device *op) | 926 | static int mmp_pdma_probe(struct platform_device *op) |
767 | { | 927 | { |
768 | struct mmp_pdma_device *pdev; | 928 | struct mmp_pdma_device *pdev; |
@@ -777,10 +937,9 @@ static int mmp_pdma_probe(struct platform_device *op) | |||
777 | return -ENOMEM; | 937 | return -ENOMEM; |
778 | pdev->dev = &op->dev; | 938 | pdev->dev = &op->dev; |
779 | 939 | ||
780 | iores = platform_get_resource(op, IORESOURCE_MEM, 0); | 940 | spin_lock_init(&pdev->phy_lock); |
781 | if (!iores) | ||
782 | return -EINVAL; | ||
783 | 941 | ||
942 | iores = platform_get_resource(op, IORESOURCE_MEM, 0); | ||
784 | pdev->base = devm_ioremap_resource(pdev->dev, iores); | 943 | pdev->base = devm_ioremap_resource(pdev->dev, iores); |
785 | if (IS_ERR(pdev->base)) | 944 | if (IS_ERR(pdev->base)) |
786 | return PTR_ERR(pdev->base); | 945 | return PTR_ERR(pdev->base); |
@@ -825,13 +984,15 @@ static int mmp_pdma_probe(struct platform_device *op) | |||
825 | 984 | ||
826 | dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); | 985 | dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); |
827 | dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask); | 986 | dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask); |
828 | dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); | 987 | dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask); |
988 | dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask); | ||
829 | pdev->device.dev = &op->dev; | 989 | pdev->device.dev = &op->dev; |
830 | pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources; | 990 | pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources; |
831 | pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources; | 991 | pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources; |
832 | pdev->device.device_tx_status = mmp_pdma_tx_status; | 992 | pdev->device.device_tx_status = mmp_pdma_tx_status; |
833 | pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy; | 993 | pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy; |
834 | pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; | 994 | pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; |
995 | pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic; | ||
835 | pdev->device.device_issue_pending = mmp_pdma_issue_pending; | 996 | pdev->device.device_issue_pending = mmp_pdma_issue_pending; |
836 | pdev->device.device_control = mmp_pdma_control; | 997 | pdev->device.device_control = mmp_pdma_control; |
837 | pdev->device.copy_align = PDMA_ALIGNMENT; | 998 | pdev->device.copy_align = PDMA_ALIGNMENT; |
@@ -847,7 +1008,17 @@ static int mmp_pdma_probe(struct platform_device *op) | |||
847 | return ret; | 1008 | return ret; |
848 | } | 1009 | } |
849 | 1010 | ||
850 | dev_info(pdev->device.dev, "initialized\n"); | 1011 | if (op->dev.of_node) { |
1012 | /* Device-tree DMA controller registration */ | ||
1013 | ret = of_dma_controller_register(op->dev.of_node, | ||
1014 | mmp_pdma_dma_xlate, pdev); | ||
1015 | if (ret < 0) { | ||
1016 | dev_err(&op->dev, "of_dma_controller_register failed\n"); | ||
1017 | return ret; | ||
1018 | } | ||
1019 | } | ||
1020 | |||
1021 | dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels); | ||
851 | return 0; | 1022 | return 0; |
852 | } | 1023 | } |
853 | 1024 | ||
@@ -867,6 +1038,19 @@ static struct platform_driver mmp_pdma_driver = { | |||
867 | .remove = mmp_pdma_remove, | 1038 | .remove = mmp_pdma_remove, |
868 | }; | 1039 | }; |
869 | 1040 | ||
1041 | bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param) | ||
1042 | { | ||
1043 | struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan); | ||
1044 | |||
1045 | if (chan->device->dev->driver != &mmp_pdma_driver.driver) | ||
1046 | return false; | ||
1047 | |||
1048 | c->drcmr = *(unsigned int *) param; | ||
1049 | |||
1050 | return true; | ||
1051 | } | ||
1052 | EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn); | ||
1053 | |||
870 | module_platform_driver(mmp_pdma_driver); | 1054 | module_platform_driver(mmp_pdma_driver); |
871 | 1055 | ||
872 | MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver"); | 1056 | MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver"); |
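The FIXME added in mmp_pdma_control() points clients at the newly exported mmp_pdma_filter_fn() instead of passing the DRCMR line through cfg->slave_id. A hedged sketch of the intended legacy (non-DT) usage; the request line number 43 is an illustrative assumption and the prototype comes from the <linux/dma/mmp-pdma.h> header added by this patch:

	dma_cap_mask_t mask;
	struct dma_chan *chan;
	unsigned int drcmr = 43;	/* assumed peripheral request line */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* the filter stores drcmr in the channel and claims it for this client */
	chan = dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
	if (!chan)
		return -ENODEV;
	/* ... dmaengine_slave_config() / prep / submit as usual ... */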
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 9b9366537d73..38cb517fb2eb 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c | |||
@@ -460,7 +460,8 @@ static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan, | |||
460 | { | 460 | { |
461 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | 461 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); |
462 | 462 | ||
463 | dma_set_residue(txstate, tdmac->buf_len - tdmac->pos); | 463 | dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, |
464 | tdmac->buf_len - tdmac->pos); | ||
464 | 465 | ||
465 | return tdmac->status; | 466 | return tdmac->status; |
466 | } | 467 | } |
@@ -549,9 +550,6 @@ static int mmp_tdma_probe(struct platform_device *pdev) | |||
549 | } | 550 | } |
550 | 551 | ||
551 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 552 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
552 | if (!iores) | ||
553 | return -EINVAL; | ||
554 | |||
555 | tdev->base = devm_ioremap_resource(&pdev->dev, iores); | 553 | tdev->base = devm_ioremap_resource(&pdev->dev, iores); |
556 | if (IS_ERR(tdev->base)) | 554 | if (IS_ERR(tdev->base)) |
557 | return PTR_ERR(tdev->base); | 555 | return PTR_ERR(tdev->base); |
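With mmp_tdma_tx_status() now filling in the cookie pair via dma_set_tx_state(), a client sees both completion state and the remaining byte count through the standard helper. A minimal sketch, assuming chan and cookie are already held by the caller:

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	/* state.residue reflects tdmac->buf_len - tdmac->pos at the time of the call */
	pr_debug("status %d, %u bytes left in the cyclic buffer\n", status, state.residue);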
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 2d956732aa3d..2fe435377333 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
@@ -556,15 +556,7 @@ static enum dma_status | |||
556 | mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | 556 | mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, |
557 | struct dma_tx_state *txstate) | 557 | struct dma_tx_state *txstate) |
558 | { | 558 | { |
559 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); | 559 | return dma_cookie_status(chan, cookie, txstate); |
560 | enum dma_status ret; | ||
561 | unsigned long flags; | ||
562 | |||
563 | spin_lock_irqsave(&mchan->lock, flags); | ||
564 | ret = dma_cookie_status(chan, cookie, txstate); | ||
565 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
566 | |||
567 | return ret; | ||
568 | } | 560 | } |
569 | 561 | ||
570 | /* Prepare descriptor for memory to memory copy */ | 562 | /* Prepare descriptor for memory to memory copy */ |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 0ec086d2b6a0..536dcb8ba5fd 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -654,7 +654,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
654 | 654 | ||
655 | dev_dbg(mv_chan_to_devp(mv_chan), | 655 | dev_dbg(mv_chan_to_devp(mv_chan), |
656 | "%s sw_desc %p async_tx %p\n", | 656 | "%s sw_desc %p async_tx %p\n", |
657 | __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0); | 657 | __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL); |
658 | 658 | ||
659 | return sw_desc ? &sw_desc->async_tx : NULL; | 659 | return sw_desc ? &sw_desc->async_tx : NULL; |
660 | } | 660 | } |
@@ -1171,7 +1171,7 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1171 | { | 1171 | { |
1172 | const struct mbus_dram_target_info *dram; | 1172 | const struct mbus_dram_target_info *dram; |
1173 | struct mv_xor_device *xordev; | 1173 | struct mv_xor_device *xordev; |
1174 | struct mv_xor_platform_data *pdata = pdev->dev.platform_data; | 1174 | struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev); |
1175 | struct resource *res; | 1175 | struct resource *res; |
1176 | int i, ret; | 1176 | int i, ret; |
1177 | 1177 | ||
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 719593002ab7..ccd13df841db 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/dmaengine.h> | 23 | #include <linux/dmaengine.h> |
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/fsl/mxs-dma.h> | ||
27 | #include <linux/stmp_device.h> | 26 | #include <linux/stmp_device.h> |
28 | #include <linux/of.h> | 27 | #include <linux/of.h> |
29 | #include <linux/of_device.h> | 28 | #include <linux/of_device.h> |
@@ -197,24 +196,6 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) | |||
197 | return container_of(chan, struct mxs_dma_chan, chan); | 196 | return container_of(chan, struct mxs_dma_chan, chan); |
198 | } | 197 | } |
199 | 198 | ||
200 | int mxs_dma_is_apbh(struct dma_chan *chan) | ||
201 | { | ||
202 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
203 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
204 | |||
205 | return dma_is_apbh(mxs_dma); | ||
206 | } | ||
207 | EXPORT_SYMBOL_GPL(mxs_dma_is_apbh); | ||
208 | |||
209 | int mxs_dma_is_apbx(struct dma_chan *chan) | ||
210 | { | ||
211 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
212 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
213 | |||
214 | return !dma_is_apbh(mxs_dma); | ||
215 | } | ||
216 | EXPORT_SYMBOL_GPL(mxs_dma_is_apbx); | ||
217 | |||
218 | static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) | 199 | static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) |
219 | { | 200 | { |
220 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 201 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
@@ -349,13 +330,9 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) | |||
349 | static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | 330 | static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) |
350 | { | 331 | { |
351 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 332 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
352 | struct mxs_dma_data *data = chan->private; | ||
353 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 333 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
354 | int ret; | 334 | int ret; |
355 | 335 | ||
356 | if (data) | ||
357 | mxs_chan->chan_irq = data->chan_irq; | ||
358 | |||
359 | mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, | 336 | mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, |
360 | CCW_BLOCK_SIZE, &mxs_chan->ccw_phys, | 337 | CCW_BLOCK_SIZE, &mxs_chan->ccw_phys, |
361 | GFP_KERNEL); | 338 | GFP_KERNEL); |
@@ -622,10 +599,8 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, | |||
622 | dma_cookie_t cookie, struct dma_tx_state *txstate) | 599 | dma_cookie_t cookie, struct dma_tx_state *txstate) |
623 | { | 600 | { |
624 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 601 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
625 | dma_cookie_t last_used; | ||
626 | 602 | ||
627 | last_used = chan->cookie; | 603 | dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0); |
628 | dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0); | ||
629 | 604 | ||
630 | return mxs_chan->status; | 605 | return mxs_chan->status; |
631 | } | 606 | } |
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 75334bdd2c56..0b88dd3d05f4 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c | |||
@@ -160,7 +160,8 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | |||
160 | 160 | ||
161 | count = of_property_count_strings(np, "dma-names"); | 161 | count = of_property_count_strings(np, "dma-names"); |
162 | if (count < 0) { | 162 | if (count < 0) { |
163 | pr_err("%s: dma-names property missing or empty\n", __func__); | 163 | pr_err("%s: dma-names property of node '%s' missing or empty\n", |
164 | __func__, np->full_name); | ||
164 | return NULL; | 165 | return NULL; |
165 | } | 166 | } |
166 | 167 | ||
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 0bbdea5059f3..61fdc54a3c88 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -564,14 +564,7 @@ static void pd_free_chan_resources(struct dma_chan *chan) | |||
564 | static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | 564 | static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie, |
565 | struct dma_tx_state *txstate) | 565 | struct dma_tx_state *txstate) |
566 | { | 566 | { |
567 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); | 567 | return dma_cookie_status(chan, cookie, txstate); |
568 | enum dma_status ret; | ||
569 | |||
570 | spin_lock_irq(&pd_chan->lock); | ||
571 | ret = dma_cookie_status(chan, cookie, txstate); | ||
572 | spin_unlock_irq(&pd_chan->lock); | ||
573 | |||
574 | return ret; | ||
575 | } | 568 | } |
576 | 569 | ||
577 | static void pd_issue_pending(struct dma_chan *chan) | 570 | static void pd_issue_pending(struct dma_chan *chan) |
@@ -1036,3 +1029,4 @@ MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH " | |||
1036 | "DMA controller driver"); | 1029 | "DMA controller driver"); |
1037 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); | 1030 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); |
1038 | MODULE_LICENSE("GPL v2"); | 1031 | MODULE_LICENSE("GPL v2"); |
1032 | MODULE_DEVICE_TABLE(pci, pch_dma_id_table); | ||
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index fa645d825009..a562d24d20bf 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -545,6 +545,8 @@ struct dma_pl330_chan { | |||
545 | 545 | ||
546 | /* List of to be xfered descriptors */ | 546 | /* List of to be xfered descriptors */ |
547 | struct list_head work_list; | 547 | struct list_head work_list; |
548 | /* List of completed descriptors */ | ||
549 | struct list_head completed_list; | ||
548 | 550 | ||
549 | /* Pointer to the DMAC that manages this channel, | 551 | /* Pointer to the DMAC that manages this channel, |
550 | * NULL if the channel is available to be acquired. | 552 | * NULL if the channel is available to be acquired. |
@@ -2198,66 +2200,6 @@ to_desc(struct dma_async_tx_descriptor *tx) | |||
2198 | return container_of(tx, struct dma_pl330_desc, txd); | 2200 | return container_of(tx, struct dma_pl330_desc, txd); |
2199 | } | 2201 | } |
2200 | 2202 | ||
2201 | static inline void free_desc_list(struct list_head *list) | ||
2202 | { | ||
2203 | struct dma_pl330_dmac *pdmac; | ||
2204 | struct dma_pl330_desc *desc; | ||
2205 | struct dma_pl330_chan *pch = NULL; | ||
2206 | unsigned long flags; | ||
2207 | |||
2208 | /* Finish off the work list */ | ||
2209 | list_for_each_entry(desc, list, node) { | ||
2210 | dma_async_tx_callback callback; | ||
2211 | void *param; | ||
2212 | |||
2213 | /* All desc in a list belong to same channel */ | ||
2214 | pch = desc->pchan; | ||
2215 | callback = desc->txd.callback; | ||
2216 | param = desc->txd.callback_param; | ||
2217 | |||
2218 | if (callback) | ||
2219 | callback(param); | ||
2220 | |||
2221 | desc->pchan = NULL; | ||
2222 | } | ||
2223 | |||
2224 | /* pch will be unset if list was empty */ | ||
2225 | if (!pch) | ||
2226 | return; | ||
2227 | |||
2228 | pdmac = pch->dmac; | ||
2229 | |||
2230 | spin_lock_irqsave(&pdmac->pool_lock, flags); | ||
2231 | list_splice_tail_init(list, &pdmac->desc_pool); | ||
2232 | spin_unlock_irqrestore(&pdmac->pool_lock, flags); | ||
2233 | } | ||
2234 | |||
2235 | static inline void handle_cyclic_desc_list(struct list_head *list) | ||
2236 | { | ||
2237 | struct dma_pl330_desc *desc; | ||
2238 | struct dma_pl330_chan *pch = NULL; | ||
2239 | unsigned long flags; | ||
2240 | |||
2241 | list_for_each_entry(desc, list, node) { | ||
2242 | dma_async_tx_callback callback; | ||
2243 | |||
2244 | /* Change status to reload it */ | ||
2245 | desc->status = PREP; | ||
2246 | pch = desc->pchan; | ||
2247 | callback = desc->txd.callback; | ||
2248 | if (callback) | ||
2249 | callback(desc->txd.callback_param); | ||
2250 | } | ||
2251 | |||
2252 | /* pch will be unset if list was empty */ | ||
2253 | if (!pch) | ||
2254 | return; | ||
2255 | |||
2256 | spin_lock_irqsave(&pch->lock, flags); | ||
2257 | list_splice_tail_init(list, &pch->work_list); | ||
2258 | spin_unlock_irqrestore(&pch->lock, flags); | ||
2259 | } | ||
2260 | |||
2261 | static inline void fill_queue(struct dma_pl330_chan *pch) | 2203 | static inline void fill_queue(struct dma_pl330_chan *pch) |
2262 | { | 2204 | { |
2263 | struct dma_pl330_desc *desc; | 2205 | struct dma_pl330_desc *desc; |
@@ -2291,7 +2233,6 @@ static void pl330_tasklet(unsigned long data) | |||
2291 | struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data; | 2233 | struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data; |
2292 | struct dma_pl330_desc *desc, *_dt; | 2234 | struct dma_pl330_desc *desc, *_dt; |
2293 | unsigned long flags; | 2235 | unsigned long flags; |
2294 | LIST_HEAD(list); | ||
2295 | 2236 | ||
2296 | spin_lock_irqsave(&pch->lock, flags); | 2237 | spin_lock_irqsave(&pch->lock, flags); |
2297 | 2238 | ||
@@ -2300,7 +2241,7 @@ static void pl330_tasklet(unsigned long data) | |||
2300 | if (desc->status == DONE) { | 2241 | if (desc->status == DONE) { |
2301 | if (!pch->cyclic) | 2242 | if (!pch->cyclic) |
2302 | dma_cookie_complete(&desc->txd); | 2243 | dma_cookie_complete(&desc->txd); |
2303 | list_move_tail(&desc->node, &list); | 2244 | list_move_tail(&desc->node, &pch->completed_list); |
2304 | } | 2245 | } |
2305 | 2246 | ||
2306 | /* Try to submit a req imm. next to the last completed cookie */ | 2247 | /* Try to submit a req imm. next to the last completed cookie */ |
@@ -2309,12 +2250,31 @@ static void pl330_tasklet(unsigned long data) | |||
2309 | /* Make sure the PL330 Channel thread is active */ | 2250 | /* Make sure the PL330 Channel thread is active */ |
2310 | pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START); | 2251 | pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START); |
2311 | 2252 | ||
2312 | spin_unlock_irqrestore(&pch->lock, flags); | 2253 | while (!list_empty(&pch->completed_list)) { |
2254 | dma_async_tx_callback callback; | ||
2255 | void *callback_param; | ||
2313 | 2256 | ||
2314 | if (pch->cyclic) | 2257 | desc = list_first_entry(&pch->completed_list, |
2315 | handle_cyclic_desc_list(&list); | 2258 | struct dma_pl330_desc, node); |
2316 | else | 2259 | |
2317 | free_desc_list(&list); | 2260 | callback = desc->txd.callback; |
2261 | callback_param = desc->txd.callback_param; | ||
2262 | |||
2263 | if (pch->cyclic) { | ||
2264 | desc->status = PREP; | ||
2265 | list_move_tail(&desc->node, &pch->work_list); | ||
2266 | } else { | ||
2267 | desc->status = FREE; | ||
2268 | list_move_tail(&desc->node, &pch->dmac->desc_pool); | ||
2269 | } | ||
2270 | |||
2271 | if (callback) { | ||
2272 | spin_unlock_irqrestore(&pch->lock, flags); | ||
2273 | callback(callback_param); | ||
2274 | spin_lock_irqsave(&pch->lock, flags); | ||
2275 | } | ||
2276 | } | ||
2277 | spin_unlock_irqrestore(&pch->lock, flags); | ||
2318 | } | 2278 | } |
2319 | 2279 | ||
2320 | static void dma_pl330_rqcb(void *token, enum pl330_op_err err) | 2280 | static void dma_pl330_rqcb(void *token, enum pl330_op_err err) |
@@ -2409,7 +2369,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) | |||
2409 | static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) | 2369 | static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) |
2410 | { | 2370 | { |
2411 | struct dma_pl330_chan *pch = to_pchan(chan); | 2371 | struct dma_pl330_chan *pch = to_pchan(chan); |
2412 | struct dma_pl330_desc *desc, *_dt; | 2372 | struct dma_pl330_desc *desc; |
2413 | unsigned long flags; | 2373 | unsigned long flags; |
2414 | struct dma_pl330_dmac *pdmac = pch->dmac; | 2374 | struct dma_pl330_dmac *pdmac = pch->dmac; |
2415 | struct dma_slave_config *slave_config; | 2375 | struct dma_slave_config *slave_config; |
@@ -2423,12 +2383,18 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned | |||
2423 | pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); | 2383 | pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); |
2424 | 2384 | ||
2425 | /* Mark all desc done */ | 2385 | /* Mark all desc done */ |
2426 | list_for_each_entry_safe(desc, _dt, &pch->work_list , node) { | 2386 | list_for_each_entry(desc, &pch->work_list , node) { |
2427 | desc->status = DONE; | 2387 | desc->status = FREE; |
2428 | list_move_tail(&desc->node, &list); | 2388 | dma_cookie_complete(&desc->txd); |
2429 | } | 2389 | } |
2430 | 2390 | ||
2431 | list_splice_tail_init(&list, &pdmac->desc_pool); | 2391 | list_for_each_entry(desc, &pch->completed_list , node) { |
2392 | desc->status = FREE; | ||
2393 | dma_cookie_complete(&desc->txd); | ||
2394 | } | ||
2395 | |||
2396 | list_splice_tail_init(&pch->work_list, &pdmac->desc_pool); | ||
2397 | list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool); | ||
2432 | spin_unlock_irqrestore(&pch->lock, flags); | 2398 | spin_unlock_irqrestore(&pch->lock, flags); |
2433 | break; | 2399 | break; |
2434 | case DMA_SLAVE_CONFIG: | 2400 | case DMA_SLAVE_CONFIG: |
@@ -2814,6 +2780,28 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |||
2814 | return &desc->txd; | 2780 | return &desc->txd; |
2815 | } | 2781 | } |
2816 | 2782 | ||
2783 | static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac, | ||
2784 | struct dma_pl330_desc *first) | ||
2785 | { | ||
2786 | unsigned long flags; | ||
2787 | struct dma_pl330_desc *desc; | ||
2788 | |||
2789 | if (!first) | ||
2790 | return; | ||
2791 | |||
2792 | spin_lock_irqsave(&pdmac->pool_lock, flags); | ||
2793 | |||
2794 | while (!list_empty(&first->node)) { | ||
2795 | desc = list_entry(first->node.next, | ||
2796 | struct dma_pl330_desc, node); | ||
2797 | list_move_tail(&desc->node, &pdmac->desc_pool); | ||
2798 | } | ||
2799 | |||
2800 | list_move_tail(&first->node, &pdmac->desc_pool); | ||
2801 | |||
2802 | spin_unlock_irqrestore(&pdmac->pool_lock, flags); | ||
2803 | } | ||
2804 | |||
2817 | static struct dma_async_tx_descriptor * | 2805 | static struct dma_async_tx_descriptor * |
2818 | pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 2806 | pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
2819 | unsigned int sg_len, enum dma_transfer_direction direction, | 2807 | unsigned int sg_len, enum dma_transfer_direction direction, |
@@ -2822,7 +2810,6 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
2822 | struct dma_pl330_desc *first, *desc = NULL; | 2810 | struct dma_pl330_desc *first, *desc = NULL; |
2823 | struct dma_pl330_chan *pch = to_pchan(chan); | 2811 | struct dma_pl330_chan *pch = to_pchan(chan); |
2824 | struct scatterlist *sg; | 2812 | struct scatterlist *sg; |
2825 | unsigned long flags; | ||
2826 | int i; | 2813 | int i; |
2827 | dma_addr_t addr; | 2814 | dma_addr_t addr; |
2828 | 2815 | ||
@@ -2842,20 +2829,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
2842 | dev_err(pch->dmac->pif.dev, | 2829 | dev_err(pch->dmac->pif.dev, |
2843 | "%s:%d Unable to fetch desc\n", | 2830 | "%s:%d Unable to fetch desc\n", |
2844 | __func__, __LINE__); | 2831 | __func__, __LINE__); |
2845 | if (!first) | 2832 | __pl330_giveback_desc(pdmac, first); |
2846 | return NULL; | ||
2847 | |||
2848 | spin_lock_irqsave(&pdmac->pool_lock, flags); | ||
2849 | |||
2850 | while (!list_empty(&first->node)) { | ||
2851 | desc = list_entry(first->node.next, | ||
2852 | struct dma_pl330_desc, node); | ||
2853 | list_move_tail(&desc->node, &pdmac->desc_pool); | ||
2854 | } | ||
2855 | |||
2856 | list_move_tail(&first->node, &pdmac->desc_pool); | ||
2857 | |||
2858 | spin_unlock_irqrestore(&pdmac->pool_lock, flags); | ||
2859 | 2833 | ||
2860 | return NULL; | 2834 | return NULL; |
2861 | } | 2835 | } |
@@ -2896,6 +2870,25 @@ static irqreturn_t pl330_irq_handler(int irq, void *data) | |||
2896 | return IRQ_NONE; | 2870 | return IRQ_NONE; |
2897 | } | 2871 | } |
2898 | 2872 | ||
2873 | #define PL330_DMA_BUSWIDTHS \ | ||
2874 | BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ | ||
2875 | BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
2876 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | ||
2877 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ | ||
2878 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | ||
2879 | |||
2880 | static int pl330_dma_device_slave_caps(struct dma_chan *dchan, | ||
2881 | struct dma_slave_caps *caps) | ||
2882 | { | ||
2883 | caps->src_addr_widths = PL330_DMA_BUSWIDTHS; | ||
2884 | caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS; | ||
2885 | caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
2886 | caps->cmd_pause = false; | ||
2887 | caps->cmd_terminate = true; | ||
2888 | |||
2889 | return 0; | ||
2890 | } | ||
2891 | |||
2899 | static int | 2892 | static int |
2900 | pl330_probe(struct amba_device *adev, const struct amba_id *id) | 2893 | pl330_probe(struct amba_device *adev, const struct amba_id *id) |
2901 | { | 2894 | { |
@@ -2908,7 +2901,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2908 | int i, ret, irq; | 2901 | int i, ret, irq; |
2909 | int num_chan; | 2902 | int num_chan; |
2910 | 2903 | ||
2911 | pdat = adev->dev.platform_data; | 2904 | pdat = dev_get_platdata(&adev->dev); |
2912 | 2905 | ||
2913 | /* Allocate a new DMAC and its Channels */ | 2906 | /* Allocate a new DMAC and its Channels */ |
2914 | pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL); | 2907 | pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL); |
@@ -2971,6 +2964,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2971 | pch->chan.private = adev->dev.of_node; | 2964 | pch->chan.private = adev->dev.of_node; |
2972 | 2965 | ||
2973 | INIT_LIST_HEAD(&pch->work_list); | 2966 | INIT_LIST_HEAD(&pch->work_list); |
2967 | INIT_LIST_HEAD(&pch->completed_list); | ||
2974 | spin_lock_init(&pch->lock); | 2968 | spin_lock_init(&pch->lock); |
2975 | pch->pl330_chid = NULL; | 2969 | pch->pl330_chid = NULL; |
2976 | pch->chan.device = pd; | 2970 | pch->chan.device = pd; |
@@ -3000,6 +2994,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
3000 | pd->device_prep_slave_sg = pl330_prep_slave_sg; | 2994 | pd->device_prep_slave_sg = pl330_prep_slave_sg; |
3001 | pd->device_control = pl330_control; | 2995 | pd->device_control = pl330_control; |
3002 | pd->device_issue_pending = pl330_issue_pending; | 2996 | pd->device_issue_pending = pl330_issue_pending; |
2997 | pd->device_slave_caps = pl330_dma_device_slave_caps; | ||
3003 | 2998 | ||
3004 | ret = dma_async_device_register(pd); | 2999 | ret = dma_async_device_register(pd); |
3005 | if (ret) { | 3000 | if (ret) { |
@@ -3015,6 +3010,14 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
3015 | "unable to register DMA to the generic DT DMA helpers\n"); | 3010 | "unable to register DMA to the generic DT DMA helpers\n"); |
3016 | } | 3011 | } |
3017 | } | 3012 | } |
3013 | /* | ||
3014 | * This is the limit for transfers with a buswidth of 1, larger | ||
3015 | * buswidths will have larger limits. | ||
3016 | */ | ||
3017 | ret = dma_set_max_seg_size(&adev->dev, 1900800); | ||
3018 | if (ret) | ||
3019 | dev_err(&adev->dev, "unable to set the seg size\n"); | ||
3020 | |||
3018 | 3021 | ||
3019 | dev_info(&adev->dev, | 3022 | dev_info(&adev->dev, |
3020 | "Loaded driver for PL330 DMAC-%d\n", adev->periphid); | 3023 | "Loaded driver for PL330 DMAC-%d\n", adev->periphid); |
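pl330 is among the first drivers to implement the new device_slave_caps hook. A consumer can query the advertised limits before building a transfer; a minimal sketch using the dma_get_slave_caps() helper introduced alongside this series (field names follow the struct as it appears at this point in the tree):

	struct dma_slave_caps caps;
	struct dma_slave_config cfg = { .direction = DMA_MEM_TO_DEV };

	if (!dma_get_slave_caps(chan, &caps)) {
		/* fall back to 4-byte accesses if 8-byte writes are not advertised */
		if (!(caps.dstn_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)))
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		if (!caps.cmd_pause)
			pr_debug("channel cannot be paused mid-transfer\n");
	}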
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index 5c1dee20c13e..dadd9e010c0b 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig | |||
@@ -22,3 +22,13 @@ config SUDMAC | |||
22 | depends on SH_DMAE_BASE | 22 | depends on SH_DMAE_BASE |
23 | help | 23 | help |
24 | Enable support for the Renesas SUDMAC controllers. | 24 | Enable support for the Renesas SUDMAC controllers. |
25 | |||
26 | config RCAR_HPB_DMAE | ||
27 | tristate "Renesas R-Car HPB DMAC support" | ||
28 | depends on SH_DMAE_BASE | ||
29 | help | ||
30 | Enable support for the Renesas R-Car series DMA controllers. | ||
31 | |||
32 | config SHDMA_R8A73A4 | ||
33 | def_bool y | ||
34 | depends on ARCH_R8A73A4 && SH_DMAE != n | ||
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index c962138dde96..e856af23b789 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile | |||
@@ -1,3 +1,9 @@ | |||
1 | obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o | 1 | obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o |
2 | obj-$(CONFIG_SH_DMAE) += shdma.o | 2 | obj-$(CONFIG_SH_DMAE) += shdma.o |
3 | shdma-y := shdmac.o | ||
4 | ifeq ($(CONFIG_OF),y) | ||
5 | shdma-$(CONFIG_SHDMA_R8A73A4) += shdma-r8a73a4.o | ||
6 | endif | ||
7 | shdma-objs := $(shdma-y) | ||
3 | obj-$(CONFIG_SUDMAC) += sudmac.o | 8 | obj-$(CONFIG_SUDMAC) += sudmac.o |
9 | obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o | ||
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c new file mode 100644 index 000000000000..45a520281ce1 --- /dev/null +++ b/drivers/dma/sh/rcar-hpbdma.c | |||
@@ -0,0 +1,655 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011-2013 Renesas Electronics Corporation | ||
3 | * Copyright (C) 2013 Cogent Embedded, Inc. | ||
4 | * | ||
5 | * This file is based on the drivers/dma/sh/shdma.c | ||
6 | * | ||
7 | * Renesas SuperH DMA Engine support | ||
8 | * | ||
9 | * This is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * - The SuperH DMAC does not have a hardware DMA chain mode. | ||
15 | * - The maximum DMA size is 16 MB. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #include <linux/dmaengine.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/platform_data/dma-rcar-hpbdma.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/pm_runtime.h> | ||
27 | #include <linux/shdma-base.h> | ||
28 | #include <linux/slab.h> | ||
29 | |||
30 | /* DMA channel registers */ | ||
31 | #define HPB_DMAE_DSAR0 0x00 | ||
32 | #define HPB_DMAE_DDAR0 0x04 | ||
33 | #define HPB_DMAE_DTCR0 0x08 | ||
34 | #define HPB_DMAE_DSAR1 0x0C | ||
35 | #define HPB_DMAE_DDAR1 0x10 | ||
36 | #define HPB_DMAE_DTCR1 0x14 | ||
37 | #define HPB_DMAE_DSASR 0x18 | ||
38 | #define HPB_DMAE_DDASR 0x1C | ||
39 | #define HPB_DMAE_DTCSR 0x20 | ||
40 | #define HPB_DMAE_DPTR 0x24 | ||
41 | #define HPB_DMAE_DCR 0x28 | ||
42 | #define HPB_DMAE_DCMDR 0x2C | ||
43 | #define HPB_DMAE_DSTPR 0x30 | ||
44 | #define HPB_DMAE_DSTSR 0x34 | ||
45 | #define HPB_DMAE_DDBGR 0x38 | ||
46 | #define HPB_DMAE_DDBGR2 0x3C | ||
47 | #define HPB_DMAE_CHAN(n) (0x40 * (n)) | ||
48 | |||
49 | /* DMA command register (DCMDR) bits */ | ||
50 | #define HPB_DMAE_DCMDR_BDOUT BIT(7) | ||
51 | #define HPB_DMAE_DCMDR_DQSPD BIT(6) | ||
52 | #define HPB_DMAE_DCMDR_DQSPC BIT(5) | ||
53 | #define HPB_DMAE_DCMDR_DMSPD BIT(4) | ||
54 | #define HPB_DMAE_DCMDR_DMSPC BIT(3) | ||
55 | #define HPB_DMAE_DCMDR_DQEND BIT(2) | ||
56 | #define HPB_DMAE_DCMDR_DNXT BIT(1) | ||
57 | #define HPB_DMAE_DCMDR_DMEN BIT(0) | ||
58 | |||
59 | /* DMA forced stop register (DSTPR) bits */ | ||
60 | #define HPB_DMAE_DSTPR_DMSTP BIT(0) | ||
61 | |||
62 | /* DMA status register (DSTSR) bits */ | ||
63 | #define HPB_DMAE_DSTSR_DMSTS BIT(0) | ||
64 | |||
65 | /* DMA common registers */ | ||
66 | #define HPB_DMAE_DTIMR 0x00 | ||
67 | #define HPB_DMAE_DINTSR0 0x0C | ||
68 | #define HPB_DMAE_DINTSR1 0x10 | ||
69 | #define HPB_DMAE_DINTCR0 0x14 | ||
70 | #define HPB_DMAE_DINTCR1 0x18 | ||
71 | #define HPB_DMAE_DINTMR0 0x1C | ||
72 | #define HPB_DMAE_DINTMR1 0x20 | ||
73 | #define HPB_DMAE_DACTSR0 0x24 | ||
74 | #define HPB_DMAE_DACTSR1 0x28 | ||
75 | #define HPB_DMAE_HSRSTR(n) (0x40 + (n) * 4) | ||
76 | #define HPB_DMAE_HPB_DMASPR(n) (0x140 + (n) * 4) | ||
77 | #define HPB_DMAE_HPB_DMLVLR0 0x160 | ||
78 | #define HPB_DMAE_HPB_DMLVLR1 0x164 | ||
79 | #define HPB_DMAE_HPB_DMSHPT0 0x168 | ||
80 | #define HPB_DMAE_HPB_DMSHPT1 0x16C | ||
81 | |||
82 | #define HPB_DMA_SLAVE_NUMBER 256 | ||
83 | #define HPB_DMA_TCR_MAX 0x01000000 /* 16 MiB */ | ||
84 | |||
85 | struct hpb_dmae_chan { | ||
86 | struct shdma_chan shdma_chan; | ||
87 | int xfer_mode; /* DMA transfer mode */ | ||
88 | #define XFER_SINGLE 1 | ||
89 | #define XFER_DOUBLE 2 | ||
90 | unsigned plane_idx; /* current DMA information set */ | ||
91 | bool first_desc; /* first/next transfer */ | ||
92 | int xmit_shift; /* log_2(bytes_per_xfer) */ | ||
93 | void __iomem *base; | ||
94 | const struct hpb_dmae_slave_config *cfg; | ||
95 | char dev_id[16]; /* unique channel name per DMAC */ | ||
96 | }; | ||
97 | |||
98 | struct hpb_dmae_device { | ||
99 | struct shdma_dev shdma_dev; | ||
100 | spinlock_t reg_lock; /* comm_reg operation lock */ | ||
101 | struct hpb_dmae_pdata *pdata; | ||
102 | void __iomem *chan_reg; | ||
103 | void __iomem *comm_reg; | ||
104 | void __iomem *reset_reg; | ||
105 | void __iomem *mode_reg; | ||
106 | }; | ||
107 | |||
108 | struct hpb_dmae_regs { | ||
109 | u32 sar; /* SAR / source address */ | ||
110 | u32 dar; /* DAR / destination address */ | ||
111 | u32 tcr; /* TCR / transfer count */ | ||
112 | }; | ||
113 | |||
114 | struct hpb_desc { | ||
115 | struct shdma_desc shdma_desc; | ||
116 | struct hpb_dmae_regs hw; | ||
117 | unsigned plane_idx; | ||
118 | }; | ||
119 | |||
120 | #define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan) | ||
121 | #define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc) | ||
122 | #define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \ | ||
123 | struct hpb_dmae_device, shdma_dev.dma_dev) | ||
124 | |||
125 | static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg) | ||
126 | { | ||
127 | iowrite32(data, hpb_dc->base + reg); | ||
128 | } | ||
129 | |||
130 | static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg) | ||
131 | { | ||
132 | return ioread32(hpb_dc->base + reg); | ||
133 | } | ||
134 | |||
135 | static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data) | ||
136 | { | ||
137 | iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR); | ||
138 | } | ||
139 | |||
140 | static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch) | ||
141 | { | ||
142 | iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch)); | ||
143 | } | ||
144 | |||
145 | static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch) | ||
146 | { | ||
147 | u32 v; | ||
148 | |||
149 | if (ch < 32) | ||
150 | v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch; | ||
151 | else | ||
152 | v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32); | ||
153 | return v & 0x1; | ||
154 | } | ||
155 | |||
156 | static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch) | ||
157 | { | ||
158 | if (ch < 32) | ||
159 | iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0); | ||
160 | else | ||
161 | iowrite32((0x1 << (ch - 32)), | ||
162 | hpbdev->comm_reg + HPB_DMAE_DINTCR1); | ||
163 | } | ||
164 | |||
165 | static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data) | ||
166 | { | ||
167 | iowrite32(data, hpbdev->mode_reg); | ||
168 | } | ||
169 | |||
170 | static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev) | ||
171 | { | ||
172 | return ioread32(hpbdev->mode_reg); | ||
173 | } | ||
174 | |||
175 | static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch) | ||
176 | { | ||
177 | u32 intreg; | ||
178 | |||
179 | spin_lock_irq(&hpbdev->reg_lock); | ||
180 | if (ch < 32) { | ||
181 | intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0); | ||
182 | iowrite32(BIT(ch) | intreg, | ||
183 | hpbdev->comm_reg + HPB_DMAE_DINTMR0); | ||
184 | } else { | ||
185 | intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1); | ||
186 | iowrite32(BIT(ch - 32) | intreg, | ||
187 | hpbdev->comm_reg + HPB_DMAE_DINTMR1); | ||
188 | } | ||
189 | spin_unlock_irq(&hpbdev->reg_lock); | ||
190 | } | ||
191 | |||
192 | static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data) | ||
193 | { | ||
194 | u32 rstr; | ||
195 | int timeout = 10000; /* 100 ms */ | ||
196 | |||
197 | spin_lock(&hpbdev->reg_lock); | ||
198 | rstr = ioread32(hpbdev->reset_reg); | ||
199 | rstr |= data; | ||
200 | iowrite32(rstr, hpbdev->reset_reg); | ||
201 | do { | ||
202 | rstr = ioread32(hpbdev->reset_reg); | ||
203 | if ((rstr & data) == data) | ||
204 | break; | ||
205 | udelay(10); | ||
206 | } while (timeout--); | ||
207 | |||
208 | if (timeout < 0) | ||
209 | dev_err(hpbdev->shdma_dev.dma_dev.dev, | ||
210 | "%s timeout\n", __func__); | ||
211 | |||
212 | rstr &= ~data; | ||
213 | iowrite32(rstr, hpbdev->reset_reg); | ||
214 | spin_unlock(&hpbdev->reg_lock); | ||
215 | } | ||
216 | |||
217 | static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev, | ||
218 | u32 mask, u32 data) | ||
219 | { | ||
220 | u32 mode; | ||
221 | |||
222 | spin_lock_irq(&hpbdev->reg_lock); | ||
223 | mode = asyncmdr_read(hpbdev); | ||
224 | mode &= ~mask; | ||
225 | mode |= data; | ||
226 | asyncmdr_write(hpbdev, mode); | ||
227 | spin_unlock_irq(&hpbdev->reg_lock); | ||
228 | } | ||
229 | |||
230 | static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev) | ||
231 | { | ||
232 | dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD); | ||
233 | } | ||
234 | |||
235 | static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev) | ||
236 | { | ||
237 | u32 ch; | ||
238 | |||
239 | for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++) | ||
240 | hsrstr_write(hpbdev, ch); | ||
241 | } | ||
242 | |||
243 | static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan) | ||
244 | { | ||
245 | struct hpb_dmae_device *hpbdev = to_dev(hpb_chan); | ||
246 | struct hpb_dmae_pdata *pdata = hpbdev->pdata; | ||
247 | int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR); | ||
248 | int i; | ||
249 | |||
250 | switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) { | ||
251 | case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT: | ||
252 | default: | ||
253 | i = XMIT_SZ_8BIT; | ||
254 | break; | ||
255 | case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT: | ||
256 | i = XMIT_SZ_16BIT; | ||
257 | break; | ||
258 | case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT: | ||
259 | i = XMIT_SZ_32BIT; | ||
260 | break; | ||
261 | } | ||
262 | return pdata->ts_shift[i]; | ||
263 | } | ||
264 | |||
265 | static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan, | ||
266 | struct hpb_dmae_regs *hw, unsigned plane) | ||
267 | { | ||
268 | ch_reg_write(hpb_chan, hw->sar, | ||
269 | plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0); | ||
270 | ch_reg_write(hpb_chan, hw->dar, | ||
271 | plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0); | ||
272 | ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift, | ||
273 | plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0); | ||
274 | } | ||
275 | |||
276 | static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next) | ||
277 | { | ||
278 | ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) | | ||
279 | HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR); | ||
280 | } | ||
281 | |||
282 | static void hpb_dmae_halt(struct shdma_chan *schan) | ||
283 | { | ||
284 | struct hpb_dmae_chan *chan = to_chan(schan); | ||
285 | |||
286 | ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR); | ||
287 | ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR); | ||
288 | } | ||
289 | |||
290 | static const struct hpb_dmae_slave_config * | ||
291 | hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id) | ||
292 | { | ||
293 | struct hpb_dmae_device *hpbdev = to_dev(hpb_chan); | ||
294 | struct hpb_dmae_pdata *pdata = hpbdev->pdata; | ||
295 | int i; | ||
296 | |||
297 | if (slave_id >= HPB_DMA_SLAVE_NUMBER) | ||
298 | return NULL; | ||
299 | |||
300 | for (i = 0; i < pdata->num_slaves; i++) | ||
301 | if (pdata->slaves[i].id == slave_id) | ||
302 | return pdata->slaves + i; | ||
303 | |||
304 | return NULL; | ||
305 | } | ||
306 | |||
307 | static void hpb_dmae_start_xfer(struct shdma_chan *schan, | ||
308 | struct shdma_desc *sdesc) | ||
309 | { | ||
310 | struct hpb_dmae_chan *chan = to_chan(schan); | ||
311 | struct hpb_dmae_device *hpbdev = to_dev(chan); | ||
312 | struct hpb_desc *desc = to_desc(sdesc); | ||
313 | |||
314 | if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET) | ||
315 | hpb_dmae_async_reset(hpbdev, chan->cfg->rstr); | ||
316 | |||
317 | desc->plane_idx = chan->plane_idx; | ||
318 | hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx); | ||
319 | hpb_dmae_start(chan, !chan->first_desc); | ||
320 | |||
321 | if (chan->xfer_mode == XFER_DOUBLE) { | ||
322 | chan->plane_idx ^= 1; | ||
323 | chan->first_desc = false; | ||
324 | } | ||
325 | } | ||
326 | |||
327 | static bool hpb_dmae_desc_completed(struct shdma_chan *schan, | ||
328 | struct shdma_desc *sdesc) | ||
329 | { | ||
330 | /* | ||
331 | * This is correct since we always have at most single | ||
332 | * outstanding DMA transfer per channel, and by the time | ||
333 | * we get completion interrupt the transfer is completed. | ||
334 | * This will change if we ever use alternating DMA | ||
335 | * information sets and submit two descriptors at once. | ||
336 | */ | ||
337 | return true; | ||
338 | } | ||
339 | |||
340 | static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq) | ||
341 | { | ||
342 | struct hpb_dmae_chan *chan = to_chan(schan); | ||
343 | struct hpb_dmae_device *hpbdev = to_dev(chan); | ||
344 | int ch = chan->cfg->dma_ch; | ||
345 | |||
346 | /* Check Complete DMA Transfer */ | ||
347 | if (dintsr_read(hpbdev, ch)) { | ||
348 | /* Clear Interrupt status */ | ||
349 | dintcr_write(hpbdev, ch); | ||
350 | return true; | ||
351 | } | ||
352 | return false; | ||
353 | } | ||
354 | |||
355 | static int hpb_dmae_desc_setup(struct shdma_chan *schan, | ||
356 | struct shdma_desc *sdesc, | ||
357 | dma_addr_t src, dma_addr_t dst, size_t *len) | ||
358 | { | ||
359 | struct hpb_desc *desc = to_desc(sdesc); | ||
360 | |||
361 | if (*len > (size_t)HPB_DMA_TCR_MAX) | ||
362 | *len = (size_t)HPB_DMA_TCR_MAX; | ||
363 | |||
364 | desc->hw.sar = src; | ||
365 | desc->hw.dar = dst; | ||
366 | desc->hw.tcr = *len; | ||
367 | |||
368 | return 0; | ||
369 | } | ||
370 | |||
371 | static size_t hpb_dmae_get_partial(struct shdma_chan *schan, | ||
372 | struct shdma_desc *sdesc) | ||
373 | { | ||
374 | struct hpb_desc *desc = to_desc(sdesc); | ||
375 | struct hpb_dmae_chan *chan = to_chan(schan); | ||
376 | u32 tcr = ch_reg_read(chan, desc->plane_idx ? | ||
377 | HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0); | ||
378 | |||
379 | return (desc->hw.tcr - tcr) << chan->xmit_shift; | ||
380 | } | ||
381 | |||
382 | static bool hpb_dmae_channel_busy(struct shdma_chan *schan) | ||
383 | { | ||
384 | struct hpb_dmae_chan *chan = to_chan(schan); | ||
385 | u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR); | ||
386 | |||
387 | return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS; | ||
388 | } | ||
389 | |||
390 | static int | ||
391 | hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan, | ||
392 | const struct hpb_dmae_slave_config *cfg) | ||
393 | { | ||
394 | struct hpb_dmae_device *hpbdev = to_dev(hpb_chan); | ||
395 | struct hpb_dmae_pdata *pdata = hpbdev->pdata; | ||
396 | const struct hpb_dmae_channel *channel = pdata->channels; | ||
397 | int slave_id = cfg->id; | ||
398 | int i, err; | ||
399 | |||
400 | for (i = 0; i < pdata->num_channels; i++, channel++) { | ||
401 | if (channel->s_id == slave_id) { | ||
402 | struct device *dev = hpb_chan->shdma_chan.dev; | ||
403 | |||
404 | hpb_chan->base = hpbdev->chan_reg + | ||
405 | HPB_DMAE_CHAN(cfg->dma_ch); | ||
406 | |||
407 | dev_dbg(dev, "Detected Slave device\n"); | ||
408 | dev_dbg(dev, " -- slave_id : 0x%x\n", slave_id); | ||
409 | dev_dbg(dev, " -- cfg->dma_ch : %d\n", cfg->dma_ch); | ||
410 | dev_dbg(dev, " -- channel->ch_irq: %d\n", | ||
411 | channel->ch_irq); | ||
412 | break; | ||
413 | } | ||
414 | } | ||
415 | |||
416 | err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq, | ||
417 | IRQF_SHARED, hpb_chan->dev_id); | ||
418 | if (err) { | ||
419 | dev_err(hpb_chan->shdma_chan.dev, | ||
420 | "DMA channel request_irq %d failed with error %d\n", | ||
421 | channel->ch_irq, err); | ||
422 | return err; | ||
423 | } | ||
424 | |||
425 | hpb_chan->plane_idx = 0; | ||
426 | hpb_chan->first_desc = true; | ||
427 | |||
428 | if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) { | ||
429 | hpb_chan->xfer_mode = XFER_SINGLE; | ||
430 | } else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == | ||
431 | (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) { | ||
432 | hpb_chan->xfer_mode = XFER_DOUBLE; | ||
433 | } else { | ||
434 | dev_err(hpb_chan->shdma_chan.dev, "DCR setting error\n"); | ||
435 | shdma_free_irq(&hpb_chan->shdma_chan); | ||
436 | return -EINVAL; | ||
437 | } | ||
438 | |||
439 | if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE) | ||
440 | hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr); | ||
441 | ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR); | ||
442 | ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR); | ||
443 | hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan); | ||
444 | hpb_dmae_enable_int(hpbdev, cfg->dma_ch); | ||
445 | |||
446 | return 0; | ||
447 | } | ||
448 | |||
449 | static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try) | ||
450 | { | ||
451 | struct hpb_dmae_chan *chan = to_chan(schan); | ||
452 | const struct hpb_dmae_slave_config *sc = | ||
453 | hpb_dmae_find_slave(chan, slave_id); | ||
454 | |||
455 | if (!sc) | ||
456 | return -ENODEV; | ||
457 | if (try) | ||
458 | return 0; | ||
459 | chan->cfg = sc; | ||
460 | return hpb_dmae_alloc_chan_resources(chan, sc); | ||
461 | } | ||
462 | |||
463 | static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id) | ||
464 | { | ||
465 | } | ||
466 | |||
467 | static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan) | ||
468 | { | ||
469 | struct hpb_dmae_chan *chan = to_chan(schan); | ||
470 | |||
471 | return chan->cfg->addr; | ||
472 | } | ||
473 | |||
474 | static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i) | ||
475 | { | ||
476 | return &((struct hpb_desc *)buf)[i].shdma_desc; | ||
477 | } | ||
478 | |||
479 | static const struct shdma_ops hpb_dmae_ops = { | ||
480 | .desc_completed = hpb_dmae_desc_completed, | ||
481 | .halt_channel = hpb_dmae_halt, | ||
482 | .channel_busy = hpb_dmae_channel_busy, | ||
483 | .slave_addr = hpb_dmae_slave_addr, | ||
484 | .desc_setup = hpb_dmae_desc_setup, | ||
485 | .set_slave = hpb_dmae_set_slave, | ||
486 | .setup_xfer = hpb_dmae_setup_xfer, | ||
487 | .start_xfer = hpb_dmae_start_xfer, | ||
488 | .embedded_desc = hpb_dmae_embedded_desc, | ||
489 | .chan_irq = hpb_dmae_chan_irq, | ||
490 | .get_partial = hpb_dmae_get_partial, | ||
491 | }; | ||
492 | |||
493 | static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id) | ||
494 | { | ||
495 | struct shdma_dev *sdev = &hpbdev->shdma_dev; | ||
496 | struct platform_device *pdev = | ||
497 | to_platform_device(hpbdev->shdma_dev.dma_dev.dev); | ||
498 | struct hpb_dmae_chan *new_hpb_chan; | ||
499 | struct shdma_chan *schan; | ||
500 | |||
501 | /* Alloc channel */ | ||
502 | new_hpb_chan = devm_kzalloc(&pdev->dev, | ||
503 | sizeof(struct hpb_dmae_chan), GFP_KERNEL); | ||
504 | if (!new_hpb_chan) { | ||
505 | dev_err(hpbdev->shdma_dev.dma_dev.dev, | ||
506 | "No free memory for allocating DMA channels!\n"); | ||
507 | return -ENOMEM; | ||
508 | } | ||
509 | |||
510 | schan = &new_hpb_chan->shdma_chan; | ||
511 | shdma_chan_probe(sdev, schan, id); | ||
512 | |||
513 | if (pdev->id >= 0) | ||
514 | snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id), | ||
515 | "hpb-dmae%d.%d", pdev->id, id); | ||
516 | else | ||
517 | snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id), | ||
518 | "hpb-dma.%d", id); | ||
519 | |||
520 | return 0; | ||
521 | } | ||
522 | |||
523 | static int hpb_dmae_probe(struct platform_device *pdev) | ||
524 | { | ||
525 | struct hpb_dmae_pdata *pdata = pdev->dev.platform_data; | ||
526 | struct hpb_dmae_device *hpbdev; | ||
527 | struct dma_device *dma_dev; | ||
528 | struct resource *chan, *comm, *rest, *mode, *irq_res; | ||
529 | int err, i; | ||
530 | |||
531 | /* Get platform data */ | ||
532 | if (!pdata || !pdata->num_channels) | ||
533 | return -ENODEV; | ||
534 | |||
535 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
536 | comm = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
537 | rest = platform_get_resource(pdev, IORESOURCE_MEM, 2); | ||
538 | mode = platform_get_resource(pdev, IORESOURCE_MEM, 3); | ||
539 | |||
540 | irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
541 | if (!irq_res) | ||
542 | return -ENODEV; | ||
543 | |||
544 | hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device), | ||
545 | GFP_KERNEL); | ||
546 | if (!hpbdev) { | ||
547 | dev_err(&pdev->dev, "Not enough memory\n"); | ||
548 | return -ENOMEM; | ||
549 | } | ||
550 | |||
551 | hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan); | ||
552 | if (IS_ERR(hpbdev->chan_reg)) | ||
553 | return PTR_ERR(hpbdev->chan_reg); | ||
554 | |||
555 | hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm); | ||
556 | if (IS_ERR(hpbdev->comm_reg)) | ||
557 | return PTR_ERR(hpbdev->comm_reg); | ||
558 | |||
559 | hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest); | ||
560 | if (IS_ERR(hpbdev->reset_reg)) | ||
561 | return PTR_ERR(hpbdev->reset_reg); | ||
562 | |||
563 | hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode); | ||
564 | if (IS_ERR(hpbdev->mode_reg)) | ||
565 | return PTR_ERR(hpbdev->mode_reg); | ||
566 | |||
567 | dma_dev = &hpbdev->shdma_dev.dma_dev; | ||
568 | |||
569 | spin_lock_init(&hpbdev->reg_lock); | ||
570 | |||
571 | /* Platform data */ | ||
572 | hpbdev->pdata = pdata; | ||
573 | |||
574 | pm_runtime_enable(&pdev->dev); | ||
575 | err = pm_runtime_get_sync(&pdev->dev); | ||
576 | if (err < 0) | ||
577 | dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err); | ||
578 | |||
579 | /* Reset DMA controller */ | ||
580 | hpb_dmae_reset(hpbdev); | ||
581 | |||
582 | pm_runtime_put(&pdev->dev); | ||
583 | |||
584 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | ||
585 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); | ||
586 | |||
587 | hpbdev->shdma_dev.ops = &hpb_dmae_ops; | ||
588 | hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc); | ||
589 | err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels); | ||
590 | if (err < 0) | ||
591 | goto error; | ||
592 | |||
593 | /* Create DMA channels */ | ||
594 | for (i = 0; i < pdata->num_channels; i++) | ||
595 | hpb_dmae_chan_probe(hpbdev, i); | ||
596 | |||
597 | platform_set_drvdata(pdev, hpbdev); | ||
598 | err = dma_async_device_register(dma_dev); | ||
599 | if (!err) | ||
600 | return 0; | ||
601 | |||
602 | shdma_cleanup(&hpbdev->shdma_dev); | ||
603 | error: | ||
604 | pm_runtime_disable(&pdev->dev); | ||
605 | return err; | ||
606 | } | ||
607 | |||
608 | static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev) | ||
609 | { | ||
610 | struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev; | ||
611 | struct shdma_chan *schan; | ||
612 | int i; | ||
613 | |||
614 | shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) { | ||
615 | BUG_ON(!schan); | ||
616 | |||
617 | shdma_free_irq(schan); | ||
618 | shdma_chan_remove(schan); | ||
619 | } | ||
620 | dma_dev->chancnt = 0; | ||
621 | } | ||
622 | |||
623 | static int hpb_dmae_remove(struct platform_device *pdev) | ||
624 | { | ||
625 | struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev); | ||
626 | |||
627 | dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev); | ||
628 | |||
629 | pm_runtime_disable(&pdev->dev); | ||
630 | |||
631 | hpb_dmae_chan_remove(hpbdev); | ||
632 | |||
633 | return 0; | ||
634 | } | ||
635 | |||
636 | static void hpb_dmae_shutdown(struct platform_device *pdev) | ||
637 | { | ||
638 | struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev); | ||
639 | hpb_dmae_ctl_stop(hpbdev); | ||
640 | } | ||
641 | |||
642 | static struct platform_driver hpb_dmae_driver = { | ||
643 | .probe = hpb_dmae_probe, | ||
644 | .remove = hpb_dmae_remove, | ||
645 | .shutdown = hpb_dmae_shutdown, | ||
646 | .driver = { | ||
647 | .owner = THIS_MODULE, | ||
648 | .name = "hpb-dma-engine", | ||
649 | }, | ||
650 | }; | ||
651 | module_platform_driver(hpb_dmae_driver); | ||
652 | |||
653 | MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>"); | ||
654 | MODULE_DESCRIPTION("Renesas HPB DMA Engine driver"); | ||
655 | MODULE_LICENSE("GPL"); | ||
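In the new rcar-hpbdma driver above, DTCRx holds transfer units rather than bytes: hpb_dmae_set_reg() shifts the byte length right by xmit_shift (log2 of the bus width in bytes) before writing it, and completed work is converted back by shifting left, as in hpb_dmae_get_partial(). A small sketch of that arithmetic, for illustration only:

#include <linux/types.h>

/* Byte count to DTCR units: 64 KiB at a 32-bit bus width (xmit_shift = 2)
 * is programmed as 0x10000 >> 2 = 0x4000. */
static u32 hpb_bytes_to_tcr(size_t bytes, unsigned int xmit_shift)
{
	return bytes >> xmit_shift;
}

/* Consumed DTCR units back to bytes, as done when reporting partial work. */
static size_t hpb_tcr_to_bytes(u32 tcr_units, unsigned int xmit_shift)
{
	return (size_t)tcr_units << xmit_shift;
}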
diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h new file mode 100644 index 000000000000..a2b8258426c9 --- /dev/null +++ b/drivers/dma/sh/shdma-arm.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Renesas SuperH DMA Engine support | ||
3 | * | ||
4 | * Copyright (C) 2013 Renesas Electronics, Inc. | ||
5 | * | ||
6 | * This is free software; you can redistribute it and/or modify it under the | ||
7 | * terms of version 2 the GNU General Public License as published by the Free | ||
8 | * Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #ifndef SHDMA_ARM_H | ||
12 | #define SHDMA_ARM_H | ||
13 | |||
14 | #include "shdma.h" | ||
15 | |||
16 | /* Transmit sizes and respective CHCR register values */ | ||
17 | enum { | ||
18 | XMIT_SZ_8BIT = 0, | ||
19 | XMIT_SZ_16BIT = 1, | ||
20 | XMIT_SZ_32BIT = 2, | ||
21 | XMIT_SZ_64BIT = 7, | ||
22 | XMIT_SZ_128BIT = 3, | ||
23 | XMIT_SZ_256BIT = 4, | ||
24 | XMIT_SZ_512BIT = 5, | ||
25 | }; | ||
26 | |||
27 | /* log2(size / 8) - used to calculate number of transfers */ | ||
28 | #define SH_DMAE_TS_SHIFT { \ | ||
29 | [XMIT_SZ_8BIT] = 0, \ | ||
30 | [XMIT_SZ_16BIT] = 1, \ | ||
31 | [XMIT_SZ_32BIT] = 2, \ | ||
32 | [XMIT_SZ_64BIT] = 3, \ | ||
33 | [XMIT_SZ_128BIT] = 4, \ | ||
34 | [XMIT_SZ_256BIT] = 5, \ | ||
35 | [XMIT_SZ_512BIT] = 6, \ | ||
36 | } | ||
37 | |||
38 | #define TS_LOW_BIT 0x3 /* --xx */ | ||
39 | #define TS_HI_BIT 0xc /* xx-- */ | ||
40 | |||
41 | #define TS_LOW_SHIFT (3) | ||
42 | #define TS_HI_SHIFT (20 - 2) /* 2 bits for shifted low TS */ | ||
43 | |||
44 | #define TS_INDEX2VAL(i) \ | ||
45 | ((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\ | ||
46 | (((i) & TS_HI_BIT) << TS_HI_SHIFT)) | ||
47 | |||
48 | #define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz))) | ||
49 | #define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz))) | ||
50 | |||
51 | #endif | ||
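TS_INDEX2VAL() above scatters the transmit-size index across the two CHCR TS fields: index bits 1:0 land in CHCR[4:3] and index bits 3:2 land in CHCR[21:20]. Worked values, derived from the macros above:

/*
 * Worked values (illustrative only, computed from the macros above):
 *
 *   TS_INDEX2VAL(XMIT_SZ_8BIT)  = 0x000000   CHCR[21:20] = 0, CHCR[4:3] = 0
 *   TS_INDEX2VAL(XMIT_SZ_32BIT) = 0x000010   CHCR[21:20] = 0, CHCR[4:3] = 2
 *   TS_INDEX2VAL(XMIT_SZ_64BIT) = 0x100018   CHCR[21:20] = 1, CHCR[4:3] = 3
 */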
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 28ca36121631..d94ab592cc1b 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c | |||
@@ -171,7 +171,8 @@ static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan) | |||
171 | return NULL; | 171 | return NULL; |
172 | } | 172 | } |
173 | 173 | ||
174 | static int shdma_setup_slave(struct shdma_chan *schan, int slave_id) | 174 | static int shdma_setup_slave(struct shdma_chan *schan, int slave_id, |
175 | dma_addr_t slave_addr) | ||
175 | { | 176 | { |
176 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | 177 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); |
177 | const struct shdma_ops *ops = sdev->ops; | 178 | const struct shdma_ops *ops = sdev->ops; |
@@ -179,7 +180,7 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id) | |||
179 | 180 | ||
180 | if (schan->dev->of_node) { | 181 | if (schan->dev->of_node) { |
181 | match = schan->hw_req; | 182 | match = schan->hw_req; |
182 | ret = ops->set_slave(schan, match, true); | 183 | ret = ops->set_slave(schan, match, slave_addr, true); |
183 | if (ret < 0) | 184 | if (ret < 0) |
184 | return ret; | 185 | return ret; |
185 | 186 | ||
@@ -194,7 +195,7 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id) | |||
194 | if (test_and_set_bit(slave_id, shdma_slave_used)) | 195 | if (test_and_set_bit(slave_id, shdma_slave_used)) |
195 | return -EBUSY; | 196 | return -EBUSY; |
196 | 197 | ||
197 | ret = ops->set_slave(schan, match, false); | 198 | ret = ops->set_slave(schan, match, slave_addr, false); |
198 | if (ret < 0) { | 199 | if (ret < 0) { |
199 | clear_bit(slave_id, shdma_slave_used); | 200 | clear_bit(slave_id, shdma_slave_used); |
200 | return ret; | 201 | return ret; |
@@ -236,7 +237,7 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg) | |||
236 | if (!schan->dev->of_node && match >= slave_num) | 237 | if (!schan->dev->of_node && match >= slave_num) |
237 | return false; | 238 | return false; |
238 | 239 | ||
239 | ret = ops->set_slave(schan, match, true); | 240 | ret = ops->set_slave(schan, match, 0, true); |
240 | if (ret < 0) | 241 | if (ret < 0) |
241 | return false; | 242 | return false; |
242 | 243 | ||
@@ -259,7 +260,7 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan) | |||
259 | */ | 260 | */ |
260 | if (slave) { | 261 | if (slave) { |
261 | /* Legacy mode: .private is set in filter */ | 262 | /* Legacy mode: .private is set in filter */ |
262 | ret = shdma_setup_slave(schan, slave->slave_id); | 263 | ret = shdma_setup_slave(schan, slave->slave_id, 0); |
263 | if (ret < 0) | 264 | if (ret < 0) |
264 | goto esetslave; | 265 | goto esetslave; |
265 | } else { | 266 | } else { |
@@ -680,7 +681,9 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
680 | * channel, while using it... | 681 | * channel, while using it... |
681 | */ | 682 | */ |
682 | config = (struct dma_slave_config *)arg; | 683 | config = (struct dma_slave_config *)arg; |
683 | ret = shdma_setup_slave(schan, config->slave_id); | 684 | ret = shdma_setup_slave(schan, config->slave_id, |
685 | config->direction == DMA_DEV_TO_MEM ? | ||
686 | config->src_addr : config->dst_addr); | ||
684 | if (ret < 0) | 687 | if (ret < 0) |
685 | return ret; | 688 | return ret; |
686 | break; | 689 | break; |
@@ -831,8 +834,8 @@ static irqreturn_t chan_irqt(int irq, void *dev) | |||
831 | int shdma_request_irq(struct shdma_chan *schan, int irq, | 834 | int shdma_request_irq(struct shdma_chan *schan, int irq, |
832 | unsigned long flags, const char *name) | 835 | unsigned long flags, const char *name) |
833 | { | 836 | { |
834 | int ret = request_threaded_irq(irq, chan_irq, chan_irqt, | 837 | int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq, |
835 | flags, name, schan); | 838 | chan_irqt, flags, name, schan); |
836 | 839 | ||
837 | schan->irq = ret < 0 ? ret : irq; | 840 | schan->irq = ret < 0 ? ret : irq; |
838 | 841 | ||
@@ -840,13 +843,6 @@ int shdma_request_irq(struct shdma_chan *schan, int irq, | |||
840 | } | 843 | } |
841 | EXPORT_SYMBOL(shdma_request_irq); | 844 | EXPORT_SYMBOL(shdma_request_irq); |
842 | 845 | ||
843 | void shdma_free_irq(struct shdma_chan *schan) | ||
844 | { | ||
845 | if (schan->irq >= 0) | ||
846 | free_irq(schan->irq, schan); | ||
847 | } | ||
848 | EXPORT_SYMBOL(shdma_free_irq); | ||
849 | |||
850 | void shdma_chan_probe(struct shdma_dev *sdev, | 846 | void shdma_chan_probe(struct shdma_dev *sdev, |
851 | struct shdma_chan *schan, int id) | 847 | struct shdma_chan *schan, int id) |
852 | { | 848 | { |
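With the shdma-base change above, DMA_SLAVE_CONFIG forwards the client-supplied FIFO address into set_slave(), so a peripheral driver can override the fixed address from platform data. A minimal client-side sketch (the MID/RID value and FIFO address below are placeholders, not taken from this patch):

#include <linux/dmaengine.h>

static int example_setup_rx(struct dma_chan *chan, dma_addr_t fifo_phys)
{
	struct dma_slave_config cfg = {
		.slave_id	= 0x21,			/* placeholder MID/RID */
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_phys,		/* overrides the pdata address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};

	return dmaengine_slave_config(chan, &cfg);
}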
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c index 11bcb05cd79c..06473a05fe4e 100644 --- a/drivers/dma/sh/shdma-of.c +++ b/drivers/dma/sh/shdma-of.c | |||
@@ -42,12 +42,9 @@ static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec, | |||
42 | 42 | ||
43 | static int shdma_of_probe(struct platform_device *pdev) | 43 | static int shdma_of_probe(struct platform_device *pdev) |
44 | { | 44 | { |
45 | const struct of_dev_auxdata *lookup = pdev->dev.platform_data; | 45 | const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev); |
46 | int ret; | 46 | int ret; |
47 | 47 | ||
48 | if (!lookup) | ||
49 | return -EINVAL; | ||
50 | |||
51 | ret = of_dma_controller_register(pdev->dev.of_node, | 48 | ret = of_dma_controller_register(pdev->dev.of_node, |
52 | shdma_of_xlate, pdev); | 49 | shdma_of_xlate, pdev); |
53 | if (ret < 0) | 50 | if (ret < 0) |
diff --git a/drivers/dma/sh/shdma-r8a73a4.c b/drivers/dma/sh/shdma-r8a73a4.c new file mode 100644 index 000000000000..4fb99970a3ea --- /dev/null +++ b/drivers/dma/sh/shdma-r8a73a4.c | |||
@@ -0,0 +1,77 @@ | |||
1 | /* | ||
2 | * Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs | ||
3 | * | ||
4 | * Copyright (C) 2013 Renesas Electronics, Inc. | ||
5 | * | ||
6 | * This is free software; you can redistribute it and/or modify it under the | ||
7 | * terms of version 2 the GNU General Public License as published by the Free | ||
8 | * Software Foundation. | ||
9 | */ | ||
10 | #include <linux/sh_dma.h> | ||
11 | |||
12 | #include "shdma-arm.h" | ||
13 | |||
14 | const unsigned int dma_ts_shift[] = SH_DMAE_TS_SHIFT; | ||
15 | |||
16 | static const struct sh_dmae_slave_config dma_slaves[] = { | ||
17 | { | ||
18 | .chcr = CHCR_TX(XMIT_SZ_32BIT), | ||
19 | .mid_rid = 0xd1, /* MMC0 Tx */ | ||
20 | }, { | ||
21 | .chcr = CHCR_RX(XMIT_SZ_32BIT), | ||
22 | .mid_rid = 0xd2, /* MMC0 Rx */ | ||
23 | }, { | ||
24 | .chcr = CHCR_TX(XMIT_SZ_32BIT), | ||
25 | .mid_rid = 0xe1, /* MMC1 Tx */ | ||
26 | }, { | ||
27 | .chcr = CHCR_RX(XMIT_SZ_32BIT), | ||
28 | .mid_rid = 0xe2, /* MMC1 Rx */ | ||
29 | }, | ||
30 | }; | ||
31 | |||
32 | #define DMAE_CHANNEL(a, b) \ | ||
33 | { \ | ||
34 | .offset = (a) - 0x20, \ | ||
35 | .dmars = (a) - 0x20 + 0x40, \ | ||
36 | .chclr_bit = (b), \ | ||
37 | .chclr_offset = 0x80 - 0x20, \ | ||
38 | } | ||
39 | |||
40 | static const struct sh_dmae_channel dma_channels[] = { | ||
41 | DMAE_CHANNEL(0x8000, 0), | ||
42 | DMAE_CHANNEL(0x8080, 1), | ||
43 | DMAE_CHANNEL(0x8100, 2), | ||
44 | DMAE_CHANNEL(0x8180, 3), | ||
45 | DMAE_CHANNEL(0x8200, 4), | ||
46 | DMAE_CHANNEL(0x8280, 5), | ||
47 | DMAE_CHANNEL(0x8300, 6), | ||
48 | DMAE_CHANNEL(0x8380, 7), | ||
49 | DMAE_CHANNEL(0x8400, 8), | ||
50 | DMAE_CHANNEL(0x8480, 9), | ||
51 | DMAE_CHANNEL(0x8500, 10), | ||
52 | DMAE_CHANNEL(0x8580, 11), | ||
53 | DMAE_CHANNEL(0x8600, 12), | ||
54 | DMAE_CHANNEL(0x8680, 13), | ||
55 | DMAE_CHANNEL(0x8700, 14), | ||
56 | DMAE_CHANNEL(0x8780, 15), | ||
57 | DMAE_CHANNEL(0x8800, 16), | ||
58 | DMAE_CHANNEL(0x8880, 17), | ||
59 | DMAE_CHANNEL(0x8900, 18), | ||
60 | DMAE_CHANNEL(0x8980, 19), | ||
61 | }; | ||
62 | |||
63 | const struct sh_dmae_pdata r8a73a4_dma_pdata = { | ||
64 | .slave = dma_slaves, | ||
65 | .slave_num = ARRAY_SIZE(dma_slaves), | ||
66 | .channel = dma_channels, | ||
67 | .channel_num = ARRAY_SIZE(dma_channels), | ||
68 | .ts_low_shift = TS_LOW_SHIFT, | ||
69 | .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT, | ||
70 | .ts_high_shift = TS_HI_SHIFT, | ||
71 | .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT, | ||
72 | .ts_shift = dma_ts_shift, | ||
73 | .ts_shift_num = ARRAY_SIZE(dma_ts_shift), | ||
74 | .dmaor_init = DMAOR_DME, | ||
75 | .chclr_present = 1, | ||
76 | .chclr_bitwise = 1, | ||
77 | }; | ||
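For reference, DMAE_CHANNEL() above packs per-channel offsets relative to the mapped register window; the first entry expands as follows (worked values only, nothing new is defined here):

/*
 * DMAE_CHANNEL(0x8000, 0) expands to:
 *   .offset       = 0x7FE0   (0x8000 - 0x20)
 *   .dmars        = 0x8020   (0x8000 - 0x20 + 0x40)
 *   .chclr_bit    = 0
 *   .chclr_offset = 0x60     (0x80 - 0x20)
 */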
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h index 9314e93225db..758a57b51875 100644 --- a/drivers/dma/sh/shdma.h +++ b/drivers/dma/sh/shdma.h | |||
@@ -28,18 +28,19 @@ struct sh_dmae_chan { | |||
28 | struct shdma_chan shdma_chan; | 28 | struct shdma_chan shdma_chan; |
29 | const struct sh_dmae_slave_config *config; /* Slave DMA configuration */ | 29 | const struct sh_dmae_slave_config *config; /* Slave DMA configuration */ |
30 | int xmit_shift; /* log_2(bytes_per_xfer) */ | 30 | int xmit_shift; /* log_2(bytes_per_xfer) */ |
31 | u32 __iomem *base; | 31 | void __iomem *base; |
32 | char dev_id[16]; /* unique name per DMAC of channel */ | 32 | char dev_id[16]; /* unique name per DMAC of channel */ |
33 | int pm_error; | 33 | int pm_error; |
34 | dma_addr_t slave_addr; | ||
34 | }; | 35 | }; |
35 | 36 | ||
36 | struct sh_dmae_device { | 37 | struct sh_dmae_device { |
37 | struct shdma_dev shdma_dev; | 38 | struct shdma_dev shdma_dev; |
38 | struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS]; | 39 | struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS]; |
39 | struct sh_dmae_pdata *pdata; | 40 | const struct sh_dmae_pdata *pdata; |
40 | struct list_head node; | 41 | struct list_head node; |
41 | u32 __iomem *chan_reg; | 42 | void __iomem *chan_reg; |
42 | u16 __iomem *dmars; | 43 | void __iomem *dmars; |
43 | unsigned int chcr_offset; | 44 | unsigned int chcr_offset; |
44 | u32 chcr_ie_bit; | 45 | u32 chcr_ie_bit; |
45 | }; | 46 | }; |
@@ -61,4 +62,11 @@ struct sh_dmae_desc { | |||
61 | #define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\ | 62 | #define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\ |
62 | struct sh_dmae_device, shdma_dev.dma_dev) | 63 | struct sh_dmae_device, shdma_dev.dma_dev) |
63 | 64 | ||
65 | #ifdef CONFIG_SHDMA_R8A73A4 | ||
66 | extern const struct sh_dmae_pdata r8a73a4_dma_pdata; | ||
67 | #define r8a73a4_shdma_devid (&r8a73a4_dma_pdata) | ||
68 | #else | ||
69 | #define r8a73a4_shdma_devid NULL | ||
70 | #endif | ||
71 | |||
64 | #endif /* __DMA_SHDMA_H */ | 72 | #endif /* __DMA_SHDMA_H */ |
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdmac.c index 5039fbc88254..1069e8869f20 100644 --- a/drivers/dma/sh/shdma.c +++ b/drivers/dma/sh/shdmac.c | |||
@@ -20,6 +20,8 @@ | |||
20 | 20 | ||
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/of.h> | ||
24 | #include <linux/of_device.h> | ||
23 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
24 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
25 | #include <linux/dmaengine.h> | 27 | #include <linux/dmaengine.h> |
@@ -35,6 +37,15 @@ | |||
35 | #include "../dmaengine.h" | 37 | #include "../dmaengine.h" |
36 | #include "shdma.h" | 38 | #include "shdma.h" |
37 | 39 | ||
40 | /* DMA register */ | ||
41 | #define SAR 0x00 | ||
42 | #define DAR 0x04 | ||
43 | #define TCR 0x08 | ||
44 | #define CHCR 0x0C | ||
45 | #define DMAOR 0x40 | ||
46 | |||
47 | #define TEND 0x18 /* USB-DMAC */ | ||
48 | |||
38 | #define SH_DMAE_DRV_NAME "sh-dma-engine" | 49 | #define SH_DMAE_DRV_NAME "sh-dma-engine" |
39 | 50 | ||
40 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ | 51 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ |
@@ -49,27 +60,37 @@ | |||
49 | static DEFINE_SPINLOCK(sh_dmae_lock); | 60 | static DEFINE_SPINLOCK(sh_dmae_lock); |
50 | static LIST_HEAD(sh_dmae_devices); | 61 | static LIST_HEAD(sh_dmae_devices); |
51 | 62 | ||
52 | static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data) | 63 | /* |
64 | * Different DMAC implementations provide different ways to clear DMA channels: | ||
65 | * (1) none - no CHCLR registers are available | ||
66 | * (2) one CHCLR register per channel - 0 has to be written to it to clear | ||
67 | * channel buffers | ||
68 | * (3) one CHCLR register shared by several channels - the bit | ||
69 | * corresponding to the specific channel has to be set to reset it | ||
70 | */ | ||
71 | static void channel_clear(struct sh_dmae_chan *sh_dc) | ||
53 | { | 72 | { |
54 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | 73 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); |
74 | const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel + | ||
75 | sh_dc->shdma_chan.id; | ||
76 | u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0; | ||
55 | 77 | ||
56 | __raw_writel(data, shdev->chan_reg + | 78 | __raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset); |
57 | shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset); | ||
58 | } | 79 | } |
59 | 80 | ||
60 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) | 81 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) |
61 | { | 82 | { |
62 | __raw_writel(data, sh_dc->base + reg / sizeof(u32)); | 83 | __raw_writel(data, sh_dc->base + reg); |
63 | } | 84 | } |
64 | 85 | ||
65 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) | 86 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) |
66 | { | 87 | { |
67 | return __raw_readl(sh_dc->base + reg / sizeof(u32)); | 88 | return __raw_readl(sh_dc->base + reg); |
68 | } | 89 | } |
69 | 90 | ||
70 | static u16 dmaor_read(struct sh_dmae_device *shdev) | 91 | static u16 dmaor_read(struct sh_dmae_device *shdev) |
71 | { | 92 | { |
72 | u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); | 93 | void __iomem *addr = shdev->chan_reg + DMAOR; |
73 | 94 | ||
74 | if (shdev->pdata->dmaor_is_32bit) | 95 | if (shdev->pdata->dmaor_is_32bit) |
75 | return __raw_readl(addr); | 96 | return __raw_readl(addr); |
@@ -79,7 +100,7 @@ static u16 dmaor_read(struct sh_dmae_device *shdev) | |||
79 | 100 | ||
80 | static void dmaor_write(struct sh_dmae_device *shdev, u16 data) | 101 | static void dmaor_write(struct sh_dmae_device *shdev, u16 data) |
81 | { | 102 | { |
82 | u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); | 103 | void __iomem *addr = shdev->chan_reg + DMAOR; |
83 | 104 | ||
84 | if (shdev->pdata->dmaor_is_32bit) | 105 | if (shdev->pdata->dmaor_is_32bit) |
85 | __raw_writel(data, addr); | 106 | __raw_writel(data, addr); |
@@ -91,14 +112,14 @@ static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data) | |||
91 | { | 112 | { |
92 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | 113 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); |
93 | 114 | ||
94 | __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32)); | 115 | __raw_writel(data, sh_dc->base + shdev->chcr_offset); |
95 | } | 116 | } |
96 | 117 | ||
97 | static u32 chcr_read(struct sh_dmae_chan *sh_dc) | 118 | static u32 chcr_read(struct sh_dmae_chan *sh_dc) |
98 | { | 119 | { |
99 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | 120 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); |
100 | 121 | ||
101 | return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32)); | 122 | return __raw_readl(sh_dc->base + shdev->chcr_offset); |
102 | } | 123 | } |
103 | 124 | ||
104 | /* | 125 | /* |
@@ -133,7 +154,7 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev) | |||
133 | for (i = 0; i < shdev->pdata->channel_num; i++) { | 154 | for (i = 0; i < shdev->pdata->channel_num; i++) { |
134 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | 155 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; |
135 | if (sh_chan) | 156 | if (sh_chan) |
136 | chclr_write(sh_chan, 0); | 157 | channel_clear(sh_chan); |
137 | } | 158 | } |
138 | } | 159 | } |
139 | 160 | ||
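channel_clear(), introduced above and called from sh_dmae_rst(), covers schemes (2) and (3) from its comment with a single write; reduced to the value selection it performs (a sketch, not part of the patch):

#include <linux/bitops.h>
#include <linux/types.h>

/* Scheme (3), e.g. the r8a73a4 pdata (chclr_bitwise = 1): set the channel's
 * own bit in a shared register. Scheme (2): write 0 to the per-channel CHCLR.
 * Clearing r8a73a4 channel 3 therefore writes BIT(3) at chan_reg + 0x60. */
static u32 example_chclr_value(bool chclr_bitwise, unsigned int chclr_bit)
{
	return chclr_bitwise ? BIT(chclr_bit) : 0;
}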
@@ -167,7 +188,7 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) | |||
167 | static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) | 188 | static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) |
168 | { | 189 | { |
169 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | 190 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); |
170 | struct sh_dmae_pdata *pdata = shdev->pdata; | 191 | const struct sh_dmae_pdata *pdata = shdev->pdata; |
171 | int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | | 192 | int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | |
172 | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); | 193 | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); |
173 | 194 | ||
@@ -180,7 +201,7 @@ static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) | |||
180 | static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) | 201 | static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) |
181 | { | 202 | { |
182 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | 203 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); |
183 | struct sh_dmae_pdata *pdata = shdev->pdata; | 204 | const struct sh_dmae_pdata *pdata = shdev->pdata; |
184 | int i; | 205 | int i; |
185 | 206 | ||
186 | for (i = 0; i < pdata->ts_shift_num; i++) | 207 | for (i = 0; i < pdata->ts_shift_num; i++) |
@@ -240,9 +261,9 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) | |||
240 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | 261 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) |
241 | { | 262 | { |
242 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | 263 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); |
243 | struct sh_dmae_pdata *pdata = shdev->pdata; | 264 | const struct sh_dmae_pdata *pdata = shdev->pdata; |
244 | const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id]; | 265 | const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id]; |
245 | u16 __iomem *addr = shdev->dmars; | 266 | void __iomem *addr = shdev->dmars; |
246 | unsigned int shift = chan_pdata->dmars_bit; | 267 | unsigned int shift = chan_pdata->dmars_bit; |
247 | 268 | ||
248 | if (dmae_is_busy(sh_chan)) | 269 | if (dmae_is_busy(sh_chan)) |
@@ -253,8 +274,8 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | |||
253 | 274 | ||
254 | /* in the case of a missing DMARS resource use first memory window */ | 275 | /* in the case of a missing DMARS resource use first memory window */ |
255 | if (!addr) | 276 | if (!addr) |
256 | addr = (u16 __iomem *)shdev->chan_reg; | 277 | addr = shdev->chan_reg; |
257 | addr += chan_pdata->dmars / sizeof(u16); | 278 | addr += chan_pdata->dmars; |
258 | 279 | ||
259 | __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), | 280 | __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), |
260 | addr); | 281 | addr); |
@@ -309,7 +330,7 @@ static const struct sh_dmae_slave_config *dmae_find_slave( | |||
309 | struct sh_dmae_chan *sh_chan, int match) | 330 | struct sh_dmae_chan *sh_chan, int match) |
310 | { | 331 | { |
311 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | 332 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); |
312 | struct sh_dmae_pdata *pdata = shdev->pdata; | 333 | const struct sh_dmae_pdata *pdata = shdev->pdata; |
313 | const struct sh_dmae_slave_config *cfg; | 334 | const struct sh_dmae_slave_config *cfg; |
314 | int i; | 335 | int i; |
315 | 336 | ||
@@ -323,7 +344,7 @@ static const struct sh_dmae_slave_config *dmae_find_slave( | |||
323 | } else { | 344 | } else { |
324 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) | 345 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) |
325 | if (cfg->mid_rid == match) { | 346 | if (cfg->mid_rid == match) { |
326 | sh_chan->shdma_chan.slave_id = cfg->slave_id; | 347 | sh_chan->shdma_chan.slave_id = i; |
327 | return cfg; | 348 | return cfg; |
328 | } | 349 | } |
329 | } | 350 | } |
@@ -332,7 +353,7 @@ static const struct sh_dmae_slave_config *dmae_find_slave( | |||
332 | } | 353 | } |
333 | 354 | ||
334 | static int sh_dmae_set_slave(struct shdma_chan *schan, | 355 | static int sh_dmae_set_slave(struct shdma_chan *schan, |
335 | int slave_id, bool try) | 356 | int slave_id, dma_addr_t slave_addr, bool try) |
336 | { | 357 | { |
337 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | 358 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, |
338 | shdma_chan); | 359 | shdma_chan); |
@@ -340,8 +361,10 @@ static int sh_dmae_set_slave(struct shdma_chan *schan, | |||
340 | if (!cfg) | 361 | if (!cfg) |
341 | return -ENXIO; | 362 | return -ENXIO; |
342 | 363 | ||
343 | if (!try) | 364 | if (!try) { |
344 | sh_chan->config = cfg; | 365 | sh_chan->config = cfg; |
366 | sh_chan->slave_addr = slave_addr ? : cfg->addr; | ||
367 | } | ||
345 | 368 | ||
346 | return 0; | 369 | return 0; |
347 | } | 370 | } |
@@ -505,7 +528,8 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, | |||
505 | struct shdma_chan *schan; | 528 | struct shdma_chan *schan; |
506 | int err; | 529 | int err; |
507 | 530 | ||
508 | sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); | 531 | sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan), |
532 | GFP_KERNEL); | ||
509 | if (!sh_chan) { | 533 | if (!sh_chan) { |
510 | dev_err(sdev->dma_dev.dev, | 534 | dev_err(sdev->dma_dev.dev, |
511 | "No free memory for allocating dma channels!\n"); | 535 | "No free memory for allocating dma channels!\n"); |
@@ -517,7 +541,7 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, | |||
517 | 541 | ||
518 | shdma_chan_probe(sdev, schan, id); | 542 | shdma_chan_probe(sdev, schan, id); |
519 | 543 | ||
520 | sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); | 544 | sh_chan->base = shdev->chan_reg + chan_pdata->offset; |
521 | 545 | ||
522 | /* set up channel irq */ | 546 | /* set up channel irq */ |
523 | if (pdev->id >= 0) | 547 | if (pdev->id >= 0) |
@@ -541,7 +565,6 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, | |||
541 | err_no_irq: | 565 | err_no_irq: |
542 | /* remove from dmaengine device node */ | 566 | /* remove from dmaengine device node */ |
543 | shdma_chan_remove(schan); | 567 | shdma_chan_remove(schan); |
544 | kfree(sh_chan); | ||
545 | return err; | 568 | return err; |
546 | } | 569 | } |
547 | 570 | ||
@@ -552,14 +575,9 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) | |||
552 | int i; | 575 | int i; |
553 | 576 | ||
554 | shdma_for_each_chan(schan, &shdev->shdma_dev, i) { | 577 | shdma_for_each_chan(schan, &shdev->shdma_dev, i) { |
555 | struct sh_dmae_chan *sh_chan = container_of(schan, | ||
556 | struct sh_dmae_chan, shdma_chan); | ||
557 | BUG_ON(!schan); | 578 | BUG_ON(!schan); |
558 | 579 | ||
559 | shdma_free_irq(&sh_chan->shdma_chan); | ||
560 | |||
561 | shdma_chan_remove(schan); | 580 | shdma_chan_remove(schan); |
562 | kfree(sh_chan); | ||
563 | } | 581 | } |
564 | dma_dev->chancnt = 0; | 582 | dma_dev->chancnt = 0; |
565 | } | 583 | } |
@@ -636,7 +654,7 @@ static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan) | |||
636 | * This is an exclusive slave DMA operation, may only be called after a | 654 | * This is an exclusive slave DMA operation, may only be called after a |
637 | * successful slave configuration. | 655 | * successful slave configuration. |
638 | */ | 656 | */ |
639 | return sh_chan->config->addr; | 657 | return sh_chan->slave_addr; |
640 | } | 658 | } |
641 | 659 | ||
642 | static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i) | 660 | static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i) |
@@ -658,9 +676,15 @@ static const struct shdma_ops sh_dmae_shdma_ops = { | |||
658 | .get_partial = sh_dmae_get_partial, | 676 | .get_partial = sh_dmae_get_partial, |
659 | }; | 677 | }; |
660 | 678 | ||
679 | static const struct of_device_id sh_dmae_of_match[] = { | ||
680 | {.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,}, | ||
681 | {} | ||
682 | }; | ||
683 | MODULE_DEVICE_TABLE(of, sh_dmae_of_match); | ||
684 | |||
661 | static int sh_dmae_probe(struct platform_device *pdev) | 685 | static int sh_dmae_probe(struct platform_device *pdev) |
662 | { | 686 | { |
663 | struct sh_dmae_pdata *pdata = pdev->dev.platform_data; | 687 | const struct sh_dmae_pdata *pdata; |
664 | unsigned long irqflags = IRQF_DISABLED, | 688 | unsigned long irqflags = IRQF_DISABLED, |
665 | chan_flag[SH_DMAE_MAX_CHANNELS] = {}; | 689 | chan_flag[SH_DMAE_MAX_CHANNELS] = {}; |
666 | int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; | 690 | int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; |
@@ -669,6 +693,11 @@ static int sh_dmae_probe(struct platform_device *pdev) | |||
669 | struct dma_device *dma_dev; | 693 | struct dma_device *dma_dev; |
670 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; | 694 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; |
671 | 695 | ||
696 | if (pdev->dev.of_node) | ||
697 | pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data; | ||
698 | else | ||
699 | pdata = dev_get_platdata(&pdev->dev); | ||
700 | |||
672 | /* get platform data */ | 701 | /* get platform data */ |
673 | if (!pdata || !pdata->channel_num) | 702 | if (!pdata || !pdata->channel_num) |
674 | return -ENODEV; | 703 | return -ENODEV; |
@@ -696,33 +725,22 @@ static int sh_dmae_probe(struct platform_device *pdev) | |||
696 | if (!chan || !errirq_res) | 725 | if (!chan || !errirq_res) |
697 | return -ENODEV; | 726 | return -ENODEV; |
698 | 727 | ||
699 | if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { | 728 | shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device), |
700 | dev_err(&pdev->dev, "DMAC register region already claimed\n"); | 729 | GFP_KERNEL); |
701 | return -EBUSY; | ||
702 | } | ||
703 | |||
704 | if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { | ||
705 | dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); | ||
706 | err = -EBUSY; | ||
707 | goto ermrdmars; | ||
708 | } | ||
709 | |||
710 | err = -ENOMEM; | ||
711 | shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); | ||
712 | if (!shdev) { | 730 | if (!shdev) { |
713 | dev_err(&pdev->dev, "Not enough memory\n"); | 731 | dev_err(&pdev->dev, "Not enough memory\n"); |
714 | goto ealloc; | 732 | return -ENOMEM; |
715 | } | 733 | } |
716 | 734 | ||
717 | dma_dev = &shdev->shdma_dev.dma_dev; | 735 | dma_dev = &shdev->shdma_dev.dma_dev; |
718 | 736 | ||
719 | shdev->chan_reg = ioremap(chan->start, resource_size(chan)); | 737 | shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan); |
720 | if (!shdev->chan_reg) | 738 | if (IS_ERR(shdev->chan_reg)) |
721 | goto emapchan; | 739 | return PTR_ERR(shdev->chan_reg); |
722 | if (dmars) { | 740 | if (dmars) { |
723 | shdev->dmars = ioremap(dmars->start, resource_size(dmars)); | 741 | shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars); |
724 | if (!shdev->dmars) | 742 | if (IS_ERR(shdev->dmars)) |
725 | goto emapdmars; | 743 | return PTR_ERR(shdev->dmars); |
726 | } | 744 | } |
727 | 745 | ||
728 | if (!pdata->slave_only) | 746 | if (!pdata->slave_only) |
@@ -783,8 +801,8 @@ static int sh_dmae_probe(struct platform_device *pdev) | |||
783 | 801 | ||
784 | errirq = errirq_res->start; | 802 | errirq = errirq_res->start; |
785 | 803 | ||
786 | err = request_irq(errirq, sh_dmae_err, irqflags, | 804 | err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags, |
787 | "DMAC Address Error", shdev); | 805 | "DMAC Address Error", shdev); |
788 | if (err) { | 806 | if (err) { |
789 | dev_err(&pdev->dev, | 807 | dev_err(&pdev->dev, |
790 | "DMA failed requesting irq #%d, error %d\n", | 808 | "DMA failed requesting irq #%d, error %d\n", |
@@ -862,7 +880,6 @@ chan_probe_err: | |||
862 | sh_dmae_chan_remove(shdev); | 880 | sh_dmae_chan_remove(shdev); |
863 | 881 | ||
864 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | 882 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) |
865 | free_irq(errirq, shdev); | ||
866 | eirq_err: | 883 | eirq_err: |
867 | #endif | 884 | #endif |
868 | rst_err: | 885 | rst_err: |
@@ -873,21 +890,9 @@ rst_err: | |||
873 | pm_runtime_put(&pdev->dev); | 890 | pm_runtime_put(&pdev->dev); |
874 | pm_runtime_disable(&pdev->dev); | 891 | pm_runtime_disable(&pdev->dev); |
875 | 892 | ||
876 | platform_set_drvdata(pdev, NULL); | ||
877 | shdma_cleanup(&shdev->shdma_dev); | 893 | shdma_cleanup(&shdev->shdma_dev); |
878 | eshdma: | 894 | eshdma: |
879 | if (dmars) | ||
880 | iounmap(shdev->dmars); | ||
881 | emapdmars: | ||
882 | iounmap(shdev->chan_reg); | ||
883 | synchronize_rcu(); | 895 | synchronize_rcu(); |
884 | emapchan: | ||
885 | kfree(shdev); | ||
886 | ealloc: | ||
887 | if (dmars) | ||
888 | release_mem_region(dmars->start, resource_size(dmars)); | ||
889 | ermrdmars: | ||
890 | release_mem_region(chan->start, resource_size(chan)); | ||
891 | 896 | ||
892 | return err; | 897 | return err; |
893 | } | 898 | } |
@@ -896,14 +901,9 @@ static int sh_dmae_remove(struct platform_device *pdev) | |||
896 | { | 901 | { |
897 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | 902 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); |
898 | struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; | 903 | struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; |
899 | struct resource *res; | ||
900 | int errirq = platform_get_irq(pdev, 0); | ||
901 | 904 | ||
902 | dma_async_device_unregister(dma_dev); | 905 | dma_async_device_unregister(dma_dev); |
903 | 906 | ||
904 | if (errirq > 0) | ||
905 | free_irq(errirq, shdev); | ||
906 | |||
907 | spin_lock_irq(&sh_dmae_lock); | 907 | spin_lock_irq(&sh_dmae_lock); |
908 | list_del_rcu(&shdev->node); | 908 | list_del_rcu(&shdev->node); |
909 | spin_unlock_irq(&sh_dmae_lock); | 909 | spin_unlock_irq(&sh_dmae_lock); |
@@ -913,31 +913,11 @@ static int sh_dmae_remove(struct platform_device *pdev) | |||
913 | sh_dmae_chan_remove(shdev); | 913 | sh_dmae_chan_remove(shdev); |
914 | shdma_cleanup(&shdev->shdma_dev); | 914 | shdma_cleanup(&shdev->shdma_dev); |
915 | 915 | ||
916 | if (shdev->dmars) | ||
917 | iounmap(shdev->dmars); | ||
918 | iounmap(shdev->chan_reg); | ||
919 | |||
920 | platform_set_drvdata(pdev, NULL); | ||
921 | |||
922 | synchronize_rcu(); | 916 | synchronize_rcu(); |
923 | kfree(shdev); | ||
924 | |||
925 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
926 | if (res) | ||
927 | release_mem_region(res->start, resource_size(res)); | ||
928 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
929 | if (res) | ||
930 | release_mem_region(res->start, resource_size(res)); | ||
931 | 917 | ||
932 | return 0; | 918 | return 0; |
933 | } | 919 | } |
934 | 920 | ||
935 | static const struct of_device_id sh_dmae_of_match[] = { | ||
936 | { .compatible = "renesas,shdma", }, | ||
937 | { } | ||
938 | }; | ||
939 | MODULE_DEVICE_TABLE(of, sh_dmae_of_match); | ||
940 | |||
941 | static struct platform_driver sh_dmae_driver = { | 921 | static struct platform_driver sh_dmae_driver = { |
942 | .driver = { | 922 | .driver = { |
943 | .owner = THIS_MODULE, | 923 | .owner = THIS_MODULE, |
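The probe path above now takes the SoC description either from the OF match entry's .data (so "renesas,shdma-r8a73a4" resolves to r8a73a4_dma_pdata) or from legacy platform data. The same selection, isolated as a sketch for clarity (the helper name is invented; as in sh_dmae_probe(), a failed OF match is not checked):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct sh_dmae_pdata *sh_dmae_pick_pdata(struct platform_device *pdev)
{
	if (pdev->dev.of_node)
		return of_match_device(sh_dmae_of_match, &pdev->dev)->data;

	return dev_get_platdata(&pdev->dev);
}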
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c index e7c94bbddb53..c7e9cdff0708 100644 --- a/drivers/dma/sh/sudmac.c +++ b/drivers/dma/sh/sudmac.c | |||
@@ -150,7 +150,8 @@ static const struct sudmac_slave_config *sudmac_find_slave( | |||
150 | return NULL; | 150 | return NULL; |
151 | } | 151 | } |
152 | 152 | ||
153 | static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, bool try) | 153 | static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, |
154 | dma_addr_t slave_addr, bool try) | ||
154 | { | 155 | { |
155 | struct sudmac_chan *sc = to_chan(schan); | 156 | struct sudmac_chan *sc = to_chan(schan); |
156 | const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id); | 157 | const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id); |
@@ -298,11 +299,8 @@ static void sudmac_chan_remove(struct sudmac_device *su_dev) | |||
298 | int i; | 299 | int i; |
299 | 300 | ||
300 | shdma_for_each_chan(schan, &su_dev->shdma_dev, i) { | 301 | shdma_for_each_chan(schan, &su_dev->shdma_dev, i) { |
301 | struct sudmac_chan *sc = to_chan(schan); | ||
302 | |||
303 | BUG_ON(!schan); | 302 | BUG_ON(!schan); |
304 | 303 | ||
305 | shdma_free_irq(&sc->shdma_chan); | ||
306 | shdma_chan_remove(schan); | 304 | shdma_chan_remove(schan); |
307 | } | 305 | } |
308 | dma_dev->chancnt = 0; | 306 | dma_dev->chancnt = 0; |
@@ -335,7 +333,7 @@ static const struct shdma_ops sudmac_shdma_ops = { | |||
335 | 333 | ||
336 | static int sudmac_probe(struct platform_device *pdev) | 334 | static int sudmac_probe(struct platform_device *pdev) |
337 | { | 335 | { |
338 | struct sudmac_pdata *pdata = pdev->dev.platform_data; | 336 | struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev); |
339 | int err, i; | 337 | int err, i; |
340 | struct sudmac_device *su_dev; | 338 | struct sudmac_device *su_dev; |
341 | struct dma_device *dma_dev; | 339 | struct dma_device *dma_dev; |
@@ -345,9 +343,8 @@ static int sudmac_probe(struct platform_device *pdev) | |||
345 | if (!pdata) | 343 | if (!pdata) |
346 | return -ENODEV; | 344 | return -ENODEV; |
347 | 345 | ||
348 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
349 | irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 346 | irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
350 | if (!chan || !irq_res) | 347 | if (!irq_res) |
351 | return -ENODEV; | 348 | return -ENODEV; |
352 | 349 | ||
353 | err = -ENOMEM; | 350 | err = -ENOMEM; |
@@ -360,9 +357,10 @@ static int sudmac_probe(struct platform_device *pdev) | |||
360 | 357 | ||
361 | dma_dev = &su_dev->shdma_dev.dma_dev; | 358 | dma_dev = &su_dev->shdma_dev.dma_dev; |
362 | 359 | ||
363 | su_dev->chan_reg = devm_request_and_ioremap(&pdev->dev, chan); | 360 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
364 | if (!su_dev->chan_reg) | 361 | su_dev->chan_reg = devm_ioremap_resource(&pdev->dev, chan); |
365 | return err; | 362 | if (IS_ERR(su_dev->chan_reg)) |
363 | return PTR_ERR(su_dev->chan_reg); | ||
366 | 364 | ||
367 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); | 365 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); |
368 | 366 | ||
@@ -373,7 +371,7 @@ static int sudmac_probe(struct platform_device *pdev) | |||
373 | return err; | 371 | return err; |
374 | 372 | ||
375 | /* platform data */ | 373 | /* platform data */ |
376 | su_dev->pdata = pdev->dev.platform_data; | 374 | su_dev->pdata = dev_get_platdata(&pdev->dev); |
377 | 375 | ||
378 | platform_set_drvdata(pdev, su_dev); | 376 | platform_set_drvdata(pdev, su_dev); |
379 | 377 | ||
@@ -393,7 +391,6 @@ static int sudmac_probe(struct platform_device *pdev) | |||
393 | chan_probe_err: | 391 | chan_probe_err: |
394 | sudmac_chan_remove(su_dev); | 392 | sudmac_chan_remove(su_dev); |
395 | 393 | ||
396 | platform_set_drvdata(pdev, NULL); | ||
397 | shdma_cleanup(&su_dev->shdma_dev); | 394 | shdma_cleanup(&su_dev->shdma_dev); |
398 | 395 | ||
399 | return err; | 396 | return err; |
@@ -407,7 +404,6 @@ static int sudmac_remove(struct platform_device *pdev) | |||
407 | dma_async_device_unregister(dma_dev); | 404 | dma_async_device_unregister(dma_dev); |
408 | sudmac_chan_remove(su_dev); | 405 | sudmac_chan_remove(su_dev); |
409 | shdma_cleanup(&su_dev->shdma_dev); | 406 | shdma_cleanup(&su_dev->shdma_dev); |
410 | platform_set_drvdata(pdev, NULL); | ||
411 | 407 | ||
412 | return 0; | 408 | return 0; |
413 | } | 409 | } |
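The sudmac probe conversion above shows the devm_request_and_ioremap() to devm_ioremap_resource() switch that recurs throughout this pull. The newer helper requests the memory region, maps it, prints its own error message, copes with a NULL resource (which is why the explicit !chan check could be dropped), and reports failure as an ERR_PTR value rather than NULL. A minimal sketch of the calling convention, with a hypothetical foo_probe():

    static int foo_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base);   /* the helper already logged why */

            /* use 'base'; no iounmap() is needed on any exit path */
            return 0;
    }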
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 716b23e4f327..6aec3ad814d3 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/dmaengine.h> | 10 | #include <linux/dmaengine.h> |
11 | #include <linux/dma-mapping.h> | 11 | #include <linux/dma-mapping.h> |
12 | #include <linux/pm_runtime.h> | ||
12 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
13 | #include <linux/io.h> | 14 | #include <linux/io.h> |
14 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
@@ -73,6 +74,11 @@ struct sirfsoc_dma_chan { | |||
73 | int mode; | 74 | int mode; |
74 | }; | 75 | }; |
75 | 76 | ||
77 | struct sirfsoc_dma_regs { | ||
78 | u32 ctrl[SIRFSOC_DMA_CHANNELS]; | ||
79 | u32 interrupt_en; | ||
80 | }; | ||
81 | |||
76 | struct sirfsoc_dma { | 82 | struct sirfsoc_dma { |
77 | struct dma_device dma; | 83 | struct dma_device dma; |
78 | struct tasklet_struct tasklet; | 84 | struct tasklet_struct tasklet; |
@@ -81,10 +87,13 @@ struct sirfsoc_dma { | |||
81 | int irq; | 87 | int irq; |
82 | struct clk *clk; | 88 | struct clk *clk; |
83 | bool is_marco; | 89 | bool is_marco; |
90 | struct sirfsoc_dma_regs regs_save; | ||
84 | }; | 91 | }; |
85 | 92 | ||
86 | #define DRV_NAME "sirfsoc_dma" | 93 | #define DRV_NAME "sirfsoc_dma" |
87 | 94 | ||
95 | static int sirfsoc_dma_runtime_suspend(struct device *dev); | ||
96 | |||
88 | /* Convert struct dma_chan to struct sirfsoc_dma_chan */ | 97 | /* Convert struct dma_chan to struct sirfsoc_dma_chan */ |
89 | static inline | 98 | static inline |
90 | struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c) | 99 | struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c) |
@@ -393,6 +402,8 @@ static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan) | |||
393 | LIST_HEAD(descs); | 402 | LIST_HEAD(descs); |
394 | int i; | 403 | int i; |
395 | 404 | ||
405 | pm_runtime_get_sync(sdma->dma.dev); | ||
406 | |||
396 | /* Alloc descriptors for this channel */ | 407 | /* Alloc descriptors for this channel */ |
397 | for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) { | 408 | for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) { |
398 | sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL); | 409 | sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL); |
@@ -425,6 +436,7 @@ static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan) | |||
425 | static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan) | 436 | static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan) |
426 | { | 437 | { |
427 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | 438 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); |
439 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); | ||
428 | struct sirfsoc_dma_desc *sdesc, *tmp; | 440 | struct sirfsoc_dma_desc *sdesc, *tmp; |
429 | unsigned long flags; | 441 | unsigned long flags; |
430 | LIST_HEAD(descs); | 442 | LIST_HEAD(descs); |
@@ -445,6 +457,8 @@ static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan) | |||
445 | /* Free descriptors */ | 457 | /* Free descriptors */ |
446 | list_for_each_entry_safe(sdesc, tmp, &descs, node) | 458 | list_for_each_entry_safe(sdesc, tmp, &descs, node) |
447 | kfree(sdesc); | 459 | kfree(sdesc); |
460 | |||
461 | pm_runtime_put(sdma->dma.dev); | ||
448 | } | 462 | } |
449 | 463 | ||
450 | /* Send pending descriptor to hardware */ | 464 | /* Send pending descriptor to hardware */ |
@@ -595,7 +609,7 @@ sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr, | |||
595 | spin_unlock_irqrestore(&schan->lock, iflags); | 609 | spin_unlock_irqrestore(&schan->lock, iflags); |
596 | 610 | ||
597 | if (!sdesc) | 611 | if (!sdesc) |
598 | return 0; | 612 | return NULL; |
599 | 613 | ||
600 | /* Place descriptor in prepared list */ | 614 | /* Place descriptor in prepared list */ |
601 | spin_lock_irqsave(&schan->lock, iflags); | 615 | spin_lock_irqsave(&schan->lock, iflags); |
@@ -723,14 +737,14 @@ static int sirfsoc_dma_probe(struct platform_device *op) | |||
723 | 737 | ||
724 | tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma); | 738 | tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma); |
725 | 739 | ||
726 | clk_prepare_enable(sdma->clk); | ||
727 | |||
728 | /* Register DMA engine */ | 740 | /* Register DMA engine */ |
729 | dev_set_drvdata(dev, sdma); | 741 | dev_set_drvdata(dev, sdma); |
742 | |||
730 | ret = dma_async_device_register(dma); | 743 | ret = dma_async_device_register(dma); |
731 | if (ret) | 744 | if (ret) |
732 | goto free_irq; | 745 | goto free_irq; |
733 | 746 | ||
747 | pm_runtime_enable(&op->dev); | ||
734 | dev_info(dev, "initialized SIRFSOC DMAC driver\n"); | 748 | dev_info(dev, "initialized SIRFSOC DMAC driver\n"); |
735 | 749 | ||
736 | return 0; | 750 | return 0; |
@@ -747,13 +761,124 @@ static int sirfsoc_dma_remove(struct platform_device *op) | |||
747 | struct device *dev = &op->dev; | 761 | struct device *dev = &op->dev; |
748 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | 762 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); |
749 | 763 | ||
750 | clk_disable_unprepare(sdma->clk); | ||
751 | dma_async_device_unregister(&sdma->dma); | 764 | dma_async_device_unregister(&sdma->dma); |
752 | free_irq(sdma->irq, sdma); | 765 | free_irq(sdma->irq, sdma); |
753 | irq_dispose_mapping(sdma->irq); | 766 | irq_dispose_mapping(sdma->irq); |
767 | pm_runtime_disable(&op->dev); | ||
768 | if (!pm_runtime_status_suspended(&op->dev)) | ||
769 | sirfsoc_dma_runtime_suspend(&op->dev); | ||
770 | |||
771 | return 0; | ||
772 | } | ||
773 | |||
774 | static int sirfsoc_dma_runtime_suspend(struct device *dev) | ||
775 | { | ||
776 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | ||
777 | |||
778 | clk_disable_unprepare(sdma->clk); | ||
779 | return 0; | ||
780 | } | ||
781 | |||
782 | static int sirfsoc_dma_runtime_resume(struct device *dev) | ||
783 | { | ||
784 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | ||
785 | int ret; | ||
786 | |||
787 | ret = clk_prepare_enable(sdma->clk); | ||
788 | if (ret < 0) { | ||
789 | dev_err(dev, "clk_enable failed: %d\n", ret); | ||
790 | return ret; | ||
791 | } | ||
792 | return 0; | ||
793 | } | ||
794 | |||
795 | static int sirfsoc_dma_pm_suspend(struct device *dev) | ||
796 | { | ||
797 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | ||
798 | struct sirfsoc_dma_regs *save = &sdma->regs_save; | ||
799 | struct sirfsoc_dma_desc *sdesc; | ||
800 | struct sirfsoc_dma_chan *schan; | ||
801 | int ch; | ||
802 | int ret; | ||
803 | |||
804 | /* | ||
805 | * if we were runtime-suspended before, resume to enable clock | ||
806 | * before accessing register | ||
807 | */ | ||
808 | if (pm_runtime_status_suspended(dev)) { | ||
809 | ret = sirfsoc_dma_runtime_resume(dev); | ||
810 | if (ret < 0) | ||
811 | return ret; | ||
812 | } | ||
813 | |||
814 | /* | ||
815 | * DMA controller will lose all registers while suspending | ||
816 | * so we need to save registers for active channels | ||
817 | */ | ||
818 | for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) { | ||
819 | schan = &sdma->channels[ch]; | ||
820 | if (list_empty(&schan->active)) | ||
821 | continue; | ||
822 | sdesc = list_first_entry(&schan->active, | ||
823 | struct sirfsoc_dma_desc, | ||
824 | node); | ||
825 | save->ctrl[ch] = readl_relaxed(sdma->base + | ||
826 | ch * 0x10 + SIRFSOC_DMA_CH_CTRL); | ||
827 | } | ||
828 | save->interrupt_en = readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN); | ||
829 | |||
830 | /* Disable clock */ | ||
831 | sirfsoc_dma_runtime_suspend(dev); | ||
832 | |||
833 | return 0; | ||
834 | } | ||
835 | |||
836 | static int sirfsoc_dma_pm_resume(struct device *dev) | ||
837 | { | ||
838 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | ||
839 | struct sirfsoc_dma_regs *save = &sdma->regs_save; | ||
840 | struct sirfsoc_dma_desc *sdesc; | ||
841 | struct sirfsoc_dma_chan *schan; | ||
842 | int ch; | ||
843 | int ret; | ||
844 | |||
845 | /* Enable clock before accessing register */ | ||
846 | ret = sirfsoc_dma_runtime_resume(dev); | ||
847 | if (ret < 0) | ||
848 | return ret; | ||
849 | |||
850 | writel_relaxed(save->interrupt_en, sdma->base + SIRFSOC_DMA_INT_EN); | ||
851 | for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) { | ||
852 | schan = &sdma->channels[ch]; | ||
853 | if (list_empty(&schan->active)) | ||
854 | continue; | ||
855 | sdesc = list_first_entry(&schan->active, | ||
856 | struct sirfsoc_dma_desc, | ||
857 | node); | ||
858 | writel_relaxed(sdesc->width, | ||
859 | sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4); | ||
860 | writel_relaxed(sdesc->xlen, | ||
861 | sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN); | ||
862 | writel_relaxed(sdesc->ylen, | ||
863 | sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN); | ||
864 | writel_relaxed(save->ctrl[ch], | ||
865 | sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL); | ||
866 | writel_relaxed(sdesc->addr >> 2, | ||
867 | sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR); | ||
868 | } | ||
869 | |||
870 | /* if we were runtime-suspended before, suspend again */ | ||
871 | if (pm_runtime_status_suspended(dev)) | ||
872 | sirfsoc_dma_runtime_suspend(dev); | ||
873 | |||
754 | return 0; | 874 | return 0; |
755 | } | 875 | } |
756 | 876 | ||
877 | static const struct dev_pm_ops sirfsoc_dma_pm_ops = { | ||
878 | SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL) | ||
879 | SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume) | ||
880 | }; | ||
881 | |||
757 | static struct of_device_id sirfsoc_dma_match[] = { | 882 | static struct of_device_id sirfsoc_dma_match[] = { |
758 | { .compatible = "sirf,prima2-dmac", }, | 883 | { .compatible = "sirf,prima2-dmac", }, |
759 | { .compatible = "sirf,marco-dmac", }, | 884 | { .compatible = "sirf,marco-dmac", }, |
@@ -766,6 +891,7 @@ static struct platform_driver sirfsoc_dma_driver = { | |||
766 | .driver = { | 891 | .driver = { |
767 | .name = DRV_NAME, | 892 | .name = DRV_NAME, |
768 | .owner = THIS_MODULE, | 893 | .owner = THIS_MODULE, |
894 | .pm = &sirfsoc_dma_pm_ops, | ||
769 | .of_match_table = sirfsoc_dma_match, | 895 | .of_match_table = sirfsoc_dma_match, |
770 | }, | 896 | }, |
771 | }; | 897 | }; |
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 5ab5880d5c90..82d2b97ad942 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -2591,6 +2591,9 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | |||
2591 | int i; | 2591 | int i; |
2592 | 2592 | ||
2593 | sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT); | 2593 | sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT); |
2594 | if (!sg) | ||
2595 | return NULL; | ||
2596 | |||
2594 | for (i = 0; i < periods; i++) { | 2597 | for (i = 0; i < periods; i++) { |
2595 | sg_dma_address(&sg[i]) = dma_addr; | 2598 | sg_dma_address(&sg[i]) = dma_addr; |
2596 | sg_dma_len(&sg[i]) = period_len; | 2599 | sg_dma_len(&sg[i]) = period_len; |
@@ -3139,7 +3142,7 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
3139 | 3142 | ||
3140 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | 3143 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) |
3141 | { | 3144 | { |
3142 | struct stedma40_platform_data *plat_data = pdev->dev.platform_data; | 3145 | struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); |
3143 | struct clk *clk = NULL; | 3146 | struct clk *clk = NULL; |
3144 | void __iomem *virtbase = NULL; | 3147 | void __iomem *virtbase = NULL; |
3145 | struct resource *res = NULL; | 3148 | struct resource *res = NULL; |
@@ -3226,8 +3229,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
3226 | num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY; | 3229 | num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY; |
3227 | 3230 | ||
3228 | dev_info(&pdev->dev, | 3231 | dev_info(&pdev->dev, |
3229 | "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n", | 3232 | "hardware rev: %d @ %pa with %d physical and %d logical channels\n", |
3230 | rev, res->start, num_phy_chans, num_log_chans); | 3233 | rev, &res->start, num_phy_chans, num_log_chans); |
3231 | 3234 | ||
3232 | base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + | 3235 | base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + |
3233 | (num_phy_chans + num_log_chans + num_memcpy_chans) * | 3236 | (num_phy_chans + num_log_chans + num_memcpy_chans) * |
@@ -3485,7 +3488,7 @@ static int __init d40_of_probe(struct platform_device *pdev, | |||
3485 | { | 3488 | { |
3486 | struct stedma40_platform_data *pdata; | 3489 | struct stedma40_platform_data *pdata; |
3487 | int num_phy = 0, num_memcpy = 0, num_disabled = 0; | 3490 | int num_phy = 0, num_memcpy = 0, num_disabled = 0; |
3488 | const const __be32 *list; | 3491 | const __be32 *list; |
3489 | 3492 | ||
3490 | pdata = devm_kzalloc(&pdev->dev, | 3493 | pdata = devm_kzalloc(&pdev->dev, |
3491 | sizeof(struct stedma40_platform_data), | 3494 | sizeof(struct stedma40_platform_data), |
@@ -3516,7 +3519,7 @@ static int __init d40_of_probe(struct platform_device *pdev, | |||
3516 | list = of_get_property(np, "disabled-channels", &num_disabled); | 3519 | list = of_get_property(np, "disabled-channels", &num_disabled); |
3517 | num_disabled /= sizeof(*list); | 3520 | num_disabled /= sizeof(*list); |
3518 | 3521 | ||
3519 | if (num_disabled > STEDMA40_MAX_PHYS || num_disabled < 0) { | 3522 | if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) { |
3520 | d40_err(&pdev->dev, | 3523 | d40_err(&pdev->dev, |
3521 | "Invalid number of disabled channels specified (%d)\n", | 3524 | "Invalid number of disabled channels specified (%d)\n", |
3522 | num_disabled); | 3525 | num_disabled); |
@@ -3535,7 +3538,7 @@ static int __init d40_of_probe(struct platform_device *pdev, | |||
3535 | 3538 | ||
3536 | static int __init d40_probe(struct platform_device *pdev) | 3539 | static int __init d40_probe(struct platform_device *pdev) |
3537 | { | 3540 | { |
3538 | struct stedma40_platform_data *plat_data = pdev->dev.platform_data; | 3541 | struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); |
3539 | struct device_node *np = pdev->dev.of_node; | 3542 | struct device_node *np = pdev->dev.of_node; |
3540 | int ret = -ENOENT; | 3543 | int ret = -ENOENT; |
3541 | struct d40_base *base = NULL; | 3544 | struct d40_base *base = NULL; |
@@ -3579,9 +3582,7 @@ static int __init d40_probe(struct platform_device *pdev) | |||
3579 | if (request_mem_region(res->start, resource_size(res), | 3582 | if (request_mem_region(res->start, resource_size(res), |
3580 | D40_NAME " I/O lcpa") == NULL) { | 3583 | D40_NAME " I/O lcpa") == NULL) { |
3581 | ret = -EBUSY; | 3584 | ret = -EBUSY; |
3582 | d40_err(&pdev->dev, | 3585 | d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res); |
3583 | "Failed to request LCPA region 0x%x-0x%x\n", | ||
3584 | res->start, res->end); | ||
3585 | goto failure; | 3586 | goto failure; |
3586 | } | 3587 | } |
3587 | 3588 | ||
@@ -3589,8 +3590,8 @@ static int __init d40_probe(struct platform_device *pdev) | |||
3589 | val = readl(base->virtbase + D40_DREG_LCPA); | 3590 | val = readl(base->virtbase + D40_DREG_LCPA); |
3590 | if (res->start != val && val != 0) { | 3591 | if (res->start != val && val != 0) { |
3591 | dev_warn(&pdev->dev, | 3592 | dev_warn(&pdev->dev, |
3592 | "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n", | 3593 | "[%s] Mismatch LCPA dma 0x%x, def %pa\n", |
3593 | __func__, val, res->start); | 3594 | __func__, val, &res->start); |
3594 | } else | 3595 | } else |
3595 | writel(res->start, base->virtbase + D40_DREG_LCPA); | 3596 | writel(res->start, base->virtbase + D40_DREG_LCPA); |
3596 | 3597 | ||
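Two of the ste_dma40 hunks replace 0x%x with the %pa and %pR printk extensions. resource_size_t can be wider than int (for example with LPAE), so printing res->start with a plain %x both truncates the value and provokes format warnings; %pa prints a phys_addr_t/resource_size_t that is passed by pointer, and %pR prints an entire struct resource including its flags. Roughly, with a hypothetical message:

    struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

    /* %pa takes a pointer to the address, hence &res->start */
    dev_info(&pdev->dev, "registers at %pa, region %pR\n", &res->start, res);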
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index f137914d7b16..5d4986e5f5fa 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
@@ -767,13 +767,11 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, | |||
767 | unsigned long flags; | 767 | unsigned long flags; |
768 | unsigned int residual; | 768 | unsigned int residual; |
769 | 769 | ||
770 | spin_lock_irqsave(&tdc->lock, flags); | ||
771 | |||
772 | ret = dma_cookie_status(dc, cookie, txstate); | 770 | ret = dma_cookie_status(dc, cookie, txstate); |
773 | if (ret == DMA_SUCCESS) { | 771 | if (ret == DMA_SUCCESS) |
774 | spin_unlock_irqrestore(&tdc->lock, flags); | ||
775 | return ret; | 772 | return ret; |
776 | } | 773 | |
774 | spin_lock_irqsave(&tdc->lock, flags); | ||
777 | 775 | ||
778 | /* Check on wait_ack desc status */ | 776 | /* Check on wait_ack desc status */ |
779 | list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { | 777 | list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { |
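The tegra20-apb-dma hunk above (and the txx9dmac one further down) tighten the tx_status callbacks around dma_cookie_status(). That helper only reads the cookie counters, so it does not need the channel lock, and when it already reports DMA_SUCCESS the transfer is complete and there is no residue left to compute; the lock is then taken only for the slow path that walks the descriptor lists. A sketch of the resulting shape, with illustrative foo_* names and residue walk:

    static enum dma_status foo_tx_status(struct dma_chan *c, dma_cookie_t cookie,
                                         struct dma_tx_state *txstate)
    {
            struct foo_chan *fc = to_foo_chan(c);
            enum dma_status ret;
            unsigned long flags;

            ret = dma_cookie_status(c, cookie, txstate);
            if (ret == DMA_SUCCESS)
                    return ret;                     /* finished, nothing to add */

            spin_lock_irqsave(&fc->lock, flags);
            /* slow path: walk the pending descriptors for this cookie */
            dma_set_residue(txstate, foo_calc_residue(fc, cookie));
            spin_unlock_irqrestore(&fc->lock, flags);

            return ret;
    }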
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 0ef43c136aa7..28af214fce04 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
@@ -669,7 +669,7 @@ static irqreturn_t td_irq(int irq, void *devid) | |||
669 | 669 | ||
670 | static int td_probe(struct platform_device *pdev) | 670 | static int td_probe(struct platform_device *pdev) |
671 | { | 671 | { |
672 | struct timb_dma_platform_data *pdata = pdev->dev.platform_data; | 672 | struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev); |
673 | struct timb_dma *td; | 673 | struct timb_dma *td; |
674 | struct resource *iomem; | 674 | struct resource *iomem; |
675 | int irq; | 675 | int irq; |
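The one-line timb_dma change belongs to the same sweep seen in sudmac and ste_dma40 above and txx9dmac below: platform data is fetched through the dev_get_platdata() accessor instead of dereferencing pdev->dev.platform_data by hand, so drivers stop depending on the field layout of struct device. The accessor is essentially just a thin inline wrapper (its definition in <linux/device.h> amounts to the following), which is why these conversions are purely mechanical:

    static inline void *dev_get_platdata(const struct device *dev)
    {
            return dev->platform_data;
    }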
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index a59fb4841d4c..71e8e775189e 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c | |||
@@ -962,15 +962,14 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
962 | enum dma_status ret; | 962 | enum dma_status ret; |
963 | 963 | ||
964 | ret = dma_cookie_status(chan, cookie, txstate); | 964 | ret = dma_cookie_status(chan, cookie, txstate); |
965 | if (ret != DMA_SUCCESS) { | 965 | if (ret == DMA_SUCCESS) |
966 | spin_lock_bh(&dc->lock); | 966 | return DMA_SUCCESS; |
967 | txx9dmac_scan_descriptors(dc); | ||
968 | spin_unlock_bh(&dc->lock); | ||
969 | 967 | ||
970 | ret = dma_cookie_status(chan, cookie, txstate); | 968 | spin_lock_bh(&dc->lock); |
971 | } | 969 | txx9dmac_scan_descriptors(dc); |
970 | spin_unlock_bh(&dc->lock); | ||
972 | 971 | ||
973 | return ret; | 972 | return dma_cookie_status(chan, cookie, txstate); |
974 | } | 973 | } |
975 | 974 | ||
976 | static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc, | 975 | static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc, |
@@ -1118,9 +1117,10 @@ static void txx9dmac_off(struct txx9dmac_dev *ddev) | |||
1118 | 1117 | ||
1119 | static int __init txx9dmac_chan_probe(struct platform_device *pdev) | 1118 | static int __init txx9dmac_chan_probe(struct platform_device *pdev) |
1120 | { | 1119 | { |
1121 | struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data; | 1120 | struct txx9dmac_chan_platform_data *cpdata = |
1121 | dev_get_platdata(&pdev->dev); | ||
1122 | struct platform_device *dmac_dev = cpdata->dmac_dev; | 1122 | struct platform_device *dmac_dev = cpdata->dmac_dev; |
1123 | struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data; | 1123 | struct txx9dmac_platform_data *pdata = dev_get_platdata(&dmac_dev->dev); |
1124 | struct txx9dmac_chan *dc; | 1124 | struct txx9dmac_chan *dc; |
1125 | int err; | 1125 | int err; |
1126 | int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS; | 1126 | int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS; |
@@ -1203,7 +1203,7 @@ static int txx9dmac_chan_remove(struct platform_device *pdev) | |||
1203 | 1203 | ||
1204 | static int __init txx9dmac_probe(struct platform_device *pdev) | 1204 | static int __init txx9dmac_probe(struct platform_device *pdev) |
1205 | { | 1205 | { |
1206 | struct txx9dmac_platform_data *pdata = pdev->dev.platform_data; | 1206 | struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev); |
1207 | struct resource *io; | 1207 | struct resource *io; |
1208 | struct txx9dmac_dev *ddev; | 1208 | struct txx9dmac_dev *ddev; |
1209 | u32 mcr; | 1209 | u32 mcr; |
@@ -1282,7 +1282,7 @@ static int txx9dmac_resume_noirq(struct device *dev) | |||
1282 | { | 1282 | { |
1283 | struct platform_device *pdev = to_platform_device(dev); | 1283 | struct platform_device *pdev = to_platform_device(dev); |
1284 | struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); | 1284 | struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); |
1285 | struct txx9dmac_platform_data *pdata = pdev->dev.platform_data; | 1285 | struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev); |
1286 | u32 mcr; | 1286 | u32 mcr; |
1287 | 1287 | ||
1288 | mcr = TXX9_DMA_MCR_MSTEN | MCR_LE; | 1288 | mcr = TXX9_DMA_MCR_MSTEN | MCR_LE; |