-rw-r--r--  arch/arm/mach-exynos/dma.c             |   2
-rw-r--r--  arch/arm/mach-spear13xx/spear13xx.c    |   3
-rw-r--r--  arch/avr32/mach-at32ap/at32ap700x.c    |   3
-rw-r--r--  drivers/dma/Kconfig                    |  17
-rw-r--r--  drivers/dma/Makefile                   |   2
-rw-r--r--  drivers/dma/amba-pl08x.c               |   2
-rw-r--r--  drivers/dma/dw_dmac.c                  | 258
-rw-r--r--  drivers/dma/dw_dmac_regs.h             |  48
-rw-r--r--  drivers/dma/edma.c                     | 671
-rw-r--r--  drivers/dma/ioat/dma_v2.c              |   3
-rw-r--r--  drivers/dma/ioat/pci.c                 |  22
-rw-r--r--  drivers/dma/mmp_pdma.c                 | 875
-rw-r--r--  drivers/dma/mmp_tdma.c                 |  51
-rw-r--r--  drivers/dma/mxs-dma.c                  |  14
-rw-r--r--  drivers/dma/pl330.c                    |  78
-rw-r--r--  drivers/dma/sirf-dma.c                 |  23
-rw-r--r--  drivers/dma/ste_dma40.c                |  14
-rw-r--r--  drivers/dma/tegra20-apb-dma.c          |  12
-rw-r--r--  drivers/spi/Kconfig                    |   1
-rw-r--r--  drivers/spi/spi-davinci.c              | 292
-rw-r--r--  include/linux/dw_dmac.h                |   7
-rw-r--r--  include/linux/edma.h                   |  29
-rw-r--r--  include/linux/platform_data/mmp_dma.h  |  19
23 files changed, 2085 insertions, 361 deletions
diff --git a/arch/arm/mach-exynos/dma.c b/arch/arm/mach-exynos/dma.c
index f60b66dbcf84..21d568b3b149 100644
--- a/arch/arm/mach-exynos/dma.c
+++ b/arch/arm/mach-exynos/dma.c
@@ -303,10 +303,12 @@ static int __init exynos_dma_init(void)
 
 	dma_cap_set(DMA_SLAVE, exynos_pdma0_pdata.cap_mask);
 	dma_cap_set(DMA_CYCLIC, exynos_pdma0_pdata.cap_mask);
+	dma_cap_set(DMA_PRIVATE, exynos_pdma0_pdata.cap_mask);
 	amba_device_register(&exynos_pdma0_device, &iomem_resource);
 
 	dma_cap_set(DMA_SLAVE, exynos_pdma1_pdata.cap_mask);
 	dma_cap_set(DMA_CYCLIC, exynos_pdma1_pdata.cap_mask);
+	dma_cap_set(DMA_PRIVATE, exynos_pdma1_pdata.cap_mask);
 	amba_device_register(&exynos_pdma1_device, &iomem_resource);
 
 	dma_cap_set(DMA_MEMCPY, exynos_mdma1_pdata.cap_mask);
diff --git a/arch/arm/mach-spear13xx/spear13xx.c b/arch/arm/mach-spear13xx/spear13xx.c
index e10648801b2e..5633d698f1e1 100644
--- a/arch/arm/mach-spear13xx/spear13xx.c
+++ b/arch/arm/mach-spear13xx/spear13xx.c
@@ -78,6 +78,9 @@ struct dw_dma_platform_data dmac_plat_data = {
 	.nr_channels = 8,
 	.chan_allocation_order = CHAN_ALLOCATION_DESCENDING,
 	.chan_priority = CHAN_PRIORITY_DESCENDING,
+	.block_size = 4095U,
+	.nr_masters = 2,
+	.data_width = { 3, 3, 0, 0 },
 };
 
 void __init spear13xx_l2x0_init(void)
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 0445c4fd67e3..b323d8d3185b 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -605,6 +605,9 @@ static void __init genclk_init_parent(struct clk *clk)
 
 static struct dw_dma_platform_data dw_dmac0_data = {
 	.nr_channels	= 3,
+	.block_size	= 4095U,
+	.nr_masters	= 2,
+	.data_width	= { 2, 2, 0, 0 },
 };
 
 static struct resource dw_dmac0_resource[] = {
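
Note: the spear13xx and at32ap700x hunks above pre-seed the three new dw_dma_platform_data fields (block_size, nr_masters, data_width) that the reworked dw_dmac probe below falls back on when the controller does not advertise encoded parameters. A minimal sketch of a board description using the extended structure follows; the field names come from this series, while the values are purely illustrative:

/*
 * Illustrative sketch only: a DesignWare DMA instance whose hardware
 * does not expose encoded parameters (autocfg) must now be fully
 * described by platform data. The values below are made up.
 */
static struct dw_dma_platform_data example_dmac_pdata = {
	.nr_channels	= 4,
	.block_size	= 4095U,	/* max transfer count per block */
	.nr_masters	= 2,		/* AHB masters wired to this instance */
	/* per-master width as log2(bytes): 2 = 32-bit, 3 = 64-bit bus */
	.data_width	= { 2, 2, 0, 0 },
};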
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d06ea2950dd9..677cd6e4e1a1 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -208,6 +208,16 @@ config SIRF_DMA
 	help
 	  Enable support for the CSR SiRFprimaII DMA engine.
 
+config TI_EDMA
+	tristate "TI EDMA support"
+	depends on ARCH_DAVINCI
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	default n
+	help
+	  Enable support for the TI EDMA controller. This DMA
+	  engine is found on TI DaVinci and AM33xx parts.
+
 config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 	bool
 
@@ -292,6 +302,13 @@ config DMA_OMAP
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MMP_PDMA
+	bool "MMP PDMA support"
+	depends on (ARCH_MMP || ARCH_PXA)
+	select DMA_ENGINE
+	help
+	  Support the MMP PDMA engine for PXA and MMP platform.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 4cf6b128ab9a..7428feaa8705 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_IMX_DMA) += imx-dma.o
 obj-$(CONFIG_MXS_DMA) += mxs-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
+obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
@@ -32,3 +33,4 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 6fbeebb9486f..d1cc5791476b 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1892,6 +1892,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->pd = dev_get_platdata(&adev->dev);
 	if (!pl08x->pd) {
 		dev_err(&adev->dev, "no platform data supplied\n");
+		ret = -EINVAL;
 		goto out_no_platdata;
 	}
 
@@ -1943,6 +1944,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 		dev_err(&adev->dev, "%s failed to allocate "
 			"physical channel holders\n",
 			__func__);
+		ret = -ENOMEM;
 		goto out_no_phychans;
 	}
 
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index d3c5a5a88f1e..c4b0eb3cde81 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -36,12 +36,22 @@
  * which does not support descriptor writeback.
  */
 
+static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
+{
+	return slave ? slave->dst_master : 0;
+}
+
+static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
+{
+	return slave ? slave->src_master : 1;
+}
+
 #define DWC_DEFAULT_CTLLO(_chan) ({				\
 		struct dw_dma_slave *__slave = (_chan->private);	\
 		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
 		struct dma_slave_config *_sconfig = &_dwc->dma_sconfig;	\
-		int _dms = __slave ? __slave->dst_master : 0;	\
-		int _sms = __slave ? __slave->src_master : 1;	\
+		int _dms = dwc_get_dms(__slave);		\
+		int _sms = dwc_get_sms(__slave);		\
 		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
 			DW_DMA_MSIZE_16;			\
 		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
@@ -56,16 +66,6 @@
 })
 
 /*
- * This is configuration-dependent and usually a funny size like 4095.
- *
- * Note that this is a transfer count, i.e. if we transfer 32-bit
- * words, we can do 16380 bytes per descriptor.
- *
- * This parameter is also system-specific.
- */
-#define DWC_MAX_COUNT	4095U
-
-/*
  * Number of descriptors to allocate for each channel. This should be
  * made configurable somehow; preferably, the clients (at least the
  * ones using slave transfers) should be able to give us a hint.
@@ -177,6 +177,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 
 		cfghi = dws->cfg_hi;
 		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+	} else {
+		if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
+			cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
+		else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
+			cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
 	}
 
 	channel_writel(dwc, CFG_LO, cfglo);
@@ -206,7 +211,7 @@ static inline unsigned int dwc_fast_fls(unsigned long long v)
 	return 0;
 }
 
-static void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
+static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
 {
 	dev_err(chan2dev(&dwc->chan),
 		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
@@ -227,10 +232,29 @@ static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
 
 /*----------------------------------------------------------------------*/
 
+/* Perform single block transfer */
+static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
+				       struct dw_desc *desc)
+{
+	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
+	u32		ctllo;
+
+	/* Software emulation of LLP mode relies on interrupts to continue
+	 * multi block transfer. */
+	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
+
+	channel_writel(dwc, SAR, desc->lli.sar);
+	channel_writel(dwc, DAR, desc->lli.dar);
+	channel_writel(dwc, CTL_LO, ctllo);
+	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
+	channel_set_bit(dw, CH_EN, dwc->mask);
+}
+
 /* Called with dwc->lock held and bh disabled */
 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 {
 	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
+	unsigned long	was_soft_llp;
 
 	/* ASSERT:  channel is idle */
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
@@ -242,6 +266,26 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 		return;
 	}
 
+	if (dwc->nollp) {
+		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
+						&dwc->flags);
+		if (was_soft_llp) {
+			dev_err(chan2dev(&dwc->chan),
+				"BUG: Attempted to start new LLP transfer "
+				"inside ongoing one\n");
+			return;
+		}
+
+		dwc_initialize(dwc);
+
+		dwc->tx_list = &first->tx_list;
+		dwc->tx_node_active = first->tx_list.next;
+
+		dwc_do_single_block(dwc, first);
+
+		return;
+	}
+
 	dwc_initialize(dwc);
 
 	channel_writel(dwc, LLP, first->txd.phys);
@@ -553,8 +597,36 @@ static void dw_dma_tasklet(unsigned long data)
 			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
 		else if (status_err & (1 << i))
 			dwc_handle_error(dw, dwc);
-		else if (status_xfer & (1 << i))
+		else if (status_xfer & (1 << i)) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&dwc->lock, flags);
+			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+				if (dwc->tx_node_active != dwc->tx_list) {
+					struct dw_desc *desc =
+						list_entry(dwc->tx_node_active,
+							   struct dw_desc,
+							   desc_node);
+
+					dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+					/* move pointer to next descriptor */
+					dwc->tx_node_active =
+						dwc->tx_node_active->next;
+
+					dwc_do_single_block(dwc, desc);
+
+					spin_unlock_irqrestore(&dwc->lock, flags);
+					continue;
+				} else {
+					/* we are done here */
+					clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+				}
+			}
+			spin_unlock_irqrestore(&dwc->lock, flags);
+
 			dwc_scan_descriptors(dw, dwc);
+		}
 	}
 
 /*
@@ -636,6 +708,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		size_t len, unsigned long flags)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma_slave	*dws = chan->private;
 	struct dw_desc		*desc;
 	struct dw_desc		*first;
 	struct dw_desc		*prev;
@@ -643,6 +716,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	size_t			offset;
 	unsigned int		src_width;
 	unsigned int		dst_width;
+	unsigned int		data_width;
 	u32			ctllo;
 
 	dev_vdbg(chan2dev(chan),
@@ -655,7 +729,11 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		return NULL;
 	}
 
-	src_width = dst_width = dwc_fast_fls(src | dest | len);
+	data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)],
+			   dwc->dw->data_width[dwc_get_dms(dws)]);
+
+	src_width = dst_width = min_t(unsigned int, data_width,
+				      dwc_fast_fls(src | dest | len));
 
 	ctllo = DWC_DEFAULT_CTLLO(chan)
 			| DWC_CTLL_DST_WIDTH(dst_width)
@@ -667,7 +745,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
 	for (offset = 0; offset < len; offset += xfer_count << src_width) {
 		xfer_count = min_t(size_t, (len - offset) >> src_width,
-					   DWC_MAX_COUNT);
+					   dwc->block_size);
 
 		desc = dwc_desc_get(dwc);
 		if (!desc)
@@ -725,6 +803,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	dma_addr_t		reg;
 	unsigned int		reg_width;
 	unsigned int		mem_width;
+	unsigned int		data_width;
 	unsigned int		i;
 	struct scatterlist	*sg;
 	size_t			total_len = 0;
@@ -748,6 +827,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
 			DWC_CTLL_FC(DW_DMA_FC_D_M2P);
 
+		data_width = dwc->dw->data_width[dwc_get_sms(dws)];
+
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc	*desc;
 			u32		len, dlen, mem;
@@ -755,7 +836,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			mem = sg_dma_address(sg);
 			len = sg_dma_len(sg);
 
-			mem_width = dwc_fast_fls(mem | len);
+			mem_width = min_t(unsigned int,
+					  data_width, dwc_fast_fls(mem | len));
 
 slave_sg_todev_fill_desc:
 			desc = dwc_desc_get(dwc);
@@ -768,8 +850,8 @@ slave_sg_todev_fill_desc:
 			desc->lli.sar = mem;
 			desc->lli.dar = reg;
 			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
-			if ((len >> mem_width) > DWC_MAX_COUNT) {
-				dlen = DWC_MAX_COUNT << mem_width;
+			if ((len >> mem_width) > dwc->block_size) {
+				dlen = dwc->block_size << mem_width;
 				mem += dlen;
 				len -= dlen;
 			} else {
@@ -808,6 +890,8 @@ slave_sg_todev_fill_desc:
 		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
 			DWC_CTLL_FC(DW_DMA_FC_D_P2M);
 
+		data_width = dwc->dw->data_width[dwc_get_dms(dws)];
+
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc	*desc;
 			u32		len, dlen, mem;
@@ -815,7 +899,8 @@ slave_sg_todev_fill_desc:
 			mem = sg_dma_address(sg);
 			len = sg_dma_len(sg);
 
-			mem_width = dwc_fast_fls(mem | len);
+			mem_width = min_t(unsigned int,
+					  data_width, dwc_fast_fls(mem | len));
 
 slave_sg_fromdev_fill_desc:
 			desc = dwc_desc_get(dwc);
@@ -828,8 +913,8 @@ slave_sg_fromdev_fill_desc:
 			desc->lli.sar = reg;
 			desc->lli.dar = mem;
 			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
-			if ((len >> reg_width) > DWC_MAX_COUNT) {
-				dlen = DWC_MAX_COUNT << reg_width;
+			if ((len >> reg_width) > dwc->block_size) {
+				dlen = dwc->block_size << reg_width;
 				mem += dlen;
 				len -= dlen;
 			} else {
@@ -945,6 +1030,8 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	} else if (cmd == DMA_TERMINATE_ALL) {
 		spin_lock_irqsave(&dwc->lock, flags);
 
+		clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+
 		dwc_chan_disable(dw, dwc);
 
 		dwc->paused = false;
@@ -1187,6 +1274,13 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 	unsigned long			flags;
 
 	spin_lock_irqsave(&dwc->lock, flags);
+	if (dwc->nollp) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		dev_dbg(chan2dev(&dwc->chan),
+				"channel doesn't support LLP transfers\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
 		spin_unlock_irqrestore(&dwc->lock, flags);
 		dev_dbg(chan2dev(&dwc->chan),
@@ -1212,7 +1306,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 	periods = buf_len / period_len;
 
 	/* Check for too big/unaligned periods and unaligned DMA buffer. */
-	if (period_len > (DWC_MAX_COUNT << reg_width))
+	if (period_len > (dwc->block_size << reg_width))
 		goto out_err;
 	if (unlikely(period_len & ((1 << reg_width) - 1)))
 		goto out_err;
@@ -1374,6 +1468,11 @@ static int __devinit dw_probe(struct platform_device *pdev)
 	struct resource		*io;
 	struct dw_dma		*dw;
 	size_t			size;
+	void __iomem		*regs;
+	bool			autocfg;
+	unsigned int		dw_params;
+	unsigned int		nr_channels;
+	unsigned int		max_blk_size = 0;
 	int			irq;
 	int			err;
 	int			i;
@@ -1390,32 +1489,46 @@ static int __devinit dw_probe(struct platform_device *pdev)
 	if (irq < 0)
 		return irq;
 
-	size = sizeof(struct dw_dma);
-	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
-	dw = kzalloc(size, GFP_KERNEL);
+	regs = devm_request_and_ioremap(&pdev->dev, io);
+	if (!regs)
+		return -EBUSY;
+
+	dw_params = dma_read_byaddr(regs, DW_PARAMS);
+	autocfg = dw_params >> DW_PARAMS_EN & 0x1;
+
+	if (autocfg)
+		nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
+	else
+		nr_channels = pdata->nr_channels;
+
+	size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
+	dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
 	if (!dw)
 		return -ENOMEM;
 
-	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
-		err = -EBUSY;
-		goto err_kfree;
-	}
-
-	dw->regs = ioremap(io->start, DW_REGLEN);
-	if (!dw->regs) {
-		err = -ENOMEM;
-		goto err_release_r;
-	}
-
-	dw->clk = clk_get(&pdev->dev, "hclk");
-	if (IS_ERR(dw->clk)) {
-		err = PTR_ERR(dw->clk);
-		goto err_clk;
+	dw->clk = devm_clk_get(&pdev->dev, "hclk");
+	if (IS_ERR(dw->clk))
+		return PTR_ERR(dw->clk);
+	clk_prepare_enable(dw->clk);
+
+	dw->regs = regs;
+
+	/* get hardware configuration parameters */
+	if (autocfg) {
+		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
+
+		dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
+		for (i = 0; i < dw->nr_masters; i++) {
+			dw->data_width[i] =
+				(dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
+		}
+	} else {
+		dw->nr_masters = pdata->nr_masters;
+		memcpy(dw->data_width, pdata->data_width, 4);
 	}
-	clk_prepare_enable(dw->clk);
 
 	/* Calculate all channel mask before DMA setup */
-	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
+	dw->all_chan_mask = (1 << nr_channels) - 1;
 
 	/* force dma off, just in case */
 	dw_dma_off(dw);
@@ -1423,17 +1536,19 @@ static int __devinit dw_probe(struct platform_device *pdev)
 	/* disable BLOCK interrupts as well */
 	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 
-	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
+	err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
+			       "dw_dmac", dw);
 	if (err)
-		goto err_irq;
+		return err;
 
 	platform_set_drvdata(pdev, dw);
 
 	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
 
 	INIT_LIST_HEAD(&dw->dma.channels);
-	for (i = 0; i < pdata->nr_channels; i++) {
+	for (i = 0; i < nr_channels; i++) {
 		struct dw_dma_chan	*dwc = &dw->chan[i];
+		int			r = nr_channels - i - 1;
 
 		dwc->chan.device = &dw->dma;
 		dma_cookie_init(&dwc->chan);
@@ -1445,7 +1560,7 @@ static int __devinit dw_probe(struct platform_device *pdev)
 
 		/* 7 is highest priority & 0 is lowest. */
 		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
-			dwc->priority = pdata->nr_channels - i - 1;
+			dwc->priority = r;
 		else
 			dwc->priority = i;
 
@@ -1458,6 +1573,32 @@ static int __devinit dw_probe(struct platform_device *pdev)
 		INIT_LIST_HEAD(&dwc->free_list);
 
 		channel_clear_bit(dw, CH_EN, dwc->mask);
+
+		dwc->dw = dw;
+
+		/* hardware configuration */
+		if (autocfg) {
+			unsigned int dwc_params;
+
+			dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
+						     DWC_PARAMS);
+
+			/* Decode maximum block size for given channel. The
+			 * stored 4 bit value represents blocks from 0x00 for 3
+			 * up to 0x0a for 4095. */
+			dwc->block_size =
+				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
+			dwc->nollp =
+				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
+		} else {
+			dwc->block_size = pdata->block_size;
+
+			/* Check if channel supports multi block transfer */
+			channel_writel(dwc, LLP, 0xfffffffc);
+			dwc->nollp =
+				(channel_readl(dwc, LLP) & 0xfffffffc) == 0;
+			channel_writel(dwc, LLP, 0);
+		}
 	}
 
 	/* Clear all interrupts on all channels. */
@@ -1486,35 +1627,21 @@ static int __devinit dw_probe(struct platform_device *pdev)
 	dma_writel(dw, CFG, DW_CFG_DMA_EN);
 
 	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
-			dev_name(&pdev->dev), pdata->nr_channels);
+			dev_name(&pdev->dev), nr_channels);
 
 	dma_async_device_register(&dw->dma);
 
 	return 0;
-
-err_irq:
-	clk_disable_unprepare(dw->clk);
-	clk_put(dw->clk);
-err_clk:
-	iounmap(dw->regs);
-	dw->regs = NULL;
-err_release_r:
-	release_resource(io);
-err_kfree:
-	kfree(dw);
-	return err;
 }
 
 static int __devexit dw_remove(struct platform_device *pdev)
 {
 	struct dw_dma		*dw = platform_get_drvdata(pdev);
 	struct dw_dma_chan	*dwc, *_dwc;
-	struct resource		*io;
 
 	dw_dma_off(dw);
 	dma_async_device_unregister(&dw->dma);
 
-	free_irq(platform_get_irq(pdev, 0), dw);
 	tasklet_kill(&dw->tasklet);
 
 	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
@@ -1523,17 +1650,6 @@ static int __devexit dw_remove(struct platform_device *pdev)
 		channel_clear_bit(dw, CH_EN, dwc->mask);
 	}
 
-	clk_disable_unprepare(dw->clk);
-	clk_put(dw->clk);
-
-	iounmap(dw->regs);
-	dw->regs = NULL;
-
-	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(io->start, DW_REGLEN);
-
-	kfree(dw);
-
 	return 0;
 }
 
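For reference, the per-channel block size decode in the probe hunk above expands to block_size = (4 << encoded) - 1, where "encoded" is the channel's 4-bit field in the MAX_BLK_SIZE register: encoded 0x0 yields (4 << 0) - 1 = 3 and encoded 0xa yields (4 << 10) - 1 = 4095, matching the comment in the diff. A worked restatement, with a hypothetical helper name:

/* Restates the decode from dw_probe() above; the helper name is made up. */
static unsigned int example_decode_block_size(u32 max_blk_size,
					      unsigned int chan)
{
	unsigned int encoded = (max_blk_size >> (4 * chan)) & 0xf;

	return (4 << encoded) - 1;
}
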
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 50830bee087a..ff39fa6cd2bc 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -82,9 +82,39 @@ struct dw_dma_regs {
 	DW_REG(ID);
 	DW_REG(TEST);
 
+	/* reserved */
+	DW_REG(__reserved0);
+	DW_REG(__reserved1);
+
 	/* optional encoded params, 0x3c8..0x3f7 */
+	u32	__reserved;
+
+	/* per-channel configuration registers */
+	u32	DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS];
+	u32	MULTI_BLK_TYPE;
+	u32	MAX_BLK_SIZE;
+
+	/* top-level parameters */
+	u32	DW_PARAMS;
 };
 
+/* To access the registers in early stage of probe */
+#define dma_read_byaddr(addr, name) \
+	readl((addr) + offsetof(struct dw_dma_regs, name))
+
+/* Bitfields in DW_PARAMS */
+#define DW_PARAMS_NR_CHAN	8		/* number of channels */
+#define DW_PARAMS_NR_MASTER	11		/* number of AHB masters */
+#define DW_PARAMS_DATA_WIDTH(n)	(15 + 2 * (n))
+#define DW_PARAMS_DATA_WIDTH1	15		/* master 1 data width */
+#define DW_PARAMS_DATA_WIDTH2	17		/* master 2 data width */
+#define DW_PARAMS_DATA_WIDTH3	19		/* master 3 data width */
+#define DW_PARAMS_DATA_WIDTH4	21		/* master 4 data width */
+#define DW_PARAMS_EN		28		/* encoded parameters */
+
+/* Bitfields in DWC_PARAMS */
+#define DWC_PARAMS_MBLK_EN	11		/* multi block transfer */
+
 /* Bitfields in CTL_LO */
 #define DWC_CTLL_INT_EN		(1 << 0)	/* irqs enabled? */
 #define DWC_CTLL_DST_WIDTH(n)	((n)<<1)	/* bytes per element */
@@ -140,10 +170,9 @@ struct dw_dma_regs {
 /* Bitfields in CFG */
 #define DW_CFG_DMA_EN		(1 << 0)
 
-#define DW_REGLEN		0x400
-
 enum dw_dmac_flags {
 	DW_DMA_IS_CYCLIC = 0,
+	DW_DMA_IS_SOFT_LLP = 1,
 };
 
 struct dw_dma_chan {
@@ -154,6 +183,10 @@ struct dw_dma_chan {
 	bool			paused;
 	bool			initialized;
 
+	/* software emulation of the LLP transfers */
+	struct list_head	*tx_list;
+	struct list_head	*tx_node_active;
+
 	spinlock_t		lock;
 
 	/* these other elements are all protected by lock */
@@ -165,8 +198,15 @@ struct dw_dma_chan {
 
 	unsigned int		descs_allocated;
 
+	/* hardware configuration */
+	unsigned int		block_size;
+	bool			nollp;
+
 	/* configuration passed via DMA_SLAVE_CONFIG */
 	struct dma_slave_config dma_sconfig;
+
+	/* backlink to dw_dma */
+	struct dw_dma		*dw;
 };
 
 static inline struct dw_dma_chan_regs __iomem *
@@ -193,6 +233,10 @@ struct dw_dma {
 
 	u8			all_chan_mask;
 
+	/* hardware configuration */
+	unsigned char		nr_masters;
+	unsigned char		data_width[4];
+
 	struct dw_dma_chan	chan[0];
 };
 
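The DW_PARAMS bitfields above are consumed by the probe hunk in dw_dmac.c. A compact sketch of the decode, restated from the diff (the helper and its out-parameters are hypothetical); note the biased encodings: the 3-bit channel field stores "channels - 1", the 2-bit master field stores "masters - 1", and each 2-bit data width field stores "log2(bytes) - 2":

/* Hypothetical helper restating the DW_PARAMS decode from dw_probe(). */
static void example_decode_dw_params(u32 dw_params,
				     unsigned int *nr_channels,
				     unsigned int *nr_masters,
				     unsigned int data_width[4])
{
	unsigned int i;

	/* only meaningful when the block implements encoded parameters */
	if (!(dw_params >> DW_PARAMS_EN & 0x1))
		return;

	*nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
	*nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 0x3) + 1;

	for (i = 0; i < *nr_masters; i++)
		/* 0..3 decodes to log2(bytes) of 2..5 (32- to 256-bit) */
		data_width[i] = (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 0x3) + 2;
}
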
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c new file mode 100644 index 000000000000..05aea3ce8506 --- /dev/null +++ b/drivers/dma/edma.c | |||
@@ -0,0 +1,671 @@ | |||
1 | /* | ||
2 | * TI EDMA DMA engine driver | ||
3 | * | ||
4 | * Copyright 2012 Texas Instruments | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License as | ||
8 | * published by the Free Software Foundation version 2. | ||
9 | * | ||
10 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
11 | * kind, whether express or implied; without even the implied warranty | ||
12 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/dmaengine.h> | ||
17 | #include <linux/dma-mapping.h> | ||
18 | #include <linux/err.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/list.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/spinlock.h> | ||
26 | |||
27 | #include <mach/edma.h> | ||
28 | |||
29 | #include "dmaengine.h" | ||
30 | #include "virt-dma.h" | ||
31 | |||
32 | /* | ||
33 | * This will go away when the private EDMA API is folded | ||
34 | * into this driver and the platform device(s) are | ||
35 | * instantiated in the arch code. We can only get away | ||
36 | * with this simplification because DA8XX may not be built | ||
37 | * in the same kernel image with other DaVinci parts. This | ||
38 | * avoids having to sprinkle dmaengine driver platform devices | ||
39 | * and data throughout all the existing board files. | ||
40 | */ | ||
41 | #ifdef CONFIG_ARCH_DAVINCI_DA8XX | ||
42 | #define EDMA_CTLRS 2 | ||
43 | #define EDMA_CHANS 32 | ||
44 | #else | ||
45 | #define EDMA_CTLRS 1 | ||
46 | #define EDMA_CHANS 64 | ||
47 | #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ | ||
48 | |||
49 | /* Max of 16 segments per channel to conserve PaRAM slots */ | ||
50 | #define MAX_NR_SG 16 | ||
51 | #define EDMA_MAX_SLOTS MAX_NR_SG | ||
52 | #define EDMA_DESCRIPTORS 16 | ||
53 | |||
54 | struct edma_desc { | ||
55 | struct virt_dma_desc vdesc; | ||
56 | struct list_head node; | ||
57 | int absync; | ||
58 | int pset_nr; | ||
59 | struct edmacc_param pset[0]; | ||
60 | }; | ||
61 | |||
62 | struct edma_cc; | ||
63 | |||
64 | struct edma_chan { | ||
65 | struct virt_dma_chan vchan; | ||
66 | struct list_head node; | ||
67 | struct edma_desc *edesc; | ||
68 | struct edma_cc *ecc; | ||
69 | int ch_num; | ||
70 | bool alloced; | ||
71 | int slot[EDMA_MAX_SLOTS]; | ||
72 | dma_addr_t addr; | ||
73 | int addr_width; | ||
74 | int maxburst; | ||
75 | }; | ||
76 | |||
77 | struct edma_cc { | ||
78 | int ctlr; | ||
79 | struct dma_device dma_slave; | ||
80 | struct edma_chan slave_chans[EDMA_CHANS]; | ||
81 | int num_slave_chans; | ||
82 | int dummy_slot; | ||
83 | }; | ||
84 | |||
85 | static inline struct edma_cc *to_edma_cc(struct dma_device *d) | ||
86 | { | ||
87 | return container_of(d, struct edma_cc, dma_slave); | ||
88 | } | ||
89 | |||
90 | static inline struct edma_chan *to_edma_chan(struct dma_chan *c) | ||
91 | { | ||
92 | return container_of(c, struct edma_chan, vchan.chan); | ||
93 | } | ||
94 | |||
95 | static inline struct edma_desc | ||
96 | *to_edma_desc(struct dma_async_tx_descriptor *tx) | ||
97 | { | ||
98 | return container_of(tx, struct edma_desc, vdesc.tx); | ||
99 | } | ||
100 | |||
101 | static void edma_desc_free(struct virt_dma_desc *vdesc) | ||
102 | { | ||
103 | kfree(container_of(vdesc, struct edma_desc, vdesc)); | ||
104 | } | ||
105 | |||
106 | /* Dispatch a queued descriptor to the controller (caller holds lock) */ | ||
107 | static void edma_execute(struct edma_chan *echan) | ||
108 | { | ||
109 | struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan); | ||
110 | struct edma_desc *edesc; | ||
111 | int i; | ||
112 | |||
113 | if (!vdesc) { | ||
114 | echan->edesc = NULL; | ||
115 | return; | ||
116 | } | ||
117 | |||
118 | list_del(&vdesc->node); | ||
119 | |||
120 | echan->edesc = edesc = to_edma_desc(&vdesc->tx); | ||
121 | |||
122 | /* Write descriptor PaRAM set(s) */ | ||
123 | for (i = 0; i < edesc->pset_nr; i++) { | ||
124 | edma_write_slot(echan->slot[i], &edesc->pset[i]); | ||
125 | dev_dbg(echan->vchan.chan.device->dev, | ||
126 | "\n pset[%d]:\n" | ||
127 | " chnum\t%d\n" | ||
128 | " slot\t%d\n" | ||
129 | " opt\t%08x\n" | ||
130 | " src\t%08x\n" | ||
131 | " dst\t%08x\n" | ||
132 | " abcnt\t%08x\n" | ||
133 | " ccnt\t%08x\n" | ||
134 | " bidx\t%08x\n" | ||
135 | " cidx\t%08x\n" | ||
136 | " lkrld\t%08x\n", | ||
137 | i, echan->ch_num, echan->slot[i], | ||
138 | edesc->pset[i].opt, | ||
139 | edesc->pset[i].src, | ||
140 | edesc->pset[i].dst, | ||
141 | edesc->pset[i].a_b_cnt, | ||
142 | edesc->pset[i].ccnt, | ||
143 | edesc->pset[i].src_dst_bidx, | ||
144 | edesc->pset[i].src_dst_cidx, | ||
145 | edesc->pset[i].link_bcntrld); | ||
146 | /* Link to the previous slot if not the last set */ | ||
147 | if (i != (edesc->pset_nr - 1)) | ||
148 | edma_link(echan->slot[i], echan->slot[i+1]); | ||
149 | /* Final pset links to the dummy pset */ | ||
150 | else | ||
151 | edma_link(echan->slot[i], echan->ecc->dummy_slot); | ||
152 | } | ||
153 | |||
154 | edma_start(echan->ch_num); | ||
155 | } | ||
156 | |||
157 | static int edma_terminate_all(struct edma_chan *echan) | ||
158 | { | ||
159 | unsigned long flags; | ||
160 | LIST_HEAD(head); | ||
161 | |||
162 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
163 | |||
164 | /* | ||
165 | * Stop DMA activity: we assume the callback will not be called | ||
166 | * after edma_dma() returns (even if it does, it will see | ||
167 | * echan->edesc is NULL and exit.) | ||
168 | */ | ||
169 | if (echan->edesc) { | ||
170 | echan->edesc = NULL; | ||
171 | edma_stop(echan->ch_num); | ||
172 | } | ||
173 | |||
174 | vchan_get_all_descriptors(&echan->vchan, &head); | ||
175 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
176 | vchan_dma_desc_free_list(&echan->vchan, &head); | ||
177 | |||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | |||
182 | static int edma_slave_config(struct edma_chan *echan, | ||
183 | struct dma_slave_config *config) | ||
184 | { | ||
185 | if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) || | ||
186 | (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
187 | return -EINVAL; | ||
188 | |||
189 | if (config->direction == DMA_MEM_TO_DEV) { | ||
190 | if (config->dst_addr) | ||
191 | echan->addr = config->dst_addr; | ||
192 | if (config->dst_addr_width) | ||
193 | echan->addr_width = config->dst_addr_width; | ||
194 | if (config->dst_maxburst) | ||
195 | echan->maxburst = config->dst_maxburst; | ||
196 | } else if (config->direction == DMA_DEV_TO_MEM) { | ||
197 | if (config->src_addr) | ||
198 | echan->addr = config->src_addr; | ||
199 | if (config->src_addr_width) | ||
200 | echan->addr_width = config->src_addr_width; | ||
201 | if (config->src_maxburst) | ||
202 | echan->maxburst = config->src_maxburst; | ||
203 | } | ||
204 | |||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
209 | unsigned long arg) | ||
210 | { | ||
211 | int ret = 0; | ||
212 | struct dma_slave_config *config; | ||
213 | struct edma_chan *echan = to_edma_chan(chan); | ||
214 | |||
215 | switch (cmd) { | ||
216 | case DMA_TERMINATE_ALL: | ||
217 | edma_terminate_all(echan); | ||
218 | break; | ||
219 | case DMA_SLAVE_CONFIG: | ||
220 | config = (struct dma_slave_config *)arg; | ||
221 | ret = edma_slave_config(echan, config); | ||
222 | break; | ||
223 | default: | ||
224 | ret = -ENOSYS; | ||
225 | } | ||
226 | |||
227 | return ret; | ||
228 | } | ||
229 | |||
230 | static struct dma_async_tx_descriptor *edma_prep_slave_sg( | ||
231 | struct dma_chan *chan, struct scatterlist *sgl, | ||
232 | unsigned int sg_len, enum dma_transfer_direction direction, | ||
233 | unsigned long tx_flags, void *context) | ||
234 | { | ||
235 | struct edma_chan *echan = to_edma_chan(chan); | ||
236 | struct device *dev = chan->device->dev; | ||
237 | struct edma_desc *edesc; | ||
238 | struct scatterlist *sg; | ||
239 | int i; | ||
240 | int acnt, bcnt, ccnt, src, dst, cidx; | ||
241 | int src_bidx, dst_bidx, src_cidx, dst_cidx; | ||
242 | |||
243 | if (unlikely(!echan || !sgl || !sg_len)) | ||
244 | return NULL; | ||
245 | |||
246 | if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { | ||
247 | dev_err(dev, "Undefined slave buswidth\n"); | ||
248 | return NULL; | ||
249 | } | ||
250 | |||
251 | if (sg_len > MAX_NR_SG) { | ||
252 | dev_err(dev, "Exceeded max SG segments %d > %d\n", | ||
253 | sg_len, MAX_NR_SG); | ||
254 | return NULL; | ||
255 | } | ||
256 | |||
257 | edesc = kzalloc(sizeof(*edesc) + sg_len * | ||
258 | sizeof(edesc->pset[0]), GFP_ATOMIC); | ||
259 | if (!edesc) { | ||
260 | dev_dbg(dev, "Failed to allocate a descriptor\n"); | ||
261 | return NULL; | ||
262 | } | ||
263 | |||
264 | edesc->pset_nr = sg_len; | ||
265 | |||
266 | for_each_sg(sgl, sg, sg_len, i) { | ||
267 | /* Allocate a PaRAM slot, if needed */ | ||
268 | if (echan->slot[i] < 0) { | ||
269 | echan->slot[i] = | ||
270 | edma_alloc_slot(EDMA_CTLR(echan->ch_num), | ||
271 | EDMA_SLOT_ANY); | ||
272 | if (echan->slot[i] < 0) { | ||
273 | dev_err(dev, "Failed to allocate slot\n"); | ||
274 | return NULL; | ||
275 | } | ||
276 | } | ||
277 | |||
278 | acnt = echan->addr_width; | ||
279 | |||
280 | /* | ||
281 | * If the maxburst is equal to the fifo width, use | ||
282 | * A-synced transfers. This allows for large contiguous | ||
283 | * buffer transfers using only one PaRAM set. | ||
284 | */ | ||
285 | if (echan->maxburst == 1) { | ||
286 | edesc->absync = false; | ||
287 | ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); | ||
288 | bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); | ||
289 | if (bcnt) | ||
290 | ccnt++; | ||
291 | else | ||
292 | bcnt = SZ_64K - 1; | ||
293 | cidx = acnt; | ||
294 | /* | ||
295 | * If maxburst is greater than the fifo address_width, | ||
296 | * use AB-synced transfers where A count is the fifo | ||
297 | * address_width and B count is the maxburst. In this | ||
298 | * case, we are limited to transfers of C count frames | ||
299 | * of (address_width * maxburst) where C count is limited | ||
300 | * to SZ_64K-1. This places an upper bound on the length | ||
301 | * of an SG segment that can be handled. | ||
302 | */ | ||
303 | } else { | ||
304 | edesc->absync = true; | ||
305 | bcnt = echan->maxburst; | ||
306 | ccnt = sg_dma_len(sg) / (acnt * bcnt); | ||
307 | if (ccnt > (SZ_64K - 1)) { | ||
308 | dev_err(dev, "Exceeded max SG segment size\n"); | ||
309 | return NULL; | ||
310 | } | ||
311 | cidx = acnt * bcnt; | ||
312 | } | ||
313 | |||
314 | if (direction == DMA_MEM_TO_DEV) { | ||
315 | src = sg_dma_address(sg); | ||
316 | dst = echan->addr; | ||
317 | src_bidx = acnt; | ||
318 | src_cidx = cidx; | ||
319 | dst_bidx = 0; | ||
320 | dst_cidx = 0; | ||
321 | } else { | ||
322 | src = echan->addr; | ||
323 | dst = sg_dma_address(sg); | ||
324 | src_bidx = 0; | ||
325 | src_cidx = 0; | ||
326 | dst_bidx = acnt; | ||
327 | dst_cidx = cidx; | ||
328 | } | ||
329 | |||
330 | edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); | ||
331 | /* Configure A or AB synchronized transfers */ | ||
332 | if (edesc->absync) | ||
333 | edesc->pset[i].opt |= SYNCDIM; | ||
334 | /* If this is the last set, enable completion interrupt flag */ | ||
335 | if (i == sg_len - 1) | ||
336 | edesc->pset[i].opt |= TCINTEN; | ||
337 | |||
338 | edesc->pset[i].src = src; | ||
339 | edesc->pset[i].dst = dst; | ||
340 | |||
341 | edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx; | ||
342 | edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx; | ||
343 | |||
344 | edesc->pset[i].a_b_cnt = bcnt << 16 | acnt; | ||
345 | edesc->pset[i].ccnt = ccnt; | ||
346 | edesc->pset[i].link_bcntrld = 0xffffffff; | ||
347 | |||
348 | } | ||
349 | |||
350 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); | ||
351 | } | ||
352 | |||
353 | static void edma_callback(unsigned ch_num, u16 ch_status, void *data) | ||
354 | { | ||
355 | struct edma_chan *echan = data; | ||
356 | struct device *dev = echan->vchan.chan.device->dev; | ||
357 | struct edma_desc *edesc; | ||
358 | unsigned long flags; | ||
359 | |||
360 | /* Stop the channel */ | ||
361 | edma_stop(echan->ch_num); | ||
362 | |||
363 | switch (ch_status) { | ||
364 | case DMA_COMPLETE: | ||
365 | dev_dbg(dev, "transfer complete on channel %d\n", ch_num); | ||
366 | |||
367 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
368 | |||
369 | edesc = echan->edesc; | ||
370 | if (edesc) { | ||
371 | edma_execute(echan); | ||
372 | vchan_cookie_complete(&edesc->vdesc); | ||
373 | } | ||
374 | |||
375 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
376 | |||
377 | break; | ||
378 | case DMA_CC_ERROR: | ||
379 | dev_dbg(dev, "transfer error on channel %d\n", ch_num); | ||
380 | break; | ||
381 | default: | ||
382 | break; | ||
383 | } | ||
384 | } | ||
385 | |||
386 | /* Alloc channel resources */ | ||
387 | static int edma_alloc_chan_resources(struct dma_chan *chan) | ||
388 | { | ||
389 | struct edma_chan *echan = to_edma_chan(chan); | ||
390 | struct device *dev = chan->device->dev; | ||
391 | int ret; | ||
392 | int a_ch_num; | ||
393 | LIST_HEAD(descs); | ||
394 | |||
395 | a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback, | ||
396 | chan, EVENTQ_DEFAULT); | ||
397 | |||
398 | if (a_ch_num < 0) { | ||
399 | ret = -ENODEV; | ||
400 | goto err_no_chan; | ||
401 | } | ||
402 | |||
403 | if (a_ch_num != echan->ch_num) { | ||
404 | dev_err(dev, "failed to allocate requested channel %u:%u\n", | ||
405 | EDMA_CTLR(echan->ch_num), | ||
406 | EDMA_CHAN_SLOT(echan->ch_num)); | ||
407 | ret = -ENODEV; | ||
408 | goto err_wrong_chan; | ||
409 | } | ||
410 | |||
411 | echan->alloced = true; | ||
412 | echan->slot[0] = echan->ch_num; | ||
413 | |||
414 | dev_info(dev, "allocated channel for %u:%u\n", | ||
415 | EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); | ||
416 | |||
417 | return 0; | ||
418 | |||
419 | err_wrong_chan: | ||
420 | edma_free_channel(a_ch_num); | ||
421 | err_no_chan: | ||
422 | return ret; | ||
423 | } | ||
424 | |||
425 | /* Free channel resources */ | ||
426 | static void edma_free_chan_resources(struct dma_chan *chan) | ||
427 | { | ||
428 | struct edma_chan *echan = to_edma_chan(chan); | ||
429 | struct device *dev = chan->device->dev; | ||
430 | int i; | ||
431 | |||
432 | /* Terminate transfers */ | ||
433 | edma_stop(echan->ch_num); | ||
434 | |||
435 | vchan_free_chan_resources(&echan->vchan); | ||
436 | |||
437 | /* Free EDMA PaRAM slots */ | ||
438 | for (i = 1; i < EDMA_MAX_SLOTS; i++) { | ||
439 | if (echan->slot[i] >= 0) { | ||
440 | edma_free_slot(echan->slot[i]); | ||
441 | echan->slot[i] = -1; | ||
442 | } | ||
443 | } | ||
444 | |||
445 | /* Free EDMA channel */ | ||
446 | if (echan->alloced) { | ||
447 | edma_free_channel(echan->ch_num); | ||
448 | echan->alloced = false; | ||
449 | } | ||
450 | |||
451 | dev_info(dev, "freeing channel for %u\n", echan->ch_num); | ||
452 | } | ||
453 | |||
454 | /* Send pending descriptor to hardware */ | ||
455 | static void edma_issue_pending(struct dma_chan *chan) | ||
456 | { | ||
457 | struct edma_chan *echan = to_edma_chan(chan); | ||
458 | unsigned long flags; | ||
459 | |||
460 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
461 | if (vchan_issue_pending(&echan->vchan) && !echan->edesc) | ||
462 | edma_execute(echan); | ||
463 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
464 | } | ||
465 | |||
466 | static size_t edma_desc_size(struct edma_desc *edesc) | ||
467 | { | ||
468 | int i; | ||
469 | size_t size; | ||
470 | |||
471 | if (edesc->absync) | ||
472 | for (size = i = 0; i < edesc->pset_nr; i++) | ||
473 | size += (edesc->pset[i].a_b_cnt & 0xffff) * | ||
474 | (edesc->pset[i].a_b_cnt >> 16) * | ||
475 | edesc->pset[i].ccnt; | ||
476 | else | ||
477 | size = (edesc->pset[0].a_b_cnt & 0xffff) * | ||
478 | (edesc->pset[0].a_b_cnt >> 16) + | ||
479 | (edesc->pset[0].a_b_cnt & 0xffff) * | ||
480 | (SZ_64K - 1) * edesc->pset[0].ccnt; | ||
481 | |||
482 | return size; | ||
483 | } | ||
484 | |||
485 | /* Check request completion status */ | ||
486 | static enum dma_status edma_tx_status(struct dma_chan *chan, | ||
487 | dma_cookie_t cookie, | ||
488 | struct dma_tx_state *txstate) | ||
489 | { | ||
490 | struct edma_chan *echan = to_edma_chan(chan); | ||
491 | struct virt_dma_desc *vdesc; | ||
492 | enum dma_status ret; | ||
493 | unsigned long flags; | ||
494 | |||
495 | ret = dma_cookie_status(chan, cookie, txstate); | ||
496 | if (ret == DMA_SUCCESS || !txstate) | ||
497 | return ret; | ||
498 | |||
499 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
500 | vdesc = vchan_find_desc(&echan->vchan, cookie); | ||
501 | if (vdesc) { | ||
502 | txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx)); | ||
503 | } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) { | ||
504 | struct edma_desc *edesc = echan->edesc; | ||
505 | txstate->residue = edma_desc_size(edesc); | ||
506 | } else { | ||
507 | txstate->residue = 0; | ||
508 | } | ||
509 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
510 | |||
511 | return ret; | ||
512 | } | ||
513 | |||
514 | static void __init edma_chan_init(struct edma_cc *ecc, | ||
515 | struct dma_device *dma, | ||
516 | struct edma_chan *echans) | ||
517 | { | ||
518 | int i, j; | ||
519 | |||
520 | for (i = 0; i < EDMA_CHANS; i++) { | ||
521 | struct edma_chan *echan = &echans[i]; | ||
522 | echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i); | ||
523 | echan->ecc = ecc; | ||
524 | echan->vchan.desc_free = edma_desc_free; | ||
525 | |||
526 | vchan_init(&echan->vchan, dma); | ||
527 | |||
528 | INIT_LIST_HEAD(&echan->node); | ||
529 | for (j = 0; j < EDMA_MAX_SLOTS; j++) | ||
530 | echan->slot[j] = -1; | ||
531 | } | ||
532 | } | ||
533 | |||
534 | static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, | ||
535 | struct device *dev) | ||
536 | { | ||
537 | dma->device_prep_slave_sg = edma_prep_slave_sg; | ||
538 | dma->device_alloc_chan_resources = edma_alloc_chan_resources; | ||
539 | dma->device_free_chan_resources = edma_free_chan_resources; | ||
540 | dma->device_issue_pending = edma_issue_pending; | ||
541 | dma->device_tx_status = edma_tx_status; | ||
542 | dma->device_control = edma_control; | ||
543 | dma->dev = dev; | ||
544 | |||
545 | INIT_LIST_HEAD(&dma->channels); | ||
546 | } | ||
547 | |||
548 | static int __devinit edma_probe(struct platform_device *pdev) | ||
549 | { | ||
550 | struct edma_cc *ecc; | ||
551 | int ret; | ||
552 | |||
553 | ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL); | ||
554 | if (!ecc) { | ||
555 | dev_err(&pdev->dev, "Can't allocate controller\n"); | ||
556 | return -ENOMEM; | ||
557 | } | ||
558 | |||
559 | ecc->ctlr = pdev->id; | ||
560 | ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY); | ||
561 | if (ecc->dummy_slot < 0) { | ||
562 | dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n"); | ||
563 | return -EIO; | ||
564 | } | ||
565 | |||
566 | dma_cap_zero(ecc->dma_slave.cap_mask); | ||
567 | dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); | ||
568 | |||
569 | edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev); | ||
570 | |||
571 | edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans); | ||
572 | |||
573 | ret = dma_async_device_register(&ecc->dma_slave); | ||
574 | if (ret) | ||
575 | goto err_reg1; | ||
576 | |||
577 | platform_set_drvdata(pdev, ecc); | ||
578 | |||
579 | dev_info(&pdev->dev, "TI EDMA DMA engine driver\n"); | ||
580 | |||
581 | return 0; | ||
582 | |||
583 | err_reg1: | ||
584 | edma_free_slot(ecc->dummy_slot); | ||
585 | return ret; | ||
586 | } | ||
587 | |||
588 | static int __devexit edma_remove(struct platform_device *pdev) | ||
589 | { | ||
590 | struct device *dev = &pdev->dev; | ||
591 | struct edma_cc *ecc = dev_get_drvdata(dev); | ||
592 | |||
593 | dma_async_device_unregister(&ecc->dma_slave); | ||
594 | edma_free_slot(ecc->dummy_slot); | ||
595 | |||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | static struct platform_driver edma_driver = { | ||
600 | .probe = edma_probe, | ||
601 | .remove = __devexit_p(edma_remove), | ||
602 | .driver = { | ||
603 | .name = "edma-dma-engine", | ||
604 | .owner = THIS_MODULE, | ||
605 | }, | ||
606 | }; | ||
607 | |||
608 | bool edma_filter_fn(struct dma_chan *chan, void *param) | ||
609 | { | ||
610 | if (chan->device->dev->driver == &edma_driver.driver) { | ||
611 | struct edma_chan *echan = to_edma_chan(chan); | ||
612 | unsigned ch_req = *(unsigned *)param; | ||
613 | return ch_req == echan->ch_num; | ||
614 | } | ||
615 | return false; | ||
616 | } | ||
617 | EXPORT_SYMBOL(edma_filter_fn); | ||
618 | |||
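edma_filter_fn() is the usual dmaengine filter hook: a slave driver passes it to dma_request_channel() together with the EDMA request number it needs. A hedged sketch of that call, wrapped in a hypothetical helper (the request number is copied by the filter during the call, so a stack variable is fine):

    /* Illustrative: bind a client to a specific EDMA request line. */
    static struct dma_chan *request_edma_chan(unsigned ch_num)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);
            return dma_request_channel(mask, edma_filter_fn, &ch_num);
    }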
619 | static struct platform_device *pdev0, *pdev1; | ||
620 | |||
621 | static const struct platform_device_info edma_dev_info0 = { | ||
622 | .name = "edma-dma-engine", | ||
623 | .id = 0, | ||
624 | .dma_mask = DMA_BIT_MASK(32), | ||
625 | }; | ||
626 | |||
627 | static const struct platform_device_info edma_dev_info1 = { | ||
628 | .name = "edma-dma-engine", | ||
629 | .id = 1, | ||
630 | .dma_mask = DMA_BIT_MASK(32), | ||
631 | }; | ||
632 | |||
633 | static int edma_init(void) | ||
634 | { | ||
635 | int ret = platform_driver_register(&edma_driver); | ||
636 | |||
637 | if (ret == 0) { | ||
638 | pdev0 = platform_device_register_full(&edma_dev_info0); | ||
639 | if (IS_ERR(pdev0)) { | ||
640 | platform_driver_unregister(&edma_driver); | ||
641 | ret = PTR_ERR(pdev0); | ||
642 | goto out; | ||
643 | } | ||
644 | |||
645 | if (EDMA_CTLRS == 2) { | ||
646 | pdev1 = platform_device_register_full(&edma_dev_info1); | ||
647 | if (IS_ERR(pdev1)) { | ||
648 | platform_driver_unregister(&edma_driver); | ||
649 | platform_device_unregister(pdev0); | ||
650 | ret = PTR_ERR(pdev1); | ||
651 | } | ||
652 | } | ||
653 | } | ||
654 | |||
655 | out: | ||
656 | return ret; | ||
657 | } | ||
658 | subsys_initcall(edma_init); | ||
659 | |||
660 | static void __exit edma_exit(void) | ||
661 | { | ||
662 | platform_device_unregister(pdev0); | ||
663 | if (pdev1) | ||
664 | platform_device_unregister(pdev1); | ||
665 | platform_driver_unregister(&edma_driver); | ||
666 | } | ||
667 | module_exit(edma_exit); | ||
668 | |||
669 | MODULE_AUTHOR("Matt Porter <mporter@ti.com>"); | ||
670 | MODULE_DESCRIPTION("TI EDMA DMA engine driver"); | ||
671 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 86895760b598..b9d667851445 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c | |||
@@ -434,12 +434,11 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f | |||
434 | return NULL; | 434 | return NULL; |
435 | memset(hw, 0, sizeof(*hw)); | 435 | memset(hw, 0, sizeof(*hw)); |
436 | 436 | ||
437 | desc = kmem_cache_alloc(ioat2_cache, flags); | 437 | desc = kmem_cache_zalloc(ioat2_cache, flags); |
438 | if (!desc) { | 438 | if (!desc) { |
439 | pci_pool_free(dma->dma_pool, hw, phys); | 439 | pci_pool_free(dma->dma_pool, hw, phys); |
440 | return NULL; | 440 | return NULL; |
441 | } | 441 | } |
442 | memset(desc, 0, sizeof(*desc)); | ||
443 | 442 | ||
444 | dma_async_tx_descriptor_init(&desc->txd, chan); | 443 | dma_async_tx_descriptor_init(&desc->txd, chan); |
445 | desc->txd.tx_submit = ioat2_tx_submit_unlock; | 444 | desc->txd.tx_submit = ioat2_tx_submit_unlock; |
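The hunk above folds the allocate-then-memset pair into kmem_cache_zalloc(); the two forms are equivalent, since the helper just ORs __GFP_ZERO into the allocation flags:

    /* Equivalent spellings of a zeroed slab allocation: */
    desc = kmem_cache_zalloc(ioat2_cache, flags);
    desc = kmem_cache_alloc(ioat2_cache, flags | __GFP_ZERO);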
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 5e3a40f79945..c0573061b45d 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c | |||
@@ -40,6 +40,17 @@ MODULE_VERSION(IOAT_DMA_VERSION); | |||
40 | MODULE_LICENSE("Dual BSD/GPL"); | 40 | MODULE_LICENSE("Dual BSD/GPL"); |
41 | MODULE_AUTHOR("Intel Corporation"); | 41 | MODULE_AUTHOR("Intel Corporation"); |
42 | 42 | ||
43 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20 | ||
44 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21 | ||
45 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22 | ||
46 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23 | ||
47 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24 | ||
48 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25 | ||
49 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26 | ||
50 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27 | ||
51 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e | ||
52 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f | ||
53 | |||
43 | static struct pci_device_id ioat_pci_tbl[] = { | 54 | static struct pci_device_id ioat_pci_tbl[] = { |
44 | /* I/OAT v1 platforms */ | 55 | /* I/OAT v1 platforms */ |
45 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, | 56 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, |
@@ -83,6 +94,17 @@ static struct pci_device_id ioat_pci_tbl[] = { | |||
83 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, | 94 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, |
84 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, | 95 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, |
85 | 96 | ||
97 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) }, | ||
98 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) }, | ||
99 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) }, | ||
100 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) }, | ||
101 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) }, | ||
102 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) }, | ||
103 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) }, | ||
104 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) }, | ||
105 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) }, | ||
106 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) }, | ||
107 | |||
86 | { 0, } | 108 | { 0, } |
87 | }; | 109 | }; |
88 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); | 110 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); |
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c new file mode 100644 index 000000000000..14da1f403edf --- /dev/null +++ b/drivers/dma/mmp_pdma.c | |||
@@ -0,0 +1,875 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Marvell International Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/dma-mapping.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/dmaengine.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/device.h> | ||
17 | #include <linux/platform_data/mmp_dma.h> | ||
18 | #include <linux/dmapool.h> | ||
19 | #include <linux/of_device.h> | ||
20 | #include <linux/of.h> | ||
21 | |||
22 | #include "dmaengine.h" | ||
23 | |||
24 | #define DCSR 0x0000 | ||
25 | #define DALGN 0x00a0 | ||
26 | #define DINT 0x00f0 | ||
27 | #define DDADR 0x0200 | ||
28 | #define DSADR 0x0204 | ||
29 | #define DTADR 0x0208 | ||
30 | #define DCMD 0x020c | ||
31 | |||
32 | #define DCSR_RUN (1 << 31) /* Run Bit (read / write) */ | ||
33 | #define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */ | ||
34 | #define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */ | ||
35 | #define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */ | ||
36 | #define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */ | ||
37 | #define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */ | ||
38 | #define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */ | ||
39 | #define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */ | ||
40 | |||
41 | #define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */ | ||
42 | #define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */ | ||
43 | #define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */ | ||
44 | #define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */ | ||
45 | #define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */ | ||
46 | #define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ | ||
47 | #define DCSR_EORINTR (1 << 9) /* The end of Receive */ | ||
48 | |||
49 | #define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */ | ||
50 | #define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ | ||
51 | |||
52 | #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ | ||
53 | #define DDADR_STOP (1 << 0) /* Stop (read / write) */ | ||
54 | |||
55 | #define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */ | ||
56 | #define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */ | ||
57 | #define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */ | ||
58 | #define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */ | ||
59 | #define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */ | ||
60 | #define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */ | ||
61 | #define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */ | ||
62 | #define DCMD_BURST8 (1 << 16) /* 8 byte burst */ | ||
63 | #define DCMD_BURST16 (2 << 16) /* 16 byte burst */ | ||
64 | #define DCMD_BURST32 (3 << 16) /* 32 byte burst */ | ||
65 | #define DCMD_WIDTH1 (1 << 14) /* 1 byte width */ | ||
66 | #define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */ | ||
67 | #define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */ | ||
68 | #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ | ||
69 | |||
70 | #define PDMA_ALIGNMENT 3 | ||
71 | #define PDMA_MAX_DESC_BYTES 0x1000 | ||
72 | |||
73 | struct mmp_pdma_desc_hw { | ||
74 | u32 ddadr; /* Points to the next descriptor + flags */ | ||
75 | u32 dsadr; /* DSADR value for the current transfer */ | ||
76 | u32 dtadr; /* DTADR value for the current transfer */ | ||
77 | u32 dcmd; /* DCMD value for the current transfer */ | ||
78 | } __aligned(32); | ||
79 | |||
80 | struct mmp_pdma_desc_sw { | ||
81 | struct mmp_pdma_desc_hw desc; | ||
82 | struct list_head node; | ||
83 | struct list_head tx_list; | ||
84 | struct dma_async_tx_descriptor async_tx; | ||
85 | }; | ||
86 | |||
87 | struct mmp_pdma_phy; | ||
88 | |||
89 | struct mmp_pdma_chan { | ||
90 | struct device *dev; | ||
91 | struct dma_chan chan; | ||
92 | struct dma_async_tx_descriptor desc; | ||
93 | struct mmp_pdma_phy *phy; | ||
94 | enum dma_transfer_direction dir; | ||
95 | |||
96 | /* channel's basic info */ | ||
97 | struct tasklet_struct tasklet; | ||
98 | u32 dcmd; | ||
99 | u32 drcmr; | ||
100 | u32 dev_addr; | ||
101 | |||
102 | /* list for desc */ | ||
103 | spinlock_t desc_lock; /* Descriptor list lock */ | ||
104 | struct list_head chain_pending; /* Link descriptors queue for pending */ | ||
105 | struct list_head chain_running; /* Link descriptors queue for running */ | ||
106 | bool idle; /* channel state machine */ | ||
107 | |||
108 | struct dma_pool *desc_pool; /* Descriptors pool */ | ||
109 | }; | ||
110 | |||
111 | struct mmp_pdma_phy { | ||
112 | int idx; | ||
113 | void __iomem *base; | ||
114 | struct mmp_pdma_chan *vchan; | ||
115 | }; | ||
116 | |||
117 | struct mmp_pdma_device { | ||
118 | int dma_channels; | ||
119 | void __iomem *base; | ||
120 | struct device *dev; | ||
121 | struct dma_device device; | ||
122 | struct mmp_pdma_phy *phy; | ||
123 | }; | ||
124 | |||
125 | #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx) | ||
126 | #define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node) | ||
127 | #define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan) | ||
128 | #define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device) | ||
129 | |||
130 | static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) | ||
131 | { | ||
132 | u32 reg = (phy->idx << 4) + DDADR; | ||
133 | |||
134 | writel(addr, phy->base + reg); | ||
135 | } | ||
136 | |||
137 | static void enable_chan(struct mmp_pdma_phy *phy) | ||
138 | { | ||
139 | u32 reg; | ||
140 | |||
141 | if (!phy->vchan) | ||
142 | return; | ||
143 | |||
144 | reg = phy->vchan->drcmr; | ||
145 | reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2); | ||
146 | writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); | ||
147 | |||
148 | reg = (phy->idx << 2) + DCSR; | ||
149 | writel(readl(phy->base + reg) | DCSR_RUN, | ||
150 | phy->base + reg); | ||
151 | } | ||
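The DRCMR arithmetic in enable_chan() encodes two register banks: request lines 0-63 start at offset 0x0100 and lines 64 and up at 0x1100, with one 32-bit register per line. The same computation as a named helper (the helper is ours, for illustration only):

    /* Illustrative: byte offset of the DRCMR register for request line n. */
    static inline u32 drcmr_offset(u32 n)
    {
            return ((n < 64) ? 0x0100 : 0x1100) + ((n & 0x3f) << 2);
    }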
152 | |||
153 | static void disable_chan(struct mmp_pdma_phy *phy) | ||
154 | { | ||
155 | u32 reg; | ||
156 | |||
157 | if (phy) { | ||
158 | reg = (phy->idx << 2) + DCSR; | ||
159 | writel(readl(phy->base + reg) & ~DCSR_RUN, | ||
160 | phy->base + reg); | ||
161 | } | ||
162 | } | ||
163 | |||
164 | static int clear_chan_irq(struct mmp_pdma_phy *phy) | ||
165 | { | ||
166 | u32 dcsr; | ||
167 | u32 dint = readl(phy->base + DINT); | ||
168 | u32 reg = (phy->idx << 2) + DCSR; | ||
169 | |||
170 | if (dint & BIT(phy->idx)) { | ||
171 | /* clear irq */ | ||
172 | dcsr = readl(phy->base + reg); | ||
173 | writel(dcsr, phy->base + reg); | ||
174 | if ((dcsr & DCSR_BUSERR) && (phy->vchan)) | ||
175 | dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); | ||
176 | return 0; | ||
177 | } | ||
178 | return -EAGAIN; | ||
179 | } | ||
180 | |||
181 | static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id) | ||
182 | { | ||
183 | struct mmp_pdma_phy *phy = dev_id; | ||
184 | |||
185 | if (clear_chan_irq(phy) == 0) { | ||
186 | tasklet_schedule(&phy->vchan->tasklet); | ||
187 | return IRQ_HANDLED; | ||
188 | } | ||
189 | return IRQ_NONE; | ||
190 | } | ||
191 | |||
192 | static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) | ||
193 | { | ||
194 | struct mmp_pdma_device *pdev = dev_id; | ||
195 | struct mmp_pdma_phy *phy; | ||
196 | u32 dint = readl(pdev->base + DINT); | ||
197 | int i, ret; | ||
198 | int irq_num = 0; | ||
199 | |||
200 | while (dint) { | ||
201 | i = __ffs(dint); | ||
202 | dint &= (dint - 1); | ||
203 | phy = &pdev->phy[i]; | ||
204 | ret = mmp_pdma_chan_handler(irq, phy); | ||
205 | if (ret == IRQ_HANDLED) | ||
206 | irq_num++; | ||
207 | } | ||
208 | |||
209 | if (irq_num) | ||
210 | return IRQ_HANDLED; | ||
211 | else | ||
212 | return IRQ_NONE; | ||
213 | } | ||
214 | |||
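The demux loop above walks DINT with the classic clear-lowest-set-bit idiom: __ffs() yields the index of the lowest set bit and `dint &= dint - 1` clears exactly that bit, so dint = 0b10010 visits channels 1 and then 4. The bare pattern:

    /* Illustrative: visit every set bit of a status word, lowest first. */
    while (status) {
            int bit = __ffs(status);        /* index of lowest set bit */

            status &= status - 1;           /* clear that bit */
            handle_channel(bit);            /* hypothetical per-bit handler */
    }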
215 | /* look up a free phy channel in descending priority order */ | ||
216 | static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan) | ||
217 | { | ||
218 | int prio, i; | ||
219 | struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); | ||
220 | struct mmp_pdma_phy *phy; | ||
221 | |||
222 | /* | ||
223 | * dma channel priorities | ||
224 | * ch 0 - 3, 16 - 19 <--> (0) | ||
225 | * ch 4 - 7, 20 - 23 <--> (1) | ||
226 | * ch 8 - 11, 24 - 27 <--> (2) | ||
227 | * ch 12 - 15, 28 - 31 <--> (3) | ||
228 | */ | ||
229 | for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) { | ||
230 | for (i = 0; i < pdev->dma_channels; i++) { | ||
231 | if (prio != ((i & 0xf) >> 2)) | ||
232 | continue; | ||
233 | phy = &pdev->phy[i]; | ||
234 | if (!phy->vchan) { | ||
235 | phy->vchan = pchan; | ||
236 | return phy; | ||
237 | } | ||
238 | } | ||
239 | } | ||
240 | |||
241 | return NULL; | ||
242 | } | ||
243 | |||
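The priority table in the comment reduces to `(i & 0xf) >> 2`: the low four bits select one of four priority groups, repeating every sixteen channels. A standalone check of the mapping (plain userspace C, not driver code):

    #include <stdio.h>

    int main(void)
    {
            /* ch 0-3/16-19 -> 0, 4-7/20-23 -> 1, 8-11/24-27 -> 2, ... */
            for (int i = 0; i < 32; i++)
                    printf("ch %2d -> prio %d\n", i, (i & 0xf) >> 2);
            return 0;
    }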
244 | /* desc->tx_list ==> pending list */ | ||
245 | static void append_pending_queue(struct mmp_pdma_chan *chan, | ||
246 | struct mmp_pdma_desc_sw *desc) | ||
247 | { | ||
248 | struct mmp_pdma_desc_sw *tail = | ||
249 | to_mmp_pdma_desc(chan->chain_pending.prev); | ||
250 | |||
251 | if (list_empty(&chan->chain_pending)) | ||
252 | goto out_splice; | ||
253 | |||
254 | /* one irq per queue, even when new descriptors are appended */ | ||
255 | tail->desc.ddadr = desc->async_tx.phys; | ||
256 | tail->desc.dcmd &= ~DCMD_ENDIRQEN; | ||
257 | |||
258 | /* softly link to pending list */ | ||
259 | out_splice: | ||
260 | list_splice_tail_init(&desc->tx_list, &chan->chain_pending); | ||
261 | } | ||
262 | |||
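Appending works by physically re-linking the hardware chain: the old tail's ddadr is pointed at the first new descriptor and its end-interrupt bit is cleared, so only the final descriptor of the merged queue raises an irq. In miniature:

    /* Illustrative: splice new descriptors behind the current tail. */
    tail->desc.ddadr = new_head_phys;       /* hardware follows this link */
    tail->desc.dcmd &= ~DCMD_ENDIRQEN;      /* old tail no longer interrupts */
    /* the new tail keeps DDADR_STOP and DCMD_ENDIRQEN set */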
263 | /** | ||
264 | * start_pending_queue - transfer any pending transactions | ||
265 | * pending list ==> running list | ||
266 | */ | ||
267 | static void start_pending_queue(struct mmp_pdma_chan *chan) | ||
268 | { | ||
269 | struct mmp_pdma_desc_sw *desc; | ||
270 | |||
271 | /* still running; the irq handler will start the pending list */ | ||
272 | if (!chan->idle) { | ||
273 | dev_dbg(chan->dev, "DMA controller still busy\n"); | ||
274 | return; | ||
275 | } | ||
276 | |||
277 | if (list_empty(&chan->chain_pending)) { | ||
278 | /* chance to re-acquire a phy channel with higher priority */ | ||
279 | if (chan->phy) { | ||
280 | chan->phy->vchan = NULL; | ||
281 | chan->phy = NULL; | ||
282 | } | ||
283 | dev_dbg(chan->dev, "no pending list\n"); | ||
284 | return; | ||
285 | } | ||
286 | |||
287 | if (!chan->phy) { | ||
288 | chan->phy = lookup_phy(chan); | ||
289 | if (!chan->phy) { | ||
290 | dev_dbg(chan->dev, "no free dma channel\n"); | ||
291 | return; | ||
292 | } | ||
293 | } | ||
294 | |||
295 | /* | ||
296 | * pending -> running | ||
297 | * reinitialize the pending list | ||
298 | */ | ||
299 | desc = list_first_entry(&chan->chain_pending, | ||
300 | struct mmp_pdma_desc_sw, node); | ||
301 | list_splice_tail_init(&chan->chain_pending, &chan->chain_running); | ||
302 | |||
303 | /* | ||
304 | * Program the descriptor's address into the DMA controller, | ||
305 | * then start the DMA transaction | ||
306 | */ | ||
307 | set_desc(chan->phy, desc->async_tx.phys); | ||
308 | enable_chan(chan->phy); | ||
309 | chan->idle = false; | ||
310 | } | ||
311 | |||
312 | |||
313 | /* desc->tx_list ==> pending list */ | ||
314 | static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
315 | { | ||
316 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan); | ||
317 | struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx); | ||
318 | struct mmp_pdma_desc_sw *child; | ||
319 | unsigned long flags; | ||
320 | dma_cookie_t cookie = -EBUSY; | ||
321 | |||
322 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
323 | |||
324 | list_for_each_entry(child, &desc->tx_list, node) { | ||
325 | cookie = dma_cookie_assign(&child->async_tx); | ||
326 | } | ||
327 | |||
328 | append_pending_queue(chan, desc); | ||
329 | |||
330 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
331 | |||
332 | return cookie; | ||
333 | } | ||
334 | |||
335 | static struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan) | ||
336 | { | ||
337 | struct mmp_pdma_desc_sw *desc; | ||
338 | dma_addr_t pdesc; | ||
339 | |||
340 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); | ||
341 | if (!desc) { | ||
342 | dev_err(chan->dev, "out of memory for link descriptor\n"); | ||
343 | return NULL; | ||
344 | } | ||
345 | |||
346 | memset(desc, 0, sizeof(*desc)); | ||
347 | INIT_LIST_HEAD(&desc->tx_list); | ||
348 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); | ||
349 | /* each descriptor carries its own tx_submit hook */ | ||
350 | desc->async_tx.tx_submit = mmp_pdma_tx_submit; | ||
351 | desc->async_tx.phys = pdesc; | ||
352 | |||
353 | return desc; | ||
354 | } | ||
355 | |||
356 | /** | ||
357 | * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel. | ||
358 | * | ||
359 | * This function will create a dma pool for descriptor allocation. | ||
360 | * Request irq only when channel is requested | ||
361 | * Return - The number of allocated descriptors. | ||
362 | */ | ||
363 | |||
364 | static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) | ||
365 | { | ||
366 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
367 | |||
368 | if (chan->desc_pool) | ||
369 | return 1; | ||
370 | |||
371 | chan->desc_pool = | ||
372 | dma_pool_create(dev_name(&dchan->dev->device), chan->dev, | ||
373 | sizeof(struct mmp_pdma_desc_sw), | ||
374 | __alignof__(struct mmp_pdma_desc_sw), 0); | ||
375 | if (!chan->desc_pool) { | ||
376 | dev_err(chan->dev, "unable to allocate descriptor pool\n"); | ||
377 | return -ENOMEM; | ||
378 | } | ||
379 | if (chan->phy) { | ||
380 | chan->phy->vchan = NULL; | ||
381 | chan->phy = NULL; | ||
382 | } | ||
383 | chan->idle = true; | ||
384 | chan->dev_addr = 0; | ||
385 | return 1; | ||
386 | } | ||
387 | |||
388 | static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan, | ||
389 | struct list_head *list) | ||
390 | { | ||
391 | struct mmp_pdma_desc_sw *desc, *_desc; | ||
392 | |||
393 | list_for_each_entry_safe(desc, _desc, list, node) { | ||
394 | list_del(&desc->node); | ||
395 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | ||
396 | } | ||
397 | } | ||
398 | |||
399 | static void mmp_pdma_free_chan_resources(struct dma_chan *dchan) | ||
400 | { | ||
401 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
402 | unsigned long flags; | ||
403 | |||
404 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
405 | mmp_pdma_free_desc_list(chan, &chan->chain_pending); | ||
406 | mmp_pdma_free_desc_list(chan, &chan->chain_running); | ||
407 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
408 | |||
409 | dma_pool_destroy(chan->desc_pool); | ||
410 | chan->desc_pool = NULL; | ||
411 | chan->idle = true; | ||
412 | chan->dev_addr = 0; | ||
413 | if (chan->phy) { | ||
414 | chan->phy->vchan = NULL; | ||
415 | chan->phy = NULL; | ||
416 | } | ||
417 | return; | ||
418 | } | ||
419 | |||
420 | static struct dma_async_tx_descriptor * | ||
421 | mmp_pdma_prep_memcpy(struct dma_chan *dchan, | ||
422 | dma_addr_t dma_dst, dma_addr_t dma_src, | ||
423 | size_t len, unsigned long flags) | ||
424 | { | ||
425 | struct mmp_pdma_chan *chan; | ||
426 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; | ||
427 | size_t copy = 0; | ||
428 | |||
429 | if (!dchan) | ||
430 | return NULL; | ||
431 | |||
432 | if (!len) | ||
433 | return NULL; | ||
434 | |||
435 | chan = to_mmp_pdma_chan(dchan); | ||
436 | |||
437 | if (!chan->dir) { | ||
438 | chan->dir = DMA_MEM_TO_MEM; | ||
439 | chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR; | ||
440 | chan->dcmd |= DCMD_BURST32; | ||
441 | } | ||
442 | |||
443 | do { | ||
444 | /* Allocate the link descriptor from DMA pool */ | ||
445 | new = mmp_pdma_alloc_descriptor(chan); | ||
446 | if (!new) { | ||
447 | dev_err(chan->dev, "no memory for desc\n"); | ||
448 | goto fail; | ||
449 | } | ||
450 | |||
451 | copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES); | ||
452 | |||
453 | new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); | ||
454 | new->desc.dsadr = dma_src; | ||
455 | new->desc.dtadr = dma_dst; | ||
456 | |||
457 | if (!first) | ||
458 | first = new; | ||
459 | else | ||
460 | prev->desc.ddadr = new->async_tx.phys; | ||
461 | |||
462 | new->async_tx.cookie = 0; | ||
463 | async_tx_ack(&new->async_tx); | ||
464 | |||
465 | prev = new; | ||
466 | len -= copy; | ||
467 | |||
468 | if (chan->dir == DMA_MEM_TO_DEV) { | ||
469 | dma_src += copy; | ||
470 | } else if (chan->dir == DMA_DEV_TO_MEM) { | ||
471 | dma_dst += copy; | ||
472 | } else if (chan->dir == DMA_MEM_TO_MEM) { | ||
473 | dma_src += copy; | ||
474 | dma_dst += copy; | ||
475 | } | ||
476 | |||
477 | /* Insert the link descriptor to the LD ring */ | ||
478 | list_add_tail(&new->node, &first->tx_list); | ||
479 | } while (len); | ||
480 | |||
481 | first->async_tx.flags = flags; /* client is in control of this ack */ | ||
482 | first->async_tx.cookie = -EBUSY; | ||
483 | |||
484 | /* last desc and fire IRQ */ | ||
485 | new->desc.ddadr = DDADR_STOP; | ||
486 | new->desc.dcmd |= DCMD_ENDIRQEN; | ||
487 | |||
488 | return &first->async_tx; | ||
489 | |||
490 | fail: | ||
491 | if (first) | ||
492 | mmp_pdma_free_desc_list(chan, &first->tx_list); | ||
493 | return NULL; | ||
494 | } | ||
495 | |||
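Because DCMD_LENGTH caps one descriptor at 8 KiB minus one and the driver further limits itself to PDMA_MAX_DESC_BYTES (4 KiB), a large copy becomes a chain of 4 KiB descriptors. A client exercises this through the standard memcpy path; the buffer names are illustrative:

    /* Illustrative: one DMA memcpy via this driver's prep hook. */
    struct dma_async_tx_descriptor *tx;
    dma_cookie_t cookie;

    tx = chan->device->device_prep_dma_memcpy(chan, dst_phys, src_phys,
                                               len, DMA_CTRL_ACK);
    if (tx) {
            cookie = dmaengine_submit(tx);
            dma_async_issue_pending(chan);
    }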
496 | static struct dma_async_tx_descriptor * | ||
497 | mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | ||
498 | unsigned int sg_len, enum dma_transfer_direction dir, | ||
499 | unsigned long flags, void *context) | ||
500 | { | ||
501 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
502 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; | ||
503 | size_t len, avail; | ||
504 | struct scatterlist *sg; | ||
505 | dma_addr_t addr; | ||
506 | int i; | ||
507 | |||
508 | if ((sgl == NULL) || (sg_len == 0)) | ||
509 | return NULL; | ||
510 | |||
511 | for_each_sg(sgl, sg, sg_len, i) { | ||
512 | addr = sg_dma_address(sg); | ||
513 | avail = sg_dma_len(sg); | ||
514 | |||
515 | do { | ||
516 | len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES); | ||
517 | |||
518 | /* allocate and populate the descriptor */ | ||
519 | new = mmp_pdma_alloc_descriptor(chan); | ||
520 | if (!new) { | ||
521 | dev_err(chan->dev, "no memory for desc\n"); | ||
522 | goto fail; | ||
523 | } | ||
524 | |||
525 | new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len); | ||
526 | if (dir == DMA_MEM_TO_DEV) { | ||
527 | new->desc.dsadr = addr; | ||
528 | new->desc.dtadr = chan->dev_addr; | ||
529 | } else { | ||
530 | new->desc.dsadr = chan->dev_addr; | ||
531 | new->desc.dtadr = addr; | ||
532 | } | ||
533 | |||
534 | if (!first) | ||
535 | first = new; | ||
536 | else | ||
537 | prev->desc.ddadr = new->async_tx.phys; | ||
538 | |||
539 | new->async_tx.cookie = 0; | ||
540 | async_tx_ack(&new->async_tx); | ||
541 | prev = new; | ||
542 | |||
543 | /* Insert the link descriptor to the LD ring */ | ||
544 | list_add_tail(&new->node, &first->tx_list); | ||
545 | |||
546 | /* update metadata */ | ||
547 | addr += len; | ||
548 | avail -= len; | ||
549 | } while (avail); | ||
550 | } | ||
551 | |||
552 | first->async_tx.cookie = -EBUSY; | ||
553 | first->async_tx.flags = flags; | ||
554 | |||
555 | /* last desc and fire IRQ */ | ||
556 | new->desc.ddadr = DDADR_STOP; | ||
557 | new->desc.dcmd |= DCMD_ENDIRQEN; | ||
558 | |||
559 | return &first->async_tx; | ||
560 | |||
561 | fail: | ||
562 | if (first) | ||
563 | mmp_pdma_free_desc_list(chan, &first->tx_list); | ||
564 | return NULL; | ||
565 | } | ||
566 | |||
567 | static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | ||
568 | unsigned long arg) | ||
569 | { | ||
570 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
571 | struct dma_slave_config *cfg = (void *)arg; | ||
572 | unsigned long flags; | ||
573 | int ret = 0; | ||
574 | u32 maxburst = 0, addr = 0; | ||
575 | enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | ||
576 | |||
577 | if (!dchan) | ||
578 | return -EINVAL; | ||
579 | |||
580 | switch (cmd) { | ||
581 | case DMA_TERMINATE_ALL: | ||
582 | disable_chan(chan->phy); | ||
583 | if (chan->phy) { | ||
584 | chan->phy->vchan = NULL; | ||
585 | chan->phy = NULL; | ||
586 | } | ||
587 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
588 | mmp_pdma_free_desc_list(chan, &chan->chain_pending); | ||
589 | mmp_pdma_free_desc_list(chan, &chan->chain_running); | ||
590 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
591 | chan->idle = true; | ||
592 | break; | ||
593 | case DMA_SLAVE_CONFIG: | ||
594 | if (cfg->direction == DMA_DEV_TO_MEM) { | ||
595 | chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; | ||
596 | maxburst = cfg->src_maxburst; | ||
597 | width = cfg->src_addr_width; | ||
598 | addr = cfg->src_addr; | ||
599 | } else if (cfg->direction == DMA_MEM_TO_DEV) { | ||
600 | chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; | ||
601 | maxburst = cfg->dst_maxburst; | ||
602 | width = cfg->dst_addr_width; | ||
603 | addr = cfg->dst_addr; | ||
604 | } | ||
605 | |||
606 | if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) | ||
607 | chan->dcmd |= DCMD_WIDTH1; | ||
608 | else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) | ||
609 | chan->dcmd |= DCMD_WIDTH2; | ||
610 | else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
611 | chan->dcmd |= DCMD_WIDTH4; | ||
612 | |||
613 | if (maxburst == 8) | ||
614 | chan->dcmd |= DCMD_BURST8; | ||
615 | else if (maxburst == 16) | ||
616 | chan->dcmd |= DCMD_BURST16; | ||
617 | else if (maxburst == 32) | ||
618 | chan->dcmd |= DCMD_BURST32; | ||
619 | |||
620 | /* cfg was dereferenced above, so it is known to be valid here */ | ||
621 | chan->dir = cfg->direction; | ||
622 | chan->drcmr = cfg->slave_id; | ||
623 | |||
624 | chan->dev_addr = addr; | ||
625 | break; | ||
626 | default: | ||
627 | return -ENOSYS; | ||
628 | } | ||
629 | |||
630 | return ret; | ||
631 | } | ||
632 | |||
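The DMA_SLAVE_CONFIG arm expects the usual dma_slave_config contents; a hedged TX-direction example, with the FIFO address and request line made up for illustration:

    /* Illustrative: configure a channel for memory-to-device transfers. */
    struct dma_slave_config cfg = {
            .direction      = DMA_MEM_TO_DEV,
            .dst_addr       = fifo_phys,    /* hypothetical device FIFO */
            .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .dst_maxburst   = 32,           /* maps to DCMD_BURST32 */
            .slave_id       = drcmr_line,   /* hypothetical DRCMR request */
    };

    ret = dmaengine_slave_config(chan, &cfg);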
633 | static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, | ||
634 | dma_cookie_t cookie, struct dma_tx_state *txstate) | ||
635 | { | ||
636 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
637 | enum dma_status ret; | ||
638 | unsigned long flags; | ||
639 | |||
640 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
641 | ret = dma_cookie_status(dchan, cookie, txstate); | ||
642 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
643 | |||
644 | return ret; | ||
645 | } | ||
646 | |||
647 | /** | ||
648 | * mmp_pdma_issue_pending - Issue the DMA start command | ||
649 | * pending list ==> running list | ||
650 | */ | ||
651 | static void mmp_pdma_issue_pending(struct dma_chan *dchan) | ||
652 | { | ||
653 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
654 | unsigned long flags; | ||
655 | |||
656 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
657 | start_pending_queue(chan); | ||
658 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
659 | } | ||
660 | |||
661 | /* | ||
662 | * dma_do_tasklet | ||
663 | * Run the completion callback for each finished descriptor, | ||
664 | * then start the pending list. | ||
665 | */ | ||
666 | static void dma_do_tasklet(unsigned long data) | ||
667 | { | ||
668 | struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data; | ||
669 | struct mmp_pdma_desc_sw *desc, *_desc; | ||
670 | LIST_HEAD(chain_cleanup); | ||
671 | unsigned long flags; | ||
672 | |||
673 | /* submit pending list; callback for each desc; free desc */ | ||
674 | |||
675 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
676 | |||
677 | /* update the cookie if we have some descriptors to cleanup */ | ||
678 | if (!list_empty(&chan->chain_running)) { | ||
679 | dma_cookie_t cookie; | ||
680 | |||
681 | desc = to_mmp_pdma_desc(chan->chain_running.prev); | ||
682 | cookie = desc->async_tx.cookie; | ||
683 | dma_cookie_complete(&desc->async_tx); | ||
684 | |||
685 | dev_dbg(chan->dev, "completed_cookie=%d\n", cookie); | ||
686 | } | ||
687 | |||
688 | /* | ||
689 | * move the descriptors to a temporary list so we can drop the lock | ||
690 | * during the entire cleanup operation | ||
691 | */ | ||
692 | list_splice_tail_init(&chan->chain_running, &chain_cleanup); | ||
693 | |||
694 | /* the hardware is now idle and ready for more */ | ||
695 | chan->idle = true; | ||
696 | |||
697 | /* Start any pending transactions automatically */ | ||
698 | start_pending_queue(chan); | ||
699 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
700 | |||
701 | /* Run the callback for each descriptor, in order */ | ||
702 | list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) { | ||
703 | struct dma_async_tx_descriptor *txd = &desc->async_tx; | ||
704 | |||
705 | /* Remove from the list of transactions */ | ||
706 | list_del(&desc->node); | ||
707 | /* Run the link descriptor callback function */ | ||
708 | if (txd->callback) | ||
709 | txd->callback(txd->callback_param); | ||
710 | |||
711 | dma_pool_free(chan->desc_pool, desc, txd->phys); | ||
712 | } | ||
713 | } | ||
714 | |||
715 | static int __devexit mmp_pdma_remove(struct platform_device *op) | ||
716 | { | ||
717 | struct mmp_pdma_device *pdev = platform_get_drvdata(op); | ||
718 | |||
719 | dma_async_device_unregister(&pdev->device); | ||
720 | return 0; | ||
721 | } | ||
722 | |||
723 | static int __devinit mmp_pdma_chan_init(struct mmp_pdma_device *pdev, | ||
724 | int idx, int irq) | ||
725 | { | ||
726 | struct mmp_pdma_phy *phy = &pdev->phy[idx]; | ||
727 | struct mmp_pdma_chan *chan; | ||
728 | int ret; | ||
729 | |||
730 | chan = devm_kzalloc(pdev->dev, | ||
731 | sizeof(struct mmp_pdma_chan), GFP_KERNEL); | ||
732 | if (chan == NULL) | ||
733 | return -ENOMEM; | ||
734 | |||
735 | phy->idx = idx; | ||
736 | phy->base = pdev->base; | ||
737 | |||
738 | if (irq) { | ||
739 | ret = devm_request_irq(pdev->dev, irq, | ||
740 | mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy); | ||
741 | if (ret) { | ||
742 | dev_err(pdev->dev, "channel request irq fail!\n"); | ||
743 | return ret; | ||
744 | } | ||
745 | } | ||
746 | |||
747 | spin_lock_init(&chan->desc_lock); | ||
748 | chan->dev = pdev->dev; | ||
749 | chan->chan.device = &pdev->device; | ||
750 | tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); | ||
751 | INIT_LIST_HEAD(&chan->chain_pending); | ||
752 | INIT_LIST_HEAD(&chan->chain_running); | ||
753 | |||
754 | /* register the virtual channel with the dma engine */ | ||
755 | list_add_tail(&chan->chan.device_node, | ||
756 | &pdev->device.channels); | ||
757 | |||
758 | return 0; | ||
759 | } | ||
760 | |||
761 | static struct of_device_id mmp_pdma_dt_ids[] = { | ||
762 | { .compatible = "marvell,pdma-1.0", }, | ||
763 | {} | ||
764 | }; | ||
765 | MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids); | ||
766 | |||
767 | static int __devinit mmp_pdma_probe(struct platform_device *op) | ||
768 | { | ||
769 | struct mmp_pdma_device *pdev; | ||
770 | const struct of_device_id *of_id; | ||
771 | struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); | ||
772 | struct resource *iores; | ||
773 | int i, ret, irq = 0; | ||
774 | int dma_channels = 0, irq_num = 0; | ||
775 | |||
776 | pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); | ||
777 | if (!pdev) | ||
778 | return -ENOMEM; | ||
779 | pdev->dev = &op->dev; | ||
780 | |||
781 | iores = platform_get_resource(op, IORESOURCE_MEM, 0); | ||
782 | if (!iores) | ||
783 | return -EINVAL; | ||
784 | |||
785 | pdev->base = devm_request_and_ioremap(pdev->dev, iores); | ||
786 | if (!pdev->base) | ||
787 | return -EADDRNOTAVAIL; | ||
788 | |||
789 | of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); | ||
790 | if (of_id) | ||
791 | of_property_read_u32(pdev->dev->of_node, | ||
792 | "#dma-channels", &dma_channels); | ||
793 | else if (pdata && pdata->dma_channels) | ||
794 | dma_channels = pdata->dma_channels; | ||
795 | else | ||
796 | dma_channels = 32; /* default: 32 channels */ | ||
797 | pdev->dma_channels = dma_channels; | ||
798 | |||
799 | for (i = 0; i < dma_channels; i++) { | ||
800 | if (platform_get_irq(op, i) > 0) | ||
801 | irq_num++; | ||
802 | } | ||
803 | |||
804 | pdev->phy = devm_kzalloc(pdev->dev, | ||
805 | dma_channels * sizeof(struct mmp_pdma_phy), GFP_KERNEL); | ||
806 | if (pdev->phy == NULL) | ||
807 | return -ENOMEM; | ||
808 | |||
809 | INIT_LIST_HEAD(&pdev->device.channels); | ||
810 | |||
811 | if (irq_num != dma_channels) { | ||
812 | /* all channels share one irq; demux inside the handler */ | ||
813 | irq = platform_get_irq(op, 0); | ||
814 | ret = devm_request_irq(pdev->dev, irq, | ||
815 | mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev); | ||
816 | if (ret) | ||
817 | return ret; | ||
818 | } | ||
819 | |||
820 | for (i = 0; i < dma_channels; i++) { | ||
821 | irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i); | ||
822 | ret = mmp_pdma_chan_init(pdev, i, irq); | ||
823 | if (ret) | ||
824 | return ret; | ||
825 | } | ||
826 | |||
827 | dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); | ||
828 | dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask); | ||
830 | pdev->device.dev = &op->dev; | ||
831 | pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources; | ||
832 | pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources; | ||
833 | pdev->device.device_tx_status = mmp_pdma_tx_status; | ||
834 | pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy; | ||
835 | pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; | ||
836 | pdev->device.device_issue_pending = mmp_pdma_issue_pending; | ||
837 | pdev->device.device_control = mmp_pdma_control; | ||
838 | pdev->device.copy_align = PDMA_ALIGNMENT; | ||
839 | |||
840 | if (pdev->dev->coherent_dma_mask) | ||
841 | dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask); | ||
842 | else | ||
843 | dma_set_mask(pdev->dev, DMA_BIT_MASK(64)); | ||
844 | |||
845 | ret = dma_async_device_register(&pdev->device); | ||
846 | if (ret) { | ||
847 | dev_err(pdev->device.dev, "unable to register\n"); | ||
848 | return ret; | ||
849 | } | ||
850 | |||
851 | dev_info(pdev->device.dev, "initialized\n"); | ||
852 | return 0; | ||
853 | } | ||
854 | |||
855 | static const struct platform_device_id mmp_pdma_id_table[] = { | ||
856 | { "mmp-pdma", }, | ||
857 | { }, | ||
858 | }; | ||
859 | |||
860 | static struct platform_driver mmp_pdma_driver = { | ||
861 | .driver = { | ||
862 | .name = "mmp-pdma", | ||
863 | .owner = THIS_MODULE, | ||
864 | .of_match_table = mmp_pdma_dt_ids, | ||
865 | }, | ||
866 | .id_table = mmp_pdma_id_table, | ||
867 | .probe = mmp_pdma_probe, | ||
868 | .remove = __devexit_p(mmp_pdma_remove), | ||
869 | }; | ||
870 | |||
871 | module_platform_driver(mmp_pdma_driver); | ||
872 | |||
873 | MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver"); | ||
874 | MODULE_AUTHOR("Marvell International Ltd."); | ||
875 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 6d9c82e891d7..f3e8d71bcbc7 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <mach/regs-icu.h> | 21 | #include <mach/regs-icu.h> |
22 | #include <linux/platform_data/dma-mmp_tdma.h> | 22 | #include <linux/platform_data/dma-mmp_tdma.h> |
23 | #include <linux/of_device.h> | ||
23 | 24 | ||
24 | #include "dmaengine.h" | 25 | #include "dmaengine.h" |
25 | 26 | ||
@@ -127,7 +128,6 @@ struct mmp_tdma_device { | |||
127 | void __iomem *base; | 128 | void __iomem *base; |
128 | struct dma_device device; | 129 | struct dma_device device; |
129 | struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM]; | 130 | struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM]; |
130 | int irq; | ||
131 | }; | 131 | }; |
132 | 132 | ||
133 | #define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan) | 133 | #define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan) |
@@ -492,7 +492,7 @@ static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev, | |||
492 | return -ENOMEM; | 492 | return -ENOMEM; |
493 | } | 493 | } |
494 | if (irq) | 494 | if (irq) |
495 | tdmac->irq = irq + idx; | 495 | tdmac->irq = irq; |
496 | tdmac->dev = tdev->dev; | 496 | tdmac->dev = tdev->dev; |
497 | tdmac->chan.device = &tdev->device; | 497 | tdmac->chan.device = &tdev->device; |
498 | tdmac->idx = idx; | 498 | tdmac->idx = idx; |
@@ -505,34 +505,43 @@ static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev, | |||
505 | /* add the channel to tdma_chan list */ | 505 | /* add the channel to tdma_chan list */ |
506 | list_add_tail(&tdmac->chan.device_node, | 506 | list_add_tail(&tdmac->chan.device_node, |
507 | &tdev->device.channels); | 507 | &tdev->device.channels); |
508 | |||
509 | return 0; | 508 | return 0; |
510 | } | 509 | } |
511 | 510 | ||
511 | static struct of_device_id mmp_tdma_dt_ids[] = { | ||
512 | { .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA}, | ||
513 | { .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU}, | ||
514 | {} | ||
515 | }; | ||
516 | MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids); | ||
517 | |||
512 | static int __devinit mmp_tdma_probe(struct platform_device *pdev) | 518 | static int __devinit mmp_tdma_probe(struct platform_device *pdev) |
513 | { | 519 | { |
514 | const struct platform_device_id *id = platform_get_device_id(pdev); | 520 | enum mmp_tdma_type type; |
515 | enum mmp_tdma_type type = id->driver_data; | 521 | const struct of_device_id *of_id; |
516 | struct mmp_tdma_device *tdev; | 522 | struct mmp_tdma_device *tdev; |
517 | struct resource *iores; | 523 | struct resource *iores; |
518 | int i, ret; | 524 | int i, ret; |
519 | int irq = 0; | 525 | int irq = 0, irq_num = 0; |
520 | int chan_num = TDMA_CHANNEL_NUM; | 526 | int chan_num = TDMA_CHANNEL_NUM; |
521 | 527 | ||
528 | of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev); | ||
529 | if (of_id) | ||
530 | type = (enum mmp_tdma_type) of_id->data; | ||
531 | else | ||
532 | type = platform_get_device_id(pdev)->driver_data; | ||
533 | |||
522 | /* always have couple channels */ | 534 | /* always have couple channels */ |
523 | tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); | 535 | tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); |
524 | if (!tdev) | 536 | if (!tdev) |
525 | return -ENOMEM; | 537 | return -ENOMEM; |
526 | 538 | ||
527 | tdev->dev = &pdev->dev; | 539 | tdev->dev = &pdev->dev; |
528 | iores = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
529 | if (!iores) | ||
530 | return -EINVAL; | ||
531 | 540 | ||
532 | if (resource_size(iores) != chan_num) | 541 | for (i = 0; i < chan_num; i++) { |
533 | tdev->irq = iores->start; | 542 | if (platform_get_irq(pdev, i) > 0) |
534 | else | 543 | irq_num++; |
535 | irq = iores->start; | 544 | } |
536 | 545 | ||
537 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 546 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
538 | if (!iores) | 547 | if (!iores) |
@@ -542,25 +551,26 @@ static int __devinit mmp_tdma_probe(struct platform_device *pdev) | |||
542 | if (!tdev->base) | 551 | if (!tdev->base) |
543 | return -EADDRNOTAVAIL; | 552 | return -EADDRNOTAVAIL; |
544 | 553 | ||
545 | if (tdev->irq) { | 554 | INIT_LIST_HEAD(&tdev->device.channels); |
546 | ret = devm_request_irq(&pdev->dev, tdev->irq, | 555 | |
556 | if (irq_num != chan_num) { | ||
557 | irq = platform_get_irq(pdev, 0); | ||
558 | ret = devm_request_irq(&pdev->dev, irq, | ||
547 | mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); | 559 | mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); |
548 | if (ret) | 560 | if (ret) |
549 | return ret; | 561 | return ret; |
550 | } | 562 | } |
551 | 563 | ||
552 | dma_cap_set(DMA_SLAVE, tdev->device.cap_mask); | ||
553 | dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask); | ||
554 | |||
555 | INIT_LIST_HEAD(&tdev->device.channels); | ||
556 | |||
557 | /* initialize channel parameters */ | 564 | /* initialize channel parameters */ |
558 | for (i = 0; i < chan_num; i++) { | 565 | for (i = 0; i < chan_num; i++) { |
566 | irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i); | ||
559 | ret = mmp_tdma_chan_init(tdev, i, irq, type); | 567 | ret = mmp_tdma_chan_init(tdev, i, irq, type); |
560 | if (ret) | 568 | if (ret) |
561 | return ret; | 569 | return ret; |
562 | } | 570 | } |
563 | 571 | ||
572 | dma_cap_set(DMA_SLAVE, tdev->device.cap_mask); | ||
573 | dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask); | ||
564 | tdev->device.dev = &pdev->dev; | 574 | tdev->device.dev = &pdev->dev; |
565 | tdev->device.device_alloc_chan_resources = | 575 | tdev->device.device_alloc_chan_resources = |
566 | mmp_tdma_alloc_chan_resources; | 576 | mmp_tdma_alloc_chan_resources; |
@@ -595,6 +605,7 @@ static struct platform_driver mmp_tdma_driver = { | |||
595 | .driver = { | 605 | .driver = { |
596 | .name = "mmp-tdma", | 606 | .name = "mmp-tdma", |
597 | .owner = THIS_MODULE, | 607 | .owner = THIS_MODULE, |
608 | .of_match_table = mmp_tdma_dt_ids, | ||
598 | }, | 609 | }, |
599 | .id_table = mmp_tdma_id_table, | 610 | .id_table = mmp_tdma_id_table, |
600 | .probe = mmp_tdma_probe, | 611 | .probe = mmp_tdma_probe, |
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 734a4eb84d65..9f02e794b12b 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -101,7 +101,8 @@ struct mxs_dma_ccw { | |||
101 | u32 pio_words[MXS_PIO_WORDS]; | 101 | u32 pio_words[MXS_PIO_WORDS]; |
102 | }; | 102 | }; |
103 | 103 | ||
104 | #define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw)) | 104 | #define CCW_BLOCK_SIZE (4 * PAGE_SIZE) |
105 | #define NUM_CCW (int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw)) | ||
105 | 106 | ||
106 | struct mxs_dma_chan { | 107 | struct mxs_dma_chan { |
107 | struct mxs_dma_engine *mxs_dma; | 108 | struct mxs_dma_engine *mxs_dma; |
@@ -354,14 +355,15 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | |||
354 | 355 | ||
355 | mxs_chan->chan_irq = data->chan_irq; | 356 | mxs_chan->chan_irq = data->chan_irq; |
356 | 357 | ||
357 | mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, | 358 | mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, |
358 | &mxs_chan->ccw_phys, GFP_KERNEL); | 359 | CCW_BLOCK_SIZE, &mxs_chan->ccw_phys, |
360 | GFP_KERNEL); | ||
359 | if (!mxs_chan->ccw) { | 361 | if (!mxs_chan->ccw) { |
360 | ret = -ENOMEM; | 362 | ret = -ENOMEM; |
361 | goto err_alloc; | 363 | goto err_alloc; |
362 | } | 364 | } |
363 | 365 | ||
364 | memset(mxs_chan->ccw, 0, PAGE_SIZE); | 366 | memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE); |
365 | 367 | ||
366 | if (mxs_chan->chan_irq != NO_IRQ) { | 368 | if (mxs_chan->chan_irq != NO_IRQ) { |
367 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, | 369 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, |
@@ -387,7 +389,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | |||
387 | err_clk: | 389 | err_clk: |
388 | free_irq(mxs_chan->chan_irq, mxs_dma); | 390 | free_irq(mxs_chan->chan_irq, mxs_dma); |
389 | err_irq: | 391 | err_irq: |
390 | dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, | 392 | dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE, |
391 | mxs_chan->ccw, mxs_chan->ccw_phys); | 393 | mxs_chan->ccw, mxs_chan->ccw_phys); |
392 | err_alloc: | 394 | err_alloc: |
393 | return ret; | 395 | return ret; |
@@ -402,7 +404,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan) | |||
402 | 404 | ||
403 | free_irq(mxs_chan->chan_irq, mxs_dma); | 405 | free_irq(mxs_chan->chan_irq, mxs_dma); |
404 | 406 | ||
405 | dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, | 407 | dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE, |
406 | mxs_chan->ccw, mxs_chan->ccw_phys); | 408 | mxs_chan->ccw, mxs_chan->ccw_phys); |
407 | 409 | ||
408 | clk_disable_unprepare(mxs_dma->clk); | 410 | clk_disable_unprepare(mxs_dma->clk); |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 169c0dbd71ae..665668b6f2b1 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/dmaengine.h> | 23 | #include <linux/dmaengine.h> |
24 | #include <linux/amba/bus.h> | 24 | #include <linux/amba/bus.h> |
25 | #include <linux/amba/pl330.h> | 25 | #include <linux/amba/pl330.h> |
26 | #include <linux/pm_runtime.h> | ||
27 | #include <linux/scatterlist.h> | 26 | #include <linux/scatterlist.h> |
28 | #include <linux/of.h> | 27 | #include <linux/of.h> |
29 | 28 | ||
@@ -586,8 +585,6 @@ struct dma_pl330_dmac { | |||
586 | 585 | ||
587 | /* Peripheral channels connected to this DMAC */ | 586 | /* Peripheral channels connected to this DMAC */ |
588 | struct dma_pl330_chan *peripherals; /* keep at end */ | 587 | struct dma_pl330_chan *peripherals; /* keep at end */ |
589 | |||
590 | struct clk *clk; | ||
591 | }; | 588 | }; |
592 | 589 | ||
593 | struct dma_pl330_desc { | 590 | struct dma_pl330_desc { |
@@ -2395,7 +2392,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) | |||
2395 | pch->pl330_chid = pl330_request_channel(&pdmac->pif); | 2392 | pch->pl330_chid = pl330_request_channel(&pdmac->pif); |
2396 | if (!pch->pl330_chid) { | 2393 | if (!pch->pl330_chid) { |
2397 | spin_unlock_irqrestore(&pch->lock, flags); | 2394 | spin_unlock_irqrestore(&pch->lock, flags); |
2398 | return 0; | 2395 | return -ENOMEM; |
2399 | } | 2396 | } |
2400 | 2397 | ||
2401 | tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); | 2398 | tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); |
@@ -2889,29 +2886,17 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2889 | goto probe_err1; | 2886 | goto probe_err1; |
2890 | } | 2887 | } |
2891 | 2888 | ||
2892 | pdmac->clk = clk_get(&adev->dev, "dma"); | ||
2893 | if (IS_ERR(pdmac->clk)) { | ||
2894 | dev_err(&adev->dev, "Cannot get operation clock.\n"); | ||
2895 | ret = -EINVAL; | ||
2896 | goto probe_err2; | ||
2897 | } | ||
2898 | |||
2899 | amba_set_drvdata(adev, pdmac); | 2889 | amba_set_drvdata(adev, pdmac); |
2900 | 2890 | ||
2901 | #ifndef CONFIG_PM_RUNTIME | ||
2902 | /* enable dma clk */ | ||
2903 | clk_enable(pdmac->clk); | ||
2904 | #endif | ||
2905 | |||
2906 | irq = adev->irq[0]; | 2891 | irq = adev->irq[0]; |
2907 | ret = request_irq(irq, pl330_irq_handler, 0, | 2892 | ret = request_irq(irq, pl330_irq_handler, 0, |
2908 | dev_name(&adev->dev), pi); | 2893 | dev_name(&adev->dev), pi); |
2909 | if (ret) | 2894 | if (ret) |
2910 | goto probe_err3; | 2895 | goto probe_err2; |
2911 | 2896 | ||
2912 | ret = pl330_add(pi); | 2897 | ret = pl330_add(pi); |
2913 | if (ret) | 2898 | if (ret) |
2914 | goto probe_err4; | 2899 | goto probe_err3; |
2915 | 2900 | ||
2916 | INIT_LIST_HEAD(&pdmac->desc_pool); | 2901 | INIT_LIST_HEAD(&pdmac->desc_pool); |
2917 | spin_lock_init(&pdmac->pool_lock); | 2902 | spin_lock_init(&pdmac->pool_lock); |
@@ -2933,7 +2918,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2933 | if (!pdmac->peripherals) { | 2918 | if (!pdmac->peripherals) { |
2934 | ret = -ENOMEM; | 2919 | ret = -ENOMEM; |
2935 | dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); | 2920 | dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); |
2936 | goto probe_err5; | 2921 | goto probe_err4; |
2937 | } | 2922 | } |
2938 | 2923 | ||
2939 | for (i = 0; i < num_chan; i++) { | 2924 | for (i = 0; i < num_chan; i++) { |
@@ -2961,6 +2946,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2961 | if (pi->pcfg.num_peri) { | 2946 | if (pi->pcfg.num_peri) { |
2962 | dma_cap_set(DMA_SLAVE, pd->cap_mask); | 2947 | dma_cap_set(DMA_SLAVE, pd->cap_mask); |
2963 | dma_cap_set(DMA_CYCLIC, pd->cap_mask); | 2948 | dma_cap_set(DMA_CYCLIC, pd->cap_mask); |
2949 | dma_cap_set(DMA_PRIVATE, pd->cap_mask); | ||
2964 | } | 2950 | } |
2965 | } | 2951 | } |
2966 | 2952 | ||
@@ -2976,7 +2962,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2976 | ret = dma_async_device_register(pd); | 2962 | ret = dma_async_device_register(pd); |
2977 | if (ret) { | 2963 | if (ret) { |
2978 | dev_err(&adev->dev, "unable to register DMAC\n"); | 2964 | dev_err(&adev->dev, "unable to register DMAC\n"); |
2979 | goto probe_err5; | 2965 | goto probe_err4; |
2980 | } | 2966 | } |
2981 | 2967 | ||
2982 | dev_info(&adev->dev, | 2968 | dev_info(&adev->dev, |
@@ -2989,15 +2975,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2989 | 2975 | ||
2990 | return 0; | 2976 | return 0; |
2991 | 2977 | ||
2992 | probe_err5: | ||
2993 | pl330_del(pi); | ||
2994 | probe_err4: | 2978 | probe_err4: |
2995 | free_irq(irq, pi); | 2979 | pl330_del(pi); |
2996 | probe_err3: | 2980 | probe_err3: |
2997 | #ifndef CONFIG_PM_RUNTIME | 2981 | free_irq(irq, pi); |
2998 | clk_disable(pdmac->clk); | ||
2999 | #endif | ||
3000 | clk_put(pdmac->clk); | ||
3001 | probe_err2: | 2982 | probe_err2: |
3002 | iounmap(pi->base); | 2983 | iounmap(pi->base); |
3003 | probe_err1: | 2984 | probe_err1: |
@@ -3044,10 +3025,6 @@ static int __devexit pl330_remove(struct amba_device *adev) | |||
3044 | res = &adev->res; | 3025 | res = &adev->res; |
3045 | release_mem_region(res->start, resource_size(res)); | 3026 | release_mem_region(res->start, resource_size(res)); |
3046 | 3027 | ||
3047 | #ifndef CONFIG_PM_RUNTIME | ||
3048 | clk_disable(pdmac->clk); | ||
3049 | #endif | ||
3050 | |||
3051 | kfree(pdmac); | 3028 | kfree(pdmac); |
3052 | 3029 | ||
3053 | return 0; | 3030 | return 0; |
@@ -3063,49 +3040,10 @@ static struct amba_id pl330_ids[] = { | |||
3063 | 3040 | ||
3064 | MODULE_DEVICE_TABLE(amba, pl330_ids); | 3041 | MODULE_DEVICE_TABLE(amba, pl330_ids); |
3065 | 3042 | ||
3066 | #ifdef CONFIG_PM_RUNTIME | ||
3067 | static int pl330_runtime_suspend(struct device *dev) | ||
3068 | { | ||
3069 | struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev); | ||
3070 | |||
3071 | if (!pdmac) { | ||
3072 | dev_err(dev, "failed to get dmac\n"); | ||
3073 | return -ENODEV; | ||
3074 | } | ||
3075 | |||
3076 | clk_disable(pdmac->clk); | ||
3077 | |||
3078 | return 0; | ||
3079 | } | ||
3080 | |||
3081 | static int pl330_runtime_resume(struct device *dev) | ||
3082 | { | ||
3083 | struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev); | ||
3084 | |||
3085 | if (!pdmac) { | ||
3086 | dev_err(dev, "failed to get dmac\n"); | ||
3087 | return -ENODEV; | ||
3088 | } | ||
3089 | |||
3090 | clk_enable(pdmac->clk); | ||
3091 | |||
3092 | return 0; | ||
3093 | } | ||
3094 | #else | ||
3095 | #define pl330_runtime_suspend NULL | ||
3096 | #define pl330_runtime_resume NULL | ||
3097 | #endif /* CONFIG_PM_RUNTIME */ | ||
3098 | |||
3099 | static const struct dev_pm_ops pl330_pm_ops = { | ||
3100 | .runtime_suspend = pl330_runtime_suspend, | ||
3101 | .runtime_resume = pl330_runtime_resume, | ||
3102 | }; | ||
3103 | |||
3104 | static struct amba_driver pl330_driver = { | 3043 | static struct amba_driver pl330_driver = { |
3105 | .drv = { | 3044 | .drv = { |
3106 | .owner = THIS_MODULE, | 3045 | .owner = THIS_MODULE, |
3107 | .name = "dma-pl330", | 3046 | .name = "dma-pl330", |
3108 | .pm = &pl330_pm_ops, | ||
3109 | }, | 3047 | }, |
3110 | .id_table = pl330_ids, | 3048 | .id_table = pl330_ids, |
3111 | .probe = pl330_probe, | 3049 | .probe = pl330_probe, |
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 3eed8b35b0f1..64385cde044b 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c | |||
@@ -570,21 +570,19 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op) | |||
570 | 570 | ||
571 | if (of_property_read_u32(dn, "cell-index", &id)) { | 571 | if (of_property_read_u32(dn, "cell-index", &id)) { |
572 | dev_err(dev, "Fail to get DMAC index\n"); | 572 | dev_err(dev, "Fail to get DMAC index\n"); |
573 | ret = -ENODEV; | 573 | return -ENODEV; |
574 | goto free_mem; | ||
575 | } | 574 | } |
576 | 575 | ||
577 | sdma->irq = irq_of_parse_and_map(dn, 0); | 576 | sdma->irq = irq_of_parse_and_map(dn, 0); |
578 | if (sdma->irq == NO_IRQ) { | 577 | if (sdma->irq == NO_IRQ) { |
579 | dev_err(dev, "Error mapping IRQ!\n"); | 578 | dev_err(dev, "Error mapping IRQ!\n"); |
580 | ret = -EINVAL; | 579 | return -EINVAL; |
581 | goto free_mem; | ||
582 | } | 580 | } |
583 | 581 | ||
584 | ret = of_address_to_resource(dn, 0, &res); | 582 | ret = of_address_to_resource(dn, 0, &res); |
585 | if (ret) { | 583 | if (ret) { |
586 | dev_err(dev, "Error parsing memory region!\n"); | 584 | dev_err(dev, "Error parsing memory region!\n"); |
587 | goto free_mem; | 585 | goto irq_dispose; |
588 | } | 586 | } |
589 | 587 | ||
590 | regs_start = res.start; | 588 | regs_start = res.start; |
@@ -597,12 +595,11 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op) | |||
597 | goto irq_dispose; | 595 | goto irq_dispose; |
598 | } | 596 | } |
599 | 597 | ||
600 | ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, | 598 | ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma); |
601 | sdma); | ||
602 | if (ret) { | 599 | if (ret) { |
603 | dev_err(dev, "Error requesting IRQ!\n"); | 600 | dev_err(dev, "Error requesting IRQ!\n"); |
604 | ret = -EINVAL; | 601 | ret = -EINVAL; |
605 | goto unmap_mem; | 602 | goto irq_dispose; |
606 | } | 603 | } |
607 | 604 | ||
608 | dma = &sdma->dma; | 605 | dma = &sdma->dma; |
@@ -652,13 +649,9 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op) | |||
652 | return 0; | 649 | return 0; |
653 | 650 | ||
654 | free_irq: | 651 | free_irq: |
655 | devm_free_irq(dev, sdma->irq, sdma); | 652 | free_irq(sdma->irq, sdma); |
656 | irq_dispose: | 653 | irq_dispose: |
657 | irq_dispose_mapping(sdma->irq); | 654 | irq_dispose_mapping(sdma->irq); |
658 | unmap_mem: | ||
659 | iounmap(sdma->base); | ||
660 | free_mem: | ||
661 | devm_kfree(dev, sdma); | ||
662 | return ret; | 655 | return ret; |
663 | } | 656 | } |
664 | 657 | ||
@@ -668,10 +661,8 @@ static int __devexit sirfsoc_dma_remove(struct platform_device *op) | |||
668 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | 661 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); |
669 | 662 | ||
670 | dma_async_device_unregister(&sdma->dma); | 663 | dma_async_device_unregister(&sdma->dma); |
671 | devm_free_irq(dev, sdma->irq, sdma); | 664 | free_irq(sdma->irq, sdma); |
672 | irq_dispose_mapping(sdma->irq); | 665 | irq_dispose_mapping(sdma->irq); |
673 | iounmap(sdma->base); | ||
674 | devm_kfree(dev, sdma); | ||
675 | return 0; | 666 | return 0; |
676 | } | 667 | } |
677 | 668 | ||
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index eee8d9b9a20b..ae55091c2272 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -2921,19 +2921,23 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2921 | struct d40_base *base = NULL; | 2921 | struct d40_base *base = NULL; |
2922 | int num_log_chans = 0; | 2922 | int num_log_chans = 0; |
2923 | int num_phy_chans; | 2923 | int num_phy_chans; |
2924 | int clk_ret = -EINVAL; | ||
2924 | int i; | 2925 | int i; |
2925 | u32 pid; | 2926 | u32 pid; |
2926 | u32 cid; | 2927 | u32 cid; |
2927 | u8 rev; | 2928 | u8 rev; |
2928 | 2929 | ||
2929 | clk = clk_get(&pdev->dev, NULL); | 2930 | clk = clk_get(&pdev->dev, NULL); |
2930 | |||
2931 | if (IS_ERR(clk)) { | 2931 | if (IS_ERR(clk)) { |
2932 | d40_err(&pdev->dev, "No matching clock found\n"); | 2932 | d40_err(&pdev->dev, "No matching clock found\n"); |
2933 | goto failure; | 2933 | goto failure; |
2934 | } | 2934 | } |
2935 | 2935 | ||
2936 | clk_enable(clk); | 2936 | clk_ret = clk_prepare_enable(clk); |
2937 | if (clk_ret) { | ||
2938 | d40_err(&pdev->dev, "Failed to prepare/enable clock\n"); | ||
2939 | goto failure; | ||
2940 | } | ||
2937 | 2941 | ||
2938 | /* Get IO for DMAC base address */ | 2942 | /* Get IO for DMAC base address */ |
2939 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); | 2943 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); |
@@ -3063,10 +3067,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
3063 | return base; | 3067 | return base; |
3064 | 3068 | ||
3065 | failure: | 3069 | failure: |
3066 | if (!IS_ERR(clk)) { | 3070 | if (!clk_ret) |
3067 | clk_disable(clk); | 3071 | clk_disable_unprepare(clk); |
3072 | if (!IS_ERR(clk)) | ||
3068 | clk_put(clk); | 3073 | clk_put(clk); |
3069 | } | ||
3070 | if (virtbase) | 3074 | if (virtbase) |
3071 | iounmap(virtbase); | 3075 | iounmap(virtbase); |
3072 | if (res) | 3076 | if (res) |
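The ste_dma40 change converts a bare clk_enable() to clk_prepare_enable() and records the result in clk_ret, so the shared failure label only unprepares a clock that was actually enabled. A minimal sketch of the balanced pattern, assuming a hypothetical caller:

#include <linux/clk.h>
#include <linux/err.h>

static int example_clk_init(struct device *dev, struct clk **out)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare + enable in one call */
	if (ret) {
		clk_put(clk);		/* enable failed: nothing to unprepare */
		return ret;
	}

	*out = clk;
	return 0;
}

static void example_clk_exit(struct clk *clk)
{
	clk_disable_unprepare(clk);	/* balances clk_prepare_enable() */
	clk_put(clk);
}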
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 45fbeed1c1a5..528c62dd4b00 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
@@ -169,6 +169,7 @@ typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc, | |||
169 | /* tegra_dma_channel: Channel specific information */ | 169 | /* tegra_dma_channel: Channel specific information */ |
170 | struct tegra_dma_channel { | 170 | struct tegra_dma_channel { |
171 | struct dma_chan dma_chan; | 171 | struct dma_chan dma_chan; |
172 | char name[30]; | ||
172 | bool config_init; | 173 | bool config_init; |
173 | int id; | 174 | int id; |
174 | int irq; | 175 | int irq; |
@@ -475,8 +476,7 @@ static void tegra_dma_abort_all(struct tegra_dma_channel *tdc) | |||
475 | while (!list_empty(&tdc->pending_sg_req)) { | 476 | while (!list_empty(&tdc->pending_sg_req)) { |
476 | sgreq = list_first_entry(&tdc->pending_sg_req, | 477 | sgreq = list_first_entry(&tdc->pending_sg_req, |
477 | typeof(*sgreq), node); | 478 | typeof(*sgreq), node); |
478 | list_del(&sgreq->node); | 479 | list_move_tail(&sgreq->node, &tdc->free_sg_req); |
479 | list_add_tail(&sgreq->node, &tdc->free_sg_req); | ||
480 | if (sgreq->last_sg) { | 480 | if (sgreq->last_sg) { |
481 | dma_desc = sgreq->dma_desc; | 481 | dma_desc = sgreq->dma_desc; |
482 | dma_desc->dma_status = DMA_ERROR; | 482 | dma_desc->dma_status = DMA_ERROR; |
@@ -570,8 +570,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, | |||
570 | 570 | ||
571 | /* If not last req then put at end of pending list */ | 571 | /* If not last req then put at end of pending list */ |
572 | if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) { | 572 | if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) { |
573 | list_del(&sgreq->node); | 573 | list_move_tail(&sgreq->node, &tdc->pending_sg_req); |
574 | list_add_tail(&sgreq->node, &tdc->pending_sg_req); | ||
575 | sgreq->configured = false; | 574 | sgreq->configured = false; |
576 | st = handle_continuous_head_request(tdc, sgreq, to_terminate); | 575 | st = handle_continuous_head_request(tdc, sgreq, to_terminate); |
577 | if (!st) | 576 | if (!st) |
@@ -1284,7 +1283,6 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev) | |||
1284 | INIT_LIST_HEAD(&tdma->dma_dev.channels); | 1283 | INIT_LIST_HEAD(&tdma->dma_dev.channels); |
1285 | for (i = 0; i < cdata->nr_channels; i++) { | 1284 | for (i = 0; i < cdata->nr_channels; i++) { |
1286 | struct tegra_dma_channel *tdc = &tdma->channels[i]; | 1285 | struct tegra_dma_channel *tdc = &tdma->channels[i]; |
1287 | char irq_name[30]; | ||
1288 | 1286 | ||
1289 | tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + | 1287 | tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + |
1290 | i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; | 1288 | i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; |
@@ -1296,9 +1294,9 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev) | |||
1296 | goto err_irq; | 1294 | goto err_irq; |
1297 | } | 1295 | } |
1298 | tdc->irq = res->start; | 1296 | tdc->irq = res->start; |
1299 | snprintf(irq_name, sizeof(irq_name), "apbdma.%d", i); | 1297 | snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i); |
1300 | ret = devm_request_irq(&pdev->dev, tdc->irq, | 1298 | ret = devm_request_irq(&pdev->dev, tdc->irq, |
1301 | tegra_dma_isr, 0, irq_name, tdc); | 1299 | tegra_dma_isr, 0, tdc->name, tdc); |
1302 | if (ret) { | 1300 | if (ret) { |
1303 | dev_err(&pdev->dev, | 1301 | dev_err(&pdev->dev, |
1304 | "request_irq failed with err %d channel %d\n", | 1302 | "request_irq failed with err %d channel %d\n", |
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 8c2ff2490d99..1acae359cabe 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -134,6 +134,7 @@ config SPI_DAVINCI | |||
134 | tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller" | 134 | tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller" |
135 | depends on ARCH_DAVINCI | 135 | depends on ARCH_DAVINCI |
136 | select SPI_BITBANG | 136 | select SPI_BITBANG |
137 | select TI_EDMA | ||
137 | help | 138 | help |
138 | SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. | 139 | SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. |
139 | 140 | ||
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 3afe2f4f5b8e..147dfa87a64b 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c | |||
@@ -25,13 +25,14 @@ | |||
25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include <linux/err.h> | 26 | #include <linux/err.h> |
27 | #include <linux/clk.h> | 27 | #include <linux/clk.h> |
28 | #include <linux/dmaengine.h> | ||
28 | #include <linux/dma-mapping.h> | 29 | #include <linux/dma-mapping.h> |
30 | #include <linux/edma.h> | ||
29 | #include <linux/spi/spi.h> | 31 | #include <linux/spi/spi.h> |
30 | #include <linux/spi/spi_bitbang.h> | 32 | #include <linux/spi/spi_bitbang.h> |
31 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
32 | 34 | ||
33 | #include <linux/platform_data/spi-davinci.h> | 35 | #include <linux/platform_data/spi-davinci.h> |
34 | #include <mach/edma.h> | ||
35 | 36 | ||
36 | #define SPI_NO_RESOURCE ((resource_size_t)-1) | 37 | #define SPI_NO_RESOURCE ((resource_size_t)-1) |
37 | 38 | ||
@@ -113,14 +114,6 @@ | |||
113 | #define SPIDEF 0x4c | 114 | #define SPIDEF 0x4c |
114 | #define SPIFMT0 0x50 | 115 | #define SPIFMT0 0x50 |
115 | 116 | ||
116 | /* We have 2 DMA channels per CS, one for RX and one for TX */ | ||
117 | struct davinci_spi_dma { | ||
118 | int tx_channel; | ||
119 | int rx_channel; | ||
120 | int dummy_param_slot; | ||
121 | enum dma_event_q eventq; | ||
122 | }; | ||
123 | |||
124 | /* SPI Controller driver's private data. */ | 117 | /* SPI Controller driver's private data. */ |
125 | struct davinci_spi { | 118 | struct davinci_spi { |
126 | struct spi_bitbang bitbang; | 119 | struct spi_bitbang bitbang; |
@@ -134,11 +127,14 @@ struct davinci_spi { | |||
134 | 127 | ||
135 | const void *tx; | 128 | const void *tx; |
136 | void *rx; | 129 | void *rx; |
137 | #define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1) | ||
138 | u8 rx_tmp_buf[SPI_TMP_BUFSZ]; | ||
139 | int rcount; | 130 | int rcount; |
140 | int wcount; | 131 | int wcount; |
141 | struct davinci_spi_dma dma; | 132 | |
133 | struct dma_chan *dma_rx; | ||
134 | struct dma_chan *dma_tx; | ||
135 | int dma_rx_chnum; | ||
136 | int dma_tx_chnum; | ||
137 | |||
142 | struct davinci_spi_platform_data *pdata; | 138 | struct davinci_spi_platform_data *pdata; |
143 | 139 | ||
144 | void (*get_rx)(u32 rx_data, struct davinci_spi *); | 140 | void (*get_rx)(u32 rx_data, struct davinci_spi *); |
@@ -496,21 +492,23 @@ out: | |||
496 | return errors; | 492 | return errors; |
497 | } | 493 | } |
498 | 494 | ||
499 | static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data) | 495 | static void davinci_spi_dma_rx_callback(void *data) |
500 | { | 496 | { |
501 | struct davinci_spi *dspi = data; | 497 | struct davinci_spi *dspi = (struct davinci_spi *)data; |
502 | struct davinci_spi_dma *dma = &dspi->dma; | ||
503 | 498 | ||
504 | edma_stop(lch); | 499 | dspi->rcount = 0; |
505 | 500 | ||
506 | if (status == DMA_COMPLETE) { | 501 | if (!dspi->wcount && !dspi->rcount) |
507 | if (lch == dma->rx_channel) | 502 | complete(&dspi->done); |
508 | dspi->rcount = 0; | 503 | } |
509 | if (lch == dma->tx_channel) | ||
510 | dspi->wcount = 0; | ||
511 | } | ||
512 | 504 | ||
513 | if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE)) | 505 | static void davinci_spi_dma_tx_callback(void *data) |
506 | { | ||
507 | struct davinci_spi *dspi = (struct davinci_spi *)data; | ||
508 | |||
509 | dspi->wcount = 0; | ||
510 | |||
511 | if (!dspi->wcount && !dspi->rcount) | ||
514 | complete(&dspi->done); | 512 | complete(&dspi->done); |
515 | } | 513 | } |
516 | 514 | ||
@@ -526,20 +524,20 @@ static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data) | |||
526 | static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) | 524 | static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) |
527 | { | 525 | { |
528 | struct davinci_spi *dspi; | 526 | struct davinci_spi *dspi; |
529 | int data_type, ret; | 527 | int data_type, ret = -ENOMEM; |
530 | u32 tx_data, spidat1; | 528 | u32 tx_data, spidat1; |
531 | u32 errors = 0; | 529 | u32 errors = 0; |
532 | struct davinci_spi_config *spicfg; | 530 | struct davinci_spi_config *spicfg; |
533 | struct davinci_spi_platform_data *pdata; | 531 | struct davinci_spi_platform_data *pdata; |
534 | unsigned uninitialized_var(rx_buf_count); | 532 | unsigned uninitialized_var(rx_buf_count); |
535 | struct device *sdev; | 533 | void *dummy_buf = NULL; |
534 | struct scatterlist sg_rx, sg_tx; | ||
536 | 535 | ||
537 | dspi = spi_master_get_devdata(spi->master); | 536 | dspi = spi_master_get_devdata(spi->master); |
538 | pdata = dspi->pdata; | 537 | pdata = dspi->pdata; |
539 | spicfg = (struct davinci_spi_config *)spi->controller_data; | 538 | spicfg = (struct davinci_spi_config *)spi->controller_data; |
540 | if (!spicfg) | 539 | if (!spicfg) |
541 | spicfg = &davinci_spi_default_cfg; | 540 | spicfg = &davinci_spi_default_cfg; |
542 | sdev = dspi->bitbang.master->dev.parent; | ||
543 | 541 | ||
544 | /* convert len to words based on bits_per_word */ | 542 | /* convert len to words based on bits_per_word */ |
545 | data_type = dspi->bytes_per_word[spi->chip_select]; | 543 | data_type = dspi->bytes_per_word[spi->chip_select]; |
@@ -567,112 +565,83 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) | |||
567 | spidat1 |= tx_data & 0xFFFF; | 565 | spidat1 |= tx_data & 0xFFFF; |
568 | iowrite32(spidat1, dspi->base + SPIDAT1); | 566 | iowrite32(spidat1, dspi->base + SPIDAT1); |
569 | } else { | 567 | } else { |
570 | struct davinci_spi_dma *dma; | 568 | struct dma_slave_config dma_rx_conf = { |
571 | unsigned long tx_reg, rx_reg; | 569 | .direction = DMA_DEV_TO_MEM, |
572 | struct edmacc_param param; | 570 | .src_addr = (unsigned long)dspi->pbase + SPIBUF, |
573 | void *rx_buf; | 571 | .src_addr_width = data_type, |
574 | int b, c; | 572 | .src_maxburst = 1, |
575 | 573 | }; | |
576 | dma = &dspi->dma; | 574 | struct dma_slave_config dma_tx_conf = { |
577 | 575 | .direction = DMA_MEM_TO_DEV, | |
578 | tx_reg = (unsigned long)dspi->pbase + SPIDAT1; | 576 | .dst_addr = (unsigned long)dspi->pbase + SPIDAT1, |
579 | rx_reg = (unsigned long)dspi->pbase + SPIBUF; | 577 | .dst_addr_width = data_type, |
580 | 578 | .dst_maxburst = 1, | |
581 | /* | 579 | }; |
582 | * Transmit DMA setup | 580 | struct dma_async_tx_descriptor *rxdesc; |
583 | * | 581 | struct dma_async_tx_descriptor *txdesc; |
584 | * If there is transmit data, map the transmit buffer, set it | 582 | void *buf; |
585 | * as the source of data and set the source B index to data | 583 | |
586 | * size. If there is no transmit data, set the transmit register | 584 | dummy_buf = kzalloc(t->len, GFP_KERNEL); |
587 | * as the source of data, and set the source B index to zero. | 585 | if (!dummy_buf) |
588 | * | 586 | goto err_alloc_dummy_buf; |
589 | * The destination is always the transmit register itself. And | 587 | |
590 | * the destination never increments. | 588 | dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf); |
591 | */ | 589 | dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf); |
592 | 590 | ||
593 | if (t->tx_buf) { | 591 | sg_init_table(&sg_rx, 1); |
594 | t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, | 592 | if (!t->rx_buf) |
595 | t->len, DMA_TO_DEVICE); | 593 | buf = dummy_buf; |
596 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { | ||
597 | dev_dbg(sdev, "Unable to DMA map %d bytes" | ||
598 | "TX buffer\n", t->len); | ||
599 | return -ENOMEM; | ||
600 | } | ||
601 | } | ||
602 | |||
603 | /* | ||
604 | * If number of words is greater than 65535, then we need | ||
605 | * to configure a 3 dimension transfer. Use the BCNTRLD | ||
606 | * feature to allow for transfers that aren't even multiples | ||
607 | * of 65535 (or any other possible b size) by first transferring | ||
608 | * the remainder amount then grabbing the next N blocks of | ||
609 | * 65535 words. | ||
610 | */ | ||
611 | |||
612 | c = dspi->wcount / (SZ_64K - 1); /* N 65535 Blocks */ | ||
613 | b = dspi->wcount - c * (SZ_64K - 1); /* Remainder */ | ||
614 | if (b) | ||
615 | c++; | ||
616 | else | 594 | else |
617 | b = SZ_64K - 1; | 595 | buf = t->rx_buf; |
618 | 596 | t->rx_dma = dma_map_single(&spi->dev, buf, | |
619 | param.opt = TCINTEN | EDMA_TCC(dma->tx_channel); | 597 | t->len, DMA_FROM_DEVICE); |
620 | param.src = t->tx_buf ? t->tx_dma : tx_reg; | 598 | if (!t->rx_dma) { |
621 | param.a_b_cnt = b << 16 | data_type; | 599 | ret = -EFAULT; |
622 | param.dst = tx_reg; | 600 | goto err_rx_map; |
623 | param.src_dst_bidx = t->tx_buf ? data_type : 0; | ||
624 | param.link_bcntrld = 0xffffffff; | ||
625 | param.src_dst_cidx = t->tx_buf ? data_type : 0; | ||
626 | param.ccnt = c; | ||
627 | edma_write_slot(dma->tx_channel, ¶m); | ||
628 | edma_link(dma->tx_channel, dma->dummy_param_slot); | ||
629 | |||
630 | /* | ||
631 | * Receive DMA setup | ||
632 | * | ||
633 | * If there is receive buffer, use it to receive data. If there | ||
634 | * is none provided, use a temporary receive buffer. Set the | ||
635 | * destination B index to 0 so effectively only one byte is used | ||
636 | * in the temporary buffer (address does not increment). | ||
637 | * | ||
638 | * The source of receive data is the receive data register. The | ||
639 | * source address never increments. | ||
640 | */ | ||
641 | |||
642 | if (t->rx_buf) { | ||
643 | rx_buf = t->rx_buf; | ||
644 | rx_buf_count = t->len; | ||
645 | } else { | ||
646 | rx_buf = dspi->rx_tmp_buf; | ||
647 | rx_buf_count = sizeof(dspi->rx_tmp_buf); | ||
648 | } | 601 | } |
602 | sg_dma_address(&sg_rx) = t->rx_dma; | ||
603 | sg_dma_len(&sg_rx) = t->len; | ||
649 | 604 | ||
650 | t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count, | 605 | sg_init_table(&sg_tx, 1); |
651 | DMA_FROM_DEVICE); | 606 | if (!t->tx_buf) |
652 | if (dma_mapping_error(&spi->dev, t->rx_dma)) { | 607 | buf = dummy_buf; |
653 | dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", | 608 | else |
654 | rx_buf_count); | 609 | buf = (void *)t->tx_buf; |
655 | if (t->tx_buf) | 610 | t->tx_dma = dma_map_single(&spi->dev, buf, |
656 | dma_unmap_single(&spi->dev, t->tx_dma, t->len, | 611 | t->len, DMA_TO_DEVICE); |
657 | DMA_TO_DEVICE); | 612 | if (!t->tx_dma) { |
658 | return -ENOMEM; | 613 | ret = -EFAULT; |
614 | goto err_tx_map; | ||
659 | } | 615 | } |
660 | 616 | sg_dma_address(&sg_tx) = t->tx_dma; | |
661 | param.opt = TCINTEN | EDMA_TCC(dma->rx_channel); | 617 | sg_dma_len(&sg_tx) = t->len; |
662 | param.src = rx_reg; | 618 | |
663 | param.a_b_cnt = b << 16 | data_type; | 619 | rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx, |
664 | param.dst = t->rx_dma; | 620 | &sg_rx, 1, DMA_DEV_TO_MEM, |
665 | param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16; | 621 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
666 | param.link_bcntrld = 0xffffffff; | 622 | if (!rxdesc) |
667 | param.src_dst_cidx = (t->rx_buf ? data_type : 0) << 16; | 623 | goto err_desc; |
668 | param.ccnt = c; | 624 | |
669 | edma_write_slot(dma->rx_channel, ¶m); | 625 | txdesc = dmaengine_prep_slave_sg(dspi->dma_tx, |
626 | &sg_tx, 1, DMA_MEM_TO_DEV, | ||
627 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
628 | if (!txdesc) | ||
629 | goto err_desc; | ||
630 | |||
631 | rxdesc->callback = davinci_spi_dma_rx_callback; | ||
632 | rxdesc->callback_param = (void *)dspi; | ||
633 | txdesc->callback = davinci_spi_dma_tx_callback; | ||
634 | txdesc->callback_param = (void *)dspi; | ||
670 | 635 | ||
671 | if (pdata->cshold_bug) | 636 | if (pdata->cshold_bug) |
672 | iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2); | 637 | iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2); |
673 | 638 | ||
674 | edma_start(dma->rx_channel); | 639 | dmaengine_submit(rxdesc); |
675 | edma_start(dma->tx_channel); | 640 | dmaengine_submit(txdesc); |
641 | |||
642 | dma_async_issue_pending(dspi->dma_rx); | ||
643 | dma_async_issue_pending(dspi->dma_tx); | ||
644 | |||
676 | set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); | 645 | set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); |
677 | } | 646 | } |
678 | 647 | ||
@@ -690,15 +659,13 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) | |||
690 | 659 | ||
691 | clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL); | 660 | clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL); |
692 | if (spicfg->io_type == SPI_IO_TYPE_DMA) { | 661 | if (spicfg->io_type == SPI_IO_TYPE_DMA) { |
693 | |||
694 | if (t->tx_buf) | ||
695 | dma_unmap_single(&spi->dev, t->tx_dma, t->len, | ||
696 | DMA_TO_DEVICE); | ||
697 | |||
698 | dma_unmap_single(&spi->dev, t->rx_dma, rx_buf_count, | ||
699 | DMA_FROM_DEVICE); | ||
700 | |||
701 | clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); | 662 | clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); |
663 | |||
664 | dma_unmap_single(&spi->dev, t->rx_dma, | ||
665 | t->len, DMA_FROM_DEVICE); | ||
666 | dma_unmap_single(&spi->dev, t->tx_dma, | ||
667 | t->len, DMA_TO_DEVICE); | ||
668 | kfree(dummy_buf); | ||
702 | } | 669 | } |
703 | 670 | ||
704 | clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | 671 | clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); |
@@ -716,11 +683,20 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) | |||
716 | } | 683 | } |
717 | 684 | ||
718 | if (dspi->rcount != 0 || dspi->wcount != 0) { | 685 | if (dspi->rcount != 0 || dspi->wcount != 0) { |
719 | dev_err(sdev, "SPI data transfer error\n"); | 686 | dev_err(&spi->dev, "SPI data transfer error\n"); |
720 | return -EIO; | 687 | return -EIO; |
721 | } | 688 | } |
722 | 689 | ||
723 | return t->len; | 690 | return t->len; |
691 | |||
692 | err_desc: | ||
693 | dma_unmap_single(&spi->dev, t->tx_dma, t->len, DMA_TO_DEVICE); | ||
694 | err_tx_map: | ||
695 | dma_unmap_single(&spi->dev, t->rx_dma, t->len, DMA_FROM_DEVICE); | ||
696 | err_rx_map: | ||
697 | kfree(dummy_buf); | ||
698 | err_alloc_dummy_buf: | ||
699 | return ret; | ||
724 | } | 700 | } |
725 | 701 | ||
726 | /** | 702 | /** |
@@ -751,39 +727,33 @@ static irqreturn_t davinci_spi_irq(s32 irq, void *data) | |||
751 | 727 | ||
752 | static int davinci_spi_request_dma(struct davinci_spi *dspi) | 728 | static int davinci_spi_request_dma(struct davinci_spi *dspi) |
753 | { | 729 | { |
730 | dma_cap_mask_t mask; | ||
731 | struct device *sdev = dspi->bitbang.master->dev.parent; | ||
754 | int r; | 732 | int r; |
755 | struct davinci_spi_dma *dma = &dspi->dma; | ||
756 | 733 | ||
757 | r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi, | 734 | dma_cap_zero(mask); |
758 | dma->eventq); | 735 | dma_cap_set(DMA_SLAVE, mask); |
759 | if (r < 0) { | 736 | |
760 | pr_err("Unable to request DMA channel for SPI RX\n"); | 737 | dspi->dma_rx = dma_request_channel(mask, edma_filter_fn, |
761 | r = -EAGAIN; | 738 | &dspi->dma_rx_chnum); |
739 | if (!dspi->dma_rx) { | ||
740 | dev_err(sdev, "request RX DMA channel failed\n"); | ||
741 | r = -ENODEV; | ||
762 | goto rx_dma_failed; | 742 | goto rx_dma_failed; |
763 | } | 743 | } |
764 | 744 | ||
765 | r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi, | 745 | dspi->dma_tx = dma_request_channel(mask, edma_filter_fn, |
766 | dma->eventq); | 746 | &dspi->dma_tx_chnum); |
767 | if (r < 0) { | 747 | if (!dspi->dma_tx) { |
768 | pr_err("Unable to request DMA channel for SPI TX\n"); | 748 | dev_err(sdev, "request TX DMA channel failed\n"); |
769 | r = -EAGAIN; | 749 | r = -ENODEV; |
770 | goto tx_dma_failed; | 750 | goto tx_dma_failed; |
771 | } | 751 | } |
772 | 752 | ||
773 | r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY); | ||
774 | if (r < 0) { | ||
775 | pr_err("Unable to request SPI TX DMA param slot\n"); | ||
776 | r = -EAGAIN; | ||
777 | goto param_failed; | ||
778 | } | ||
779 | dma->dummy_param_slot = r; | ||
780 | edma_link(dma->dummy_param_slot, dma->dummy_param_slot); | ||
781 | |||
782 | return 0; | 753 | return 0; |
783 | param_failed: | 754 | |
784 | edma_free_channel(dma->tx_channel); | ||
785 | tx_dma_failed: | 755 | tx_dma_failed: |
786 | edma_free_channel(dma->rx_channel); | 756 | dma_release_channel(dspi->dma_rx); |
787 | rx_dma_failed: | 757 | rx_dma_failed: |
788 | return r; | 758 | return r; |
789 | } | 759 | } |
@@ -898,9 +868,8 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev) | |||
898 | dspi->bitbang.txrx_bufs = davinci_spi_bufs; | 868 | dspi->bitbang.txrx_bufs = davinci_spi_bufs; |
899 | if (dma_rx_chan != SPI_NO_RESOURCE && | 869 | if (dma_rx_chan != SPI_NO_RESOURCE && |
900 | dma_tx_chan != SPI_NO_RESOURCE) { | 870 | dma_tx_chan != SPI_NO_RESOURCE) { |
901 | dspi->dma.rx_channel = dma_rx_chan; | 871 | dspi->dma_rx_chnum = dma_rx_chan; |
902 | dspi->dma.tx_channel = dma_tx_chan; | 872 | dspi->dma_tx_chnum = dma_tx_chan; |
903 | dspi->dma.eventq = pdata->dma_event_q; | ||
904 | 873 | ||
905 | ret = davinci_spi_request_dma(dspi); | 874 | ret = davinci_spi_request_dma(dspi); |
906 | if (ret) | 875 | if (ret) |
@@ -955,9 +924,8 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev) | |||
955 | return ret; | 924 | return ret; |
956 | 925 | ||
957 | free_dma: | 926 | free_dma: |
958 | edma_free_channel(dspi->dma.tx_channel); | 927 | dma_release_channel(dspi->dma_rx); |
959 | edma_free_channel(dspi->dma.rx_channel); | 928 | dma_release_channel(dspi->dma_tx); |
960 | edma_free_slot(dspi->dma.dummy_param_slot); | ||
961 | free_clk: | 929 | free_clk: |
962 | clk_disable(dspi->clk); | 930 | clk_disable(dspi->clk); |
963 | clk_put(dspi->clk); | 931 | clk_put(dspi->clk); |
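The spi-davinci conversion above replaces the DaVinci-private EDMA calls with the generic dmaengine slave flow: configure the channel, build a one-entry scatterlist over the mapped buffer, prep a slave descriptor, attach a completion callback, submit, then issue pending. A condensed sketch of that sequence for the receive side, with a hypothetical FIFO address and a buffer already DMA-mapped by the caller:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_issue_rx(struct dma_chan *chan, dma_addr_t dma_addr,
			    size_t len, dma_addr_t fifo,
			    dma_async_tx_callback done, void *arg)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo,			/* device data register */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst	= 1,
	};
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;

	dmaengine_slave_config(chan, &cfg);

	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = dma_addr;
	sg_dma_len(&sg) = len;

	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EIO;

	desc->callback = done;		/* runs in tasklet context on completion */
	desc->callback_param = arg;

	dmaengine_submit(desc);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* and kick the engine */
	return 0;
}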
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index 2412e02d7c0f..e1c8c9e919ac 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h | |||
@@ -19,6 +19,10 @@ | |||
19 | * @nr_channels: Number of channels supported by hardware (max 8) | 19 | * @nr_channels: Number of channels supported by hardware (max 8) |
20 | * @is_private: The device channels should be marked as private and not for | 20 | * @is_private: The device channels should be marked as private and not for |
21 | * use by the general purpose DMA channel allocator. | 21 | * use by the general purpose DMA channel allocator. |
22 | * @block_size: Maximum block size supported by the controller | ||
23 | * @nr_masters: Number of AHB masters supported by the controller | ||
24 | * @data_width: Maximum data width supported by hardware per AHB master | ||
25 | * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) | ||
22 | */ | 26 | */ |
23 | struct dw_dma_platform_data { | 27 | struct dw_dma_platform_data { |
24 | unsigned int nr_channels; | 28 | unsigned int nr_channels; |
@@ -29,6 +33,9 @@ struct dw_dma_platform_data { | |||
29 | #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ | 33 | #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ |
30 | #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ | 34 | #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ |
31 | unsigned char chan_priority; | 35 | unsigned char chan_priority; |
36 | unsigned short block_size; | ||
37 | unsigned char nr_masters; | ||
38 | unsigned char data_width[4]; | ||
32 | }; | 39 | }; |
33 | 40 | ||
34 | /* bursts size */ | 41 | /* bursts size */ |
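For the new platform-data fields above, a hedged example of how a board file might describe a hypothetical two-master controller; the data_width[] entries are log2 of the byte width per AHB master, matching the 0 = 8 bits ... 5 = 256 bits encoding in the kernel-doc comment:

#include <linux/dw_dmac.h>

static struct dw_dma_platform_data example_dw_pdata = {
	.nr_channels		= 6,
	.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
	.chan_priority		= CHAN_PRIORITY_ASCENDING,
	.block_size		= 2047U,	/* largest block the IP accepts */
	.nr_masters		= 2,
	.data_width		= { 2, 2, 0, 0 },	/* 32 bits on both masters */
};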
diff --git a/include/linux/edma.h b/include/linux/edma.h new file mode 100644 index 000000000000..a1307e7827e8 --- /dev/null +++ b/include/linux/edma.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * TI EDMA DMA engine driver | ||
3 | * | ||
4 | * Copyright 2012 Texas Instruments | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License as | ||
8 | * published by the Free Software Foundation version 2. | ||
9 | * | ||
10 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
11 | * kind, whether express or implied; without even the implied warranty | ||
12 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | #ifndef __LINUX_EDMA_H | ||
16 | #define __LINUX_EDMA_H | ||
17 | |||
18 | struct dma_chan; | ||
19 | |||
20 | #if defined(CONFIG_TI_EDMA) || defined(CONFIG_TI_EDMA_MODULE) | ||
21 | bool edma_filter_fn(struct dma_chan *, void *); | ||
22 | #else | ||
23 | static inline bool edma_filter_fn(struct dma_chan *chan, void *param) | ||
24 | { | ||
25 | return false; | ||
26 | } | ||
27 | #endif | ||
28 | |||
29 | #endif | ||
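The stub above lets client code call edma_filter_fn() unconditionally; when TI_EDMA is disabled the filter simply never matches. A sketch of the intended use from a client driver, where chnum points at the platform-provided EDMA channel number:

#include <linux/dmaengine.h>
#include <linux/edma.h>

static struct dma_chan *example_get_edma_chan(int *chnum)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* edma_filter_fn() matches each candidate channel against *chnum */
	return dma_request_channel(mask, edma_filter_fn, chnum);
}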
diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h new file mode 100644 index 000000000000..2a330ec9e2af --- /dev/null +++ b/include/linux/platform_data/mmp_dma.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * MMP Platform DMA Management | ||
3 | * | ||
4 | * Copyright (c) 2011 Marvell Semiconductors Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #ifndef MMP_DMA_H | ||
13 | #define MMP_DMA_H | ||
14 | |||
15 | struct mmp_dma_platdata { | ||
16 | int dma_channels; | ||
17 | }; | ||
18 | |||
19 | #endif /* MMP_DMA_H */ | ||
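And a hedged example of wiring the new mmp_dma_platdata from board code; the "mmp-pdma" device name is an assumption matching the driver added in this series:

#include <linux/platform_device.h>
#include <linux/platform_data/mmp_dma.h>

static struct mmp_dma_platdata example_mmp_dma_pdata = {
	.dma_channels	= 16,
};

static struct platform_device example_mmp_pdma_device = {
	.name	= "mmp-pdma",	/* assumed platform driver name */
	.id	= 0,
	.dev	= {
		.platform_data = &example_mmp_dma_pdata,
	},
};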