author     Dan Williams <dan.j.williams@intel.com>   2011-03-11 17:56:58 -0500
committer  Dan Williams <dan.j.williams@intel.com>   2011-03-11 17:56:58 -0500
commit     6c11371dd17c5bd29e3a53cf5263be6ea67c51e4
tree       012aaba103ad8f685cd1b6bb882472b56818b446
parent     f5539af572ffa35545f21f604fb747eae55c8042
parent     0b863b333f529c7ddd8bee58e6696a7254417a05
Merge branch 'for_dan' of git://git.infradead.org/users/vkoul/slave-dma into dmaengine
* 'for_dan' of git://git.infradead.org/users/vkoul/slave-dma:
drivers, pch_dma: Fix warning when CONFIG_PM=n.
dmaengine/dw_dmac fix: use readl & writel instead of __raw_readl & __raw_writel
avr32: at32ap700x: Specify DMA Flow Controller, Src and Dst msize
dw_dmac: Setting Default Burst length for transfers as 16.
dw_dmac: Allow src/dst msize & flow controller to be configured at runtime
dw_dmac: Changing type of src_master and dest_master to u8.
dw_dmac: Pass Channel Priority from platform_data
dw_dmac: Pass Channel Allocation Order from platform_data
dw_dmac: Mark all tx_descriptors with DMA_CTRL_ACK after xfer finish
dw_dmac: Change value of DWC_MAX_COUNT to 4095.
dw_dmac: Adding support for 64 bit access width for memcpy xfers
dw_dmac: Calling dwc_scan_descriptors from dwc_tx_status() after taking lock
dw_dmac: Move single descriptor from dwc->queue to dwc->active_list in dwc_complete_all
dw_dmac: Replace module_init() with subsys_initcall()
dw_dmac: Remove compilation dependency from AVR32 and put on HAVE_CLK
dmaengine: mxs-dma: add dma support for i.MX23/28
pch_dma: set the number of array correctly
pch_dma: fix kernel error issue
 arch/arm/mach-mxs/include/mach/dma.h |  26
 arch/avr32/mach-at32ap/at32ap700x.c  |   9
 drivers/dma/Kconfig                  |  10
 drivers/dma/Makefile                 |   1
 drivers/dma/dw_dmac.c                |  71
 drivers/dma/dw_dmac_regs.h           |  12
 drivers/dma/mxs-dma.c                | 724
 drivers/dma/pch_dma.c                |  35
 include/linux/dw_dmac.h              |  43
 9 files changed, 881 insertions(+), 50 deletions(-)
diff --git a/arch/arm/mach-mxs/include/mach/dma.h b/arch/arm/mach-mxs/include/mach/dma.h
new file mode 100644
index 000000000000..7f4aeeaba8df
--- /dev/null
+++ b/arch/arm/mach-mxs/include/mach/dma.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_MXS_DMA_H__
+#define __MACH_MXS_DMA_H__
+
+struct mxs_dma_data {
+	int chan_irq;
+};
+
+static inline int mxs_dma_is_apbh(struct dma_chan *chan)
+{
+	return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbh");
+}
+
+static inline int mxs_dma_is_apbx(struct dma_chan *chan)
+{
+	return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbx");
+}
+
+#endif /* __MACH_MXS_DMA_H__ */
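
The helpers above key off the platform device name, and the chan_irq in struct mxs_dma_data is consumed through chan->private when a channel is allocated. As a hedged illustration of how a peripheral driver ties these together — the filter function and names below are hypothetical, not part of this series — a client might request an APBH channel like this:

```c
#include <linux/dmaengine.h>
#include <mach/dma.h>

/* Hypothetical filter: accept only APBH channels, pass the IRQ along. */
static bool example_dma_filter(struct dma_chan *chan, void *param)
{
	struct mxs_dma_data *data = param;

	if (!mxs_dma_is_apbh(chan))
		return false;
	chan->private = data;	/* read by mxs_dma_alloc_chan_resources() */
	return true;
}

static struct dma_chan *example_request_channel(int irq)
{
	static struct mxs_dma_data data;
	dma_cap_mask_t mask;

	data.chan_irq = irq;
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, example_dma_filter, &data);
}
```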
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 2747cde8c9a7..b4aaebd8780c 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -2050,6 +2050,9 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data,
 		rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
 		rx_dws->src_master = 0;
 		rx_dws->dst_master = 1;
+		rx_dws->src_msize = DW_DMA_MSIZE_1;
+		rx_dws->dst_msize = DW_DMA_MSIZE_1;
+		rx_dws->fc = DW_DMA_FC_D_P2M;
 	}
 
 	/* Check if DMA slave interface for playback should be configured. */
@@ -2060,6 +2063,9 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data,
 		tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
 		rx_dws->src_master = 0;
 		rx_dws->dst_master = 1;
+		tx_dws->src_msize = DW_DMA_MSIZE_1;
+		tx_dws->dst_msize = DW_DMA_MSIZE_1;
+		tx_dws->fc = DW_DMA_FC_D_M2P;
 	}
 
 	if (platform_device_add_data(pdev, data,
@@ -2134,6 +2140,9 @@ at32_add_device_abdac(unsigned int id, struct atmel_abdac_pdata *data)
 	dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
 	dws->src_master = 0;
 	dws->dst_master = 1;
+	dws->src_msize = DW_DMA_MSIZE_1;
+	dws->dst_msize = DW_DMA_MSIZE_1;
+	dws->fc = DW_DMA_FC_D_M2P;
 
 	if (platform_device_add_data(pdev, data,
 				sizeof(struct atmel_abdac_pdata)))
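
These board-file hunks choose the DMA controller itself as the flow controller — DW_DMA_FC_D_P2M for the AC97 capture channel and DW_DMA_FC_D_M2P for playback and the ABDAC — with single-beat bursts (DW_DMA_MSIZE_1) on both masters. A minimal sketch of the same slave configuration for a hypothetical capture peripheral (the field values mirror the hunks above; everything else is invented):

```c
#include <linux/dw_dmac.h>

/* Hypothetical capture slave: DMA is flow controller, periph-to-mem. */
static struct dw_dma_slave example_rx_dws = {
	.src_master	= 0,
	.dst_master	= 1,
	.src_msize	= DW_DMA_MSIZE_1,
	.dst_msize	= DW_DMA_MSIZE_1,
	.fc		= DW_DMA_FC_D_P2M,
};
```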
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 1c28816152fa..d700895cb40f 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -82,7 +82,7 @@ config INTEL_IOP_ADMA
 
 config DW_DMAC
 	tristate "Synopsys DesignWare AHB DMA support"
-	depends on AVR32
+	depends on HAVE_CLK
 	select DMA_ENGINE
 	default y if CPU_AT32AP7000
 	help
@@ -227,6 +227,14 @@ config IMX_DMA
 	  Support the i.MX DMA engine. This engine is integrated into
 	  Freescale i.MX1/21/27 chips.
 
+config MXS_DMA
+	bool "MXS DMA support"
+	depends on SOC_IMX23 || SOC_IMX28
+	select DMA_ENGINE
+	help
+	  Support the MXS DMA engine. This engine including APBH-DMA
+	  and APBX-DMA is integrated into Freescale i.MX23/28 chips.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 64b21f5cd740..802b55795f8b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
 obj-$(CONFIG_IMX_DMA) += imx-dma.o
+obj-$(CONFIG_MXS_DMA) += mxs-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 08dab3badad2..9c25c7d099e4 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -36,9 +36,11 @@
 		struct dw_dma_slave *__slave = (private);	\
 		int dms = __slave ? __slave->dst_master : 0;	\
 		int sms = __slave ? __slave->src_master : 1;	\
+		u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
+		u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
 								\
-		(DWC_CTLL_DST_MSIZE(0)				\
-		 | DWC_CTLL_SRC_MSIZE(0)			\
+		(DWC_CTLL_DST_MSIZE(dmsize)			\
+		 | DWC_CTLL_SRC_MSIZE(smsize)			\
 		 | DWC_CTLL_LLP_D_EN				\
 		 | DWC_CTLL_LLP_S_EN				\
 		 | DWC_CTLL_DMS(dms)				\
@@ -47,14 +49,13 @@
 
 /*
  * This is configuration-dependent and usually a funny size like 4095.
- * Let's round it down to the nearest power of two.
  *
  * Note that this is a transfer count, i.e. if we transfer 32-bit
- * words, we can do 8192 bytes per descriptor.
+ * words, we can do 16380 bytes per descriptor.
  *
  * This parameter is also system-specific.
  */
-#define DWC_MAX_COUNT	2048U
+#define DWC_MAX_COUNT	4095U
 
 /*
  * Number of descriptors to allocate for each channel. This should be
@@ -87,11 +88,6 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
 	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
 }
 
-static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
-{
-	return list_entry(dwc->queue.next, struct dw_desc, desc_node);
-}
-
 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 {
 	struct dw_desc *desc, *_desc;
@@ -204,6 +200,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
 	dma_async_tx_callback		callback;
 	void				*param;
 	struct dma_async_tx_descriptor	*txd = &desc->txd;
+	struct dw_desc			*child;
 
 	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
 
@@ -212,6 +209,12 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
 	param = txd->callback_param;
 
 	dwc_sync_desc_for_cpu(dwc, desc);
+
+	/* async_tx_ack */
+	list_for_each_entry(child, &desc->tx_list, desc_node)
+		async_tx_ack(&child->txd);
+	async_tx_ack(&desc->txd);
+
 	list_splice_init(&desc->tx_list, &dwc->free_list);
 	list_move(&desc->desc_node, &dwc->free_list);
 
@@ -262,10 +265,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	 * Submit queued descriptors ASAP, i.e. before we go through
 	 * the completed ones.
 	 */
-	if (!list_empty(&dwc->queue))
-		dwc_dostart(dwc, dwc_first_queued(dwc));
 	list_splice_init(&dwc->active_list, &list);
-	list_splice_init(&dwc->queue, &dwc->active_list);
+	if (!list_empty(&dwc->queue)) {
+		list_move(dwc->queue.next, &dwc->active_list);
+		dwc_dostart(dwc, dwc_first_active(dwc));
+	}
 
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		dwc_descriptor_complete(dwc, desc);
@@ -325,8 +329,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		cpu_relax();
 
 	if (!list_empty(&dwc->queue)) {
-		dwc_dostart(dwc, dwc_first_queued(dwc));
-		list_splice_init(&dwc->queue, &dwc->active_list);
+		list_move(dwc->queue.next, &dwc->active_list);
+		dwc_dostart(dwc, dwc_first_active(dwc));
 	}
 }
 
@@ -352,7 +356,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	 */
 	bad_desc = dwc_first_active(dwc);
 	list_del_init(&bad_desc->desc_node);
-	list_splice_init(&dwc->queue, dwc->active_list.prev);
+	list_move(dwc->queue.next, dwc->active_list.prev);
 
 	/* Clear the error flag and try to restart the controller */
 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
@@ -547,8 +551,8 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (list_empty(&dwc->active_list)) {
 		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
 				desc->txd.cookie);
-		dwc_dostart(dwc, desc);
 		list_add_tail(&desc->desc_node, &dwc->active_list);
+		dwc_dostart(dwc, dwc_first_active(dwc));
 	} else {
 		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
 				desc->txd.cookie);
@@ -587,7 +591,9 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	 * We can be a lot more clever here, but this should take care
 	 * of the most common optimization.
 	 */
-	if (!((src | dest | len) & 3))
+	if (!((src | dest | len) & 7))
+		src_width = dst_width = 3;
+	else if (!((src | dest | len) & 3))
 		src_width = dst_width = 2;
 	else if (!((src | dest | len) & 1))
 		src_width = dst_width = 1;
@@ -679,7 +685,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 				| DWC_CTLL_DST_WIDTH(reg_width)
 				| DWC_CTLL_DST_FIX
 				| DWC_CTLL_SRC_INC
-				| DWC_CTLL_FC_M2P);
+				| DWC_CTLL_FC(dws->fc));
 		reg = dws->tx_reg;
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc	*desc;
@@ -724,7 +730,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 				| DWC_CTLL_SRC_WIDTH(reg_width)
 				| DWC_CTLL_DST_INC
 				| DWC_CTLL_SRC_FIX
-				| DWC_CTLL_FC_P2M);
+				| DWC_CTLL_FC(dws->fc));
 
 		reg = dws->rx_reg;
 		for_each_sg(sgl, sg, sg_len, i) {
@@ -840,7 +846,9 @@ dwc_tx_status(struct dma_chan *chan,
 
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
 	if (ret != DMA_SUCCESS) {
+		spin_lock_bh(&dwc->lock);
 		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+		spin_unlock_bh(&dwc->lock);
 
 		last_complete = dwc->completed;
 		last_used = chan->cookie;
@@ -895,8 +903,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
 
 		cfghi = dws->cfg_hi;
-		cfglo = dws->cfg_lo;
+		cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
 	}
+
+	cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
+
 	channel_writel(dwc, CFG_LO, cfglo);
 	channel_writel(dwc, CFG_HI, cfghi);
 
@@ -1137,7 +1148,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 				| DWC_CTLL_SRC_WIDTH(reg_width)
 				| DWC_CTLL_DST_FIX
 				| DWC_CTLL_SRC_INC
-				| DWC_CTLL_FC_M2P
+				| DWC_CTLL_FC(dws->fc)
 				| DWC_CTLL_INT_EN);
 		break;
 	case DMA_FROM_DEVICE:
@@ -1148,7 +1159,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 				| DWC_CTLL_DST_WIDTH(reg_width)
 				| DWC_CTLL_DST_INC
 				| DWC_CTLL_SRC_FIX
-				| DWC_CTLL_FC_P2M
+				| DWC_CTLL_FC(dws->fc)
 				| DWC_CTLL_INT_EN);
 		break;
 	default:
@@ -1313,7 +1324,17 @@ static int __init dw_probe(struct platform_device *pdev)
 		dwc->chan.device = &dw->dma;
 		dwc->chan.cookie = dwc->completed = 1;
 		dwc->chan.chan_id = i;
-		list_add_tail(&dwc->chan.device_node, &dw->dma.channels);
+		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
+			list_add_tail(&dwc->chan.device_node,
+					&dw->dma.channels);
+		else
+			list_add(&dwc->chan.device_node, &dw->dma.channels);
+
+		/* 7 is highest priority & 0 is lowest. */
+		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
+			dwc->priority = 7 - i;
+		else
+			dwc->priority = i;
 
 		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
 		spin_lock_init(&dwc->lock);
@@ -1455,7 +1476,7 @@ static int __init dw_init(void)
 {
 	return platform_driver_probe(&dw_driver, dw_probe);
 }
-module_init(dw_init);
+subsys_initcall(dw_init);
 
 static void __exit dw_exit(void)
 {
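
DWC_MAX_COUNT now matches the 12-bit per-descriptor transfer-count field (cf. DWC_CTLH_BLOCK_TS_MASK, 0x00000fff, in dw_dmac_regs.h): 4095 transfers of 32-bit words is 4095 × 4 = 16380 bytes, the figure quoted in the updated comment. A small sketch of that arithmetic (the helper name is invented for illustration):

```c
#define DWC_MAX_COUNT	4095U	/* max transfers per block (12-bit field) */

/* Bytes per descriptor = transfer count * transfer width.
 * For 32-bit (4-byte) words: 4095 * 4 = 16380 bytes. */
static unsigned int example_max_bytes_per_desc(unsigned int width_bytes)
{
	return DWC_MAX_COUNT * width_bytes;	/* 16380 when width_bytes == 4 */
}
```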
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index d9a939f67f46..720f821527f8 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -86,6 +86,7 @@ struct dw_dma_regs {
 #define DWC_CTLL_SRC_MSIZE(n)	((n)<<14)
 #define DWC_CTLL_S_GATH_EN	(1 << 17)	/* src gather, !FIX */
 #define DWC_CTLL_D_SCAT_EN	(1 << 18)	/* dst scatter, !FIX */
+#define DWC_CTLL_FC(n)		((n) << 20)
 #define DWC_CTLL_FC_M2M		(0 << 20)	/* mem-to-mem */
 #define DWC_CTLL_FC_M2P		(1 << 20)	/* mem-to-periph */
 #define DWC_CTLL_FC_P2M		(2 << 20)	/* periph-to-mem */
@@ -101,6 +102,8 @@ struct dw_dma_regs {
 #define DWC_CTLH_BLOCK_TS_MASK	0x00000fff
 
 /* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
+#define DWC_CFGL_CH_PRIOR_MASK	(0x7 << 5)	/* priority mask */
+#define DWC_CFGL_CH_PRIOR(x)	((x) << 5)	/* priority */
 #define DWC_CFGL_CH_SUSP	(1 << 8)	/* pause xfer */
 #define DWC_CFGL_FIFO_EMPTY	(1 << 9)	/* pause xfer */
 #define DWC_CFGL_HS_DST		(1 << 10)	/* handshake w/dst */
@@ -134,6 +137,7 @@ struct dw_dma_chan {
 	struct dma_chan		chan;
 	void __iomem		*ch_regs;
 	u8			mask;
+	u8			priority;
 
 	spinlock_t		lock;
 
@@ -155,9 +159,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
 }
 
 #define channel_readl(dwc, name) \
-	__raw_readl(&(__dwc_regs(dwc)->name))
+	readl(&(__dwc_regs(dwc)->name))
 #define channel_writel(dwc, name, val) \
-	__raw_writel((val), &(__dwc_regs(dwc)->name))
+	writel((val), &(__dwc_regs(dwc)->name))
 
 static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
 {
@@ -181,9 +185,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
 }
 
 #define dma_readl(dw, name) \
-	__raw_readl(&(__dw_regs(dw)->name))
+	readl(&(__dw_regs(dw)->name))
 #define dma_writel(dw, name, val) \
-	__raw_writel((val), &(__dw_regs(dw)->name))
+	writel((val), &(__dw_regs(dw)->name))
 
 #define channel_set_bit(dw, reg, mask) \
 	dma_writel(dw, reg, ((mask) << 8) | (mask))
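
The new DWC_CTLL_FC(n) shifts an enum dw_dma_fc value into the CTL_LO flow-control field, generalizing the fixed macros next to it: DWC_CTLL_FC(DW_DMA_FC_D_P2M) expands to (2 << 20), identical to DWC_CTLL_FC_P2M. A compile-time check, purely illustrative (dw_dmac_regs.h is a driver-private header, so this would only build inside the driver):

```c
#include <linux/kernel.h>
#include <linux/dw_dmac.h>
#include "dw_dmac_regs.h"

/* Illustrative only: the generic FC macro reproduces the fixed ones. */
static inline void example_fc_equivalence(void)
{
	BUILD_BUG_ON(DWC_CTLL_FC(DW_DMA_FC_D_M2P) != DWC_CTLL_FC_M2P);
	BUILD_BUG_ON(DWC_CTLL_FC(DW_DMA_FC_D_P2M) != DWC_CTLL_FC_P2M);
}
```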
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
new file mode 100644
index 000000000000..88aad4f54002
--- /dev/null
+++ b/drivers/dma/mxs-dma.c
@@ -0,0 +1,724 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Refer to drivers/dma/imx-sdma.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+
+#include <asm/irq.h>
+#include <mach/mxs.h>
+#include <mach/dma.h>
+#include <mach/common.h>
+
+/*
+ * NOTE: The term "PIO" throughout the mxs-dma implementation means
+ * PIO mode of mxs apbh-dma and apbx-dma.  With this working mode,
+ * dma can program the controller registers of peripheral devices.
+ */
+
+#define MXS_DMA_APBH		0
+#define MXS_DMA_APBX		1
+#define dma_is_apbh()		(mxs_dma->dev_id == MXS_DMA_APBH)
+
+#define APBH_VERSION_LATEST	3
+#define apbh_is_old()		(mxs_dma->version < APBH_VERSION_LATEST)
+
+#define HW_APBHX_CTRL0				0x000
+#define BM_APBH_CTRL0_APB_BURST8_EN		(1 << 29)
+#define BM_APBH_CTRL0_APB_BURST_EN		(1 << 28)
+#define BP_APBH_CTRL0_CLKGATE_CHANNEL		8
+#define BP_APBH_CTRL0_RESET_CHANNEL		16
+#define HW_APBHX_CTRL1				0x010
+#define HW_APBHX_CTRL2				0x020
+#define HW_APBHX_CHANNEL_CTRL			0x030
+#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL	16
+#define HW_APBH_VERSION				(cpu_is_mx23() ? 0x3f0 : 0x800)
+#define HW_APBX_VERSION				0x800
+#define BP_APBHX_VERSION_MAJOR			24
+#define HW_APBHX_CHn_NXTCMDAR(n) \
+	(((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70)
+#define HW_APBHX_CHn_SEMA(n) \
+	(((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70)
+
+/*
+ * ccw bits definitions
+ *
+ * COMMAND:		0..1	(2)
+ * CHAIN:		2	(1)
+ * IRQ:			3	(1)
+ * NAND_LOCK:		4	(1) - not implemented
+ * NAND_WAIT4READY:	5	(1) - not implemented
+ * DEC_SEM:		6	(1)
+ * WAIT4END:		7	(1)
+ * HALT_ON_TERMINATE:	8	(1)
+ * TERMINATE_FLUSH:	9	(1)
+ * RESERVED:		10..11	(2)
+ * PIO_NUM:		12..15	(4)
+ */
+#define BP_CCW_COMMAND		0
+#define BM_CCW_COMMAND		(3 << 0)
+#define CCW_CHAIN		(1 << 2)
+#define CCW_IRQ			(1 << 3)
+#define CCW_DEC_SEM		(1 << 6)
+#define CCW_WAIT4END		(1 << 7)
+#define CCW_HALT_ON_TERM	(1 << 8)
+#define CCW_TERM_FLUSH		(1 << 9)
+#define BP_CCW_PIO_NUM		12
+#define BM_CCW_PIO_NUM		(0xf << 12)
+
+#define BF_CCW(value, field)	(((value) << BP_CCW_##field) & BM_CCW_##field)
+
+#define MXS_DMA_CMD_NO_XFER	0
+#define MXS_DMA_CMD_WRITE	1
+#define MXS_DMA_CMD_READ	2
+#define MXS_DMA_CMD_DMA_SENSE	3	/* not implemented */
+
+struct mxs_dma_ccw {
+	u32		next;
+	u16		bits;
+	u16		xfer_bytes;
+#define MAX_XFER_BYTES	0xff00
+	u32		bufaddr;
+#define MXS_PIO_WORDS	16
+	u32		pio_words[MXS_PIO_WORDS];
+};
+
+#define NUM_CCW	(int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw))
+
+struct mxs_dma_chan {
+	struct mxs_dma_engine		*mxs_dma;
+	struct dma_chan			chan;
+	struct dma_async_tx_descriptor	desc;
+	struct tasklet_struct		tasklet;
+	int				chan_irq;
+	struct mxs_dma_ccw		*ccw;
+	dma_addr_t			ccw_phys;
+	dma_cookie_t			last_completed;
+	enum dma_status			status;
+	unsigned int			flags;
+#define MXS_DMA_SG_LOOP			(1 << 0)
+};
+
+#define MXS_DMA_CHANNELS		16
+#define MXS_DMA_CHANNELS_MASK		0xffff
+
+struct mxs_dma_engine {
+	int				dev_id;
+	unsigned int			version;
+	void __iomem			*base;
+	struct clk			*clk;
+	struct dma_device		dma_device;
+	struct device_dma_parameters	dma_parms;
+	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
+};
+
+static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
+{
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+	int chan_id = mxs_chan->chan.chan_id;
+
+	if (dma_is_apbh() && apbh_is_old())
+		writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
+			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+	else
+		writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
+			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+}
+
+static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
+{
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+	int chan_id = mxs_chan->chan.chan_id;
+
+	/* set cmd_addr up */
+	writel(mxs_chan->ccw_phys,
+		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
+
+	/* enable apbh channel clock */
+	if (dma_is_apbh()) {
+		if (apbh_is_old())
+			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
+				mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
+		else
+			writel(1 << chan_id,
+				mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
+	}
+
+	/* write 1 to SEMA to kick off the channel */
+	writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
+}
+
+static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
+{
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+	int chan_id = mxs_chan->chan.chan_id;
+
+	/* disable apbh channel clock */
+	if (dma_is_apbh()) {
+		if (apbh_is_old())
+			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
+				mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+		else
+			writel(1 << chan_id,
+				mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+	}
+
+	mxs_chan->status = DMA_SUCCESS;
+}
+
+static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
+{
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+	int chan_id = mxs_chan->chan.chan_id;
+
+	/* freeze the channel */
+	if (dma_is_apbh() && apbh_is_old())
+		writel(1 << chan_id,
+			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+	else
+		writel(1 << chan_id,
+			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+
+	mxs_chan->status = DMA_PAUSED;
+}
+
+static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
+{
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+	int chan_id = mxs_chan->chan.chan_id;
+
+	/* unfreeze the channel */
+	if (dma_is_apbh() && apbh_is_old())
+		writel(1 << chan_id,
+			mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
+	else
+		writel(1 << chan_id,
+			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR);
+
+	mxs_chan->status = DMA_IN_PROGRESS;
+}
+
+static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan)
+{
+	dma_cookie_t cookie = mxs_chan->chan.cookie;
+
+	if (++cookie < 0)
+		cookie = 1;
+
+	mxs_chan->chan.cookie = cookie;
+	mxs_chan->desc.cookie = cookie;
+
+	return cookie;
+}
+
+static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct mxs_dma_chan, chan);
+}
+
+static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan);
+
+	mxs_dma_enable_chan(mxs_chan);
+
+	return mxs_dma_assign_cookie(mxs_chan);
+}
+
+static void mxs_dma_tasklet(unsigned long data)
+{
+	struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
+
+	if (mxs_chan->desc.callback)
+		mxs_chan->desc.callback(mxs_chan->desc.callback_param);
+}
+
+static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
+{
+	struct mxs_dma_engine *mxs_dma = dev_id;
+	u32 stat1, stat2;
+
+	/* completion status */
+	stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
+	stat1 &= MXS_DMA_CHANNELS_MASK;
+	writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR);
+
+	/* error status */
+	stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
+	writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR);
+
+	/*
+	 * When both the completion and error-of-termination bits are set
+	 * at the same time, we do not take it as an error.  IOW, it only
+	 * becomes an error we need to handle here in case it's either
+	 * (1) a bus error or (2) a termination error with no completion.
+	 */
+	stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
+		(~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */
+
+	/* combine error and completion status for checking */
+	stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;
+	while (stat1) {
+		int channel = fls(stat1) - 1;
+		struct mxs_dma_chan *mxs_chan =
+			&mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];
+
+		if (channel >= MXS_DMA_CHANNELS) {
+			dev_dbg(mxs_dma->dma_device.dev,
+				"%s: error in channel %d\n", __func__,
+				channel - MXS_DMA_CHANNELS);
+			mxs_chan->status = DMA_ERROR;
+			mxs_dma_reset_chan(mxs_chan);
+		} else {
+			if (mxs_chan->flags & MXS_DMA_SG_LOOP)
+				mxs_chan->status = DMA_IN_PROGRESS;
+			else
+				mxs_chan->status = DMA_SUCCESS;
+		}
+
+		stat1 &= ~(1 << channel);
+
+		if (mxs_chan->status == DMA_SUCCESS)
+			mxs_chan->last_completed = mxs_chan->desc.cookie;
+
+		/* schedule tasklet on this channel */
+		tasklet_schedule(&mxs_chan->tasklet);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+	struct mxs_dma_data *data = chan->private;
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+	int ret;
+
+	if (!data)
+		return -EINVAL;
+
+	mxs_chan->chan_irq = data->chan_irq;
+
+	mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
+				&mxs_chan->ccw_phys, GFP_KERNEL);
+	if (!mxs_chan->ccw) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	memset(mxs_chan->ccw, 0, PAGE_SIZE);
+
+	ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
+				0, "mxs-dma", mxs_dma);
+	if (ret)
+		goto err_irq;
+
+	ret = clk_enable(mxs_dma->clk);
+	if (ret)
+		goto err_clk;
+
+	mxs_dma_reset_chan(mxs_chan);
+
+	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
+	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
+
+	/* the descriptor is ready */
+	async_tx_ack(&mxs_chan->desc);
+
+	return 0;
+
+err_clk:
+	free_irq(mxs_chan->chan_irq, mxs_dma);
+err_irq:
+	dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
+			mxs_chan->ccw, mxs_chan->ccw_phys);
+err_alloc:
+	return ret;
+}
+
+static void mxs_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+
+	mxs_dma_disable_chan(mxs_chan);
+
+	free_irq(mxs_chan->chan_irq, mxs_dma);
+
+	dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
+			mxs_chan->ccw, mxs_chan->ccw_phys);
+
+	clk_disable(mxs_dma->clk);
+}
+
+static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_data_direction direction,
+		unsigned long append)
+{
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+	struct mxs_dma_ccw *ccw;
+	struct scatterlist *sg;
+	int i, j;
+	u32 *pio;
+	static int idx;
+
+	if (mxs_chan->status == DMA_IN_PROGRESS && !append)
+		return NULL;
+
+	if (sg_len + (append ? idx : 0) > NUM_CCW) {
+		dev_err(mxs_dma->dma_device.dev,
+			"maximum number of sg exceeded: %d > %d\n",
+			sg_len, NUM_CCW);
+		goto err_out;
+	}
+
+	mxs_chan->status = DMA_IN_PROGRESS;
+	mxs_chan->flags = 0;
+
+	/*
+	 * If the sg is prepared with append flag set, the sg
+	 * will be appended to the last prepared sg.
+	 */
+	if (append) {
+		BUG_ON(idx < 1);
+		ccw = &mxs_chan->ccw[idx - 1];
+		ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
+		ccw->bits |= CCW_CHAIN;
+		ccw->bits &= ~CCW_IRQ;
+		ccw->bits &= ~CCW_DEC_SEM;
+		ccw->bits &= ~CCW_WAIT4END;
+	} else {
+		idx = 0;
+	}
+
+	if (direction == DMA_NONE) {
+		ccw = &mxs_chan->ccw[idx++];
+		pio = (u32 *) sgl;
+
+		for (j = 0; j < sg_len;)
+			ccw->pio_words[j++] = *pio++;
+
+		ccw->bits = 0;
+		ccw->bits |= CCW_IRQ;
+		ccw->bits |= CCW_DEC_SEM;
+		ccw->bits |= CCW_WAIT4END;
+		ccw->bits |= CCW_HALT_ON_TERM;
+		ccw->bits |= CCW_TERM_FLUSH;
+		ccw->bits |= BF_CCW(sg_len, PIO_NUM);
+		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
+	} else {
+		for_each_sg(sgl, sg, sg_len, i) {
+			if (sg->length > MAX_XFER_BYTES) {
+				dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
+						sg->length, MAX_XFER_BYTES);
+				goto err_out;
+			}
+
+			ccw = &mxs_chan->ccw[idx++];
+
+			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
+			ccw->bufaddr = sg->dma_address;
+			ccw->xfer_bytes = sg->length;
+
+			ccw->bits = 0;
+			ccw->bits |= CCW_CHAIN;
+			ccw->bits |= CCW_HALT_ON_TERM;
+			ccw->bits |= CCW_TERM_FLUSH;
+			ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+					MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
+					COMMAND);
+
+			if (i + 1 == sg_len) {
+				ccw->bits &= ~CCW_CHAIN;
+				ccw->bits |= CCW_IRQ;
+				ccw->bits |= CCW_DEC_SEM;
+				ccw->bits |= CCW_WAIT4END;
+			}
+		}
+	}
+
+	return &mxs_chan->desc;
+
+err_out:
+	mxs_chan->status = DMA_ERROR;
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+		size_t period_len, enum dma_data_direction direction)
+{
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+	int num_periods = buf_len / period_len;
+	int i = 0, buf = 0;
+
+	if (mxs_chan->status == DMA_IN_PROGRESS)
+		return NULL;
+
+	mxs_chan->status = DMA_IN_PROGRESS;
+	mxs_chan->flags |= MXS_DMA_SG_LOOP;
+
+	if (num_periods > NUM_CCW) {
+		dev_err(mxs_dma->dma_device.dev,
+			"maximum number of sg exceeded: %d > %d\n",
+			num_periods, NUM_CCW);
+		goto err_out;
+	}
+
+	if (period_len > MAX_XFER_BYTES) {
+		dev_err(mxs_dma->dma_device.dev,
+			"maximum period size exceeded: %d > %d\n",
+			period_len, MAX_XFER_BYTES);
+		goto err_out;
+	}
+
+	while (buf < buf_len) {
+		struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];
+
+		if (i + 1 == num_periods)
+			ccw->next = mxs_chan->ccw_phys;
+		else
+			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);
+
+		ccw->bufaddr = dma_addr;
+		ccw->xfer_bytes = period_len;
+
+		ccw->bits = 0;
+		ccw->bits |= CCW_CHAIN;
+		ccw->bits |= CCW_IRQ;
+		ccw->bits |= CCW_HALT_ON_TERM;
+		ccw->bits |= CCW_TERM_FLUSH;
+		ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+				MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
+
+		dma_addr += period_len;
+		buf += period_len;
+
+		i++;
+	}
+
+	return &mxs_chan->desc;
+
+err_out:
+	mxs_chan->status = DMA_ERROR;
+	return NULL;
+}
+
+static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+		unsigned long arg)
+{
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+	int ret = 0;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		mxs_dma_disable_chan(mxs_chan);
+		break;
+	case DMA_PAUSE:
+		mxs_dma_pause_chan(mxs_chan);
+		break;
+	case DMA_RESUME:
+		mxs_dma_resume_chan(mxs_chan);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
+			dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+	dma_cookie_t last_used;
+
+	last_used = chan->cookie;
+	dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0);
+
+	return mxs_chan->status;
+}
+
+static void mxs_dma_issue_pending(struct dma_chan *chan)
+{
+	/*
+	 * Nothing to do. We only have a single descriptor.
+	 */
+}
+
+static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
+{
+	int ret;
+
+	ret = clk_enable(mxs_dma->clk);
+	if (ret)
+		goto err_out;
+
+	ret = mxs_reset_block(mxs_dma->base);
+	if (ret)
+		goto err_out;
+
+	/* only major version matters */
+	mxs_dma->version = readl(mxs_dma->base +
+				((mxs_dma->dev_id == MXS_DMA_APBX) ?
+				HW_APBX_VERSION : HW_APBH_VERSION)) >>
+				BP_APBHX_VERSION_MAJOR;
+
+	/* enable apbh burst */
+	if (dma_is_apbh()) {
+		writel(BM_APBH_CTRL0_APB_BURST_EN,
+			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+		writel(BM_APBH_CTRL0_APB_BURST8_EN,
+			mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+	}
+
+	/* enable irq for all the channels */
+	writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
+		mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
+
+	clk_disable(mxs_dma->clk);
+
+	return 0;
+
+err_out:
+	return ret;
+}
+
+static int __init mxs_dma_probe(struct platform_device *pdev)
+{
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(pdev);
+	struct mxs_dma_engine *mxs_dma;
+	struct resource *iores;
+	int ret, i;
+
+	mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL);
+	if (!mxs_dma)
+		return -ENOMEM;
+
+	mxs_dma->dev_id = id_entry->driver_data;
+
+	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!request_mem_region(iores->start, resource_size(iores),
+				pdev->name)) {
+		ret = -EBUSY;
+		goto err_request_region;
+	}
+
+	mxs_dma->base = ioremap(iores->start, resource_size(iores));
+	if (!mxs_dma->base) {
+		ret = -ENOMEM;
+		goto err_ioremap;
+	}
+
+	mxs_dma->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(mxs_dma->clk)) {
+		ret = PTR_ERR(mxs_dma->clk);
+		goto err_clk;
+	}
+
+	dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);
+
+	INIT_LIST_HEAD(&mxs_dma->dma_device.channels);
+
+	/* Initialize channel parameters */
+	for (i = 0; i < MXS_DMA_CHANNELS; i++) {
+		struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];
+
+		mxs_chan->mxs_dma = mxs_dma;
+		mxs_chan->chan.device = &mxs_dma->dma_device;
+
+		tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
+			     (unsigned long) mxs_chan);
+
+
+		/* Add the channel to mxs_chan list */
+		list_add_tail(&mxs_chan->chan.device_node,
+			&mxs_dma->dma_device.channels);
+	}
+
+	ret = mxs_dma_init(mxs_dma);
+	if (ret)
+		goto err_init;
+
+	mxs_dma->dma_device.dev = &pdev->dev;
+
+	/* mxs_dma gets 65535 bytes maximum sg size */
+	mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
+	dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);
+
+	mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
+	mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
+	mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
+	mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
+	mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
+	mxs_dma->dma_device.device_control = mxs_dma_control;
+	mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending;
+
+	ret = dma_async_device_register(&mxs_dma->dma_device);
+	if (ret) {
+		dev_err(mxs_dma->dma_device.dev, "unable to register\n");
+		goto err_init;
+	}
+
+	dev_info(mxs_dma->dma_device.dev, "initialized\n");
+
+	return 0;
+
+err_init:
+	clk_put(mxs_dma->clk);
+err_clk:
+	iounmap(mxs_dma->base);
+err_ioremap:
+	release_mem_region(iores->start, resource_size(iores));
+err_request_region:
+	kfree(mxs_dma);
+	return ret;
+}
+
+static struct platform_device_id mxs_dma_type[] = {
+	{
+		.name = "mxs-dma-apbh",
+		.driver_data = MXS_DMA_APBH,
+	}, {
+		.name = "mxs-dma-apbx",
+		.driver_data = MXS_DMA_APBX,
+	}
+};
+
+static struct platform_driver mxs_dma_driver = {
+	.driver		= {
+		.name	= "mxs-dma",
+	},
+	.id_table	= mxs_dma_type,
+};
+
+static int __init mxs_dma_module_init(void)
+{
+	return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
+}
+subsys_initcall(mxs_dma_module_init);
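
As a usage sketch of the new provider — names and error handling here are invented for illustration, not taken from this series — a client that obtained a channel as in the earlier filter example could set up a looping (cyclic) transfer through the standard dmaengine hooks this driver registers:

```c
#include <linux/dmaengine.h>

/* Hypothetical cyclic setup: buf_len/period_len periods looping over one
 * coherent buffer. The driver bounds this at NUM_CCW periods and
 * MAX_XFER_BYTES (0xff00) per period. */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;

	desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						    period_len,
						    DMA_FROM_DEVICE);
	if (!desc)
		return -EINVAL;

	desc->callback = NULL;		/* or a per-period handler */
	desc->tx_submit(desc);		/* mxs_dma_tx_submit() kicks the channel */
	return 0;
}
```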
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 1c38418ae61f..8d8fef1480a9 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -82,7 +82,7 @@ struct pch_dma_regs {
 	u32	dma_sts1;
 	u32	reserved2;
 	u32	reserved3;
-	struct pch_dma_desc_regs desc[0];
+	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
 };
 
 struct pch_dma_desc {
@@ -124,7 +124,7 @@ struct pch_dma {
 	struct pci_pool		*pool;
 	struct pch_dma_regs	regs;
 	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
-	struct pch_dma_chan	channels[0];
+	struct pch_dma_chan	channels[MAX_CHAN_NR];
 };
 
 #define PCH_DMA_CTL0	0x00
@@ -366,7 +366,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
 	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
 	dma_cookie_t cookie;
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock(&pd_chan->lock);
 	cookie = pdc_assign_cookie(pd_chan, desc);
 
 	if (list_empty(&pd_chan->active_list)) {
@@ -376,7 +376,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
 		list_add_tail(&desc->desc_node, &pd_chan->queue);
 	}
 
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock(&pd_chan->lock);
 	return 0;
 }
 
@@ -386,7 +386,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
 	struct pch_dma *pd = to_pd(chan->device);
 	dma_addr_t addr;
 
-	desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr);
+	desc = pci_pool_alloc(pd->pool, flags, &addr);
 	if (desc) {
 		memset(desc, 0, sizeof(struct pch_dma_desc));
 		INIT_LIST_HEAD(&desc->tx_list);
@@ -405,7 +405,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
 	struct pch_dma_desc *ret = NULL;
 	int i;
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock(&pd_chan->lock);
 	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
 		i++;
 		if (async_tx_test_ack(&desc->txd)) {
@@ -415,15 +415,15 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
 		}
 		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
 	}
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock(&pd_chan->lock);
 	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
 
 	if (!ret) {
 		ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
 		if (ret) {
-			spin_lock_bh(&pd_chan->lock);
+			spin_lock(&pd_chan->lock);
 			pd_chan->descs_allocated++;
-			spin_unlock_bh(&pd_chan->lock);
+			spin_unlock(&pd_chan->lock);
 		} else {
 			dev_err(chan2dev(&pd_chan->chan),
 				"failed to alloc desc\n");
@@ -437,10 +437,10 @@ static void pdc_desc_put(struct pch_dma_chan *pd_chan,
 			 struct pch_dma_desc *desc)
 {
 	if (desc) {
-		spin_lock_bh(&pd_chan->lock);
+		spin_lock(&pd_chan->lock);
 		list_splice_init(&desc->tx_list, &pd_chan->free_list);
 		list_add(&desc->desc_node, &pd_chan->free_list);
-		spin_unlock_bh(&pd_chan->lock);
+		spin_unlock(&pd_chan->lock);
 	}
 }
 
@@ -530,9 +530,9 @@ static void pd_issue_pending(struct dma_chan *chan)
 	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 
 	if (pdc_is_idle(pd_chan)) {
-		spin_lock_bh(&pd_chan->lock);
+		spin_lock(&pd_chan->lock);
 		pdc_advance_work(pd_chan);
-		spin_unlock_bh(&pd_chan->lock);
+		spin_unlock(&pd_chan->lock);
 	}
 }
 
@@ -592,7 +592,6 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
 			goto err_desc_get;
 		}
 
-
 		if (!first) {
 			first = desc;
 		} else {
@@ -641,13 +640,13 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 	spin_unlock_bh(&pd_chan->lock);
 
-
 	return 0;
 }
 
 static void pdc_tasklet(unsigned long data)
 {
 	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
+	unsigned long flags;
 
 	if (!pdc_is_idle(pd_chan)) {
 		dev_err(chan2dev(&pd_chan->chan),
@@ -655,12 +654,12 @@ static void pdc_tasklet(unsigned long data)
 		return;
 	}
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock_irqsave(&pd_chan->lock, flags);
 	if (test_and_clear_bit(0, &pd_chan->err_status))
 		pdc_handle_error(pd_chan);
 	else
 		pdc_advance_work(pd_chan);
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock_irqrestore(&pd_chan->lock, flags);
 }
 
 static irqreturn_t pd_irq(int irq, void *devid)
@@ -694,6 +693,7 @@ static irqreturn_t pd_irq(int irq, void *devid)
 	return ret;
 }
 
+#ifdef CONFIG_PM
 static void pch_dma_save_regs(struct pch_dma *pd)
 {
 	struct pch_dma_chan *pd_chan;
@@ -771,6 +771,7 @@ static int pch_dma_resume(struct pci_dev *pdev)
 
 	return 0;
 }
+#endif
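
A plausible reading of the pch_dma locking changes: spin_unlock_bh() re-enables softirqs and triggers the kernel/softirq.c warning when called in a context that already has interrupts disabled, so the paths reachable from such contexts drop the _bh variants, and the tasklet — which shares state with the hard-IRQ handler — takes the lock with the irqsave variant instead. A generic, hedged sketch of the tasklet-side pattern (illustrative names only, not driver code):

```c
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_shared_state;

/* Tasklet side: it shares example_shared_state with a hard-IRQ handler,
 * so it must keep that IRQ out while holding the lock; spin_lock_bh()
 * would only exclude other softirqs. */
static void example_tasklet(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_shared_state++;
	spin_unlock_irqrestore(&example_lock, flags);
}
```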
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index deec66b37180..6998d9376ef9 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -22,6 +22,12 @@
 struct dw_dma_platform_data {
 	unsigned int	nr_channels;
 	bool		is_private;
+#define CHAN_ALLOCATION_ASCENDING	0	/* zero to seven */
+#define CHAN_ALLOCATION_DESCENDING	1	/* seven to zero */
+	unsigned char	chan_allocation_order;
+#define CHAN_PRIORITY_ASCENDING		0	/* chan0 highest */
+#define CHAN_PRIORITY_DESCENDING	1	/* chan7 highest */
+	unsigned char	chan_priority;
 };
 
 /**
@@ -36,6 +42,30 @@ enum dw_dma_slave_width {
 	DW_DMA_SLAVE_WIDTH_32BIT,
 };
 
+/* burst size */
+enum dw_dma_msize {
+	DW_DMA_MSIZE_1,
+	DW_DMA_MSIZE_4,
+	DW_DMA_MSIZE_8,
+	DW_DMA_MSIZE_16,
+	DW_DMA_MSIZE_32,
+	DW_DMA_MSIZE_64,
+	DW_DMA_MSIZE_128,
+	DW_DMA_MSIZE_256,
+};
+
+/* flow controller */
+enum dw_dma_fc {
+	DW_DMA_FC_D_M2M,
+	DW_DMA_FC_D_M2P,
+	DW_DMA_FC_D_P2M,
+	DW_DMA_FC_D_P2P,
+	DW_DMA_FC_P_P2M,
+	DW_DMA_FC_SP_P2P,
+	DW_DMA_FC_P_M2P,
+	DW_DMA_FC_DP_P2P,
+};
+
 /**
  * struct dw_dma_slave - Controller-specific information about a slave
  *
@@ -47,6 +77,11 @@ enum dw_dma_slave_width {
  * @reg_width: peripheral register width
  * @cfg_hi: Platform-specific initializer for the CFG_HI register
  * @cfg_lo: Platform-specific initializer for the CFG_LO register
+ * @src_master: src master for transfers on allocated channel.
+ * @dst_master: dest master for transfers on allocated channel.
+ * @src_msize: src burst size.
+ * @dst_msize: dest burst size.
+ * @fc: flow controller for DMA transfer
  */
 struct dw_dma_slave {
 	struct device		*dma_dev;
@@ -55,8 +90,11 @@ struct dw_dma_slave {
 	enum dw_dma_slave_width	reg_width;
 	u32			cfg_hi;
 	u32			cfg_lo;
-	int			src_master;
-	int			dst_master;
+	u8			src_master;
+	u8			dst_master;
+	u8			src_msize;
+	u8			dst_msize;
+	u8			fc;
 };
 
 /* Platform-configurable bits in CFG_HI */
@@ -67,7 +105,6 @@ struct dw_dma_slave {
 #define DWC_CFGH_DST_PER(x)	((x) << 11)
 
 /* Platform-configurable bits in CFG_LO */
-#define DWC_CFGL_PRIO(x)	((x) << 5)	/* priority */
 #define DWC_CFGL_LOCK_CH_XFER	(0 << 12)	/* scope of LOCK_CH */
 #define DWC_CFGL_LOCK_CH_BLOCK	(1 << 12)
 #define DWC_CFGL_LOCK_CH_XACT	(2 << 12)
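
Finally, a hedged board-file sketch showing how a platform could use the two new knobs together (values invented for illustration): descending allocation hands out the highest-numbered free channel first, while ascending priority keeps channel 0 as the hardware's highest-priority channel (the probe code maps it to priority 7).

```c
#include <linux/dw_dmac.h>

/* Hypothetical board file: 8-channel controller, hand out channel 7
 * first but keep channel 0 as the highest-priority channel. */
static struct dw_dma_platform_data example_dw_pdata = {
	.nr_channels		= 8,
	.is_private		= true,
	.chan_allocation_order	= CHAN_ALLOCATION_DESCENDING,
	.chan_priority		= CHAN_PRIORITY_ASCENDING,
};
```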
