diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-18 21:11:04 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-18 21:11:04 -0400 |
commit | 52d589a01d4545ce1dc5c3892bb8c7b55edfe714 (patch) | |
tree | 864858dae5d01aae411497e926679cf92392b4f6 | |
parent | 0a582821d4f8edf41d9b56ae057ee2002fc275f0 (diff) | |
parent | 6b997bab20448cfe85456e4789d5d9222ab6b830 (diff) |
Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
"For dmaengine contributions we have:
- designware cleanup by Andy
- my series moving device_control users to dmaengine_xxx APIs for
later removal of device_control API
- minor fixes spread over drivers mainly mv_xor, pl330, mmp, imx-sdma
etc"
* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (60 commits)
serial: atmel: add missing dmaengine header
dmaengine: remove FSLDMA_EXTERNAL_START
dmaengine: freescale: remove FSLDMA_EXTERNAL_START control method
carma-fpga: move to fsl_dma_external_start()
carma-fpga: use dmaengine_xxx() API
dmaengine: freescale: add and export fsl_dma_external_start()
dmaengine: add dmaengine_prep_dma_sg() helper
video: mx3fb: use dmaengine_terminate_all() API
serial: sh-sci: use dmaengine_terminate_all() API
net: ks8842: use dmaengine_terminate_all() API
mtd: sh_flctl: use dmaengine_terminate_all() API
mtd: fsmc_nand: use dmaengine_terminate_all() API
V4L2: mx3_camer: use dmaengine_pause() API
dmaengine: coh901318: use dmaengine_terminate_all() API
pata_arasan_cf: use dmaengine_terminate_all() API
dmaengine: edma: check for echan->edesc => NULL in edma_dma_pause()
dmaengine: dw: export probe()/remove() and Co to users
dmaengine: dw: enable and disable controller when needed
dmaengine: dw: always export dw_dma_{en,dis}able
dmaengine: dw: introduce dw_dma_on() helper
...
50 files changed, 835 insertions, 692 deletions
diff --git a/Documentation/devicetree/bindings/dma/qcom_adm.txt b/Documentation/devicetree/bindings/dma/qcom_adm.txt new file mode 100644 index 000000000000..9bcab9115982 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/qcom_adm.txt | |||
@@ -0,0 +1,62 @@ | |||
1 | QCOM ADM DMA Controller | ||
2 | |||
3 | Required properties: | ||
4 | - compatible: must contain "qcom,adm" for IPQ/APQ8064 and MSM8960 | ||
5 | - reg: Address range for DMA registers | ||
6 | - interrupts: Should contain one interrupt shared by all channels | ||
7 | - #dma-cells: must be <2>. First cell denotes the channel number. Second cell | ||
8 | denotes CRCI (client rate control interface) flow control assignment. | ||
9 | - clocks: Should contain the core clock and interface clock. | ||
10 | - clock-names: Must contain "core" for the core clock and "iface" for the | ||
11 | interface clock. | ||
12 | - resets: Must contain an entry for each entry in reset names. | ||
13 | - reset-names: Must include the following entries: | ||
14 | - clk | ||
15 | - c0 | ||
16 | - c1 | ||
17 | - c2 | ||
18 | - qcom,ee: indicates the security domain identifier used in the secure world. | ||
19 | |||
20 | Example: | ||
21 | adm_dma: dma@18300000 { | ||
22 | compatible = "qcom,adm"; | ||
23 | reg = <0x18300000 0x100000>; | ||
24 | interrupts = <0 170 0>; | ||
25 | #dma-cells = <2>; | ||
26 | |||
27 | clocks = <&gcc ADM0_CLK>, <&gcc ADM0_PBUS_CLK>; | ||
28 | clock-names = "core", "iface"; | ||
29 | |||
30 | resets = <&gcc ADM0_RESET>, | ||
31 | <&gcc ADM0_C0_RESET>, | ||
32 | <&gcc ADM0_C1_RESET>, | ||
33 | <&gcc ADM0_C2_RESET>; | ||
34 | reset-names = "clk", "c0", "c1", "c2"; | ||
35 | qcom,ee = <0>; | ||
36 | }; | ||
37 | |||
38 | DMA clients must use the format described in the dma.txt file, using a three | ||
39 | cell specifier for each channel. | ||
40 | |||
41 | Each dmas request consists of 3 cells: | ||
42 | 1. phandle pointing to the DMA controller | ||
43 | 2. channel number | ||
44 | 3. CRCI assignment, if applicable. If no CRCI flow control is required, use 0. | ||
45 | The CRCI is used for flow control. It identifies the peripheral device that | ||
46 | is the source/destination for the transferred data. | ||
47 | |||
48 | Example: | ||
49 | |||
50 | spi4: spi@1a280000 { | ||
51 | status = "ok"; | ||
52 | spi-max-frequency = <50000000>; | ||
53 | |||
54 | pinctrl-0 = <&spi_pins>; | ||
55 | pinctrl-names = "default"; | ||
56 | |||
57 | cs-gpios = <&qcom_pinmux 20 0>; | ||
58 | |||
59 | dmas = <&adm_dma 6 9>, | ||
60 | <&adm_dma 5 10>; | ||
61 | dma-names = "rx", "tx"; | ||
62 | }; | ||
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt new file mode 100644 index 000000000000..2291c4098730 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt | |||
@@ -0,0 +1,65 @@ | |||
1 | Xilinx AXI DMA engine, it does transfers between memory and AXI4 stream | ||
2 | target devices. It can be configured to have one channel or two channels. | ||
3 | If configured as two channels, one is to transmit to the device and another | ||
4 | is to receive from the device. | ||
5 | |||
6 | Required properties: | ||
7 | - compatible: Should be "xlnx,axi-dma-1.00.a" | ||
8 | - #dma-cells: Should be <1>, see "dmas" property below | ||
9 | - reg: Should contain DMA registers location and length. | ||
10 | - dma-channel child node: Should have at least one channel and can have up to | ||
11 | two channels per device. This node specifies the properties of each | ||
12 | DMA channel (see child node properties below). | ||
13 | |||
14 | Optional properties: | ||
15 | - xlnx,include-sg: Tells whether configured for Scatter-mode in | ||
16 | the hardware. | ||
17 | |||
18 | Required child node properties: | ||
19 | - compatible: It should be either "xlnx,axi-dma-mm2s-channel" or | ||
20 | "xlnx,axi-dma-s2mm-channel". | ||
21 | - interrupts: Should contain per channel DMA interrupts. | ||
22 | - xlnx,datawidth: Should contain the stream data width, take values | ||
23 | {32,64...1024}. | ||
24 | |||
25 | Optional child node properties: | ||
26 | - xlnx,include-dre: Tells whether hardware is configured for Data | ||
27 | Realignment Engine. | ||
28 | |||
29 | Example: | ||
30 | ++++++++ | ||
31 | |||
32 | axi_dma_0: axidma@40400000 { | ||
33 | compatible = "xlnx,axi-dma-1.00.a"; | ||
34 | #dma-cells = <1>; | ||
35 | reg = < 0x40400000 0x10000 >; | ||
36 | dma-channel@40400000 { | ||
37 | compatible = "xlnx,axi-dma-mm2s-channel"; | ||
38 | interrupts = < 0 59 4 >; | ||
39 | xlnx,datawidth = <0x40>; | ||
40 | } ; | ||
41 | dma-channel@40400030 { | ||
42 | compatible = "xlnx,axi-dma-s2mm-channel"; | ||
43 | interrupts = < 0 58 4 >; | ||
44 | xlnx,datawidth = <0x40>; | ||
45 | } ; | ||
46 | } ; | ||
47 | |||
48 | |||
49 | * DMA client | ||
50 | |||
51 | Required properties: | ||
52 | - dmas: a list of <[DMA device phandle] [Channel ID]> pairs, | ||
53 | where Channel ID is '0' for write/tx and '1' for read/rx | ||
54 | channel. | ||
55 | - dma-names: a list of DMA channel names, one per "dmas" entry | ||
56 | |||
57 | Example: | ||
58 | ++++++++ | ||
59 | |||
60 | dmatest_0: dmatest@0 { | ||
61 | compatible ="xlnx,axi-dma-test-1.00.a"; | ||
62 | dmas = <&axi_dma_0 0 | ||
63 | &axi_dma_0 1>; | ||
64 | dma-names = "dma0", "dma1"; | ||
65 | } ; | ||
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt index 573e28ce9751..11fb87ff6cd0 100644 --- a/Documentation/dmaengine.txt +++ b/Documentation/dmaengine.txt | |||
@@ -98,7 +98,7 @@ The slave DMA usage consists of following steps: | |||
98 | unsigned long flags); | 98 | unsigned long flags); |
99 | 99 | ||
100 | The peripheral driver is expected to have mapped the scatterlist for | 100 | The peripheral driver is expected to have mapped the scatterlist for |
101 | the DMA operation prior to calling device_prep_slave_sg, and must | 101 | the DMA operation prior to calling dmaengine_prep_slave_sg(), and must |
102 | keep the scatterlist mapped until the DMA operation has completed. | 102 | keep the scatterlist mapped until the DMA operation has completed. |
103 | The scatterlist must be mapped using the DMA struct device. | 103 | The scatterlist must be mapped using the DMA struct device. |
104 | If a mapping needs to be synchronized later, dma_sync_*_for_*() must be | 104 | If a mapping needs to be synchronized later, dma_sync_*_for_*() must be |
@@ -195,5 +195,5 @@ Further APIs: | |||
195 | Note: | 195 | Note: |
196 | Not all DMA engine drivers can return reliable information for | 196 | Not all DMA engine drivers can return reliable information for |
197 | a running DMA channel. It is recommended that DMA engine users | 197 | a running DMA channel. It is recommended that DMA engine users |
198 | pause or stop (via dmaengine_terminate_all) the channel before | 198 | pause or stop (via dmaengine_terminate_all()) the channel before |
199 | using this API. | 199 | using this API. |
diff --git a/MAINTAINERS b/MAINTAINERS index 6c59c6697a54..33c0d433d554 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -8062,7 +8062,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER | |||
8062 | M: Viresh Kumar <viresh.linux@gmail.com> | 8062 | M: Viresh Kumar <viresh.linux@gmail.com> |
8063 | M: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | 8063 | M: Andy Shevchenko <andriy.shevchenko@linux.intel.com> |
8064 | S: Maintained | 8064 | S: Maintained |
8065 | F: include/linux/dw_dmac.h | 8065 | F: include/linux/platform_data/dma-dw.h |
8066 | F: drivers/dma/dw/ | 8066 | F: drivers/dma/dw/ |
8067 | 8067 | ||
8068 | SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER | 8068 | SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER |
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index db85b5ec3351..37b75602adf6 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c | |||
@@ -7,7 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | #include <linux/clk.h> | 8 | #include <linux/clk.h> |
9 | #include <linux/delay.h> | 9 | #include <linux/delay.h> |
10 | #include <linux/dw_dmac.h> | 10 | #include <linux/platform_data/dma-dw.h> |
11 | #include <linux/fb.h> | 11 | #include <linux/fb.h> |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
@@ -1356,10 +1356,10 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) | |||
1356 | goto fail; | 1356 | goto fail; |
1357 | 1357 | ||
1358 | slave->sdata.dma_dev = &dw_dmac0_device.dev; | 1358 | slave->sdata.dma_dev = &dw_dmac0_device.dev; |
1359 | slave->sdata.cfg_hi = (DWC_CFGH_SRC_PER(0) | 1359 | slave->sdata.src_id = 0; |
1360 | | DWC_CFGH_DST_PER(1)); | 1360 | slave->sdata.dst_id = 1; |
1361 | slave->sdata.cfg_lo &= ~(DWC_CFGL_HS_DST_POL | 1361 | slave->sdata.src_master = 1; |
1362 | | DWC_CFGL_HS_SRC_POL); | 1362 | slave->sdata.dst_master = 0; |
1363 | 1363 | ||
1364 | data->dma_slave = slave; | 1364 | data->dma_slave = slave; |
1365 | 1365 | ||
@@ -2052,8 +2052,7 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data, | |||
2052 | /* Check if DMA slave interface for capture should be configured. */ | 2052 | /* Check if DMA slave interface for capture should be configured. */ |
2053 | if (flags & AC97C_CAPTURE) { | 2053 | if (flags & AC97C_CAPTURE) { |
2054 | rx_dws->dma_dev = &dw_dmac0_device.dev; | 2054 | rx_dws->dma_dev = &dw_dmac0_device.dev; |
2055 | rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3); | 2055 | rx_dws->src_id = 3; |
2056 | rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); | ||
2057 | rx_dws->src_master = 0; | 2056 | rx_dws->src_master = 0; |
2058 | rx_dws->dst_master = 1; | 2057 | rx_dws->dst_master = 1; |
2059 | } | 2058 | } |
@@ -2061,8 +2060,7 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data, | |||
2061 | /* Check if DMA slave interface for playback should be configured. */ | 2060 | /* Check if DMA slave interface for playback should be configured. */ |
2062 | if (flags & AC97C_PLAYBACK) { | 2061 | if (flags & AC97C_PLAYBACK) { |
2063 | tx_dws->dma_dev = &dw_dmac0_device.dev; | 2062 | tx_dws->dma_dev = &dw_dmac0_device.dev; |
2064 | tx_dws->cfg_hi = DWC_CFGH_DST_PER(4); | 2063 | tx_dws->dst_id = 4; |
2065 | tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); | ||
2066 | tx_dws->src_master = 0; | 2064 | tx_dws->src_master = 0; |
2067 | tx_dws->dst_master = 1; | 2065 | tx_dws->dst_master = 1; |
2068 | } | 2066 | } |
@@ -2134,8 +2132,7 @@ at32_add_device_abdac(unsigned int id, struct atmel_abdac_pdata *data) | |||
2134 | dws = &data->dws; | 2132 | dws = &data->dws; |
2135 | 2133 | ||
2136 | dws->dma_dev = &dw_dmac0_device.dev; | 2134 | dws->dma_dev = &dw_dmac0_device.dev; |
2137 | dws->cfg_hi = DWC_CFGH_DST_PER(2); | 2135 | dws->dst_id = 2; |
2138 | dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); | ||
2139 | dws->src_master = 0; | 2136 | dws->src_master = 0; |
2140 | dws->dst_master = 1; | 2137 | dws->dst_master = 1; |
2141 | 2138 | ||
diff --git a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h index 4bba58561d5c..11d7f4b28dc8 100644 --- a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h +++ b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef __MACH_ATMEL_MCI_H | 1 | #ifndef __MACH_ATMEL_MCI_H |
2 | #define __MACH_ATMEL_MCI_H | 2 | #define __MACH_ATMEL_MCI_H |
3 | 3 | ||
4 | #include <linux/dw_dmac.h> | 4 | #include <linux/platform_data/dma-dw.h> |
5 | 5 | ||
6 | /** | 6 | /** |
7 | * struct mci_dma_data - DMA data for MCI interface | 7 | * struct mci_dma_data - DMA data for MCI interface |
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index 4edb1a81f63f..38216b991474 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c | |||
@@ -420,7 +420,7 @@ dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len) | |||
420 | 420 | ||
421 | /* Wait for DMA to complete */ | 421 | /* Wait for DMA to complete */ |
422 | if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) { | 422 | if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) { |
423 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); | 423 | dmaengine_terminate_all(chan); |
424 | dev_err(acdev->host->dev, "wait_for_completion_timeout\n"); | 424 | dev_err(acdev->host->dev, "wait_for_completion_timeout\n"); |
425 | return -ETIMEDOUT; | 425 | return -ETIMEDOUT; |
426 | } | 426 | } |
@@ -928,8 +928,7 @@ static int arasan_cf_suspend(struct device *dev) | |||
928 | struct arasan_cf_dev *acdev = host->ports[0]->private_data; | 928 | struct arasan_cf_dev *acdev = host->ports[0]->private_data; |
929 | 929 | ||
930 | if (acdev->dma_chan) | 930 | if (acdev->dma_chan) |
931 | acdev->dma_chan->device->device_control(acdev->dma_chan, | 931 | dmaengine_terminate_all(acdev->dma_chan); |
932 | DMA_TERMINATE_ALL, 0); | ||
933 | 932 | ||
934 | cf_exit(acdev); | 933 | cf_exit(acdev); |
935 | return ata_host_suspend(host, PMSG_SUSPEND); | 934 | return ata_host_suspend(host, PMSG_SUSPEND); |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index a016490c95ae..de469821bc1b 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -270,7 +270,7 @@ config IMX_SDMA | |||
270 | select DMA_ENGINE | 270 | select DMA_ENGINE |
271 | help | 271 | help |
272 | Support the i.MX SDMA engine. This engine is integrated into | 272 | Support the i.MX SDMA engine. This engine is integrated into |
273 | Freescale i.MX25/31/35/51/53 chips. | 273 | Freescale i.MX25/31/35/51/53/6 chips. |
274 | 274 | ||
275 | config IMX_DMA | 275 | config IMX_DMA |
276 | tristate "i.MX DMA support" | 276 | tristate "i.MX DMA support" |
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 3c6716e0b78e..e88588d8ecd3 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c | |||
@@ -2156,7 +2156,7 @@ coh901318_free_chan_resources(struct dma_chan *chan) | |||
2156 | 2156 | ||
2157 | spin_unlock_irqrestore(&cohc->lock, flags); | 2157 | spin_unlock_irqrestore(&cohc->lock, flags); |
2158 | 2158 | ||
2159 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); | 2159 | dmaengine_terminate_all(chan); |
2160 | } | 2160 | } |
2161 | 2161 | ||
2162 | 2162 | ||
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index 8f8b0b608875..a58eec3b2cad 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c | |||
@@ -938,7 +938,7 @@ static int cppi41_dma_probe(struct platform_device *pdev) | |||
938 | if (!glue_info) | 938 | if (!glue_info) |
939 | return -EINVAL; | 939 | return -EINVAL; |
940 | 940 | ||
941 | cdd = kzalloc(sizeof(*cdd), GFP_KERNEL); | 941 | cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL); |
942 | if (!cdd) | 942 | if (!cdd) |
943 | return -ENOMEM; | 943 | return -ENOMEM; |
944 | 944 | ||
@@ -959,10 +959,8 @@ static int cppi41_dma_probe(struct platform_device *pdev) | |||
959 | cdd->qmgr_mem = of_iomap(dev->of_node, 3); | 959 | cdd->qmgr_mem = of_iomap(dev->of_node, 3); |
960 | 960 | ||
961 | if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || | 961 | if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || |
962 | !cdd->qmgr_mem) { | 962 | !cdd->qmgr_mem) |
963 | ret = -ENXIO; | 963 | return -ENXIO; |
964 | goto err_remap; | ||
965 | } | ||
966 | 964 | ||
967 | pm_runtime_enable(dev); | 965 | pm_runtime_enable(dev); |
968 | ret = pm_runtime_get_sync(dev); | 966 | ret = pm_runtime_get_sync(dev); |
@@ -989,7 +987,7 @@ static int cppi41_dma_probe(struct platform_device *pdev) | |||
989 | 987 | ||
990 | cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); | 988 | cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); |
991 | 989 | ||
992 | ret = request_irq(irq, glue_info->isr, IRQF_SHARED, | 990 | ret = devm_request_irq(&pdev->dev, irq, glue_info->isr, IRQF_SHARED, |
993 | dev_name(dev), cdd); | 991 | dev_name(dev), cdd); |
994 | if (ret) | 992 | if (ret) |
995 | goto err_irq; | 993 | goto err_irq; |
@@ -1009,7 +1007,6 @@ static int cppi41_dma_probe(struct platform_device *pdev) | |||
1009 | err_of: | 1007 | err_of: |
1010 | dma_async_device_unregister(&cdd->ddev); | 1008 | dma_async_device_unregister(&cdd->ddev); |
1011 | err_dma_reg: | 1009 | err_dma_reg: |
1012 | free_irq(irq, cdd); | ||
1013 | err_irq: | 1010 | err_irq: |
1014 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); | 1011 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); |
1015 | cleanup_chans(cdd); | 1012 | cleanup_chans(cdd); |
@@ -1023,8 +1020,6 @@ err_get_sync: | |||
1023 | iounmap(cdd->ctrl_mem); | 1020 | iounmap(cdd->ctrl_mem); |
1024 | iounmap(cdd->sched_mem); | 1021 | iounmap(cdd->sched_mem); |
1025 | iounmap(cdd->qmgr_mem); | 1022 | iounmap(cdd->qmgr_mem); |
1026 | err_remap: | ||
1027 | kfree(cdd); | ||
1028 | return ret; | 1023 | return ret; |
1029 | } | 1024 | } |
1030 | 1025 | ||
@@ -1036,7 +1031,7 @@ static int cppi41_dma_remove(struct platform_device *pdev) | |||
1036 | dma_async_device_unregister(&cdd->ddev); | 1031 | dma_async_device_unregister(&cdd->ddev); |
1037 | 1032 | ||
1038 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); | 1033 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); |
1039 | free_irq(cdd->irq, cdd); | 1034 | devm_free_irq(&pdev->dev, cdd->irq, cdd); |
1040 | cleanup_chans(cdd); | 1035 | cleanup_chans(cdd); |
1041 | deinit_cppi41(&pdev->dev, cdd); | 1036 | deinit_cppi41(&pdev->dev, cdd); |
1042 | iounmap(cdd->usbss_mem); | 1037 | iounmap(cdd->usbss_mem); |
@@ -1045,7 +1040,6 @@ static int cppi41_dma_remove(struct platform_device *pdev) | |||
1045 | iounmap(cdd->qmgr_mem); | 1040 | iounmap(cdd->qmgr_mem); |
1046 | pm_runtime_put(&pdev->dev); | 1041 | pm_runtime_put(&pdev->dev); |
1047 | pm_runtime_disable(&pdev->dev); | 1042 | pm_runtime_disable(&pdev->dev); |
1048 | kfree(cdd); | ||
1049 | return 0; | 1043 | return 0; |
1050 | } | 1044 | } |
1051 | 1045 | ||
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 1af731b83b3f..244722170410 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c | |||
@@ -11,7 +11,6 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/bitops.h> | 13 | #include <linux/bitops.h> |
14 | #include <linux/clk.h> | ||
15 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
16 | #include <linux/dmaengine.h> | 15 | #include <linux/dmaengine.h> |
17 | #include <linux/dma-mapping.h> | 16 | #include <linux/dma-mapping.h> |
@@ -37,24 +36,6 @@ | |||
37 | * support descriptor writeback. | 36 | * support descriptor writeback. |
38 | */ | 37 | */ |
39 | 38 | ||
40 | static inline bool is_request_line_unset(struct dw_dma_chan *dwc) | ||
41 | { | ||
42 | return dwc->request_line == (typeof(dwc->request_line))~0; | ||
43 | } | ||
44 | |||
45 | static inline void dwc_set_masters(struct dw_dma_chan *dwc) | ||
46 | { | ||
47 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
48 | struct dw_dma_slave *dws = dwc->chan.private; | ||
49 | unsigned char mmax = dw->nr_masters - 1; | ||
50 | |||
51 | if (!is_request_line_unset(dwc)) | ||
52 | return; | ||
53 | |||
54 | dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws)); | ||
55 | dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws)); | ||
56 | } | ||
57 | |||
58 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ | 39 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ |
59 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ | 40 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ |
60 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ | 41 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ |
@@ -155,13 +136,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc) | |||
155 | */ | 136 | */ |
156 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); | 137 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); |
157 | 138 | ||
158 | cfghi = dws->cfg_hi; | 139 | cfghi |= DWC_CFGH_DST_PER(dws->dst_id); |
159 | cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; | 140 | cfghi |= DWC_CFGH_SRC_PER(dws->src_id); |
160 | } else { | 141 | } else { |
161 | if (dwc->direction == DMA_MEM_TO_DEV) | 142 | cfghi |= DWC_CFGH_DST_PER(dwc->dst_id); |
162 | cfghi = DWC_CFGH_DST_PER(dwc->request_line); | 143 | cfghi |= DWC_CFGH_SRC_PER(dwc->src_id); |
163 | else if (dwc->direction == DMA_DEV_TO_MEM) | ||
164 | cfghi = DWC_CFGH_SRC_PER(dwc->request_line); | ||
165 | } | 144 | } |
166 | 145 | ||
167 | channel_writel(dwc, CFG_LO, cfglo); | 146 | channel_writel(dwc, CFG_LO, cfglo); |
@@ -939,6 +918,26 @@ err_desc_get: | |||
939 | return NULL; | 918 | return NULL; |
940 | } | 919 | } |
941 | 920 | ||
921 | bool dw_dma_filter(struct dma_chan *chan, void *param) | ||
922 | { | ||
923 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
924 | struct dw_dma_slave *dws = param; | ||
925 | |||
926 | if (!dws || dws->dma_dev != chan->device->dev) | ||
927 | return false; | ||
928 | |||
929 | /* We have to copy data since dws can be temporary storage */ | ||
930 | |||
931 | dwc->src_id = dws->src_id; | ||
932 | dwc->dst_id = dws->dst_id; | ||
933 | |||
934 | dwc->src_master = dws->src_master; | ||
935 | dwc->dst_master = dws->dst_master; | ||
936 | |||
937 | return true; | ||
938 | } | ||
939 | EXPORT_SYMBOL_GPL(dw_dma_filter); | ||
940 | |||
942 | /* | 941 | /* |
943 | * Fix sconfig's burst size according to dw_dmac. We need to convert them as: | 942 | * Fix sconfig's burst size according to dw_dmac. We need to convert them as: |
944 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. | 943 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. |
@@ -967,10 +966,6 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | |||
967 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); | 966 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); |
968 | dwc->direction = sconfig->direction; | 967 | dwc->direction = sconfig->direction; |
969 | 968 | ||
970 | /* Take the request line from slave_id member */ | ||
971 | if (is_request_line_unset(dwc)) | ||
972 | dwc->request_line = sconfig->slave_id; | ||
973 | |||
974 | convert_burst(&dwc->dma_sconfig.src_maxburst); | 969 | convert_burst(&dwc->dma_sconfig.src_maxburst); |
975 | convert_burst(&dwc->dma_sconfig.dst_maxburst); | 970 | convert_burst(&dwc->dma_sconfig.dst_maxburst); |
976 | 971 | ||
@@ -1099,6 +1094,31 @@ static void dwc_issue_pending(struct dma_chan *chan) | |||
1099 | spin_unlock_irqrestore(&dwc->lock, flags); | 1094 | spin_unlock_irqrestore(&dwc->lock, flags); |
1100 | } | 1095 | } |
1101 | 1096 | ||
1097 | /*----------------------------------------------------------------------*/ | ||
1098 | |||
1099 | static void dw_dma_off(struct dw_dma *dw) | ||
1100 | { | ||
1101 | int i; | ||
1102 | |||
1103 | dma_writel(dw, CFG, 0); | ||
1104 | |||
1105 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | ||
1106 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); | ||
1107 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | ||
1108 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | ||
1109 | |||
1110 | while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) | ||
1111 | cpu_relax(); | ||
1112 | |||
1113 | for (i = 0; i < dw->dma.chancnt; i++) | ||
1114 | dw->chan[i].initialized = false; | ||
1115 | } | ||
1116 | |||
1117 | static void dw_dma_on(struct dw_dma *dw) | ||
1118 | { | ||
1119 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | ||
1120 | } | ||
1121 | |||
1102 | static int dwc_alloc_chan_resources(struct dma_chan *chan) | 1122 | static int dwc_alloc_chan_resources(struct dma_chan *chan) |
1103 | { | 1123 | { |
1104 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1124 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
@@ -1123,7 +1143,10 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
1123 | * doesn't mean what you think it means), and status writeback. | 1143 | * doesn't mean what you think it means), and status writeback. |
1124 | */ | 1144 | */ |
1125 | 1145 | ||
1126 | dwc_set_masters(dwc); | 1146 | /* Enable controller here if needed */ |
1147 | if (!dw->in_use) | ||
1148 | dw_dma_on(dw); | ||
1149 | dw->in_use |= dwc->mask; | ||
1127 | 1150 | ||
1128 | spin_lock_irqsave(&dwc->lock, flags); | 1151 | spin_lock_irqsave(&dwc->lock, flags); |
1129 | i = dwc->descs_allocated; | 1152 | i = dwc->descs_allocated; |
@@ -1182,7 +1205,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1182 | list_splice_init(&dwc->free_list, &list); | 1205 | list_splice_init(&dwc->free_list, &list); |
1183 | dwc->descs_allocated = 0; | 1206 | dwc->descs_allocated = 0; |
1184 | dwc->initialized = false; | 1207 | dwc->initialized = false; |
1185 | dwc->request_line = ~0; | ||
1186 | 1208 | ||
1187 | /* Disable interrupts */ | 1209 | /* Disable interrupts */ |
1188 | channel_clear_bit(dw, MASK.XFER, dwc->mask); | 1210 | channel_clear_bit(dw, MASK.XFER, dwc->mask); |
@@ -1190,6 +1212,11 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1190 | 1212 | ||
1191 | spin_unlock_irqrestore(&dwc->lock, flags); | 1213 | spin_unlock_irqrestore(&dwc->lock, flags); |
1192 | 1214 | ||
1215 | /* Disable controller in case it was a last user */ | ||
1216 | dw->in_use &= ~dwc->mask; | ||
1217 | if (!dw->in_use) | ||
1218 | dw_dma_off(dw); | ||
1219 | |||
1193 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { | 1220 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { |
1194 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); | 1221 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); |
1195 | dma_pool_free(dw->desc_pool, desc, desc->txd.phys); | 1222 | dma_pool_free(dw->desc_pool, desc, desc->txd.phys); |
@@ -1460,24 +1487,6 @@ EXPORT_SYMBOL(dw_dma_cyclic_free); | |||
1460 | 1487 | ||
1461 | /*----------------------------------------------------------------------*/ | 1488 | /*----------------------------------------------------------------------*/ |
1462 | 1489 | ||
1463 | static void dw_dma_off(struct dw_dma *dw) | ||
1464 | { | ||
1465 | int i; | ||
1466 | |||
1467 | dma_writel(dw, CFG, 0); | ||
1468 | |||
1469 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | ||
1470 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); | ||
1471 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | ||
1472 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | ||
1473 | |||
1474 | while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) | ||
1475 | cpu_relax(); | ||
1476 | |||
1477 | for (i = 0; i < dw->dma.chancnt; i++) | ||
1478 | dw->chan[i].initialized = false; | ||
1479 | } | ||
1480 | |||
1481 | int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | 1490 | int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) |
1482 | { | 1491 | { |
1483 | struct dw_dma *dw; | 1492 | struct dw_dma *dw; |
@@ -1495,13 +1504,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1495 | dw->regs = chip->regs; | 1504 | dw->regs = chip->regs; |
1496 | chip->dw = dw; | 1505 | chip->dw = dw; |
1497 | 1506 | ||
1498 | dw->clk = devm_clk_get(chip->dev, "hclk"); | ||
1499 | if (IS_ERR(dw->clk)) | ||
1500 | return PTR_ERR(dw->clk); | ||
1501 | err = clk_prepare_enable(dw->clk); | ||
1502 | if (err) | ||
1503 | return err; | ||
1504 | |||
1505 | dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); | 1507 | dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); |
1506 | autocfg = dw_params >> DW_PARAMS_EN & 0x1; | 1508 | autocfg = dw_params >> DW_PARAMS_EN & 0x1; |
1507 | 1509 | ||
@@ -1604,7 +1606,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1604 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1606 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1605 | 1607 | ||
1606 | dwc->direction = DMA_TRANS_NONE; | 1608 | dwc->direction = DMA_TRANS_NONE; |
1607 | dwc->request_line = ~0; | ||
1608 | 1609 | ||
1609 | /* Hardware configuration */ | 1610 | /* Hardware configuration */ |
1610 | if (autocfg) { | 1611 | if (autocfg) { |
@@ -1659,8 +1660,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1659 | dw->dma.device_tx_status = dwc_tx_status; | 1660 | dw->dma.device_tx_status = dwc_tx_status; |
1660 | dw->dma.device_issue_pending = dwc_issue_pending; | 1661 | dw->dma.device_issue_pending = dwc_issue_pending; |
1661 | 1662 | ||
1662 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | ||
1663 | |||
1664 | err = dma_async_device_register(&dw->dma); | 1663 | err = dma_async_device_register(&dw->dma); |
1665 | if (err) | 1664 | if (err) |
1666 | goto err_dma_register; | 1665 | goto err_dma_register; |
@@ -1673,7 +1672,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1673 | err_dma_register: | 1672 | err_dma_register: |
1674 | free_irq(chip->irq, dw); | 1673 | free_irq(chip->irq, dw); |
1675 | err_pdata: | 1674 | err_pdata: |
1676 | clk_disable_unprepare(dw->clk); | ||
1677 | return err; | 1675 | return err; |
1678 | } | 1676 | } |
1679 | EXPORT_SYMBOL_GPL(dw_dma_probe); | 1677 | EXPORT_SYMBOL_GPL(dw_dma_probe); |
@@ -1695,46 +1693,27 @@ int dw_dma_remove(struct dw_dma_chip *chip) | |||
1695 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1693 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1696 | } | 1694 | } |
1697 | 1695 | ||
1698 | clk_disable_unprepare(dw->clk); | ||
1699 | |||
1700 | return 0; | 1696 | return 0; |
1701 | } | 1697 | } |
1702 | EXPORT_SYMBOL_GPL(dw_dma_remove); | 1698 | EXPORT_SYMBOL_GPL(dw_dma_remove); |
1703 | 1699 | ||
1704 | void dw_dma_shutdown(struct dw_dma_chip *chip) | 1700 | int dw_dma_disable(struct dw_dma_chip *chip) |
1705 | { | ||
1706 | struct dw_dma *dw = chip->dw; | ||
1707 | |||
1708 | dw_dma_off(dw); | ||
1709 | clk_disable_unprepare(dw->clk); | ||
1710 | } | ||
1711 | EXPORT_SYMBOL_GPL(dw_dma_shutdown); | ||
1712 | |||
1713 | #ifdef CONFIG_PM_SLEEP | ||
1714 | |||
1715 | int dw_dma_suspend(struct dw_dma_chip *chip) | ||
1716 | { | 1701 | { |
1717 | struct dw_dma *dw = chip->dw; | 1702 | struct dw_dma *dw = chip->dw; |
1718 | 1703 | ||
1719 | dw_dma_off(dw); | 1704 | dw_dma_off(dw); |
1720 | clk_disable_unprepare(dw->clk); | ||
1721 | |||
1722 | return 0; | 1705 | return 0; |
1723 | } | 1706 | } |
1724 | EXPORT_SYMBOL_GPL(dw_dma_suspend); | 1707 | EXPORT_SYMBOL_GPL(dw_dma_disable); |
1725 | 1708 | ||
1726 | int dw_dma_resume(struct dw_dma_chip *chip) | 1709 | int dw_dma_enable(struct dw_dma_chip *chip) |
1727 | { | 1710 | { |
1728 | struct dw_dma *dw = chip->dw; | 1711 | struct dw_dma *dw = chip->dw; |
1729 | 1712 | ||
1730 | clk_prepare_enable(dw->clk); | 1713 | dw_dma_on(dw); |
1731 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | ||
1732 | |||
1733 | return 0; | 1714 | return 0; |
1734 | } | 1715 | } |
1735 | EXPORT_SYMBOL_GPL(dw_dma_resume); | 1716 | EXPORT_SYMBOL_GPL(dw_dma_enable); |
1736 | |||
1737 | #endif /* CONFIG_PM_SLEEP */ | ||
1738 | 1717 | ||
1739 | MODULE_LICENSE("GPL v2"); | 1718 | MODULE_LICENSE("GPL v2"); |
1740 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); | 1719 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); |
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h index 32667f9e0dda..41439732ff6b 100644 --- a/drivers/dma/dw/internal.h +++ b/drivers/dma/dw/internal.h | |||
@@ -8,63 +8,16 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef _DW_DMAC_INTERNAL_H | 11 | #ifndef _DMA_DW_INTERNAL_H |
12 | #define _DW_DMAC_INTERNAL_H | 12 | #define _DMA_DW_INTERNAL_H |
13 | 13 | ||
14 | #include <linux/device.h> | 14 | #include <linux/dma/dw.h> |
15 | #include <linux/dw_dmac.h> | ||
16 | 15 | ||
17 | #include "regs.h" | 16 | #include "regs.h" |
18 | 17 | ||
19 | /** | 18 | int dw_dma_disable(struct dw_dma_chip *chip); |
20 | * struct dw_dma_chip - representation of DesignWare DMA controller hardware | 19 | int dw_dma_enable(struct dw_dma_chip *chip); |
21 | * @dev: struct device of the DMA controller | ||
22 | * @irq: irq line | ||
23 | * @regs: memory mapped I/O space | ||
24 | * @dw: struct dw_dma that is filed by dw_dma_probe() | ||
25 | */ | ||
26 | struct dw_dma_chip { | ||
27 | struct device *dev; | ||
28 | int irq; | ||
29 | void __iomem *regs; | ||
30 | struct dw_dma *dw; | ||
31 | }; | ||
32 | |||
33 | /* Export to the platform drivers */ | ||
34 | int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata); | ||
35 | int dw_dma_remove(struct dw_dma_chip *chip); | ||
36 | |||
37 | void dw_dma_shutdown(struct dw_dma_chip *chip); | ||
38 | |||
39 | #ifdef CONFIG_PM_SLEEP | ||
40 | |||
41 | int dw_dma_suspend(struct dw_dma_chip *chip); | ||
42 | int dw_dma_resume(struct dw_dma_chip *chip); | ||
43 | |||
44 | #endif /* CONFIG_PM_SLEEP */ | ||
45 | 20 | ||
46 | /** | 21 | extern bool dw_dma_filter(struct dma_chan *chan, void *param); |
47 | * dwc_get_dms - get destination master | ||
48 | * @slave: pointer to the custom slave configuration | ||
49 | * | ||
50 | * Returns destination master in the custom slave configuration if defined, or | ||
51 | * default value otherwise. | ||
52 | */ | ||
53 | static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave) | ||
54 | { | ||
55 | return slave ? slave->dst_master : 0; | ||
56 | } | ||
57 | |||
58 | /** | ||
59 | * dwc_get_sms - get source master | ||
60 | * @slave: pointer to the custom slave configuration | ||
61 | * | ||
62 | * Returns source master in the custom slave configuration if defined, or | ||
63 | * default value otherwise. | ||
64 | */ | ||
65 | static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) | ||
66 | { | ||
67 | return slave ? slave->src_master : 1; | ||
68 | } | ||
69 | 22 | ||
70 | #endif /* _DW_DMAC_INTERNAL_H */ | 23 | #endif /* _DMA_DW_INTERNAL_H */ |
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index 39e30c3c7a9d..b144706b3d85 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c | |||
@@ -82,7 +82,7 @@ static int dw_pci_suspend_late(struct device *dev) | |||
82 | struct pci_dev *pci = to_pci_dev(dev); | 82 | struct pci_dev *pci = to_pci_dev(dev); |
83 | struct dw_dma_chip *chip = pci_get_drvdata(pci); | 83 | struct dw_dma_chip *chip = pci_get_drvdata(pci); |
84 | 84 | ||
85 | return dw_dma_suspend(chip); | 85 | return dw_dma_disable(chip); |
86 | }; | 86 | }; |
87 | 87 | ||
88 | static int dw_pci_resume_early(struct device *dev) | 88 | static int dw_pci_resume_early(struct device *dev) |
@@ -90,7 +90,7 @@ static int dw_pci_resume_early(struct device *dev) | |||
90 | struct pci_dev *pci = to_pci_dev(dev); | 90 | struct pci_dev *pci = to_pci_dev(dev); |
91 | struct dw_dma_chip *chip = pci_get_drvdata(pci); | 91 | struct dw_dma_chip *chip = pci_get_drvdata(pci); |
92 | 92 | ||
93 | return dw_dma_resume(chip); | 93 | return dw_dma_enable(chip); |
94 | }; | 94 | }; |
95 | 95 | ||
96 | #endif /* CONFIG_PM_SLEEP */ | 96 | #endif /* CONFIG_PM_SLEEP */ |
@@ -108,6 +108,10 @@ static const struct pci_device_id dw_pci_id_table[] = { | |||
108 | { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata }, | 108 | { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata }, |
109 | { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata }, | 109 | { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata }, |
110 | 110 | ||
111 | /* Braswell */ | ||
112 | { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_pdata }, | ||
113 | { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_pdata }, | ||
114 | |||
111 | /* Haswell */ | 115 | /* Haswell */ |
112 | { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_pdata }, | 116 | { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_pdata }, |
113 | { } | 117 | { } |
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index c5b339af6be5..a630161473a4 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c | |||
@@ -25,72 +25,49 @@ | |||
25 | 25 | ||
26 | #include "internal.h" | 26 | #include "internal.h" |
27 | 27 | ||
28 | struct dw_dma_of_filter_args { | ||
29 | struct dw_dma *dw; | ||
30 | unsigned int req; | ||
31 | unsigned int src; | ||
32 | unsigned int dst; | ||
33 | }; | ||
34 | |||
35 | static bool dw_dma_of_filter(struct dma_chan *chan, void *param) | ||
36 | { | ||
37 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
38 | struct dw_dma_of_filter_args *fargs = param; | ||
39 | |||
40 | /* Ensure the device matches our channel */ | ||
41 | if (chan->device != &fargs->dw->dma) | ||
42 | return false; | ||
43 | |||
44 | dwc->request_line = fargs->req; | ||
45 | dwc->src_master = fargs->src; | ||
46 | dwc->dst_master = fargs->dst; | ||
47 | |||
48 | return true; | ||
49 | } | ||
50 | |||
51 | static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, | 28 | static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, |
52 | struct of_dma *ofdma) | 29 | struct of_dma *ofdma) |
53 | { | 30 | { |
54 | struct dw_dma *dw = ofdma->of_dma_data; | 31 | struct dw_dma *dw = ofdma->of_dma_data; |
55 | struct dw_dma_of_filter_args fargs = { | 32 | struct dw_dma_slave slave = { |
56 | .dw = dw, | 33 | .dma_dev = dw->dma.dev, |
57 | }; | 34 | }; |
58 | dma_cap_mask_t cap; | 35 | dma_cap_mask_t cap; |
59 | 36 | ||
60 | if (dma_spec->args_count != 3) | 37 | if (dma_spec->args_count != 3) |
61 | return NULL; | 38 | return NULL; |
62 | 39 | ||
63 | fargs.req = dma_spec->args[0]; | 40 | slave.src_id = dma_spec->args[0]; |
64 | fargs.src = dma_spec->args[1]; | 41 | slave.dst_id = dma_spec->args[0]; |
65 | fargs.dst = dma_spec->args[2]; | 42 | slave.src_master = dma_spec->args[1]; |
43 | slave.dst_master = dma_spec->args[2]; | ||
66 | 44 | ||
67 | if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS || | 45 | if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS || |
68 | fargs.src >= dw->nr_masters || | 46 | slave.dst_id >= DW_DMA_MAX_NR_REQUESTS || |
69 | fargs.dst >= dw->nr_masters)) | 47 | slave.src_master >= dw->nr_masters || |
48 | slave.dst_master >= dw->nr_masters)) | ||
70 | return NULL; | 49 | return NULL; |
71 | 50 | ||
72 | dma_cap_zero(cap); | 51 | dma_cap_zero(cap); |
73 | dma_cap_set(DMA_SLAVE, cap); | 52 | dma_cap_set(DMA_SLAVE, cap); |
74 | 53 | ||
75 | /* TODO: there should be a simpler way to do this */ | 54 | /* TODO: there should be a simpler way to do this */ |
76 | return dma_request_channel(cap, dw_dma_of_filter, &fargs); | 55 | return dma_request_channel(cap, dw_dma_filter, &slave); |
77 | } | 56 | } |
78 | 57 | ||
79 | #ifdef CONFIG_ACPI | 58 | #ifdef CONFIG_ACPI |
80 | static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) | 59 | static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) |
81 | { | 60 | { |
82 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
83 | struct acpi_dma_spec *dma_spec = param; | 61 | struct acpi_dma_spec *dma_spec = param; |
62 | struct dw_dma_slave slave = { | ||
63 | .dma_dev = dma_spec->dev, | ||
64 | .src_id = dma_spec->slave_id, | ||
65 | .dst_id = dma_spec->slave_id, | ||
66 | .src_master = 1, | ||
67 | .dst_master = 0, | ||
68 | }; | ||
84 | 69 | ||
85 | if (chan->device->dev != dma_spec->dev || | 70 | return dw_dma_filter(chan, &slave); |
86 | chan->chan_id != dma_spec->chan_id) | ||
87 | return false; | ||
88 | |||
89 | dwc->request_line = dma_spec->slave_id; | ||
90 | dwc->src_master = dwc_get_sms(NULL); | ||
91 | dwc->dst_master = dwc_get_dms(NULL); | ||
92 | |||
93 | return true; | ||
94 | } | 71 | } |
95 | 72 | ||
96 | static void dw_dma_acpi_controller_register(struct dw_dma *dw) | 73 | static void dw_dma_acpi_controller_register(struct dw_dma *dw) |
@@ -201,10 +178,17 @@ static int dw_probe(struct platform_device *pdev) | |||
201 | 178 | ||
202 | chip->dev = dev; | 179 | chip->dev = dev; |
203 | 180 | ||
204 | err = dw_dma_probe(chip, pdata); | 181 | chip->clk = devm_clk_get(chip->dev, "hclk"); |
182 | if (IS_ERR(chip->clk)) | ||
183 | return PTR_ERR(chip->clk); | ||
184 | err = clk_prepare_enable(chip->clk); | ||
205 | if (err) | 185 | if (err) |
206 | return err; | 186 | return err; |
207 | 187 | ||
188 | err = dw_dma_probe(chip, pdata); | ||
189 | if (err) | ||
190 | goto err_dw_dma_probe; | ||
191 | |||
208 | platform_set_drvdata(pdev, chip); | 192 | platform_set_drvdata(pdev, chip); |
209 | 193 | ||
210 | if (pdev->dev.of_node) { | 194 | if (pdev->dev.of_node) { |
@@ -219,6 +203,10 @@ static int dw_probe(struct platform_device *pdev) | |||
219 | dw_dma_acpi_controller_register(chip->dw); | 203 | dw_dma_acpi_controller_register(chip->dw); |
220 | 204 | ||
221 | return 0; | 205 | return 0; |
206 | |||
207 | err_dw_dma_probe: | ||
208 | clk_disable_unprepare(chip->clk); | ||
209 | return err; | ||
222 | } | 210 | } |
223 | 211 | ||
224 | static int dw_remove(struct platform_device *pdev) | 212 | static int dw_remove(struct platform_device *pdev) |
@@ -228,14 +216,18 @@ static int dw_remove(struct platform_device *pdev) | |||
228 | if (pdev->dev.of_node) | 216 | if (pdev->dev.of_node) |
229 | of_dma_controller_free(pdev->dev.of_node); | 217 | of_dma_controller_free(pdev->dev.of_node); |
230 | 218 | ||
231 | return dw_dma_remove(chip); | 219 | dw_dma_remove(chip); |
220 | clk_disable_unprepare(chip->clk); | ||
221 | |||
222 | return 0; | ||
232 | } | 223 | } |
233 | 224 | ||
234 | static void dw_shutdown(struct platform_device *pdev) | 225 | static void dw_shutdown(struct platform_device *pdev) |
235 | { | 226 | { |
236 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); | 227 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); |
237 | 228 | ||
238 | dw_dma_shutdown(chip); | 229 | dw_dma_disable(chip); |
230 | clk_disable_unprepare(chip->clk); | ||
239 | } | 231 | } |
240 | 232 | ||
241 | #ifdef CONFIG_OF | 233 | #ifdef CONFIG_OF |
@@ -261,7 +253,10 @@ static int dw_suspend_late(struct device *dev) | |||
261 | struct platform_device *pdev = to_platform_device(dev); | 253 | struct platform_device *pdev = to_platform_device(dev); |
262 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); | 254 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); |
263 | 255 | ||
264 | return dw_dma_suspend(chip); | 256 | dw_dma_disable(chip); |
257 | clk_disable_unprepare(chip->clk); | ||
258 | |||
259 | return 0; | ||
265 | } | 260 | } |
266 | 261 | ||
267 | static int dw_resume_early(struct device *dev) | 262 | static int dw_resume_early(struct device *dev) |
@@ -269,7 +264,8 @@ static int dw_resume_early(struct device *dev) | |||
269 | struct platform_device *pdev = to_platform_device(dev); | 264 | struct platform_device *pdev = to_platform_device(dev); |
270 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); | 265 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); |
271 | 266 | ||
272 | return dw_dma_resume(chip); | 267 | clk_prepare_enable(chip->clk); |
268 | return dw_dma_enable(chip); | ||
273 | } | 269 | } |
274 | 270 | ||
275 | #endif /* CONFIG_PM_SLEEP */ | 271 | #endif /* CONFIG_PM_SLEEP */ |
@@ -281,7 +277,7 @@ static const struct dev_pm_ops dw_dev_pm_ops = { | |||
281 | static struct platform_driver dw_driver = { | 277 | static struct platform_driver dw_driver = { |
282 | .probe = dw_probe, | 278 | .probe = dw_probe, |
283 | .remove = dw_remove, | 279 | .remove = dw_remove, |
284 | .shutdown = dw_shutdown, | 280 | .shutdown = dw_shutdown, |
285 | .driver = { | 281 | .driver = { |
286 | .name = "dw_dmac", | 282 | .name = "dw_dmac", |
287 | .pm = &dw_dev_pm_ops, | 283 | .pm = &dw_dev_pm_ops, |
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index bb98d3e91e8b..848e232f7cc7 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h | |||
@@ -11,7 +11,6 @@ | |||
11 | 11 | ||
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/dmaengine.h> | 13 | #include <linux/dmaengine.h> |
14 | #include <linux/dw_dmac.h> | ||
15 | 14 | ||
16 | #define DW_DMA_MAX_NR_CHANNELS 8 | 15 | #define DW_DMA_MAX_NR_CHANNELS 8 |
17 | #define DW_DMA_MAX_NR_REQUESTS 16 | 16 | #define DW_DMA_MAX_NR_REQUESTS 16 |
@@ -132,6 +131,18 @@ struct dw_dma_regs { | |||
132 | /* Bitfields in DWC_PARAMS */ | 131 | /* Bitfields in DWC_PARAMS */ |
133 | #define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */ | 132 | #define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */ |
134 | 133 | ||
134 | /* bursts size */ | ||
135 | enum dw_dma_msize { | ||
136 | DW_DMA_MSIZE_1, | ||
137 | DW_DMA_MSIZE_4, | ||
138 | DW_DMA_MSIZE_8, | ||
139 | DW_DMA_MSIZE_16, | ||
140 | DW_DMA_MSIZE_32, | ||
141 | DW_DMA_MSIZE_64, | ||
142 | DW_DMA_MSIZE_128, | ||
143 | DW_DMA_MSIZE_256, | ||
144 | }; | ||
145 | |||
135 | /* Bitfields in CTL_LO */ | 146 | /* Bitfields in CTL_LO */ |
136 | #define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */ | 147 | #define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */ |
137 | #define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ | 148 | #define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ |
@@ -161,20 +172,35 @@ struct dw_dma_regs { | |||
161 | #define DWC_CTLH_DONE 0x00001000 | 172 | #define DWC_CTLH_DONE 0x00001000 |
162 | #define DWC_CTLH_BLOCK_TS_MASK 0x00000fff | 173 | #define DWC_CTLH_BLOCK_TS_MASK 0x00000fff |
163 | 174 | ||
164 | /* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */ | 175 | /* Bitfields in CFG_LO */ |
165 | #define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */ | 176 | #define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */ |
166 | #define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */ | 177 | #define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */ |
167 | #define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ | 178 | #define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ |
168 | #define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ | 179 | #define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ |
169 | #define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ | 180 | #define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ |
170 | #define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */ | 181 | #define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */ |
182 | #define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */ | ||
183 | #define DWC_CFGL_LOCK_CH_BLOCK (1 << 12) | ||
184 | #define DWC_CFGL_LOCK_CH_XACT (2 << 12) | ||
185 | #define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */ | ||
186 | #define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14) | ||
187 | #define DWC_CFGL_LOCK_BUS_XACT (2 << 14) | ||
188 | #define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */ | ||
189 | #define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */ | ||
190 | #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ | ||
191 | #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ | ||
171 | #define DWC_CFGL_MAX_BURST(x) ((x) << 20) | 192 | #define DWC_CFGL_MAX_BURST(x) ((x) << 20) |
172 | #define DWC_CFGL_RELOAD_SAR (1 << 30) | 193 | #define DWC_CFGL_RELOAD_SAR (1 << 30) |
173 | #define DWC_CFGL_RELOAD_DAR (1 << 31) | 194 | #define DWC_CFGL_RELOAD_DAR (1 << 31) |
174 | 195 | ||
175 | /* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */ | 196 | /* Bitfields in CFG_HI */ |
197 | #define DWC_CFGH_FCMODE (1 << 0) | ||
198 | #define DWC_CFGH_FIFO_MODE (1 << 1) | ||
199 | #define DWC_CFGH_PROTCTL(x) ((x) << 2) | ||
176 | #define DWC_CFGH_DS_UPD_EN (1 << 5) | 200 | #define DWC_CFGH_DS_UPD_EN (1 << 5) |
177 | #define DWC_CFGH_SS_UPD_EN (1 << 6) | 201 | #define DWC_CFGH_SS_UPD_EN (1 << 6) |
202 | #define DWC_CFGH_SRC_PER(x) ((x) << 7) | ||
203 | #define DWC_CFGH_DST_PER(x) ((x) << 11) | ||
178 | 204 | ||
179 | /* Bitfields in SGR */ | 205 | /* Bitfields in SGR */ |
180 | #define DWC_SGR_SGI(x) ((x) << 0) | 206 | #define DWC_SGR_SGI(x) ((x) << 0) |
@@ -221,9 +247,10 @@ struct dw_dma_chan { | |||
221 | bool nollp; | 247 | bool nollp; |
222 | 248 | ||
223 | /* custom slave configuration */ | 249 | /* custom slave configuration */ |
224 | unsigned int request_line; | 250 | u8 src_id; |
225 | unsigned char src_master; | 251 | u8 dst_id; |
226 | unsigned char dst_master; | 252 | u8 src_master; |
253 | u8 dst_master; | ||
227 | 254 | ||
228 | /* configuration passed via DMA_SLAVE_CONFIG */ | 255 | /* configuration passed via DMA_SLAVE_CONFIG */ |
229 | struct dma_slave_config dma_sconfig; | 256 | struct dma_slave_config dma_sconfig; |
@@ -250,11 +277,11 @@ struct dw_dma { | |||
250 | void __iomem *regs; | 277 | void __iomem *regs; |
251 | struct dma_pool *desc_pool; | 278 | struct dma_pool *desc_pool; |
252 | struct tasklet_struct tasklet; | 279 | struct tasklet_struct tasklet; |
253 | struct clk *clk; | ||
254 | 280 | ||
255 | /* channels */ | 281 | /* channels */ |
256 | struct dw_dma_chan *chan; | 282 | struct dw_dma_chan *chan; |
257 | u8 all_chan_mask; | 283 | u8 all_chan_mask; |
284 | u8 in_use; | ||
258 | 285 | ||
259 | /* hardware configuration */ | 286 | /* hardware configuration */ |
260 | unsigned char nr_masters; | 287 | unsigned char nr_masters; |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 7b65633f495e..123f578d6dd3 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -288,7 +288,7 @@ static int edma_slave_config(struct edma_chan *echan, | |||
288 | static int edma_dma_pause(struct edma_chan *echan) | 288 | static int edma_dma_pause(struct edma_chan *echan) |
289 | { | 289 | { |
290 | /* Pause/Resume only allowed with cyclic mode */ | 290 | /* Pause/Resume only allowed with cyclic mode */ |
291 | if (!echan->edesc->cyclic) | 291 | if (!echan->edesc || !echan->edesc->cyclic) |
292 | return -EINVAL; | 292 | return -EINVAL; |
293 | 293 | ||
294 | edma_pause(echan->ch_num); | 294 | edma_pause(echan->ch_num); |
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index d5d6885ab341..994bcb2c6b92 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -36,7 +36,7 @@ | |||
36 | #include <linux/of_address.h> | 36 | #include <linux/of_address.h> |
37 | #include <linux/of_irq.h> | 37 | #include <linux/of_irq.h> |
38 | #include <linux/of_platform.h> | 38 | #include <linux/of_platform.h> |
39 | 39 | #include <linux/fsldma.h> | |
40 | #include "dmaengine.h" | 40 | #include "dmaengine.h" |
41 | #include "fsldma.h" | 41 | #include "fsldma.h" |
42 | 42 | ||
@@ -367,6 +367,20 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) | |||
367 | chan->feature &= ~FSL_DMA_CHAN_START_EXT; | 367 | chan->feature &= ~FSL_DMA_CHAN_START_EXT; |
368 | } | 368 | } |
369 | 369 | ||
370 | int fsl_dma_external_start(struct dma_chan *dchan, int enable) | ||
371 | { | ||
372 | struct fsldma_chan *chan; | ||
373 | |||
374 | if (!dchan) | ||
375 | return -EINVAL; | ||
376 | |||
377 | chan = to_fsl_chan(dchan); | ||
378 | |||
379 | fsl_chan_toggle_ext_start(chan, enable); | ||
380 | return 0; | ||
381 | } | ||
382 | EXPORT_SYMBOL_GPL(fsl_dma_external_start); | ||
383 | |||
370 | static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc) | 384 | static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc) |
371 | { | 385 | { |
372 | struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); | 386 | struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); |
@@ -998,15 +1012,6 @@ static int fsl_dma_device_control(struct dma_chan *dchan, | |||
998 | chan->set_request_count(chan, size); | 1012 | chan->set_request_count(chan, size); |
999 | return 0; | 1013 | return 0; |
1000 | 1014 | ||
1001 | case FSLDMA_EXTERNAL_START: | ||
1002 | |||
1003 | /* make sure the channel supports external start */ | ||
1004 | if (!chan->toggle_ext_start) | ||
1005 | return -ENXIO; | ||
1006 | |||
1007 | chan->toggle_ext_start(chan, arg); | ||
1008 | return 0; | ||
1009 | |||
1010 | default: | 1015 | default: |
1011 | return -ENXIO; | 1016 | return -ENXIO; |
1012 | } | 1017 | } |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index f7626e37d0b8..88afc48c2ca7 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -1334,7 +1334,7 @@ err_firmware: | |||
1334 | release_firmware(fw); | 1334 | release_firmware(fw); |
1335 | } | 1335 | } |
1336 | 1336 | ||
1337 | static int __init sdma_get_firmware(struct sdma_engine *sdma, | 1337 | static int sdma_get_firmware(struct sdma_engine *sdma, |
1338 | const char *fw_name) | 1338 | const char *fw_name) |
1339 | { | 1339 | { |
1340 | int ret; | 1340 | int ret; |
@@ -1448,7 +1448,7 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec, | |||
1448 | return dma_request_channel(mask, sdma_filter_fn, &data); | 1448 | return dma_request_channel(mask, sdma_filter_fn, &data); |
1449 | } | 1449 | } |
1450 | 1450 | ||
1451 | static int __init sdma_probe(struct platform_device *pdev) | 1451 | static int sdma_probe(struct platform_device *pdev) |
1452 | { | 1452 | { |
1453 | const struct of_device_id *of_id = | 1453 | const struct of_device_id *of_id = |
1454 | of_match_device(sdma_dt_ids, &pdev->dev); | 1454 | of_match_device(sdma_dt_ids, &pdev->dev); |
@@ -1603,6 +1603,8 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1603 | sdma->dma_device.dev->dma_parms = &sdma->dma_parms; | 1603 | sdma->dma_device.dev->dma_parms = &sdma->dma_parms; |
1604 | dma_set_max_seg_size(sdma->dma_device.dev, 65535); | 1604 | dma_set_max_seg_size(sdma->dma_device.dev, 65535); |
1605 | 1605 | ||
1606 | platform_set_drvdata(pdev, sdma); | ||
1607 | |||
1606 | ret = dma_async_device_register(&sdma->dma_device); | 1608 | ret = dma_async_device_register(&sdma->dma_device); |
1607 | if (ret) { | 1609 | if (ret) { |
1608 | dev_err(&pdev->dev, "unable to register\n"); | 1610 | dev_err(&pdev->dev, "unable to register\n"); |
@@ -1640,7 +1642,27 @@ err_irq: | |||
1640 | 1642 | ||
1641 | static int sdma_remove(struct platform_device *pdev) | 1643 | static int sdma_remove(struct platform_device *pdev) |
1642 | { | 1644 | { |
1643 | return -EBUSY; | 1645 | struct sdma_engine *sdma = platform_get_drvdata(pdev); |
1646 | struct resource *iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1647 | int irq = platform_get_irq(pdev, 0); | ||
1648 | int i; | ||
1649 | |||
1650 | dma_async_device_unregister(&sdma->dma_device); | ||
1651 | kfree(sdma->script_addrs); | ||
1652 | free_irq(irq, sdma); | ||
1653 | iounmap(sdma->regs); | ||
1654 | release_mem_region(iores->start, resource_size(iores)); | ||
1655 | /* Kill the tasklet */ | ||
1656 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | ||
1657 | struct sdma_channel *sdmac = &sdma->channel[i]; | ||
1658 | |||
1659 | tasklet_kill(&sdmac->tasklet); | ||
1660 | } | ||
1661 | kfree(sdma); | ||
1662 | |||
1663 | platform_set_drvdata(pdev, NULL); | ||
1664 | dev_info(&pdev->dev, "Removed...\n"); | ||
1665 | return 0; | ||
1644 | } | 1666 | } |
1645 | 1667 | ||
1646 | static struct platform_driver sdma_driver = { | 1668 | static struct platform_driver sdma_driver = { |
@@ -1650,13 +1672,10 @@ static struct platform_driver sdma_driver = { | |||
1650 | }, | 1672 | }, |
1651 | .id_table = sdma_devtypes, | 1673 | .id_table = sdma_devtypes, |
1652 | .remove = sdma_remove, | 1674 | .remove = sdma_remove, |
1675 | .probe = sdma_probe, | ||
1653 | }; | 1676 | }; |
1654 | 1677 | ||
1655 | static int __init sdma_module_init(void) | 1678 | module_platform_driver(sdma_driver); |
1656 | { | ||
1657 | return platform_driver_probe(&sdma_driver, sdma_probe); | ||
1658 | } | ||
1659 | module_init(sdma_module_init); | ||
1660 | 1679 | ||
1661 | MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); | 1680 | MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); |
1662 | MODULE_DESCRIPTION("i.MX SDMA driver"); | 1681 | MODULE_DESCRIPTION("i.MX SDMA driver"); |
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 6ad30e2c5038..c6bd015b7165 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c | |||
@@ -148,10 +148,16 @@ static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys) | |||
148 | tdmac->reg_base + TDCR); | 148 | tdmac->reg_base + TDCR); |
149 | } | 149 | } |
150 | 150 | ||
151 | static void mmp_tdma_enable_irq(struct mmp_tdma_chan *tdmac, bool enable) | ||
152 | { | ||
153 | if (enable) | ||
154 | writel(TDIMR_COMP, tdmac->reg_base + TDIMR); | ||
155 | else | ||
156 | writel(0, tdmac->reg_base + TDIMR); | ||
157 | } | ||
158 | |||
151 | static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac) | 159 | static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac) |
152 | { | 160 | { |
153 | /* enable irq */ | ||
154 | writel(TDIMR_COMP, tdmac->reg_base + TDIMR); | ||
155 | /* enable dma chan */ | 161 | /* enable dma chan */ |
156 | writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, | 162 | writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, |
157 | tdmac->reg_base + TDCR); | 163 | tdmac->reg_base + TDCR); |
@@ -163,9 +169,6 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) | |||
163 | writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, | 169 | writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, |
164 | tdmac->reg_base + TDCR); | 170 | tdmac->reg_base + TDCR); |
165 | 171 | ||
166 | /* disable irq */ | ||
167 | writel(0, tdmac->reg_base + TDIMR); | ||
168 | |||
169 | tdmac->status = DMA_COMPLETE; | 172 | tdmac->status = DMA_COMPLETE; |
170 | } | 173 | } |
171 | 174 | ||
@@ -434,6 +437,10 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( | |||
434 | i++; | 437 | i++; |
435 | } | 438 | } |
436 | 439 | ||
440 | /* enable interrupt */ | ||
441 | if (flags & DMA_PREP_INTERRUPT) | ||
442 | mmp_tdma_enable_irq(tdmac, true); | ||
443 | |||
437 | tdmac->buf_len = buf_len; | 444 | tdmac->buf_len = buf_len; |
438 | tdmac->period_len = period_len; | 445 | tdmac->period_len = period_len; |
439 | tdmac->pos = 0; | 446 | tdmac->pos = 0; |
@@ -455,6 +462,8 @@ static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
455 | switch (cmd) { | 462 | switch (cmd) { |
456 | case DMA_TERMINATE_ALL: | 463 | case DMA_TERMINATE_ALL: |
457 | mmp_tdma_disable_chan(tdmac); | 464 | mmp_tdma_disable_chan(tdmac); |
465 | /* disable interrupt */ | ||
466 | mmp_tdma_enable_irq(tdmac, false); | ||
458 | break; | 467 | break; |
459 | case DMA_PAUSE: | 468 | case DMA_PAUSE: |
460 | mmp_tdma_pause_chan(tdmac); | 469 | mmp_tdma_pause_chan(tdmac); |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 7938272f2edf..a63837ca1410 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -45,19 +45,18 @@ static void mv_xor_issue_pending(struct dma_chan *chan); | |||
45 | #define mv_chan_to_devp(chan) \ | 45 | #define mv_chan_to_devp(chan) \ |
46 | ((chan)->dmadev.dev) | 46 | ((chan)->dmadev.dev) |
47 | 47 | ||
48 | static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) | 48 | static void mv_desc_init(struct mv_xor_desc_slot *desc, |
49 | dma_addr_t addr, u32 byte_count, | ||
50 | enum dma_ctrl_flags flags) | ||
49 | { | 51 | { |
50 | struct mv_xor_desc *hw_desc = desc->hw_desc; | 52 | struct mv_xor_desc *hw_desc = desc->hw_desc; |
51 | 53 | ||
52 | hw_desc->status = (1 << 31); | 54 | hw_desc->status = XOR_DESC_DMA_OWNED; |
53 | hw_desc->phy_next_desc = 0; | 55 | hw_desc->phy_next_desc = 0; |
54 | hw_desc->desc_command = (1 << 31); | 56 | /* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */ |
55 | } | 57 | hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ? |
56 | 58 | XOR_DESC_EOD_INT_EN : 0; | |
57 | static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, | 59 | hw_desc->phy_dest_addr = addr; |
58 | u32 byte_count) | ||
59 | { | ||
60 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
61 | hw_desc->byte_count = byte_count; | 60 | hw_desc->byte_count = byte_count; |
62 | } | 61 | } |
63 | 62 | ||
@@ -75,20 +74,6 @@ static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc) | |||
75 | hw_desc->phy_next_desc = 0; | 74 | hw_desc->phy_next_desc = 0; |
76 | } | 75 | } |
77 | 76 | ||
78 | static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc, | ||
79 | dma_addr_t addr) | ||
80 | { | ||
81 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
82 | hw_desc->phy_dest_addr = addr; | ||
83 | } | ||
84 | |||
85 | static int mv_chan_memset_slot_count(size_t len) | ||
86 | { | ||
87 | return 1; | ||
88 | } | ||
89 | |||
90 | #define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c) | ||
91 | |||
92 | static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, | 77 | static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, |
93 | int index, dma_addr_t addr) | 78 | int index, dma_addr_t addr) |
94 | { | 79 | { |
@@ -123,17 +108,12 @@ static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan) | |||
123 | return intr_cause; | 108 | return intr_cause; |
124 | } | 109 | } |
125 | 110 | ||
126 | static int mv_is_err_intr(u32 intr_cause) | ||
127 | { | ||
128 | if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9))) | ||
129 | return 1; | ||
130 | |||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) | 111 | static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) |
135 | { | 112 | { |
136 | u32 val = ~(1 << (chan->idx * 16)); | 113 | u32 val; |
114 | |||
115 | val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED; | ||
116 | val = ~(val << (chan->idx * 16)); | ||
137 | dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val); | 117 | dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val); |
138 | writel_relaxed(val, XOR_INTR_CAUSE(chan)); | 118 | writel_relaxed(val, XOR_INTR_CAUSE(chan)); |
139 | } | 119 | } |
@@ -144,17 +124,6 @@ static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan) | |||
144 | writel_relaxed(val, XOR_INTR_CAUSE(chan)); | 124 | writel_relaxed(val, XOR_INTR_CAUSE(chan)); |
145 | } | 125 | } |
146 | 126 | ||
147 | static int mv_can_chain(struct mv_xor_desc_slot *desc) | ||
148 | { | ||
149 | struct mv_xor_desc_slot *chain_old_tail = list_entry( | ||
150 | desc->chain_node.prev, struct mv_xor_desc_slot, chain_node); | ||
151 | |||
152 | if (chain_old_tail->type != desc->type) | ||
153 | return 0; | ||
154 | |||
155 | return 1; | ||
156 | } | ||
157 | |||
158 | static void mv_set_mode(struct mv_xor_chan *chan, | 127 | static void mv_set_mode(struct mv_xor_chan *chan, |
159 | enum dma_transaction_type type) | 128 | enum dma_transaction_type type) |
160 | { | 129 | { |
@@ -206,11 +175,6 @@ static char mv_chan_is_busy(struct mv_xor_chan *chan) | |||
206 | return (state == 1) ? 1 : 0; | 175 | return (state == 1) ? 1 : 0; |
207 | } | 176 | } |
208 | 177 | ||
209 | static int mv_chan_xor_slot_count(size_t len, int src_cnt) | ||
210 | { | ||
211 | return 1; | ||
212 | } | ||
213 | |||
214 | /** | 178 | /** |
215 | * mv_xor_free_slots - flags descriptor slots for reuse | 179 | * mv_xor_free_slots - flags descriptor slots for reuse |
216 | * @slot: Slot to free | 180 | * @slot: Slot to free |
@@ -222,7 +186,7 @@ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, | |||
222 | dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n", | 186 | dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n", |
223 | __func__, __LINE__, slot); | 187 | __func__, __LINE__, slot); |
224 | 188 | ||
225 | slot->slots_per_op = 0; | 189 | slot->slot_used = 0; |
226 | 190 | ||
227 | } | 191 | } |
228 | 192 | ||
@@ -236,13 +200,11 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, | |||
236 | { | 200 | { |
237 | dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n", | 201 | dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n", |
238 | __func__, __LINE__, sw_desc); | 202 | __func__, __LINE__, sw_desc); |
239 | if (sw_desc->type != mv_chan->current_type) | ||
240 | mv_set_mode(mv_chan, sw_desc->type); | ||
241 | 203 | ||
242 | /* set the hardware chain */ | 204 | /* set the hardware chain */ |
243 | mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); | 205 | mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); |
244 | 206 | ||
245 | mv_chan->pending += sw_desc->slot_cnt; | 207 | mv_chan->pending++; |
246 | mv_xor_issue_pending(&mv_chan->dmachan); | 208 | mv_xor_issue_pending(&mv_chan->dmachan); |
247 | } | 209 | } |
248 | 210 | ||
@@ -263,8 +225,6 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, | |||
263 | desc->async_tx.callback_param); | 225 | desc->async_tx.callback_param); |
264 | 226 | ||
265 | dma_descriptor_unmap(&desc->async_tx); | 227 | dma_descriptor_unmap(&desc->async_tx); |
266 | if (desc->group_head) | ||
267 | desc->group_head = NULL; | ||
268 | } | 228 | } |
269 | 229 | ||
270 | /* run dependent operations */ | 230 | /* run dependent operations */ |
@@ -377,19 +337,16 @@ static void mv_xor_tasklet(unsigned long data) | |||
377 | } | 337 | } |
378 | 338 | ||
379 | static struct mv_xor_desc_slot * | 339 | static struct mv_xor_desc_slot * |
380 | mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots, | 340 | mv_xor_alloc_slot(struct mv_xor_chan *mv_chan) |
381 | int slots_per_op) | ||
382 | { | 341 | { |
383 | struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL; | 342 | struct mv_xor_desc_slot *iter, *_iter; |
384 | LIST_HEAD(chain); | 343 | int retry = 0; |
385 | int slots_found, retry = 0; | ||
386 | 344 | ||
387 | /* start search from the last allocated descrtiptor | 345 | /* start search from the last allocated descrtiptor |
388 | * if a contiguous allocation can not be found start searching | 346 | * if a contiguous allocation can not be found start searching |
389 | * from the beginning of the list | 347 | * from the beginning of the list |
390 | */ | 348 | */ |
391 | retry: | 349 | retry: |
392 | slots_found = 0; | ||
393 | if (retry == 0) | 350 | if (retry == 0) |
394 | iter = mv_chan->last_used; | 351 | iter = mv_chan->last_used; |
395 | else | 352 | else |
@@ -399,55 +356,29 @@ retry: | |||
399 | 356 | ||
400 | list_for_each_entry_safe_continue( | 357 | list_for_each_entry_safe_continue( |
401 | iter, _iter, &mv_chan->all_slots, slot_node) { | 358 | iter, _iter, &mv_chan->all_slots, slot_node) { |
359 | |||
402 | prefetch(_iter); | 360 | prefetch(_iter); |
403 | prefetch(&_iter->async_tx); | 361 | prefetch(&_iter->async_tx); |
404 | if (iter->slots_per_op) { | 362 | if (iter->slot_used) { |
405 | /* give up after finding the first busy slot | 363 | /* give up after finding the first busy slot |
406 | * on the second pass through the list | 364 | * on the second pass through the list |
407 | */ | 365 | */ |
408 | if (retry) | 366 | if (retry) |
409 | break; | 367 | break; |
410 | |||
411 | slots_found = 0; | ||
412 | continue; | 368 | continue; |
413 | } | 369 | } |
414 | 370 | ||
415 | /* start the allocation if the slot is correctly aligned */ | 371 | /* pre-ack descriptor */ |
416 | if (!slots_found++) | 372 | async_tx_ack(&iter->async_tx); |
417 | alloc_start = iter; | 373 | |
418 | 374 | iter->slot_used = 1; | |
419 | if (slots_found == num_slots) { | 375 | INIT_LIST_HEAD(&iter->chain_node); |
420 | struct mv_xor_desc_slot *alloc_tail = NULL; | 376 | iter->async_tx.cookie = -EBUSY; |
421 | struct mv_xor_desc_slot *last_used = NULL; | 377 | mv_chan->last_used = iter; |
422 | iter = alloc_start; | 378 | mv_desc_clear_next_desc(iter); |
423 | while (num_slots) { | 379 | |
424 | int i; | 380 | return iter; |
425 | 381 | ||
426 | /* pre-ack all but the last descriptor */ | ||
427 | async_tx_ack(&iter->async_tx); | ||
428 | |||
429 | list_add_tail(&iter->chain_node, &chain); | ||
430 | alloc_tail = iter; | ||
431 | iter->async_tx.cookie = 0; | ||
432 | iter->slot_cnt = num_slots; | ||
433 | iter->xor_check_result = NULL; | ||
434 | for (i = 0; i < slots_per_op; i++) { | ||
435 | iter->slots_per_op = slots_per_op - i; | ||
436 | last_used = iter; | ||
437 | iter = list_entry(iter->slot_node.next, | ||
438 | struct mv_xor_desc_slot, | ||
439 | slot_node); | ||
440 | } | ||
441 | num_slots -= slots_per_op; | ||
442 | } | ||
443 | alloc_tail->group_head = alloc_start; | ||
444 | alloc_tail->async_tx.cookie = -EBUSY; | ||
445 | list_splice(&chain, &alloc_tail->tx_list); | ||
446 | mv_chan->last_used = last_used; | ||
447 | mv_desc_clear_next_desc(alloc_start); | ||
448 | mv_desc_clear_next_desc(alloc_tail); | ||
449 | return alloc_tail; | ||
450 | } | ||
451 | } | 382 | } |
452 | if (!retry++) | 383 | if (!retry++) |
453 | goto retry; | 384 | goto retry; |
@@ -464,7 +395,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | |||
464 | { | 395 | { |
465 | struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx); | 396 | struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx); |
466 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan); | 397 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan); |
467 | struct mv_xor_desc_slot *grp_start, *old_chain_tail; | 398 | struct mv_xor_desc_slot *old_chain_tail; |
468 | dma_cookie_t cookie; | 399 | dma_cookie_t cookie; |
469 | int new_hw_chain = 1; | 400 | int new_hw_chain = 1; |
470 | 401 | ||
@@ -472,30 +403,24 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | |||
472 | "%s sw_desc %p: async_tx %p\n", | 403 | "%s sw_desc %p: async_tx %p\n", |
473 | __func__, sw_desc, &sw_desc->async_tx); | 404 | __func__, sw_desc, &sw_desc->async_tx); |
474 | 405 | ||
475 | grp_start = sw_desc->group_head; | ||
476 | |||
477 | spin_lock_bh(&mv_chan->lock); | 406 | spin_lock_bh(&mv_chan->lock); |
478 | cookie = dma_cookie_assign(tx); | 407 | cookie = dma_cookie_assign(tx); |
479 | 408 | ||
480 | if (list_empty(&mv_chan->chain)) | 409 | if (list_empty(&mv_chan->chain)) |
481 | list_splice_init(&sw_desc->tx_list, &mv_chan->chain); | 410 | list_add_tail(&sw_desc->chain_node, &mv_chan->chain); |
482 | else { | 411 | else { |
483 | new_hw_chain = 0; | 412 | new_hw_chain = 0; |
484 | 413 | ||
485 | old_chain_tail = list_entry(mv_chan->chain.prev, | 414 | old_chain_tail = list_entry(mv_chan->chain.prev, |
486 | struct mv_xor_desc_slot, | 415 | struct mv_xor_desc_slot, |
487 | chain_node); | 416 | chain_node); |
488 | list_splice_init(&grp_start->tx_list, | 417 | list_add_tail(&sw_desc->chain_node, &mv_chan->chain); |
489 | &old_chain_tail->chain_node); | ||
490 | |||
491 | if (!mv_can_chain(grp_start)) | ||
492 | goto submit_done; | ||
493 | 418 | ||
494 | dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n", | 419 | dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n", |
495 | &old_chain_tail->async_tx.phys); | 420 | &old_chain_tail->async_tx.phys); |
496 | 421 | ||
497 | /* fix up the hardware chain */ | 422 | /* fix up the hardware chain */ |
498 | mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); | 423 | mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys); |
499 | 424 | ||
500 | /* if the channel is not busy */ | 425 | /* if the channel is not busy */ |
501 | if (!mv_chan_is_busy(mv_chan)) { | 426 | if (!mv_chan_is_busy(mv_chan)) { |
@@ -510,9 +435,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | |||
510 | } | 435 | } |
511 | 436 | ||
512 | if (new_hw_chain) | 437 | if (new_hw_chain) |
513 | mv_xor_start_new_chain(mv_chan, grp_start); | 438 | mv_xor_start_new_chain(mv_chan, sw_desc); |
514 | 439 | ||
515 | submit_done: | ||
516 | spin_unlock_bh(&mv_chan->lock); | 440 | spin_unlock_bh(&mv_chan->lock); |
517 | 441 | ||
518 | return cookie; | 442 | return cookie; |
@@ -533,8 +457,9 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) | |||
533 | while (idx < num_descs_in_pool) { | 457 | while (idx < num_descs_in_pool) { |
534 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); | 458 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); |
535 | if (!slot) { | 459 | if (!slot) { |
536 | printk(KERN_INFO "MV XOR Channel only initialized" | 460 | dev_info(mv_chan_to_devp(mv_chan), |
537 | " %d descriptor slots", idx); | 461 | "channel only initialized %d descriptor slots", |
462 | idx); | ||
538 | break; | 463 | break; |
539 | } | 464 | } |
540 | virt_desc = mv_chan->dma_desc_pool_virt; | 465 | virt_desc = mv_chan->dma_desc_pool_virt; |
@@ -544,7 +469,6 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) | |||
544 | slot->async_tx.tx_submit = mv_xor_tx_submit; | 469 | slot->async_tx.tx_submit = mv_xor_tx_submit; |
545 | INIT_LIST_HEAD(&slot->chain_node); | 470 | INIT_LIST_HEAD(&slot->chain_node); |
546 | INIT_LIST_HEAD(&slot->slot_node); | 471 | INIT_LIST_HEAD(&slot->slot_node); |
547 | INIT_LIST_HEAD(&slot->tx_list); | ||
548 | dma_desc = mv_chan->dma_desc_pool; | 472 | dma_desc = mv_chan->dma_desc_pool; |
549 | slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE; | 473 | slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE; |
550 | slot->idx = idx++; | 474 | slot->idx = idx++; |
@@ -568,51 +492,11 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) | |||
568 | } | 492 | } |
569 | 493 | ||
570 | static struct dma_async_tx_descriptor * | 494 | static struct dma_async_tx_descriptor * |
571 | mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
572 | size_t len, unsigned long flags) | ||
573 | { | ||
574 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | ||
575 | struct mv_xor_desc_slot *sw_desc, *grp_start; | ||
576 | int slot_cnt; | ||
577 | |||
578 | dev_dbg(mv_chan_to_devp(mv_chan), | ||
579 | "%s dest: %pad src %pad len: %u flags: %ld\n", | ||
580 | __func__, &dest, &src, len, flags); | ||
581 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | ||
582 | return NULL; | ||
583 | |||
584 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); | ||
585 | |||
586 | spin_lock_bh(&mv_chan->lock); | ||
587 | slot_cnt = mv_chan_memcpy_slot_count(len); | ||
588 | sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); | ||
589 | if (sw_desc) { | ||
590 | sw_desc->type = DMA_MEMCPY; | ||
591 | sw_desc->async_tx.flags = flags; | ||
592 | grp_start = sw_desc->group_head; | ||
593 | mv_desc_init(grp_start, flags); | ||
594 | mv_desc_set_byte_count(grp_start, len); | ||
595 | mv_desc_set_dest_addr(sw_desc->group_head, dest); | ||
596 | mv_desc_set_src_addr(grp_start, 0, src); | ||
597 | sw_desc->unmap_src_cnt = 1; | ||
598 | sw_desc->unmap_len = len; | ||
599 | } | ||
600 | spin_unlock_bh(&mv_chan->lock); | ||
601 | |||
602 | dev_dbg(mv_chan_to_devp(mv_chan), | ||
603 | "%s sw_desc %p async_tx %p\n", | ||
604 | __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL); | ||
605 | |||
606 | return sw_desc ? &sw_desc->async_tx : NULL; | ||
607 | } | ||
608 | |||
609 | static struct dma_async_tx_descriptor * | ||
610 | mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | 495 | mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, |
611 | unsigned int src_cnt, size_t len, unsigned long flags) | 496 | unsigned int src_cnt, size_t len, unsigned long flags) |
612 | { | 497 | { |
613 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | 498 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); |
614 | struct mv_xor_desc_slot *sw_desc, *grp_start; | 499 | struct mv_xor_desc_slot *sw_desc; |
615 | int slot_cnt; | ||
616 | 500 | ||
617 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | 501 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) |
618 | return NULL; | 502 | return NULL; |
@@ -624,20 +508,13 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | |||
624 | __func__, src_cnt, len, &dest, flags); | 508 | __func__, src_cnt, len, &dest, flags); |
625 | 509 | ||
626 | spin_lock_bh(&mv_chan->lock); | 510 | spin_lock_bh(&mv_chan->lock); |
627 | slot_cnt = mv_chan_xor_slot_count(len, src_cnt); | 511 | sw_desc = mv_xor_alloc_slot(mv_chan); |
628 | sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); | ||
629 | if (sw_desc) { | 512 | if (sw_desc) { |
630 | sw_desc->type = DMA_XOR; | 513 | sw_desc->type = DMA_XOR; |
631 | sw_desc->async_tx.flags = flags; | 514 | sw_desc->async_tx.flags = flags; |
632 | grp_start = sw_desc->group_head; | 515 | mv_desc_init(sw_desc, dest, len, flags); |
633 | mv_desc_init(grp_start, flags); | ||
634 | /* the byte count field is the same as in memcpy desc*/ | ||
635 | mv_desc_set_byte_count(grp_start, len); | ||
636 | mv_desc_set_dest_addr(sw_desc->group_head, dest); | ||
637 | sw_desc->unmap_src_cnt = src_cnt; | ||
638 | sw_desc->unmap_len = len; | ||
639 | while (src_cnt--) | 516 | while (src_cnt--) |
640 | mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]); | 517 | mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]); |
641 | } | 518 | } |
642 | spin_unlock_bh(&mv_chan->lock); | 519 | spin_unlock_bh(&mv_chan->lock); |
643 | dev_dbg(mv_chan_to_devp(mv_chan), | 520 | dev_dbg(mv_chan_to_devp(mv_chan), |
@@ -646,6 +523,35 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | |||
646 | return sw_desc ? &sw_desc->async_tx : NULL; | 523 | return sw_desc ? &sw_desc->async_tx : NULL; |
647 | } | 524 | } |
648 | 525 | ||
526 | static struct dma_async_tx_descriptor * | ||
527 | mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
528 | size_t len, unsigned long flags) | ||
529 | { | ||
530 | /* | ||
531 | * A MEMCPY operation is identical to an XOR operation with only | ||
532 | * a single source address. | ||
533 | */ | ||
534 | return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags); | ||
535 | } | ||
536 | |||
537 | static struct dma_async_tx_descriptor * | ||
538 | mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) | ||
539 | { | ||
540 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | ||
541 | dma_addr_t src, dest; | ||
542 | size_t len; | ||
543 | |||
544 | src = mv_chan->dummy_src_addr; | ||
545 | dest = mv_chan->dummy_dst_addr; | ||
546 | len = MV_XOR_MIN_BYTE_COUNT; | ||
547 | |||
548 | /* | ||
549 | * We implement the DMA_INTERRUPT operation as a minimum sized | ||
550 | * XOR operation with a single dummy source address. | ||
551 | */ | ||
552 | return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags); | ||
553 | } | ||
554 | |||
649 | static void mv_xor_free_chan_resources(struct dma_chan *chan) | 555 | static void mv_xor_free_chan_resources(struct dma_chan *chan) |
650 | { | 556 | { |
651 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | 557 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); |
@@ -733,18 +639,16 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan) | |||
733 | static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, | 639 | static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, |
734 | u32 intr_cause) | 640 | u32 intr_cause) |
735 | { | 641 | { |
736 | if (intr_cause & (1 << 4)) { | 642 | if (intr_cause & XOR_INT_ERR_DECODE) { |
737 | dev_dbg(mv_chan_to_devp(chan), | 643 | dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n"); |
738 | "ignore this error\n"); | 644 | return; |
739 | return; | ||
740 | } | 645 | } |
741 | 646 | ||
742 | dev_err(mv_chan_to_devp(chan), | 647 | dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n", |
743 | "error on chan %d. intr cause 0x%08x\n", | ||
744 | chan->idx, intr_cause); | 648 | chan->idx, intr_cause); |
745 | 649 | ||
746 | mv_dump_xor_regs(chan); | 650 | mv_dump_xor_regs(chan); |
747 | BUG(); | 651 | WARN_ON(1); |
748 | } | 652 | } |
749 | 653 | ||
750 | static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) | 654 | static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) |
@@ -754,7 +658,7 @@ static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) | |||
754 | 658 | ||
755 | dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause); | 659 | dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause); |
756 | 660 | ||
757 | if (mv_is_err_intr(intr_cause)) | 661 | if (intr_cause & XOR_INTR_ERRORS) |
758 | mv_xor_err_interrupt_handler(chan, intr_cause); | 662 | mv_xor_err_interrupt_handler(chan, intr_cause); |
759 | 663 | ||
760 | tasklet_schedule(&chan->irq_tasklet); | 664 | tasklet_schedule(&chan->irq_tasklet); |
@@ -1041,6 +945,10 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) | |||
1041 | 945 | ||
1042 | dma_free_coherent(dev, MV_XOR_POOL_SIZE, | 946 | dma_free_coherent(dev, MV_XOR_POOL_SIZE, |
1043 | mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); | 947 | mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); |
948 | dma_unmap_single(dev, mv_chan->dummy_src_addr, | ||
949 | MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE); | ||
950 | dma_unmap_single(dev, mv_chan->dummy_dst_addr, | ||
951 | MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE); | ||
1044 | 952 | ||
1045 | list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels, | 953 | list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels, |
1046 | device_node) { | 954 | device_node) { |
@@ -1070,6 +978,16 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1070 | 978 | ||
1071 | dma_dev = &mv_chan->dmadev; | 979 | dma_dev = &mv_chan->dmadev; |
1072 | 980 | ||
981 | /* | ||
982 | * These source and destination dummy buffers are used to implement | ||
983 | * a DMA_INTERRUPT operation as a minimum-sized XOR operation. | ||
984 | * Hence, we only need to map the buffers at initialization-time. | ||
985 | */ | ||
986 | mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev, | ||
987 | mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE); | ||
988 | mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev, | ||
989 | mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE); | ||
990 | |||
1073 | /* allocate coherent memory for hardware descriptors | 991 | /* allocate coherent memory for hardware descriptors |
1074 | * note: writecombine gives slightly better performance, but | 992 | * note: writecombine gives slightly better performance, but |
1075 | * requires that we explicitly flush the writes | 993 | * requires that we explicitly flush the writes |
@@ -1094,6 +1012,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1094 | dma_dev->dev = &pdev->dev; | 1012 | dma_dev->dev = &pdev->dev; |
1095 | 1013 | ||
1096 | /* set prep routines based on capability */ | 1014 | /* set prep routines based on capability */ |
1015 | if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) | ||
1016 | dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt; | ||
1097 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) | 1017 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) |
1098 | dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; | 1018 | dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; |
1099 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | 1019 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { |
@@ -1116,7 +1036,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1116 | 1036 | ||
1117 | mv_chan_unmask_interrupts(mv_chan); | 1037 | mv_chan_unmask_interrupts(mv_chan); |
1118 | 1038 | ||
1119 | mv_set_mode(mv_chan, DMA_MEMCPY); | 1039 | mv_set_mode(mv_chan, DMA_XOR); |
1120 | 1040 | ||
1121 | spin_lock_init(&mv_chan->lock); | 1041 | spin_lock_init(&mv_chan->lock); |
1122 | INIT_LIST_HEAD(&mv_chan->chain); | 1042 | INIT_LIST_HEAD(&mv_chan->chain); |
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index d0749229c875..78edc7e44569 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h | |||
@@ -23,17 +23,22 @@ | |||
23 | #include <linux/dmaengine.h> | 23 | #include <linux/dmaengine.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | 25 | ||
26 | #define USE_TIMER | ||
27 | #define MV_XOR_POOL_SIZE PAGE_SIZE | 26 | #define MV_XOR_POOL_SIZE PAGE_SIZE |
28 | #define MV_XOR_SLOT_SIZE 64 | 27 | #define MV_XOR_SLOT_SIZE 64 |
29 | #define MV_XOR_THRESHOLD 1 | 28 | #define MV_XOR_THRESHOLD 1 |
30 | #define MV_XOR_MAX_CHANNELS 2 | 29 | #define MV_XOR_MAX_CHANNELS 2 |
31 | 30 | ||
31 | #define MV_XOR_MIN_BYTE_COUNT SZ_128 | ||
32 | #define MV_XOR_MAX_BYTE_COUNT (SZ_16M - 1) | ||
33 | |||
32 | /* Values for the XOR_CONFIG register */ | 34 | /* Values for the XOR_CONFIG register */ |
33 | #define XOR_OPERATION_MODE_XOR 0 | 35 | #define XOR_OPERATION_MODE_XOR 0 |
34 | #define XOR_OPERATION_MODE_MEMCPY 2 | 36 | #define XOR_OPERATION_MODE_MEMCPY 2 |
35 | #define XOR_DESCRIPTOR_SWAP BIT(14) | 37 | #define XOR_DESCRIPTOR_SWAP BIT(14) |
36 | 38 | ||
39 | #define XOR_DESC_DMA_OWNED BIT(31) | ||
40 | #define XOR_DESC_EOD_INT_EN BIT(31) | ||
41 | |||
37 | #define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4)) | 42 | #define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4)) |
38 | #define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4)) | 43 | #define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4)) |
39 | #define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4)) | 44 | #define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4)) |
@@ -48,7 +53,24 @@ | |||
48 | #define XOR_INTR_MASK(chan) (chan->mmr_base + 0x40) | 53 | #define XOR_INTR_MASK(chan) (chan->mmr_base + 0x40) |
49 | #define XOR_ERROR_CAUSE(chan) (chan->mmr_base + 0x50) | 54 | #define XOR_ERROR_CAUSE(chan) (chan->mmr_base + 0x50) |
50 | #define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60) | 55 | #define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60) |
51 | #define XOR_INTR_MASK_VALUE 0x3F5 | 56 | |
57 | #define XOR_INT_END_OF_DESC BIT(0) | ||
58 | #define XOR_INT_END_OF_CHAIN BIT(1) | ||
59 | #define XOR_INT_STOPPED BIT(2) | ||
60 | #define XOR_INT_PAUSED BIT(3) | ||
61 | #define XOR_INT_ERR_DECODE BIT(4) | ||
62 | #define XOR_INT_ERR_RDPROT BIT(5) | ||
63 | #define XOR_INT_ERR_WRPROT BIT(6) | ||
64 | #define XOR_INT_ERR_OWN BIT(7) | ||
65 | #define XOR_INT_ERR_PAR BIT(8) | ||
66 | #define XOR_INT_ERR_MBUS BIT(9) | ||
67 | |||
68 | #define XOR_INTR_ERRORS (XOR_INT_ERR_DECODE | XOR_INT_ERR_RDPROT | \ | ||
69 | XOR_INT_ERR_WRPROT | XOR_INT_ERR_OWN | \ | ||
70 | XOR_INT_ERR_PAR | XOR_INT_ERR_MBUS) | ||
71 | |||
72 | #define XOR_INTR_MASK_VALUE (XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | \ | ||
73 | XOR_INT_STOPPED | XOR_INTR_ERRORS) | ||
52 | 74 | ||
53 | #define WINDOW_BASE(w) (0x50 + ((w) << 2)) | 75 | #define WINDOW_BASE(w) (0x50 + ((w) << 2)) |
54 | #define WINDOW_SIZE(w) (0x70 + ((w) << 2)) | 76 | #define WINDOW_SIZE(w) (0x70 + ((w) << 2)) |
@@ -97,10 +119,9 @@ struct mv_xor_chan { | |||
97 | struct list_head all_slots; | 119 | struct list_head all_slots; |
98 | int slots_allocated; | 120 | int slots_allocated; |
99 | struct tasklet_struct irq_tasklet; | 121 | struct tasklet_struct irq_tasklet; |
100 | #ifdef USE_TIMER | 122 | char dummy_src[MV_XOR_MIN_BYTE_COUNT]; |
101 | unsigned long cleanup_time; | 123 | char dummy_dst[MV_XOR_MIN_BYTE_COUNT]; |
102 | u32 current_on_last_cleanup; | 124 | dma_addr_t dummy_src_addr, dummy_dst_addr; |
103 | #endif | ||
104 | }; | 125 | }; |
105 | 126 | ||
106 | /** | 127 | /** |
@@ -110,16 +131,10 @@ struct mv_xor_chan { | |||
110 | * @completed_node: node on the mv_xor_chan.completed_slots list | 131 | * @completed_node: node on the mv_xor_chan.completed_slots list |
111 | * @hw_desc: virtual address of the hardware descriptor chain | 132 | * @hw_desc: virtual address of the hardware descriptor chain |
112 | * @phys: hardware address of the hardware descriptor chain | 133 | * @phys: hardware address of the hardware descriptor chain |
113 | * @group_head: first operation in a transaction | 134 | * @slot_used: slot in use or not |
114 | * @slot_cnt: total slots used in an transaction (group of operations) | ||
115 | * @slots_per_op: number of slots per operation | ||
116 | * @idx: pool index | 135 | * @idx: pool index |
117 | * @unmap_src_cnt: number of xor sources | ||
118 | * @unmap_len: transaction bytecount | ||
119 | * @tx_list: list of slots that make up a multi-descriptor transaction | 136 | * @tx_list: list of slots that make up a multi-descriptor transaction |
120 | * @async_tx: support for the async_tx api | 137 | * @async_tx: support for the async_tx api |
121 | * @xor_check_result: result of zero sum | ||
122 | * @crc32_result: result crc calculation | ||
123 | */ | 138 | */ |
124 | struct mv_xor_desc_slot { | 139 | struct mv_xor_desc_slot { |
125 | struct list_head slot_node; | 140 | struct list_head slot_node; |
@@ -127,23 +142,9 @@ struct mv_xor_desc_slot { | |||
127 | struct list_head completed_node; | 142 | struct list_head completed_node; |
128 | enum dma_transaction_type type; | 143 | enum dma_transaction_type type; |
129 | void *hw_desc; | 144 | void *hw_desc; |
130 | struct mv_xor_desc_slot *group_head; | 145 | u16 slot_used; |
131 | u16 slot_cnt; | ||
132 | u16 slots_per_op; | ||
133 | u16 idx; | 146 | u16 idx; |
134 | u16 unmap_src_cnt; | ||
135 | u32 value; | ||
136 | size_t unmap_len; | ||
137 | struct list_head tx_list; | ||
138 | struct dma_async_tx_descriptor async_tx; | 147 | struct dma_async_tx_descriptor async_tx; |
139 | union { | ||
140 | u32 *xor_check_result; | ||
141 | u32 *crc32_result; | ||
142 | }; | ||
143 | #ifdef USE_TIMER | ||
144 | unsigned long arrival_time; | ||
145 | struct timer_list timeout; | ||
146 | #endif | ||
147 | }; | 148 | }; |
148 | 149 | ||
149 | /* | 150 | /* |
@@ -189,9 +190,4 @@ struct mv_xor_desc { | |||
189 | #define mv_hw_desc_slot_idx(hw_desc, idx) \ | 190 | #define mv_hw_desc_slot_idx(hw_desc, idx) \ |
190 | ((void *)(((unsigned long)hw_desc) + ((idx) << 5))) | 191 | ((void *)(((unsigned long)hw_desc) + ((idx) << 5))) |
191 | 192 | ||
192 | #define MV_XOR_MIN_BYTE_COUNT (128) | ||
193 | #define XOR_MAX_BYTE_COUNT ((16 * 1024 * 1024) - 1) | ||
194 | #define MV_XOR_MAX_BYTE_COUNT XOR_MAX_BYTE_COUNT | ||
195 | |||
196 | |||
197 | #endif | 193 | #endif |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index d5149aacd2fe..4839bfa74a10 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -1367,17 +1367,10 @@ static int pl330_submit_req(struct pl330_thread *thrd, | |||
1367 | struct pl330_dmac *pl330 = thrd->dmac; | 1367 | struct pl330_dmac *pl330 = thrd->dmac; |
1368 | struct _xfer_spec xs; | 1368 | struct _xfer_spec xs; |
1369 | unsigned long flags; | 1369 | unsigned long flags; |
1370 | void __iomem *regs; | ||
1371 | unsigned idx; | 1370 | unsigned idx; |
1372 | u32 ccr; | 1371 | u32 ccr; |
1373 | int ret = 0; | 1372 | int ret = 0; |
1374 | 1373 | ||
1375 | /* No Req or Unacquired Channel or DMAC */ | ||
1376 | if (!desc || !thrd || thrd->free) | ||
1377 | return -EINVAL; | ||
1378 | |||
1379 | regs = thrd->dmac->base; | ||
1380 | |||
1381 | if (pl330->state == DYING | 1374 | if (pl330->state == DYING |
1382 | || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) { | 1375 | || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) { |
1383 | dev_info(thrd->dmac->ddma.dev, "%s:%d\n", | 1376 | dev_info(thrd->dmac->ddma.dev, "%s:%d\n", |
@@ -2755,8 +2748,10 @@ probe_err3: | |||
2755 | list_del(&pch->chan.device_node); | 2748 | list_del(&pch->chan.device_node); |
2756 | 2749 | ||
2757 | /* Flush the channel */ | 2750 | /* Flush the channel */ |
2758 | pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); | 2751 | if (pch->thread) { |
2759 | pl330_free_chan_resources(&pch->chan); | 2752 | pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); |
2753 | pl330_free_chan_resources(&pch->chan); | ||
2754 | } | ||
2760 | } | 2755 | } |
2761 | probe_err2: | 2756 | probe_err2: |
2762 | pl330_del(pl330); | 2757 | pl330_del(pl330); |
@@ -2782,8 +2777,10 @@ static int pl330_remove(struct amba_device *adev) | |||
2782 | list_del(&pch->chan.device_node); | 2777 | list_del(&pch->chan.device_node); |
2783 | 2778 | ||
2784 | /* Flush the channel */ | 2779 | /* Flush the channel */ |
2785 | pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); | 2780 | if (pch->thread) { |
2786 | pl330_free_chan_resources(&pch->chan); | 2781 | pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); |
2782 | pl330_free_chan_resources(&pch->chan); | ||
2783 | } | ||
2787 | } | 2784 | } |
2788 | 2785 | ||
2789 | pl330_del(pl330); | 2786 | pl330_del(pl330); |
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c index dabbf0aba2e9..80fd2aeb4870 100644 --- a/drivers/dma/sh/rcar-audmapp.c +++ b/drivers/dma/sh/rcar-audmapp.c | |||
@@ -117,7 +117,7 @@ static void audmapp_start_xfer(struct shdma_chan *schan, | |||
117 | audmapp_write(auchan, chcr, PDMACHCR); | 117 | audmapp_write(auchan, chcr, PDMACHCR); |
118 | } | 118 | } |
119 | 119 | ||
120 | static void audmapp_get_config(struct audmapp_chan *auchan, int slave_id, | 120 | static int audmapp_get_config(struct audmapp_chan *auchan, int slave_id, |
121 | u32 *chcr, dma_addr_t *dst) | 121 | u32 *chcr, dma_addr_t *dst) |
122 | { | 122 | { |
123 | struct audmapp_device *audev = to_dev(auchan); | 123 | struct audmapp_device *audev = to_dev(auchan); |
@@ -131,20 +131,22 @@ static void audmapp_get_config(struct audmapp_chan *auchan, int slave_id, | |||
131 | if (!pdata) { /* DT */ | 131 | if (!pdata) { /* DT */ |
132 | *chcr = ((u32)slave_id) << 16; | 132 | *chcr = ((u32)slave_id) << 16; |
133 | auchan->shdma_chan.slave_id = (slave_id) >> 8; | 133 | auchan->shdma_chan.slave_id = (slave_id) >> 8; |
134 | return; | 134 | return 0; |
135 | } | 135 | } |
136 | 136 | ||
137 | /* non-DT */ | 137 | /* non-DT */ |
138 | 138 | ||
139 | if (slave_id >= AUDMAPP_SLAVE_NUMBER) | 139 | if (slave_id >= AUDMAPP_SLAVE_NUMBER) |
140 | return; | 140 | return -ENXIO; |
141 | 141 | ||
142 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) | 142 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) |
143 | if (cfg->slave_id == slave_id) { | 143 | if (cfg->slave_id == slave_id) { |
144 | *chcr = cfg->chcr; | 144 | *chcr = cfg->chcr; |
145 | *dst = cfg->dst; | 145 | *dst = cfg->dst; |
146 | break; | 146 | return 0; |
147 | } | 147 | } |
148 | |||
149 | return -ENXIO; | ||
148 | } | 150 | } |
149 | 151 | ||
150 | static int audmapp_set_slave(struct shdma_chan *schan, int slave_id, | 152 | static int audmapp_set_slave(struct shdma_chan *schan, int slave_id, |
@@ -153,8 +155,11 @@ static int audmapp_set_slave(struct shdma_chan *schan, int slave_id, | |||
153 | struct audmapp_chan *auchan = to_chan(schan); | 155 | struct audmapp_chan *auchan = to_chan(schan); |
154 | u32 chcr; | 156 | u32 chcr; |
155 | dma_addr_t dst; | 157 | dma_addr_t dst; |
158 | int ret; | ||
156 | 159 | ||
157 | audmapp_get_config(auchan, slave_id, &chcr, &dst); | 160 | ret = audmapp_get_config(auchan, slave_id, &chcr, &dst); |
161 | if (ret < 0) | ||
162 | return ret; | ||
158 | 163 | ||
159 | if (try) | 164 | if (try) |
160 | return 0; | 165 | return 0; |
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 1f92a56fd2b6..3aa10b328254 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c | |||
@@ -862,7 +862,6 @@ static int sun6i_dma_probe(struct platform_device *pdev) | |||
862 | { | 862 | { |
863 | struct sun6i_dma_dev *sdc; | 863 | struct sun6i_dma_dev *sdc; |
864 | struct resource *res; | 864 | struct resource *res; |
865 | struct clk *mux, *pll6; | ||
866 | int ret, i; | 865 | int ret, i; |
867 | 866 | ||
868 | sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL); | 867 | sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL); |
@@ -886,28 +885,6 @@ static int sun6i_dma_probe(struct platform_device *pdev) | |||
886 | return PTR_ERR(sdc->clk); | 885 | return PTR_ERR(sdc->clk); |
887 | } | 886 | } |
888 | 887 | ||
889 | mux = clk_get(NULL, "ahb1_mux"); | ||
890 | if (IS_ERR(mux)) { | ||
891 | dev_err(&pdev->dev, "Couldn't get AHB1 Mux\n"); | ||
892 | return PTR_ERR(mux); | ||
893 | } | ||
894 | |||
895 | pll6 = clk_get(NULL, "pll6"); | ||
896 | if (IS_ERR(pll6)) { | ||
897 | dev_err(&pdev->dev, "Couldn't get PLL6\n"); | ||
898 | clk_put(mux); | ||
899 | return PTR_ERR(pll6); | ||
900 | } | ||
901 | |||
902 | ret = clk_set_parent(mux, pll6); | ||
903 | clk_put(pll6); | ||
904 | clk_put(mux); | ||
905 | |||
906 | if (ret) { | ||
907 | dev_err(&pdev->dev, "Couldn't reparent AHB1 on PLL6\n"); | ||
908 | return ret; | ||
909 | } | ||
910 | |||
911 | sdc->rstc = devm_reset_control_get(&pdev->dev, NULL); | 888 | sdc->rstc = devm_reset_control_get(&pdev->dev, NULL); |
912 | if (IS_ERR(sdc->rstc)) { | 889 | if (IS_ERR(sdc->rstc)) { |
913 | dev_err(&pdev->dev, "No reset controller specified\n"); | 890 | dev_err(&pdev->dev, "No reset controller specified\n"); |
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index 42a13e8d4607..a6e64767186e 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c | |||
@@ -1365,7 +1365,6 @@ static const struct of_device_id xilinx_vdma_of_ids[] = { | |||
1365 | static struct platform_driver xilinx_vdma_driver = { | 1365 | static struct platform_driver xilinx_vdma_driver = { |
1366 | .driver = { | 1366 | .driver = { |
1367 | .name = "xilinx-vdma", | 1367 | .name = "xilinx-vdma", |
1368 | .owner = THIS_MODULE, | ||
1369 | .of_match_table = xilinx_vdma_of_ids, | 1368 | .of_match_table = xilinx_vdma_of_ids, |
1370 | }, | 1369 | }, |
1371 | .probe = xilinx_vdma_probe, | 1370 | .probe = xilinx_vdma_probe, |
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c index 83315dfeef62..7696a873510d 100644 --- a/drivers/media/platform/soc_camera/mx3_camera.c +++ b/drivers/media/platform/soc_camera/mx3_camera.c | |||
@@ -415,10 +415,8 @@ static void mx3_stop_streaming(struct vb2_queue *q) | |||
415 | struct mx3_camera_buffer *buf, *tmp; | 415 | struct mx3_camera_buffer *buf, *tmp; |
416 | unsigned long flags; | 416 | unsigned long flags; |
417 | 417 | ||
418 | if (ichan) { | 418 | if (ichan) |
419 | struct dma_chan *chan = &ichan->dma_chan; | 419 | dmaengine_pause(&ichan->dma_chan); |
420 | chan->device->device_control(chan, DMA_PAUSE, 0); | ||
421 | } | ||
422 | 420 | ||
423 | spin_lock_irqsave(&mx3_cam->lock, flags); | 421 | spin_lock_irqsave(&mx3_cam->lock, flags); |
424 | 422 | ||
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c index 7e97e53f9ff2..339b252fcedd 100644 --- a/drivers/misc/carma/carma-fpga-program.c +++ b/drivers/misc/carma/carma-fpga-program.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/completion.h> | 16 | #include <linux/completion.h> |
17 | #include <linux/miscdevice.h> | 17 | #include <linux/miscdevice.h> |
18 | #include <linux/dmaengine.h> | 18 | #include <linux/dmaengine.h> |
19 | #include <linux/fsldma.h> | ||
19 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
20 | #include <linux/highmem.h> | 21 | #include <linux/highmem.h> |
21 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
@@ -518,23 +519,22 @@ static noinline int fpga_program_dma(struct fpga_dev *priv) | |||
518 | config.direction = DMA_MEM_TO_DEV; | 519 | config.direction = DMA_MEM_TO_DEV; |
519 | config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | 520 | config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
520 | config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4; | 521 | config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4; |
521 | ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG, | 522 | ret = dmaengine_slave_config(chan, &config); |
522 | (unsigned long)&config); | ||
523 | if (ret) { | 523 | if (ret) { |
524 | dev_err(priv->dev, "DMA slave configuration failed\n"); | 524 | dev_err(priv->dev, "DMA slave configuration failed\n"); |
525 | goto out_dma_unmap; | 525 | goto out_dma_unmap; |
526 | } | 526 | } |
527 | 527 | ||
528 | ret = chan->device->device_control(chan, FSLDMA_EXTERNAL_START, 1); | 528 | ret = fsl_dma_external_start(chan, 1); |
529 | if (ret) { | 529 | if (ret) { |
530 | dev_err(priv->dev, "DMA external control setup failed\n"); | 530 | dev_err(priv->dev, "DMA external control setup failed\n"); |
531 | goto out_dma_unmap; | 531 | goto out_dma_unmap; |
532 | } | 532 | } |
533 | 533 | ||
534 | /* setup and submit the DMA transaction */ | 534 | /* setup and submit the DMA transaction */ |
535 | tx = chan->device->device_prep_dma_sg(chan, | 535 | |
536 | table.sgl, num_pages, | 536 | tx = dmaengine_prep_dma_sg(chan, table.sgl, num_pages, |
537 | vb->sglist, vb->sglen, 0); | 537 | vb->sglist, vb->sglen, 0); |
538 | if (!tx) { | 538 | if (!tx) { |
539 | dev_err(priv->dev, "Unable to prep DMA transaction\n"); | 539 | dev_err(priv->dev, "Unable to prep DMA transaction\n"); |
540 | ret = -ENOMEM; | 540 | ret = -ENOMEM; |
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index 1550692973dc..7a915870d9d6 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c | |||
@@ -605,7 +605,7 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, | |||
605 | wait_for_completion_timeout(&host->dma_access_complete, | 605 | wait_for_completion_timeout(&host->dma_access_complete, |
606 | msecs_to_jiffies(3000)); | 606 | msecs_to_jiffies(3000)); |
607 | if (ret <= 0) { | 607 | if (ret <= 0) { |
608 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); | 608 | dmaengine_terminate_all(chan); |
609 | dev_err(host->dev, "wait_for_completion_timeout\n"); | 609 | dev_err(host->dev, "wait_for_completion_timeout\n"); |
610 | if (!ret) | 610 | if (!ret) |
611 | ret = -ETIMEDOUT; | 611 | ret = -ETIMEDOUT; |
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index c0670237e7a2..0ed7c603298f 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c | |||
@@ -395,7 +395,7 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf, | |||
395 | msecs_to_jiffies(3000)); | 395 | msecs_to_jiffies(3000)); |
396 | 396 | ||
397 | if (ret <= 0) { | 397 | if (ret <= 0) { |
398 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); | 398 | dmaengine_terminate_all(chan); |
399 | dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n"); | 399 | dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n"); |
400 | } | 400 | } |
401 | 401 | ||
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 822616e3c375..0c33b92a5a81 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c | |||
@@ -875,13 +875,11 @@ static void ks8842_stop_dma(struct ks8842_adapter *adapter) | |||
875 | 875 | ||
876 | tx_ctl->adesc = NULL; | 876 | tx_ctl->adesc = NULL; |
877 | if (tx_ctl->chan) | 877 | if (tx_ctl->chan) |
878 | tx_ctl->chan->device->device_control(tx_ctl->chan, | 878 | dmaengine_terminate_all(tx_ctl->chan); |
879 | DMA_TERMINATE_ALL, 0); | ||
880 | 879 | ||
881 | rx_ctl->adesc = NULL; | 880 | rx_ctl->adesc = NULL; |
882 | if (rx_ctl->chan) | 881 | if (rx_ctl->chan) |
883 | rx_ctl->chan->device->device_control(rx_ctl->chan, | 882 | dmaengine_terminate_all(rx_ctl->chan); |
884 | DMA_TERMINATE_ALL, 0); | ||
885 | 883 | ||
886 | if (sg_dma_address(&rx_ctl->sg)) | 884 | if (sg_dma_address(&rx_ctl->sg)) |
887 | dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg), | 885 | dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg), |
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c index c41ff148a2b4..62a9297e96ac 100644 --- a/drivers/spi/spi-pxa2xx-dma.c +++ b/drivers/spi/spi-pxa2xx-dma.c | |||
@@ -157,7 +157,6 @@ static struct dma_async_tx_descriptor * | |||
157 | pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data, | 157 | pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data, |
158 | enum dma_transfer_direction dir) | 158 | enum dma_transfer_direction dir) |
159 | { | 159 | { |
160 | struct pxa2xx_spi_master *pdata = drv_data->master_info; | ||
161 | struct chip_data *chip = drv_data->cur_chip; | 160 | struct chip_data *chip = drv_data->cur_chip; |
162 | enum dma_slave_buswidth width; | 161 | enum dma_slave_buswidth width; |
163 | struct dma_slave_config cfg; | 162 | struct dma_slave_config cfg; |
@@ -184,7 +183,6 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data, | |||
184 | cfg.dst_addr = drv_data->ssdr_physical; | 183 | cfg.dst_addr = drv_data->ssdr_physical; |
185 | cfg.dst_addr_width = width; | 184 | cfg.dst_addr_width = width; |
186 | cfg.dst_maxburst = chip->dma_burst_size; | 185 | cfg.dst_maxburst = chip->dma_burst_size; |
187 | cfg.slave_id = pdata->tx_slave_id; | ||
188 | 186 | ||
189 | sgt = &drv_data->tx_sgt; | 187 | sgt = &drv_data->tx_sgt; |
190 | nents = drv_data->tx_nents; | 188 | nents = drv_data->tx_nents; |
@@ -193,7 +191,6 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data, | |||
193 | cfg.src_addr = drv_data->ssdr_physical; | 191 | cfg.src_addr = drv_data->ssdr_physical; |
194 | cfg.src_addr_width = width; | 192 | cfg.src_addr_width = width; |
195 | cfg.src_maxburst = chip->dma_burst_size; | 193 | cfg.src_maxburst = chip->dma_burst_size; |
196 | cfg.slave_id = pdata->rx_slave_id; | ||
197 | 194 | ||
198 | sgt = &drv_data->rx_sgt; | 195 | sgt = &drv_data->rx_sgt; |
199 | nents = drv_data->rx_nents; | 196 | nents = drv_data->rx_nents; |
@@ -210,14 +207,6 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data, | |||
210 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 207 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
211 | } | 208 | } |
212 | 209 | ||
213 | static bool pxa2xx_spi_dma_filter(struct dma_chan *chan, void *param) | ||
214 | { | ||
215 | const struct pxa2xx_spi_master *pdata = param; | ||
216 | |||
217 | return chan->chan_id == pdata->tx_chan_id || | ||
218 | chan->chan_id == pdata->rx_chan_id; | ||
219 | } | ||
220 | |||
221 | bool pxa2xx_spi_dma_is_possible(size_t len) | 210 | bool pxa2xx_spi_dma_is_possible(size_t len) |
222 | { | 211 | { |
223 | return len <= MAX_DMA_LEN; | 212 | return len <= MAX_DMA_LEN; |
@@ -321,12 +310,12 @@ int pxa2xx_spi_dma_setup(struct driver_data *drv_data) | |||
321 | return -ENOMEM; | 310 | return -ENOMEM; |
322 | 311 | ||
323 | drv_data->tx_chan = dma_request_slave_channel_compat(mask, | 312 | drv_data->tx_chan = dma_request_slave_channel_compat(mask, |
324 | pxa2xx_spi_dma_filter, pdata, dev, "tx"); | 313 | pdata->dma_filter, pdata->tx_param, dev, "tx"); |
325 | if (!drv_data->tx_chan) | 314 | if (!drv_data->tx_chan) |
326 | return -ENODEV; | 315 | return -ENODEV; |
327 | 316 | ||
328 | drv_data->rx_chan = dma_request_slave_channel_compat(mask, | 317 | drv_data->rx_chan = dma_request_slave_channel_compat(mask, |
329 | pxa2xx_spi_dma_filter, pdata, dev, "rx"); | 318 | pdata->dma_filter, pdata->rx_param, dev, "rx"); |
330 | if (!drv_data->rx_chan) { | 319 | if (!drv_data->rx_chan) { |
331 | dma_release_channel(drv_data->tx_chan); | 320 | dma_release_channel(drv_data->tx_chan); |
332 | drv_data->tx_chan = NULL; | 321 | drv_data->tx_chan = NULL; |
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index 536c863bebf1..6beee8ce2d68 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c | |||
@@ -10,42 +10,87 @@ | |||
10 | #include <linux/clk.h> | 10 | #include <linux/clk.h> |
11 | #include <linux/clk-provider.h> | 11 | #include <linux/clk-provider.h> |
12 | 12 | ||
13 | #include <linux/dmaengine.h> | ||
14 | #include <linux/platform_data/dma-dw.h> | ||
15 | |||
13 | enum { | 16 | enum { |
14 | PORT_CE4100, | 17 | PORT_CE4100, |
15 | PORT_BYT, | 18 | PORT_BYT, |
19 | PORT_BSW0, | ||
20 | PORT_BSW1, | ||
21 | PORT_BSW2, | ||
16 | }; | 22 | }; |
17 | 23 | ||
18 | struct pxa_spi_info { | 24 | struct pxa_spi_info { |
19 | enum pxa_ssp_type type; | 25 | enum pxa_ssp_type type; |
20 | int port_id; | 26 | int port_id; |
21 | int num_chipselect; | 27 | int num_chipselect; |
22 | int tx_slave_id; | ||
23 | int tx_chan_id; | ||
24 | int rx_slave_id; | ||
25 | int rx_chan_id; | ||
26 | unsigned long max_clk_rate; | 28 | unsigned long max_clk_rate; |
29 | |||
30 | /* DMA channel request parameters */ | ||
31 | void *tx_param; | ||
32 | void *rx_param; | ||
27 | }; | 33 | }; |
28 | 34 | ||
35 | static struct dw_dma_slave byt_tx_param = { .dst_id = 0 }; | ||
36 | static struct dw_dma_slave byt_rx_param = { .src_id = 1 }; | ||
37 | |||
38 | static struct dw_dma_slave bsw0_tx_param = { .dst_id = 0 }; | ||
39 | static struct dw_dma_slave bsw0_rx_param = { .src_id = 1 }; | ||
40 | static struct dw_dma_slave bsw1_tx_param = { .dst_id = 6 }; | ||
41 | static struct dw_dma_slave bsw1_rx_param = { .src_id = 7 }; | ||
42 | static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 }; | ||
43 | static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 }; | ||
44 | |||
45 | static bool lpss_dma_filter(struct dma_chan *chan, void *param) | ||
46 | { | ||
47 | struct dw_dma_slave *dws = param; | ||
48 | |||
49 | if (dws->dma_dev != chan->device->dev) | ||
50 | return false; | ||
51 | |||
52 | chan->private = dws; | ||
53 | return true; | ||
54 | } | ||
55 | |||
29 | static struct pxa_spi_info spi_info_configs[] = { | 56 | static struct pxa_spi_info spi_info_configs[] = { |
30 | [PORT_CE4100] = { | 57 | [PORT_CE4100] = { |
31 | .type = PXA25x_SSP, | 58 | .type = PXA25x_SSP, |
32 | .port_id = -1, | 59 | .port_id = -1, |
33 | .num_chipselect = -1, | 60 | .num_chipselect = -1, |
34 | .tx_slave_id = -1, | ||
35 | .tx_chan_id = -1, | ||
36 | .rx_slave_id = -1, | ||
37 | .rx_chan_id = -1, | ||
38 | .max_clk_rate = 3686400, | 61 | .max_clk_rate = 3686400, |
39 | }, | 62 | }, |
40 | [PORT_BYT] = { | 63 | [PORT_BYT] = { |
41 | .type = LPSS_SSP, | 64 | .type = LPSS_SSP, |
42 | .port_id = 0, | 65 | .port_id = 0, |
43 | .num_chipselect = 1, | 66 | .num_chipselect = 1, |
44 | .tx_slave_id = 0, | ||
45 | .tx_chan_id = 0, | ||
46 | .rx_slave_id = 1, | ||
47 | .rx_chan_id = 1, | ||
48 | .max_clk_rate = 50000000, | 67 | .max_clk_rate = 50000000, |
68 | .tx_param = &byt_tx_param, | ||
69 | .rx_param = &byt_rx_param, | ||
70 | }, | ||
71 | [PORT_BSW0] = { | ||
72 | .type = LPSS_SSP, | ||
73 | .port_id = 0, | ||
74 | .num_chipselect = 1, | ||
75 | .max_clk_rate = 50000000, | ||
76 | .tx_param = &bsw0_tx_param, | ||
77 | .rx_param = &bsw0_rx_param, | ||
78 | }, | ||
79 | [PORT_BSW1] = { | ||
80 | .type = LPSS_SSP, | ||
81 | .port_id = 1, | ||
82 | .num_chipselect = 1, | ||
83 | .max_clk_rate = 50000000, | ||
84 | .tx_param = &bsw1_tx_param, | ||
85 | .rx_param = &bsw1_rx_param, | ||
86 | }, | ||
87 | [PORT_BSW2] = { | ||
88 | .type = LPSS_SSP, | ||
89 | .port_id = 2, | ||
90 | .num_chipselect = 1, | ||
91 | .max_clk_rate = 50000000, | ||
92 | .tx_param = &bsw2_tx_param, | ||
93 | .rx_param = &bsw2_rx_param, | ||
49 | }, | 94 | }, |
50 | }; | 95 | }; |
51 | 96 | ||
@@ -59,6 +104,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, | |||
59 | struct ssp_device *ssp; | 104 | struct ssp_device *ssp; |
60 | struct pxa_spi_info *c; | 105 | struct pxa_spi_info *c; |
61 | char buf[40]; | 106 | char buf[40]; |
107 | struct pci_dev *dma_dev; | ||
62 | 108 | ||
63 | ret = pcim_enable_device(dev); | 109 | ret = pcim_enable_device(dev); |
64 | if (ret) | 110 | if (ret) |
@@ -73,11 +119,29 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, | |||
73 | memset(&spi_pdata, 0, sizeof(spi_pdata)); | 119 | memset(&spi_pdata, 0, sizeof(spi_pdata)); |
74 | spi_pdata.num_chipselect = (c->num_chipselect > 0) ? | 120 | spi_pdata.num_chipselect = (c->num_chipselect > 0) ? |
75 | c->num_chipselect : dev->devfn; | 121 | c->num_chipselect : dev->devfn; |
76 | spi_pdata.tx_slave_id = c->tx_slave_id; | 122 | |
77 | spi_pdata.tx_chan_id = c->tx_chan_id; | 123 | dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); |
78 | spi_pdata.rx_slave_id = c->rx_slave_id; | 124 | |
79 | spi_pdata.rx_chan_id = c->rx_chan_id; | 125 | if (c->tx_param) { |
80 | spi_pdata.enable_dma = c->rx_slave_id >= 0 && c->tx_slave_id >= 0; | 126 | struct dw_dma_slave *slave = c->tx_param; |
127 | |||
128 | slave->dma_dev = &dma_dev->dev; | ||
129 | slave->src_master = 1; | ||
130 | slave->dst_master = 0; | ||
131 | } | ||
132 | |||
133 | if (c->rx_param) { | ||
134 | struct dw_dma_slave *slave = c->rx_param; | ||
135 | |||
136 | slave->dma_dev = &dma_dev->dev; | ||
137 | slave->src_master = 1; | ||
138 | slave->dst_master = 0; | ||
139 | } | ||
140 | |||
141 | spi_pdata.dma_filter = lpss_dma_filter; | ||
142 | spi_pdata.tx_param = c->tx_param; | ||
143 | spi_pdata.rx_param = c->rx_param; | ||
144 | spi_pdata.enable_dma = c->rx_param && c->tx_param; | ||
81 | 145 | ||
82 | ssp = &spi_pdata.ssp; | 146 | ssp = &spi_pdata.ssp; |
83 | ssp->phys_base = pci_resource_start(dev, 0); | 147 | ssp->phys_base = pci_resource_start(dev, 0); |
@@ -128,6 +192,9 @@ static void pxa2xx_spi_pci_remove(struct pci_dev *dev) | |||
128 | static const struct pci_device_id pxa2xx_spi_pci_devices[] = { | 192 | static const struct pci_device_id pxa2xx_spi_pci_devices[] = { |
129 | { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 }, | 193 | { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 }, |
130 | { PCI_VDEVICE(INTEL, 0x0f0e), PORT_BYT }, | 194 | { PCI_VDEVICE(INTEL, 0x0f0e), PORT_BYT }, |
195 | { PCI_VDEVICE(INTEL, 0x228e), PORT_BSW0 }, | ||
196 | { PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 }, | ||
197 | { PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 }, | ||
131 | { }, | 198 | { }, |
132 | }; | 199 | }; |
133 | MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices); | 200 | MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices); |
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 46f45ca2c694..d8a105f76837 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
@@ -1062,8 +1062,6 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev) | |||
1062 | 1062 | ||
1063 | pdata->num_chipselect = 1; | 1063 | pdata->num_chipselect = 1; |
1064 | pdata->enable_dma = true; | 1064 | pdata->enable_dma = true; |
1065 | pdata->tx_chan_id = -1; | ||
1066 | pdata->rx_chan_id = -1; | ||
1067 | 1065 | ||
1068 | return pdata; | 1066 | return pdata; |
1069 | } | 1067 | } |
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h index 1bcb4b2141a6..cb51be55989e 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h | |||
@@ -16,13 +16,13 @@ | |||
16 | #include <linux/dmaengine.h> | 16 | #include <linux/dmaengine.h> |
17 | 17 | ||
18 | struct uart_8250_dma { | 18 | struct uart_8250_dma { |
19 | /* Filter function */ | ||
19 | dma_filter_fn fn; | 20 | dma_filter_fn fn; |
21 | |||
22 | /* Parameter to the filter function */ | ||
20 | void *rx_param; | 23 | void *rx_param; |
21 | void *tx_param; | 24 | void *tx_param; |
22 | 25 | ||
23 | int rx_chan_id; | ||
24 | int tx_chan_id; | ||
25 | |||
26 | struct dma_slave_config rxconf; | 26 | struct dma_slave_config rxconf; |
27 | struct dma_slave_config txconf; | 27 | struct dma_slave_config txconf; |
28 | 28 | ||
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 57d9df84ce5d..beea6ca73ee5 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c | |||
@@ -216,10 +216,7 @@ out: | |||
216 | 216 | ||
217 | static bool dw8250_dma_filter(struct dma_chan *chan, void *param) | 217 | static bool dw8250_dma_filter(struct dma_chan *chan, void *param) |
218 | { | 218 | { |
219 | struct dw8250_data *data = param; | 219 | return false; |
220 | |||
221 | return chan->chan_id == data->dma.tx_chan_id || | ||
222 | chan->chan_id == data->dma.rx_chan_id; | ||
223 | } | 220 | } |
224 | 221 | ||
225 | static void dw8250_setup_port(struct uart_8250_port *up) | 222 | static void dw8250_setup_port(struct uart_8250_port *up) |
@@ -399,8 +396,6 @@ static int dw8250_probe(struct platform_device *pdev) | |||
399 | if (!IS_ERR(data->rst)) | 396 | if (!IS_ERR(data->rst)) |
400 | reset_control_deassert(data->rst); | 397 | reset_control_deassert(data->rst); |
401 | 398 | ||
402 | data->dma.rx_chan_id = -1; | ||
403 | data->dma.tx_chan_id = -1; | ||
404 | data->dma.rx_param = data; | 399 | data->dma.rx_param = data; |
405 | data->dma.tx_param = data; | 400 | data->dma.tx_param = data; |
406 | data->dma.fn = dw8250_dma_filter; | 401 | data->dma.fn = dw8250_dma_filter; |
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 4f1cd296f1b1..beb9d71cd47a 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
@@ -25,6 +25,9 @@ | |||
25 | #include <asm/byteorder.h> | 25 | #include <asm/byteorder.h> |
26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
27 | 27 | ||
28 | #include <linux/dmaengine.h> | ||
29 | #include <linux/platform_data/dma-dw.h> | ||
30 | |||
28 | #include "8250.h" | 31 | #include "8250.h" |
29 | 32 | ||
30 | /* | 33 | /* |
@@ -1349,6 +1352,9 @@ ce4100_serial_setup(struct serial_private *priv, | |||
1349 | #define PCI_DEVICE_ID_INTEL_BYT_UART1 0x0f0a | 1352 | #define PCI_DEVICE_ID_INTEL_BYT_UART1 0x0f0a |
1350 | #define PCI_DEVICE_ID_INTEL_BYT_UART2 0x0f0c | 1353 | #define PCI_DEVICE_ID_INTEL_BYT_UART2 0x0f0c |
1351 | 1354 | ||
1355 | #define PCI_DEVICE_ID_INTEL_BSW_UART1 0x228a | ||
1356 | #define PCI_DEVICE_ID_INTEL_BSW_UART2 0x228c | ||
1357 | |||
1352 | #define BYT_PRV_CLK 0x800 | 1358 | #define BYT_PRV_CLK 0x800 |
1353 | #define BYT_PRV_CLK_EN (1 << 0) | 1359 | #define BYT_PRV_CLK_EN (1 << 0) |
1354 | #define BYT_PRV_CLK_M_VAL_SHIFT 1 | 1360 | #define BYT_PRV_CLK_M_VAL_SHIFT 1 |
@@ -1414,7 +1420,13 @@ byt_set_termios(struct uart_port *p, struct ktermios *termios, | |||
1414 | 1420 | ||
1415 | static bool byt_dma_filter(struct dma_chan *chan, void *param) | 1421 | static bool byt_dma_filter(struct dma_chan *chan, void *param) |
1416 | { | 1422 | { |
1417 | return chan->chan_id == *(int *)param; | 1423 | struct dw_dma_slave *dws = param; |
1424 | |||
1425 | if (dws->dma_dev != chan->device->dev) | ||
1426 | return false; | ||
1427 | |||
1428 | chan->private = dws; | ||
1429 | return true; | ||
1418 | } | 1430 | } |
1419 | 1431 | ||
1420 | static int | 1432 | static int |
@@ -1422,35 +1434,57 @@ byt_serial_setup(struct serial_private *priv, | |||
1422 | const struct pciserial_board *board, | 1434 | const struct pciserial_board *board, |
1423 | struct uart_8250_port *port, int idx) | 1435 | struct uart_8250_port *port, int idx) |
1424 | { | 1436 | { |
1437 | struct pci_dev *pdev = priv->dev; | ||
1438 | struct device *dev = port->port.dev; | ||
1425 | struct uart_8250_dma *dma; | 1439 | struct uart_8250_dma *dma; |
1440 | struct dw_dma_slave *tx_param, *rx_param; | ||
1441 | struct pci_dev *dma_dev; | ||
1426 | int ret; | 1442 | int ret; |
1427 | 1443 | ||
1428 | dma = devm_kzalloc(port->port.dev, sizeof(*dma), GFP_KERNEL); | 1444 | dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); |
1429 | if (!dma) | 1445 | if (!dma) |
1430 | return -ENOMEM; | 1446 | return -ENOMEM; |
1431 | 1447 | ||
1432 | switch (priv->dev->device) { | 1448 | tx_param = devm_kzalloc(dev, sizeof(*tx_param), GFP_KERNEL); |
1449 | if (!tx_param) | ||
1450 | return -ENOMEM; | ||
1451 | |||
1452 | rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL); | ||
1453 | if (!rx_param) | ||
1454 | return -ENOMEM; | ||
1455 | |||
1456 | switch (pdev->device) { | ||
1433 | case PCI_DEVICE_ID_INTEL_BYT_UART1: | 1457 | case PCI_DEVICE_ID_INTEL_BYT_UART1: |
1434 | dma->rx_chan_id = 3; | 1458 | case PCI_DEVICE_ID_INTEL_BSW_UART1: |
1435 | dma->tx_chan_id = 2; | 1459 | rx_param->src_id = 3; |
1460 | tx_param->dst_id = 2; | ||
1436 | break; | 1461 | break; |
1437 | case PCI_DEVICE_ID_INTEL_BYT_UART2: | 1462 | case PCI_DEVICE_ID_INTEL_BYT_UART2: |
1438 | dma->rx_chan_id = 5; | 1463 | case PCI_DEVICE_ID_INTEL_BSW_UART2: |
1439 | dma->tx_chan_id = 4; | 1464 | rx_param->src_id = 5; |
1465 | tx_param->dst_id = 4; | ||
1440 | break; | 1466 | break; |
1441 | default: | 1467 | default: |
1442 | return -EINVAL; | 1468 | return -EINVAL; |
1443 | } | 1469 | } |
1444 | 1470 | ||
1445 | dma->rxconf.slave_id = dma->rx_chan_id; | 1471 | rx_param->src_master = 1; |
1472 | rx_param->dst_master = 0; | ||
1473 | |||
1446 | dma->rxconf.src_maxburst = 16; | 1474 | dma->rxconf.src_maxburst = 16; |
1447 | 1475 | ||
1448 | dma->txconf.slave_id = dma->tx_chan_id; | 1476 | tx_param->src_master = 1; |
1477 | tx_param->dst_master = 0; | ||
1478 | |||
1449 | dma->txconf.dst_maxburst = 16; | 1479 | dma->txconf.dst_maxburst = 16; |
1450 | 1480 | ||
1481 | dma_dev = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0)); | ||
1482 | rx_param->dma_dev = &dma_dev->dev; | ||
1483 | tx_param->dma_dev = &dma_dev->dev; | ||
1484 | |||
1451 | dma->fn = byt_dma_filter; | 1485 | dma->fn = byt_dma_filter; |
1452 | dma->rx_param = &dma->rx_chan_id; | 1486 | dma->rx_param = rx_param; |
1453 | dma->tx_param = &dma->tx_chan_id; | 1487 | dma->tx_param = tx_param; |
1454 | 1488 | ||
1455 | ret = pci_default_setup(priv, board, port, idx); | 1489 | ret = pci_default_setup(priv, board, port, idx); |
1456 | port->port.iotype = UPIO_MEM; | 1490 | port->port.iotype = UPIO_MEM; |
@@ -1893,6 +1927,20 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { | |||
1893 | .subdevice = PCI_ANY_ID, | 1927 | .subdevice = PCI_ANY_ID, |
1894 | .setup = pci_default_setup, | 1928 | .setup = pci_default_setup, |
1895 | }, | 1929 | }, |
1930 | { | ||
1931 | .vendor = PCI_VENDOR_ID_INTEL, | ||
1932 | .device = PCI_DEVICE_ID_INTEL_BSW_UART1, | ||
1933 | .subvendor = PCI_ANY_ID, | ||
1934 | .subdevice = PCI_ANY_ID, | ||
1935 | .setup = byt_serial_setup, | ||
1936 | }, | ||
1937 | { | ||
1938 | .vendor = PCI_VENDOR_ID_INTEL, | ||
1939 | .device = PCI_DEVICE_ID_INTEL_BSW_UART2, | ||
1940 | .subvendor = PCI_ANY_ID, | ||
1941 | .subdevice = PCI_ANY_ID, | ||
1942 | .setup = byt_serial_setup, | ||
1943 | }, | ||
1896 | /* | 1944 | /* |
1897 | * ITE | 1945 | * ITE |
1898 | */ | 1946 | */ |
@@ -5192,6 +5240,14 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
5192 | PCI_ANY_ID, PCI_ANY_ID, | 5240 | PCI_ANY_ID, PCI_ANY_ID, |
5193 | PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000, | 5241 | PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000, |
5194 | pbn_byt }, | 5242 | pbn_byt }, |
5243 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW_UART1, | ||
5244 | PCI_ANY_ID, PCI_ANY_ID, | ||
5245 | PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000, | ||
5246 | pbn_byt }, | ||
5247 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW_UART2, | ||
5248 | PCI_ANY_ID, PCI_ANY_ID, | ||
5249 | PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000, | ||
5250 | pbn_byt }, | ||
5195 | 5251 | ||
5196 | /* | 5252 | /* |
5197 | * Intel Quark x1000 | 5253 | * Intel Quark x1000 |
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index d7d4584549a5..edde3eca055d 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/of_device.h> | 37 | #include <linux/of_device.h> |
38 | #include <linux/of_gpio.h> | 38 | #include <linux/of_gpio.h> |
39 | #include <linux/dma-mapping.h> | 39 | #include <linux/dma-mapping.h> |
40 | #include <linux/dmaengine.h> | ||
40 | #include <linux/atmel_pdc.h> | 41 | #include <linux/atmel_pdc.h> |
41 | #include <linux/atmel_serial.h> | 42 | #include <linux/atmel_serial.h> |
42 | #include <linux/uaccess.h> | 43 | #include <linux/uaccess.h> |
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 3081e46085ce..eb17c7124e72 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
@@ -1403,7 +1403,7 @@ static void work_fn_rx(struct work_struct *work) | |||
1403 | unsigned long flags; | 1403 | unsigned long flags; |
1404 | int count; | 1404 | int count; |
1405 | 1405 | ||
1406 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); | 1406 | dmaengine_terminate_all(chan); |
1407 | dev_dbg(port->dev, "Read %zu bytes with cookie %d\n", | 1407 | dev_dbg(port->dev, "Read %zu bytes with cookie %d\n", |
1408 | sh_desc->partial, sh_desc->cookie); | 1408 | sh_desc->partial, sh_desc->cookie); |
1409 | 1409 | ||
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c index 5e97baf92721..23ec781e9a61 100644 --- a/drivers/video/fbdev/mx3fb.c +++ b/drivers/video/fbdev/mx3fb.c | |||
@@ -461,8 +461,7 @@ static void sdc_disable_channel(struct mx3fb_info *mx3_fbi) | |||
461 | 461 | ||
462 | spin_unlock_irqrestore(&mx3fb->lock, flags); | 462 | spin_unlock_irqrestore(&mx3fb->lock, flags); |
463 | 463 | ||
464 | mx3_fbi->txd->chan->device->device_control(mx3_fbi->txd->chan, | 464 | dmaengine_terminate_all(mx3_fbi->txd->chan); |
465 | DMA_TERMINATE_ALL, 0); | ||
466 | mx3_fbi->txd = NULL; | 465 | mx3_fbi->txd = NULL; |
467 | mx3_fbi->cookie = -EINVAL; | 466 | mx3_fbi->cookie = -EINVAL; |
468 | } | 467 | } |
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h new file mode 100644 index 000000000000..71456442ebe3 --- /dev/null +++ b/include/linux/dma/dw.h | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * Driver for the Synopsys DesignWare DMA Controller | ||
3 | * | ||
4 | * Copyright (C) 2007 Atmel Corporation | ||
5 | * Copyright (C) 2010-2011 ST Microelectronics | ||
6 | * Copyright (C) 2014 Intel Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | #ifndef _DMA_DW_H | ||
13 | #define _DMA_DW_H | ||
14 | |||
15 | #include <linux/clk.h> | ||
16 | #include <linux/device.h> | ||
17 | #include <linux/dmaengine.h> | ||
18 | |||
19 | #include <linux/platform_data/dma-dw.h> | ||
20 | |||
21 | struct dw_dma; | ||
22 | |||
23 | /** | ||
24 | * struct dw_dma_chip - representation of DesignWare DMA controller hardware | ||
25 | * @dev: struct device of the DMA controller | ||
26 | * @irq: irq line | ||
27 | * @regs: memory mapped I/O space | ||
28 | * @clk: hclk clock | ||
29 | * @dw: struct dw_dma that is filed by dw_dma_probe() | ||
30 | */ | ||
31 | struct dw_dma_chip { | ||
32 | struct device *dev; | ||
33 | int irq; | ||
34 | void __iomem *regs; | ||
35 | struct clk *clk; | ||
36 | struct dw_dma *dw; | ||
37 | }; | ||
38 | |||
39 | /* Export to the platform drivers */ | ||
40 | int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata); | ||
41 | int dw_dma_remove(struct dw_dma_chip *chip); | ||
42 | |||
43 | /* DMA API extensions */ | ||
44 | struct dw_desc; | ||
45 | |||
46 | struct dw_cyclic_desc { | ||
47 | struct dw_desc **desc; | ||
48 | unsigned long periods; | ||
49 | void (*period_callback)(void *param); | ||
50 | void *period_callback_param; | ||
51 | }; | ||
52 | |||
53 | struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | ||
54 | dma_addr_t buf_addr, size_t buf_len, size_t period_len, | ||
55 | enum dma_transfer_direction direction); | ||
56 | void dw_dma_cyclic_free(struct dma_chan *chan); | ||
57 | int dw_dma_cyclic_start(struct dma_chan *chan); | ||
58 | void dw_dma_cyclic_stop(struct dma_chan *chan); | ||
59 | |||
60 | dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); | ||
61 | |||
62 | dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); | ||
63 | |||
64 | #endif /* _DMA_DW_H */ | ||
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 212c5b9ac106..653a1fd07ae8 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -199,15 +199,12 @@ enum dma_ctrl_flags { | |||
199 | * configuration data in statically from the platform). An additional | 199 | * configuration data in statically from the platform). An additional |
200 | * argument of struct dma_slave_config must be passed in with this | 200 | * argument of struct dma_slave_config must be passed in with this |
201 | * command. | 201 | * command. |
202 | * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller | ||
203 | * into external start mode. | ||
204 | */ | 202 | */ |
205 | enum dma_ctrl_cmd { | 203 | enum dma_ctrl_cmd { |
206 | DMA_TERMINATE_ALL, | 204 | DMA_TERMINATE_ALL, |
207 | DMA_PAUSE, | 205 | DMA_PAUSE, |
208 | DMA_RESUME, | 206 | DMA_RESUME, |
209 | DMA_SLAVE_CONFIG, | 207 | DMA_SLAVE_CONFIG, |
210 | FSLDMA_EXTERNAL_START, | ||
211 | }; | 208 | }; |
212 | 209 | ||
213 | /** | 210 | /** |
@@ -307,7 +304,9 @@ enum dma_slave_buswidth { | |||
307 | * struct dma_slave_config - dma slave channel runtime config | 304 | * struct dma_slave_config - dma slave channel runtime config |
308 | * @direction: whether the data shall go in or out on this slave | 305 | * @direction: whether the data shall go in or out on this slave |
309 | * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are | 306 | * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are |
310 | * legal values. | 307 | * legal values. DEPRECATED, drivers should use the direction argument |
308 | * to the device_prep_slave_sg and device_prep_dma_cyclic functions or | ||
309 | * the dir field in the dma_interleaved_template structure. | ||
311 | * @src_addr: this is the physical address where DMA slave data | 310 | * @src_addr: this is the physical address where DMA slave data |
312 | * should be read (RX), if the source is memory this argument is | 311 | * should be read (RX), if the source is memory this argument is |
313 | * ignored. | 312 | * ignored. |
@@ -755,6 +754,16 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( | |||
755 | return chan->device->device_prep_interleaved_dma(chan, xt, flags); | 754 | return chan->device->device_prep_interleaved_dma(chan, xt, flags); |
756 | } | 755 | } |
757 | 756 | ||
757 | static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( | ||
758 | struct dma_chan *chan, | ||
759 | struct scatterlist *dst_sg, unsigned int dst_nents, | ||
760 | struct scatterlist *src_sg, unsigned int src_nents, | ||
761 | unsigned long flags) | ||
762 | { | ||
763 | return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents, | ||
764 | src_sg, src_nents, flags); | ||
765 | } | ||
766 | |||
758 | static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) | 767 | static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) |
759 | { | 768 | { |
760 | if (!chan || !caps) | 769 | if (!chan || !caps) |
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h deleted file mode 100644 index 68b4024184de..000000000000 --- a/include/linux/dw_dmac.h +++ /dev/null | |||
@@ -1,111 +0,0 @@ | |||
1 | /* | ||
2 | * Driver for the Synopsys DesignWare DMA Controller | ||
3 | * | ||
4 | * Copyright (C) 2007 Atmel Corporation | ||
5 | * Copyright (C) 2010-2011 ST Microelectronics | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #ifndef DW_DMAC_H | ||
12 | #define DW_DMAC_H | ||
13 | |||
14 | #include <linux/dmaengine.h> | ||
15 | |||
16 | /** | ||
17 | * struct dw_dma_slave - Controller-specific information about a slave | ||
18 | * | ||
19 | * @dma_dev: required DMA master device. Depricated. | ||
20 | * @bus_id: name of this device channel, not just a device name since | ||
21 | * devices may have more than one channel e.g. "foo_tx" | ||
22 | * @cfg_hi: Platform-specific initializer for the CFG_HI register | ||
23 | * @cfg_lo: Platform-specific initializer for the CFG_LO register | ||
24 | * @src_master: src master for transfers on allocated channel. | ||
25 | * @dst_master: dest master for transfers on allocated channel. | ||
26 | */ | ||
27 | struct dw_dma_slave { | ||
28 | struct device *dma_dev; | ||
29 | u32 cfg_hi; | ||
30 | u32 cfg_lo; | ||
31 | u8 src_master; | ||
32 | u8 dst_master; | ||
33 | }; | ||
34 | |||
35 | /** | ||
36 | * struct dw_dma_platform_data - Controller configuration parameters | ||
37 | * @nr_channels: Number of channels supported by hardware (max 8) | ||
38 | * @is_private: The device channels should be marked as private and not for | ||
39 | * by the general purpose DMA channel allocator. | ||
40 | * @chan_allocation_order: Allocate channels starting from 0 or 7 | ||
41 | * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. | ||
42 | * @block_size: Maximum block size supported by the controller | ||
43 | * @nr_masters: Number of AHB masters supported by the controller | ||
44 | * @data_width: Maximum data width supported by hardware per AHB master | ||
45 | * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) | ||
46 | */ | ||
47 | struct dw_dma_platform_data { | ||
48 | unsigned int nr_channels; | ||
49 | bool is_private; | ||
50 | #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ | ||
51 | #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ | ||
52 | unsigned char chan_allocation_order; | ||
53 | #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ | ||
54 | #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ | ||
55 | unsigned char chan_priority; | ||
56 | unsigned short block_size; | ||
57 | unsigned char nr_masters; | ||
58 | unsigned char data_width[4]; | ||
59 | }; | ||
60 | |||
61 | /* bursts size */ | ||
62 | enum dw_dma_msize { | ||
63 | DW_DMA_MSIZE_1, | ||
64 | DW_DMA_MSIZE_4, | ||
65 | DW_DMA_MSIZE_8, | ||
66 | DW_DMA_MSIZE_16, | ||
67 | DW_DMA_MSIZE_32, | ||
68 | DW_DMA_MSIZE_64, | ||
69 | DW_DMA_MSIZE_128, | ||
70 | DW_DMA_MSIZE_256, | ||
71 | }; | ||
72 | |||
73 | /* Platform-configurable bits in CFG_HI */ | ||
74 | #define DWC_CFGH_FCMODE (1 << 0) | ||
75 | #define DWC_CFGH_FIFO_MODE (1 << 1) | ||
76 | #define DWC_CFGH_PROTCTL(x) ((x) << 2) | ||
77 | #define DWC_CFGH_SRC_PER(x) ((x) << 7) | ||
78 | #define DWC_CFGH_DST_PER(x) ((x) << 11) | ||
79 | |||
80 | /* Platform-configurable bits in CFG_LO */ | ||
81 | #define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */ | ||
82 | #define DWC_CFGL_LOCK_CH_BLOCK (1 << 12) | ||
83 | #define DWC_CFGL_LOCK_CH_XACT (2 << 12) | ||
84 | #define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */ | ||
85 | #define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14) | ||
86 | #define DWC_CFGL_LOCK_BUS_XACT (2 << 14) | ||
87 | #define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */ | ||
88 | #define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */ | ||
89 | #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ | ||
90 | #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ | ||
91 | |||
92 | /* DMA API extensions */ | ||
93 | struct dw_cyclic_desc { | ||
94 | struct dw_desc **desc; | ||
95 | unsigned long periods; | ||
96 | void (*period_callback)(void *param); | ||
97 | void *period_callback_param; | ||
98 | }; | ||
99 | |||
100 | struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | ||
101 | dma_addr_t buf_addr, size_t buf_len, size_t period_len, | ||
102 | enum dma_transfer_direction direction); | ||
103 | void dw_dma_cyclic_free(struct dma_chan *chan); | ||
104 | int dw_dma_cyclic_start(struct dma_chan *chan); | ||
105 | void dw_dma_cyclic_stop(struct dma_chan *chan); | ||
106 | |||
107 | dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); | ||
108 | |||
109 | dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); | ||
110 | |||
111 | #endif /* DW_DMAC_H */ | ||
diff --git a/include/linux/fsldma.h b/include/linux/fsldma.h new file mode 100644 index 000000000000..b213c02963c9 --- /dev/null +++ b/include/linux/fsldma.h | |||
@@ -0,0 +1,13 @@ | |||
1 | /* | ||
2 | * This is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | */ | ||
7 | |||
8 | #ifndef FSL_DMA_H | ||
9 | #define FSL_DMA_H | ||
10 | /* fsl dma API for enxternal start */ | ||
11 | int fsl_dma_external_start(struct dma_chan *dchan, int enable); | ||
12 | |||
13 | #endif | ||
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h new file mode 100644 index 000000000000..d8155c005242 --- /dev/null +++ b/include/linux/platform_data/dma-dw.h | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * Driver for the Synopsys DesignWare DMA Controller | ||
3 | * | ||
4 | * Copyright (C) 2007 Atmel Corporation | ||
5 | * Copyright (C) 2010-2011 ST Microelectronics | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #ifndef _PLATFORM_DATA_DMA_DW_H | ||
12 | #define _PLATFORM_DATA_DMA_DW_H | ||
13 | |||
14 | #include <linux/device.h> | ||
15 | |||
16 | /** | ||
17 | * struct dw_dma_slave - Controller-specific information about a slave | ||
18 | * | ||
19 | * @dma_dev: required DMA master device. Depricated. | ||
20 | * @src_id: src request line | ||
21 | * @dst_id: dst request line | ||
22 | * @src_master: src master for transfers on allocated channel. | ||
23 | * @dst_master: dest master for transfers on allocated channel. | ||
24 | */ | ||
25 | struct dw_dma_slave { | ||
26 | struct device *dma_dev; | ||
27 | u8 src_id; | ||
28 | u8 dst_id; | ||
29 | u8 src_master; | ||
30 | u8 dst_master; | ||
31 | }; | ||
32 | |||
33 | /** | ||
34 | * struct dw_dma_platform_data - Controller configuration parameters | ||
35 | * @nr_channels: Number of channels supported by hardware (max 8) | ||
36 | * @is_private: The device channels should be marked as private and not for | ||
37 | * by the general purpose DMA channel allocator. | ||
38 | * @chan_allocation_order: Allocate channels starting from 0 or 7 | ||
39 | * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. | ||
40 | * @block_size: Maximum block size supported by the controller | ||
41 | * @nr_masters: Number of AHB masters supported by the controller | ||
42 | * @data_width: Maximum data width supported by hardware per AHB master | ||
43 | * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) | ||
44 | */ | ||
45 | struct dw_dma_platform_data { | ||
46 | unsigned int nr_channels; | ||
47 | bool is_private; | ||
48 | #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ | ||
49 | #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ | ||
50 | unsigned char chan_allocation_order; | ||
51 | #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ | ||
52 | #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ | ||
53 | unsigned char chan_priority; | ||
54 | unsigned short block_size; | ||
55 | unsigned char nr_masters; | ||
56 | unsigned char data_width[4]; | ||
57 | }; | ||
58 | |||
59 | #endif /* _PLATFORM_DATA_DMA_DW_H */ | ||
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index 82d5111cd0c2..d5a316550177 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h | |||
@@ -23,6 +23,8 @@ | |||
23 | #define PXA2XX_CS_ASSERT (0x01) | 23 | #define PXA2XX_CS_ASSERT (0x01) |
24 | #define PXA2XX_CS_DEASSERT (0x02) | 24 | #define PXA2XX_CS_DEASSERT (0x02) |
25 | 25 | ||
26 | struct dma_chan; | ||
27 | |||
26 | /* device.platform_data for SSP controller devices */ | 28 | /* device.platform_data for SSP controller devices */ |
27 | struct pxa2xx_spi_master { | 29 | struct pxa2xx_spi_master { |
28 | u32 clock_enable; | 30 | u32 clock_enable; |
@@ -30,10 +32,9 @@ struct pxa2xx_spi_master { | |||
30 | u8 enable_dma; | 32 | u8 enable_dma; |
31 | 33 | ||
32 | /* DMA engine specific config */ | 34 | /* DMA engine specific config */ |
33 | int rx_chan_id; | 35 | bool (*dma_filter)(struct dma_chan *chan, void *param); |
34 | int tx_chan_id; | 36 | void *tx_param; |
35 | int rx_slave_id; | 37 | void *rx_param; |
36 | int tx_slave_id; | ||
37 | 38 | ||
38 | /* For non-PXA arches */ | 39 | /* For non-PXA arches */ |
39 | struct ssp_device ssp; | 40 | struct ssp_device ssp; |
diff --git a/include/sound/atmel-abdac.h b/include/sound/atmel-abdac.h index edff6a8ba1b5..a8f735d677fa 100644 --- a/include/sound/atmel-abdac.h +++ b/include/sound/atmel-abdac.h | |||
@@ -10,7 +10,7 @@ | |||
10 | #ifndef __INCLUDE_SOUND_ATMEL_ABDAC_H | 10 | #ifndef __INCLUDE_SOUND_ATMEL_ABDAC_H |
11 | #define __INCLUDE_SOUND_ATMEL_ABDAC_H | 11 | #define __INCLUDE_SOUND_ATMEL_ABDAC_H |
12 | 12 | ||
13 | #include <linux/dw_dmac.h> | 13 | #include <linux/platform_data/dma-dw.h> |
14 | 14 | ||
15 | /** | 15 | /** |
16 | * struct atmel_abdac_pdata - board specific ABDAC configuration | 16 | * struct atmel_abdac_pdata - board specific ABDAC configuration |
diff --git a/include/sound/atmel-ac97c.h b/include/sound/atmel-ac97c.h index 00e6c289a936..f2a1cdc37661 100644 --- a/include/sound/atmel-ac97c.h +++ b/include/sound/atmel-ac97c.h | |||
@@ -10,7 +10,7 @@ | |||
10 | #ifndef __INCLUDE_SOUND_ATMEL_AC97C_H | 10 | #ifndef __INCLUDE_SOUND_ATMEL_AC97C_H |
11 | #define __INCLUDE_SOUND_ATMEL_AC97C_H | 11 | #define __INCLUDE_SOUND_ATMEL_AC97C_H |
12 | 12 | ||
13 | #include <linux/dw_dmac.h> | 13 | #include <linux/platform_data/dma-dw.h> |
14 | 14 | ||
15 | #define AC97C_CAPTURE 0x01 | 15 | #define AC97C_CAPTURE 0x01 |
16 | #define AC97C_PLAYBACK 0x02 | 16 | #define AC97C_PLAYBACK 0x02 |
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c index edf2ca72d518..31061e3521d4 100644 --- a/sound/atmel/abdac.c +++ b/sound/atmel/abdac.c | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | #include <linux/clk.h> | 10 | #include <linux/clk.h> |
11 | #include <linux/bitmap.h> | 11 | #include <linux/bitmap.h> |
12 | #include <linux/dw_dmac.h> | ||
13 | #include <linux/dmaengine.h> | 12 | #include <linux/dmaengine.h> |
14 | #include <linux/dma-mapping.h> | 13 | #include <linux/dma-mapping.h> |
15 | #include <linux/init.h> | 14 | #include <linux/init.h> |
@@ -25,6 +24,9 @@ | |||
25 | #include <sound/pcm_params.h> | 24 | #include <sound/pcm_params.h> |
26 | #include <sound/atmel-abdac.h> | 25 | #include <sound/atmel-abdac.h> |
27 | 26 | ||
27 | #include <linux/platform_data/dma-dw.h> | ||
28 | #include <linux/dma/dw.h> | ||
29 | |||
28 | /* DAC register offsets */ | 30 | /* DAC register offsets */ |
29 | #define DAC_DATA 0x0000 | 31 | #define DAC_DATA 0x0000 |
30 | #define DAC_CTRL 0x0008 | 32 | #define DAC_CTRL 0x0008 |
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c index a04d23174dc2..b59427d5a697 100644 --- a/sound/atmel/ac97c.c +++ b/sound/atmel/ac97c.c | |||
@@ -31,7 +31,8 @@ | |||
31 | #include <sound/atmel-ac97c.h> | 31 | #include <sound/atmel-ac97c.h> |
32 | #include <sound/memalloc.h> | 32 | #include <sound/memalloc.h> |
33 | 33 | ||
34 | #include <linux/dw_dmac.h> | 34 | #include <linux/platform_data/dma-dw.h> |
35 | #include <linux/dma/dw.h> | ||
35 | 36 | ||
36 | #include <mach/cpu.h> | 37 | #include <mach/cpu.h> |
37 | 38 | ||
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c index 5e8d81330173..64e8b949a1a3 100644 --- a/sound/soc/pxa/mmp-pcm.c +++ b/sound/soc/pxa/mmp-pcm.c | |||
@@ -34,7 +34,8 @@ struct mmp_dma_data { | |||
34 | SNDRV_PCM_INFO_MMAP_VALID | \ | 34 | SNDRV_PCM_INFO_MMAP_VALID | \ |
35 | SNDRV_PCM_INFO_INTERLEAVED | \ | 35 | SNDRV_PCM_INFO_INTERLEAVED | \ |
36 | SNDRV_PCM_INFO_PAUSE | \ | 36 | SNDRV_PCM_INFO_PAUSE | \ |
37 | SNDRV_PCM_INFO_RESUME) | 37 | SNDRV_PCM_INFO_RESUME | \ |
38 | SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) | ||
38 | 39 | ||
39 | static struct snd_pcm_hardware mmp_pcm_hardware[] = { | 40 | static struct snd_pcm_hardware mmp_pcm_hardware[] = { |
40 | { | 41 | { |