 56 files changed, 2037 insertions(+), 705 deletions(-)
diff --git a/Documentation/devicetree/bindings/dma/arm-pl330.txt b/Documentation/devicetree/bindings/dma/arm-pl330.txt
index 36e27d54260b..267565894db9 100644
--- a/Documentation/devicetree/bindings/dma/arm-pl330.txt
+++ b/Documentation/devicetree/bindings/dma/arm-pl330.txt
@@ -10,7 +10,11 @@ Required properties:
 - interrupts: interrupt number to the cpu.
 
 Optional properties:
 - dma-coherent : Present if dma operations are coherent
+- #dma-cells: must be <1>, used to represent the number of integer
+  cells in the dmas property of client device.
+- dma-channels: contains the total number of DMA channels supported by the DMAC
+- dma-requests: contains the total number of DMA requests supported by the DMAC
 
 Example:
 
@@ -18,16 +22,23 @@ Example:
 		compatible = "arm,pl330", "arm,primecell";
 		reg = <0x12680000 0x1000>;
 		interrupts = <99>;
+		#dma-cells = <1>;
+		#dma-channels = <8>;
+		#dma-requests = <32>;
 	};
 
 Client drivers (device nodes requiring dma transfers from dev-to-mem or
-mem-to-dev) should specify the DMA channel numbers using a two-value pair
+mem-to-dev) should specify the DMA channel numbers and dma channel names
 as shown below.
 
   [property name] = <[phandle of the dma controller] [dma request id]>;
+  [property name] = <[dma channel name]>
 
 where 'dma request id' is the dma request number which is connected
-to the client controller. The 'property name' is recommended to be
-of the form <name>-dma-channel.
+to the client controller. The 'property name' must be 'dmas' and 'dma-names'
+as required by the generic dma device tree binding helpers. The dma
+names correspond 1:1 with the dma request ids in the dmas property.
 
-  Example:  tx-dma-channel = <&pdma0 12>;
+  Example:  dmas = <&pdma0 12
+		    &pdma1 11>;
+	    dma-names = "tx", "rx";
diff --git a/Documentation/devicetree/bindings/dma/dma.txt b/Documentation/devicetree/bindings/dma/dma.txt
new file mode 100644
index 000000000000..8f504e6bae14
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/dma.txt
@@ -0,0 +1,81 @@
+* Generic DMA Controller and DMA request bindings
+
+Generic binding to provide a way for a driver using DMA Engine to retrieve the
+DMA request or channel information that goes from a hardware device to a DMA
+controller.
+
+
+* DMA controller
+
+Required property:
+- #dma-cells:		Must be at least 1. Used to provide DMA controller
+			specific information. See DMA client binding below for
+			more details.
+
+Optional properties:
+- dma-channels:		Number of DMA channels supported by the controller.
+- dma-requests:		Number of DMA request signals supported by the
+			controller.
+
+Example:
+
+	dma: dma@48000000 {
+		compatible = "ti,omap-sdma";
+		reg = <0x48000000 0x1000>;
+		interrupts = <0 12 0x4
+			      0 13 0x4
+			      0 14 0x4
+			      0 15 0x4>;
+		#dma-cells = <1>;
+		dma-channels = <32>;
+		dma-requests = <127>;
+	};
+
+
+* DMA client
+
+Client drivers should specify the DMA property using a phandle to the controller
+followed by DMA controller specific data.
+
+Required property:
+- dmas:			List of one or more DMA specifiers, each consisting of
+			- A phandle pointing to DMA controller node
+			- A number of integer cells, as determined by the
+			  #dma-cells property in the node referenced by phandle
+			  containing DMA controller specific information. This
+			  typically contains a DMA request line number or a
+			  channel number, but can contain any data that is
+			  required for configuring a channel.
+- dma-names:		Contains one identifier string for each DMA specifier in
+			the dmas property. The specific strings that can be used
+			are defined in the binding of the DMA client device.
+			Multiple DMA specifiers can be used to represent
+			alternatives and in this case the dma-names for those
+			DMA specifiers must be identical (see examples).
+
+Examples:
+
+1. A device with one DMA read channel, one DMA write channel:
+
+	i2c1: i2c@1 {
+		...
+		dmas = <&dma 2		/* read channel */
+			&dma 3>;	/* write channel */
+		dma-names = "rx", "tx";
+		...
+	};
+
+2. A single read-write channel with three alternative DMA controllers:
+
+	dmas = <&dma1 5
+		&dma2 7
+		&dma3 2>;
+	dma-names = "rx-tx", "rx-tx", "rx-tx";
+
+3. A device with three channels, one of which has two alternatives:
+
+	dmas = <&dma1 2		/* read channel */
+		&dma1 3		/* write channel */
+		&dma2 0		/* error read */
+		&dma3 0>;	/* alternative error read */
+	dma-names = "rx", "tx", "error", "error";
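
A controller driver makes the binding above usable by registering a translation callback with the generic DT DMA helpers added elsewhere in this series (drivers/dma/of-dma.c, not shown in this excerpt). The following is a minimal provider-side sketch in C; it assumes of_dma_controller_register() takes the (node, xlate, data) arguments shown, and the omap_dma_xlate() name and body are purely illustrative, not the real OMAP driver.

#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_dma.h>

/* Illustrative translation hook: map a one-cell DMA specifier to a channel. */
static struct dma_chan *omap_dma_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	if (dma_spec->args_count != 1)		/* #dma-cells = <1> above */
		return NULL;

	/*
	 * dma_spec->args[0] holds the DMA request number from the client's
	 * dmas entry; a real driver would return the dma_chan wired to it.
	 */
	return NULL;	/* placeholder */
}

static int dma_controller_register_example(struct device_node *np)
{
	/* Makes the controller visible to of_dma_request_slave_channel(). */
	return of_dma_controller_register(np, omap_dma_xlate, NULL);
}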
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index c0d85dbcada5..5bb3dfb6f1d8 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -6,6 +6,26 @@ Required properties:
 - interrupt-parent: Should be the phandle for the interrupt controller
   that services interrupts for this device
 - interrupt: Should contain the DMAC interrupt number
+- nr_channels: Number of channels supported by hardware
+- is_private: The device channels should be marked as private and not used by
+  the general purpose DMA channel allocator. False if not passed.
+- chan_allocation_order: order of allocation of channel, 0 (default): ascending,
+  1: descending
+- chan_priority: priority of channels. 0 (default): increase from chan 0->n, 1:
+  increase from chan n->0
+- block_size: Maximum block size supported by the controller
+- nr_masters: Number of AHB masters supported by the controller
+- data_width: Maximum data width supported by hardware per AHB master
+  (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
+- slave_info:
+  - bus_id: name of this device channel, not just a device name since
+    devices may have more than one channel e.g. "foo_tx". For using the
+    dw_generic_filter(), slave drivers must pass exactly this string as
+    param to filter function.
+  - cfg_hi: Platform-specific initializer for the CFG_HI register
+  - cfg_lo: Platform-specific initializer for the CFG_LO register
+  - src_master: src master for transfers on allocated channel.
+  - dst_master: dest master for transfers on allocated channel.
 
 Example:
 
@@ -14,4 +34,28 @@ Example:
 		reg = <0xfc000000 0x1000>;
 		interrupt-parent = <&vic1>;
 		interrupts = <12>;
+
+		nr_channels = <8>;
+		chan_allocation_order = <1>;
+		chan_priority = <1>;
+		block_size = <0xfff>;
+		nr_masters = <2>;
+		data_width = <3 3 0 0>;
+
+		slave_info {
+			uart0-tx {
+				bus_id = "uart0-tx";
+				cfg_hi = <0x4000>;	/* 0x8 << 11 */
+				cfg_lo = <0>;
+				src_master = <0>;
+				dst_master = <1>;
+			};
+			spi0-tx {
+				bus_id = "spi0-tx";
+				cfg_hi = <0x2000>;	/* 0x4 << 11 */
+				cfg_lo = <0>;
+				src_master = <0>;
+				dst_master = <0>;
+			};
+		};
 	};
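
For reference, the bus_id strings above are what a slave driver passes when requesting one of these channels through the DMA engine core. Below is a minimal sketch in C, assuming the filter is exported under the dw_generic_filter() name used in the binding text; the exact symbol name and its availability are an assumption, not something confirmed by this patch.

#include <linux/dmaengine.h>

/* Prototype assumed from the binding text; the real symbol name may differ. */
extern bool dw_generic_filter(struct dma_chan *chan, void *param);

static struct dma_chan *request_uart0_tx(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The filter param must be exactly the bus_id string from slave_info. */
	return dma_request_channel(mask, dw_generic_filter, "uart0-tx");
}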
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index f50b4e854355..b1ac73e21c80 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -312,24 +312,36 @@
312 | compatible = "arm,pl330", "arm,primecell"; | 312 | compatible = "arm,pl330", "arm,primecell"; |
313 | reg = <0x121A0000 0x1000>; | 313 | reg = <0x121A0000 0x1000>; |
314 | interrupts = <0 34 0>; | 314 | interrupts = <0 34 0>; |
315 | #dma-cells = <1>; | ||
316 | #dma-channels = <8>; | ||
317 | #dma-requests = <32>; | ||
315 | }; | 318 | }; |
316 | 319 | ||
317 | pdma1: pdma@121B0000 { | 320 | pdma1: pdma@121B0000 { |
318 | compatible = "arm,pl330", "arm,primecell"; | 321 | compatible = "arm,pl330", "arm,primecell"; |
319 | reg = <0x121B0000 0x1000>; | 322 | reg = <0x121B0000 0x1000>; |
320 | interrupts = <0 35 0>; | 323 | interrupts = <0 35 0>; |
324 | #dma-cells = <1>; | ||
325 | #dma-channels = <8>; | ||
326 | #dma-requests = <32>; | ||
321 | }; | 327 | }; |
322 | 328 | ||
323 | mdma0: mdma@10800000 { | 329 | mdma0: mdma@10800000 { |
324 | compatible = "arm,pl330", "arm,primecell"; | 330 | compatible = "arm,pl330", "arm,primecell"; |
325 | reg = <0x10800000 0x1000>; | 331 | reg = <0x10800000 0x1000>; |
326 | interrupts = <0 33 0>; | 332 | interrupts = <0 33 0>; |
333 | #dma-cells = <1>; | ||
334 | #dma-channels = <8>; | ||
335 | #dma-requests = <1>; | ||
327 | }; | 336 | }; |
328 | 337 | ||
329 | mdma1: mdma@11C10000 { | 338 | mdma1: mdma@11C10000 { |
330 | compatible = "arm,pl330", "arm,primecell"; | 339 | compatible = "arm,pl330", "arm,primecell"; |
331 | reg = <0x11C10000 0x1000>; | 340 | reg = <0x11C10000 0x1000>; |
332 | interrupts = <0 124 0>; | 341 | interrupts = <0 124 0>; |
342 | #dma-cells = <1>; | ||
343 | #dma-channels = <8>; | ||
344 | #dma-requests = <1>; | ||
333 | }; | 345 | }; |
334 | }; | 346 | }; |
335 | 347 | ||
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index ec29b35f25c0..6af1aa1ef213 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -23,13 +23,12 @@
23 | #include <linux/clk.h> | 23 | #include <linux/clk.h> |
24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
25 | #include <linux/io.h> | 25 | #include <linux/io.h> |
26 | #include <linux/amba/pl080.h> | ||
26 | 27 | ||
27 | #include <mach/dma.h> | 28 | #include <mach/dma.h> |
28 | #include <mach/map.h> | 29 | #include <mach/map.h> |
29 | #include <mach/irqs.h> | 30 | #include <mach/irqs.h> |
30 | 31 | ||
31 | #include <asm/hardware/pl080.h> | ||
32 | |||
33 | #include "regs-sys.h" | 32 | #include "regs-sys.h" |
34 | 33 | ||
35 | /* dma channel state information */ | 34 | /* dma channel state information */ |
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index b2ba516ca2d4..f9d754f90c59 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -16,7 +16,6 @@
16 | #include <linux/amba/pl022.h> | 16 | #include <linux/amba/pl022.h> |
17 | #include <linux/amba/pl08x.h> | 17 | #include <linux/amba/pl08x.h> |
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <asm/hardware/pl080.h> | ||
20 | #include <plat/pl080.h> | 19 | #include <plat/pl080.h> |
21 | #include <mach/generic.h> | 20 | #include <mach/generic.h> |
22 | #include <mach/spear.h> | 21 | #include <mach/spear.h> |
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
index b8bd33ca88bd..8904d8a52d84 100644
--- a/arch/arm/mach-spear6xx/spear6xx.c
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -20,7 +20,7 @@
20 | #include <linux/of.h> | 20 | #include <linux/of.h> |
21 | #include <linux/of_address.h> | 21 | #include <linux/of_address.h> |
22 | #include <linux/of_platform.h> | 22 | #include <linux/of_platform.h> |
23 | #include <asm/hardware/pl080.h> | 23 | #include <linux/amba/pl080.h> |
24 | #include <asm/mach/arch.h> | 24 | #include <asm/mach/arch.h> |
25 | #include <asm/mach/time.h> | 25 | #include <asm/mach/time.h> |
26 | #include <asm/mach/map.h> | 26 | #include <asm/mach/map.h> |
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 361b5e8239bc..9e62feffb374 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -67,6 +67,12 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
67 | 67 | ||
68 | tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, | 68 | tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, |
69 | len, dma_prep_flags); | 69 | len, dma_prep_flags); |
70 | if (!tx) { | ||
71 | dma_unmap_page(device->dev, dma_dest, len, | ||
72 | DMA_FROM_DEVICE); | ||
73 | dma_unmap_page(device->dev, dma_src, len, | ||
74 | DMA_TO_DEVICE); | ||
75 | } | ||
70 | } | 76 | } |
71 | 77 | ||
72 | if (tx) { | 78 | if (tx) { |
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 58e4a8752aee..05a4d1e00148 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -25,6 +25,7 @@
25 | */ | 25 | */ |
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
28 | #include <linux/module.h> | ||
28 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
29 | #include <linux/dma-mapping.h> | 30 | #include <linux/dma-mapping.h> |
30 | #include <linux/async_tx.h> | 31 | #include <linux/async_tx.h> |
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 842120979374..7be34248b450 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -128,8 +128,8 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
128 | } | 128 | } |
129 | device->device_issue_pending(chan); | 129 | device->device_issue_pending(chan); |
130 | } else { | 130 | } else { |
131 | if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) | 131 | if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS) |
132 | panic("%s: DMA_ERROR waiting for depend_tx\n", | 132 | panic("%s: DMA error waiting for depend_tx\n", |
133 | __func__); | 133 | __func__); |
134 | tx->tx_submit(tx); | 134 | tx->tx_submit(tx); |
135 | } | 135 | } |
@@ -280,8 +280,9 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx) | |||
280 | * we are referring to the correct operation | 280 | * we are referring to the correct operation |
281 | */ | 281 | */ |
282 | BUG_ON(async_tx_test_ack(*tx)); | 282 | BUG_ON(async_tx_test_ack(*tx)); |
283 | if (dma_wait_for_async_tx(*tx) == DMA_ERROR) | 283 | if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS) |
284 | panic("DMA_ERROR waiting for transaction\n"); | 284 | panic("%s: DMA error waiting for transaction\n", |
285 | __func__); | ||
285 | async_tx_ack(*tx); | 286 | async_tx_ack(*tx); |
286 | *tx = NULL; | 287 | *tx = NULL; |
287 | } | 288 | } |
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 154cc84381c2..8ade0a0481c6 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -230,9 +230,7 @@ EXPORT_SYMBOL_GPL(async_xor);
230 | 230 | ||
231 | static int page_is_zero(struct page *p, unsigned int offset, size_t len) | 231 | static int page_is_zero(struct page *p, unsigned int offset, size_t len) |
232 | { | 232 | { |
233 | char *a = page_address(p) + offset; | 233 | return !memchr_inv(page_address(p) + offset, 0, len); |
234 | return ((*(u32 *) a) == 0 && | ||
235 | memcmp(a, a + 4, len - 4) == 0); | ||
236 | } | 234 | } |
237 | 235 | ||
238 | static inline struct dma_chan * | 236 | static inline struct dma_chan * |
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index bc6f5faa1e9e..819dfda88236 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -420,6 +420,11 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
420 | 420 | ||
421 | raw_spin_lock_irqsave(&dca_lock, flags); | 421 | raw_spin_lock_irqsave(&dca_lock, flags); |
422 | 422 | ||
423 | if (list_empty(&dca_domains)) { | ||
424 | raw_spin_unlock_irqrestore(&dca_lock, flags); | ||
425 | return; | ||
426 | } | ||
427 | |||
423 | list_del(&dca->node); | 428 | list_del(&dca->node); |
424 | 429 | ||
425 | pci_rc = dca_pci_rc_from_dev(dev); | 430 | pci_rc = dca_pci_rc_from_dev(dev); |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 40179e749f08..80b69971cf28 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -51,7 +51,7 @@ config ASYNC_TX_ENABLE_CHANNEL_SWITCH
51 | 51 | ||
52 | config AMBA_PL08X | 52 | config AMBA_PL08X |
53 | bool "ARM PrimeCell PL080 or PL081 support" | 53 | bool "ARM PrimeCell PL080 or PL081 support" |
54 | depends on ARM_AMBA && EXPERIMENTAL | 54 | depends on ARM_AMBA |
55 | select DMA_ENGINE | 55 | select DMA_ENGINE |
56 | select DMA_VIRTUAL_CHANNELS | 56 | select DMA_VIRTUAL_CHANNELS |
57 | help | 57 | help |
@@ -83,7 +83,6 @@ config INTEL_IOP_ADMA | |||
83 | 83 | ||
84 | config DW_DMAC | 84 | config DW_DMAC |
85 | tristate "Synopsys DesignWare AHB DMA support" | 85 | tristate "Synopsys DesignWare AHB DMA support" |
86 | depends on HAVE_CLK | ||
87 | select DMA_ENGINE | 86 | select DMA_ENGINE |
88 | default y if CPU_AT32AP7000 | 87 | default y if CPU_AT32AP7000 |
89 | help | 88 | help |
@@ -215,8 +214,8 @@ config TIMB_DMA | |||
215 | Enable support for the Timberdale FPGA DMA engine. | 214 | Enable support for the Timberdale FPGA DMA engine. |
216 | 215 | ||
217 | config SIRF_DMA | 216 | config SIRF_DMA |
218 | tristate "CSR SiRFprimaII DMA support" | 217 | tristate "CSR SiRFprimaII/SiRFmarco DMA support" |
219 | depends on ARCH_PRIMA2 | 218 | depends on ARCH_SIRF |
220 | select DMA_ENGINE | 219 | select DMA_ENGINE |
221 | help | 220 | help |
222 | Enable support for the CSR SiRFprimaII DMA engine. | 221 | Enable support for the CSR SiRFprimaII DMA engine. |
@@ -328,6 +327,10 @@ config DMA_ENGINE | |||
328 | config DMA_VIRTUAL_CHANNELS | 327 | config DMA_VIRTUAL_CHANNELS |
329 | tristate | 328 | tristate |
330 | 329 | ||
330 | config DMA_OF | ||
331 | def_bool y | ||
332 | depends on OF | ||
333 | |||
331 | comment "DMA Clients" | 334 | comment "DMA Clients" |
332 | depends on DMA_ENGINE | 335 | depends on DMA_ENGINE |
333 | 336 | ||
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 642d96736cf5..488e3ff85b52 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -3,6 +3,8 @@ ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
3 | 3 | ||
4 | obj-$(CONFIG_DMA_ENGINE) += dmaengine.o | 4 | obj-$(CONFIG_DMA_ENGINE) += dmaengine.o |
5 | obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o | 5 | obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o |
6 | obj-$(CONFIG_DMA_OF) += of-dma.o | ||
7 | |||
6 | obj-$(CONFIG_NET_DMA) += iovlock.o | 8 | obj-$(CONFIG_NET_DMA) += iovlock.o |
7 | obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o | 9 | obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o |
8 | obj-$(CONFIG_DMATEST) += dmatest.o | 10 | obj-$(CONFIG_DMATEST) += dmatest.o |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index d1cc5791476b..8bad254a498d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -83,7 +83,7 @@
83 | #include <linux/pm_runtime.h> | 83 | #include <linux/pm_runtime.h> |
84 | #include <linux/seq_file.h> | 84 | #include <linux/seq_file.h> |
85 | #include <linux/slab.h> | 85 | #include <linux/slab.h> |
86 | #include <asm/hardware/pl080.h> | 86 | #include <linux/amba/pl080.h> |
87 | 87 | ||
88 | #include "dmaengine.h" | 88 | #include "dmaengine.h" |
89 | #include "virt-dma.h" | 89 | #include "virt-dma.h" |
@@ -1096,15 +1096,9 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, | |||
1096 | struct pl08x_dma_chan *plchan) | 1096 | struct pl08x_dma_chan *plchan) |
1097 | { | 1097 | { |
1098 | LIST_HEAD(head); | 1098 | LIST_HEAD(head); |
1099 | struct pl08x_txd *txd; | ||
1100 | 1099 | ||
1101 | vchan_get_all_descriptors(&plchan->vc, &head); | 1100 | vchan_get_all_descriptors(&plchan->vc, &head); |
1102 | 1101 | vchan_dma_desc_free_list(&plchan->vc, &head); | |
1103 | while (!list_empty(&head)) { | ||
1104 | txd = list_first_entry(&head, struct pl08x_txd, vd.node); | ||
1105 | list_del(&txd->vd.node); | ||
1106 | pl08x_desc_free(&txd->vd); | ||
1107 | } | ||
1108 | } | 1102 | } |
1109 | 1103 | ||
1110 | /* | 1104 | /* |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 13a02f4425b0..6e13f262139a 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -778,7 +778,7 @@ err:
778 | */ | 778 | */ |
779 | static int | 779 | static int |
780 | atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, | 780 | atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, |
781 | size_t period_len, enum dma_transfer_direction direction) | 781 | size_t period_len) |
782 | { | 782 | { |
783 | if (period_len > (ATC_BTSIZE_MAX << reg_width)) | 783 | if (period_len > (ATC_BTSIZE_MAX << reg_width)) |
784 | goto err_out; | 784 | goto err_out; |
@@ -786,8 +786,6 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, | |||
786 | goto err_out; | 786 | goto err_out; |
787 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) | 787 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) |
788 | goto err_out; | 788 | goto err_out; |
789 | if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV)))) | ||
790 | goto err_out; | ||
791 | 789 | ||
792 | return 0; | 790 | return 0; |
793 | 791 | ||
@@ -886,14 +884,16 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | |||
886 | return NULL; | 884 | return NULL; |
887 | } | 885 | } |
888 | 886 | ||
887 | if (unlikely(!is_slave_direction(direction))) | ||
888 | goto err_out; | ||
889 | |||
889 | if (sconfig->direction == DMA_MEM_TO_DEV) | 890 | if (sconfig->direction == DMA_MEM_TO_DEV) |
890 | reg_width = convert_buswidth(sconfig->dst_addr_width); | 891 | reg_width = convert_buswidth(sconfig->dst_addr_width); |
891 | else | 892 | else |
892 | reg_width = convert_buswidth(sconfig->src_addr_width); | 893 | reg_width = convert_buswidth(sconfig->src_addr_width); |
893 | 894 | ||
894 | /* Check for too big/unaligned periods and unaligned DMA buffer */ | 895 | /* Check for too big/unaligned periods and unaligned DMA buffer */ |
895 | if (atc_dma_cyclic_check_values(reg_width, buf_addr, | 896 | if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len)) |
896 | period_len, direction)) | ||
897 | goto err_out; | 897 | goto err_out; |
898 | 898 | ||
899 | /* build cyclic linked list */ | 899 | /* build cyclic linked list */ |
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 116e4adffb08..0eb3c1388667 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -369,10 +369,10 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
369 | 369 | ||
370 | static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) | 370 | static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) |
371 | { | 371 | { |
372 | dev_printk(KERN_CRIT, chan2dev(&atchan->chan_common), | 372 | dev_crit(chan2dev(&atchan->chan_common), |
373 | " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", | 373 | " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", |
374 | lli->saddr, lli->daddr, | 374 | lli->saddr, lli->daddr, |
375 | lli->ctrla, lli->ctrlb, lli->dscr); | 375 | lli->ctrla, lli->ctrlb, lli->dscr); |
376 | } | 376 | } |
377 | 377 | ||
378 | 378 | ||
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index a2f079aca550..797940e532ff 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2355,7 +2355,9 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2355 | enum dma_status ret; | 2355 | enum dma_status ret; |
2356 | 2356 | ||
2357 | ret = dma_cookie_status(chan, cookie, txstate); | 2357 | ret = dma_cookie_status(chan, cookie, txstate); |
2358 | /* FIXME: should be conditional on ret != DMA_SUCCESS? */ | 2358 | if (ret == DMA_SUCCESS) |
2359 | return ret; | ||
2360 | |||
2359 | dma_set_residue(txstate, coh901318_get_bytes_left(chan)); | 2361 | dma_set_residue(txstate, coh901318_get_bytes_left(chan)); |
2360 | 2362 | ||
2361 | if (ret == DMA_IN_PROGRESS && cohc->stopped) | 2363 | if (ret == DMA_IN_PROGRESS && cohc->stopped) |
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 3e96610e18e2..702112d547c8 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -61,7 +61,7 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
61 | dma_addr_t phy; | 61 | dma_addr_t phy; |
62 | 62 | ||
63 | if (len == 0) | 63 | if (len == 0) |
64 | goto err; | 64 | return NULL; |
65 | 65 | ||
66 | spin_lock(&pool->lock); | 66 | spin_lock(&pool->lock); |
67 | 67 | ||
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a815d44c70a4..242b8c0a3de8 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -62,6 +62,7 @@
62 | #include <linux/rculist.h> | 62 | #include <linux/rculist.h> |
63 | #include <linux/idr.h> | 63 | #include <linux/idr.h> |
64 | #include <linux/slab.h> | 64 | #include <linux/slab.h> |
65 | #include <linux/of_dma.h> | ||
65 | 66 | ||
66 | static DEFINE_MUTEX(dma_list_mutex); | 67 | static DEFINE_MUTEX(dma_list_mutex); |
67 | static DEFINE_IDR(dma_idr); | 68 | static DEFINE_IDR(dma_idr); |
@@ -266,7 +267,10 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) | |||
266 | pr_err("%s: timeout!\n", __func__); | 267 | pr_err("%s: timeout!\n", __func__); |
267 | return DMA_ERROR; | 268 | return DMA_ERROR; |
268 | } | 269 | } |
269 | } while (status == DMA_IN_PROGRESS); | 270 | if (status != DMA_IN_PROGRESS) |
271 | break; | ||
272 | cpu_relax(); | ||
273 | } while (1); | ||
270 | 274 | ||
271 | return status; | 275 | return status; |
272 | } | 276 | } |
@@ -546,6 +550,21 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v | |||
546 | } | 550 | } |
547 | EXPORT_SYMBOL_GPL(__dma_request_channel); | 551 | EXPORT_SYMBOL_GPL(__dma_request_channel); |
548 | 552 | ||
553 | /** | ||
554 | * dma_request_slave_channel - try to allocate an exclusive slave channel | ||
555 | * @dev: pointer to client device structure | ||
556 | * @name: slave channel name | ||
557 | */ | ||
558 | struct dma_chan *dma_request_slave_channel(struct device *dev, char *name) | ||
559 | { | ||
560 | /* If device-tree is present get slave info from here */ | ||
561 | if (dev->of_node) | ||
562 | return of_dma_request_slave_channel(dev->of_node, name); | ||
563 | |||
564 | return NULL; | ||
565 | } | ||
566 | EXPORT_SYMBOL_GPL(dma_request_slave_channel); | ||
567 | |||
549 | void dma_release_channel(struct dma_chan *chan) | 568 | void dma_release_channel(struct dma_chan *chan) |
550 | { | 569 | { |
551 | mutex_lock(&dma_list_mutex); | 570 | mutex_lock(&dma_list_mutex); |
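
The new dma_request_slave_channel() helper gives slave drivers a single entry point for looking up channels named in the device tree. Below is a minimal consumer-side sketch in C; the "tx" name and the NULL fallback filter are illustrative only.

#include <linux/device.h>
#include <linux/dmaengine.h>

/* Illustrative probe-time lookup of a DT-described "tx" channel. */
static struct dma_chan *foo_request_tx_chan(struct device *dev)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	/* Resolves "tx" against the dmas/dma-names properties of dev->of_node. */
	chan = dma_request_slave_channel(dev, "tx");
	if (chan)
		return chan;

	/*
	 * Not described in DT (or booting without DT): fall back to the
	 * classic capability-mask request; a real driver would pass its
	 * platform filter function and data instead of NULL, NULL.
	 */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, NULL, NULL);
}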
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 64b048d7fba7..a2c8904b63ea 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -242,6 +242,13 @@ static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
242 | dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL); | 242 | dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL); |
243 | } | 243 | } |
244 | 244 | ||
245 | static unsigned int min_odd(unsigned int x, unsigned int y) | ||
246 | { | ||
247 | unsigned int val = min(x, y); | ||
248 | |||
249 | return val % 2 ? val : val - 1; | ||
250 | } | ||
251 | |||
245 | /* | 252 | /* |
246 | * This function repeatedly tests DMA transfers of various lengths and | 253 | * This function repeatedly tests DMA transfers of various lengths and |
247 | * offsets for a given operation type until it is told to exit by | 254 | * offsets for a given operation type until it is told to exit by |
@@ -262,6 +269,7 @@ static int dmatest_func(void *data) | |||
262 | struct dmatest_thread *thread = data; | 269 | struct dmatest_thread *thread = data; |
263 | struct dmatest_done done = { .wait = &done_wait }; | 270 | struct dmatest_done done = { .wait = &done_wait }; |
264 | struct dma_chan *chan; | 271 | struct dma_chan *chan; |
272 | struct dma_device *dev; | ||
265 | const char *thread_name; | 273 | const char *thread_name; |
266 | unsigned int src_off, dst_off, len; | 274 | unsigned int src_off, dst_off, len; |
267 | unsigned int error_count; | 275 | unsigned int error_count; |
@@ -283,13 +291,16 @@ static int dmatest_func(void *data) | |||
283 | 291 | ||
284 | smp_rmb(); | 292 | smp_rmb(); |
285 | chan = thread->chan; | 293 | chan = thread->chan; |
294 | dev = chan->device; | ||
286 | if (thread->type == DMA_MEMCPY) | 295 | if (thread->type == DMA_MEMCPY) |
287 | src_cnt = dst_cnt = 1; | 296 | src_cnt = dst_cnt = 1; |
288 | else if (thread->type == DMA_XOR) { | 297 | else if (thread->type == DMA_XOR) { |
289 | src_cnt = xor_sources | 1; /* force odd to ensure dst = src */ | 298 | /* force odd to ensure dst = src */ |
299 | src_cnt = min_odd(xor_sources | 1, dev->max_xor); | ||
290 | dst_cnt = 1; | 300 | dst_cnt = 1; |
291 | } else if (thread->type == DMA_PQ) { | 301 | } else if (thread->type == DMA_PQ) { |
292 | src_cnt = pq_sources | 1; /* force odd to ensure dst = src */ | 302 | /* force odd to ensure dst = src */ |
303 | src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0)); | ||
293 | dst_cnt = 2; | 304 | dst_cnt = 2; |
294 | for (i = 0; i < src_cnt; i++) | 305 | for (i = 0; i < src_cnt; i++) |
295 | pq_coefs[i] = 1; | 306 | pq_coefs[i] = 1; |
@@ -327,7 +338,6 @@ static int dmatest_func(void *data) | |||
327 | 338 | ||
328 | while (!kthread_should_stop() | 339 | while (!kthread_should_stop() |
329 | && !(iterations && total_tests >= iterations)) { | 340 | && !(iterations && total_tests >= iterations)) { |
330 | struct dma_device *dev = chan->device; | ||
331 | struct dma_async_tx_descriptor *tx = NULL; | 341 | struct dma_async_tx_descriptor *tx = NULL; |
332 | dma_addr_t dma_srcs[src_cnt]; | 342 | dma_addr_t dma_srcs[src_cnt]; |
333 | dma_addr_t dma_dsts[dst_cnt]; | 343 | dma_addr_t dma_dsts[dst_cnt]; |
@@ -526,7 +536,9 @@ err_srcs: | |||
526 | thread_name, total_tests, failed_tests, ret); | 536 | thread_name, total_tests, failed_tests, ret); |
527 | 537 | ||
528 | /* terminate all transfers on specified channels */ | 538 | /* terminate all transfers on specified channels */ |
529 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); | 539 | if (ret) |
540 | dmaengine_terminate_all(chan); | ||
541 | |||
530 | if (iterations > 0) | 542 | if (iterations > 0) |
531 | while (!kthread_should_stop()) { | 543 | while (!kthread_should_stop()) { |
532 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); | 544 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); |
@@ -551,7 +563,7 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) | |||
551 | } | 563 | } |
552 | 564 | ||
553 | /* terminate all transfers on specified channels */ | 565 | /* terminate all transfers on specified channels */ |
554 | dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0); | 566 | dmaengine_terminate_all(dtc->chan); |
555 | 567 | ||
556 | kfree(dtc); | 568 | kfree(dtc); |
557 | } | 569 | } |
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index b33d1f6e1333..51c3ea2ed41a 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1,6 +1,5 @@
1 | /* | 1 | /* |
2 | * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on | 2 | * Core driver for the Synopsys DesignWare DMA Controller |
3 | * AVR32 systems.) | ||
4 | * | 3 | * |
5 | * Copyright (C) 2007-2008 Atmel Corporation | 4 | * Copyright (C) 2007-2008 Atmel Corporation |
6 | * Copyright (C) 2010-2011 ST Microelectronics | 5 | * Copyright (C) 2010-2011 ST Microelectronics |
@@ -9,11 +8,13 @@ | |||
9 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
11 | */ | 10 | */ |
11 | |||
12 | #include <linux/bitops.h> | 12 | #include <linux/bitops.h> |
13 | #include <linux/clk.h> | 13 | #include <linux/clk.h> |
14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
15 | #include <linux/dmaengine.h> | 15 | #include <linux/dmaengine.h> |
16 | #include <linux/dma-mapping.h> | 16 | #include <linux/dma-mapping.h> |
17 | #include <linux/dmapool.h> | ||
17 | #include <linux/err.h> | 18 | #include <linux/err.h> |
18 | #include <linux/init.h> | 19 | #include <linux/init.h> |
19 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
@@ -47,15 +48,32 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) | |||
47 | return slave ? slave->src_master : 1; | 48 | return slave ? slave->src_master : 1; |
48 | } | 49 | } |
49 | 50 | ||
51 | #define SRC_MASTER 0 | ||
52 | #define DST_MASTER 1 | ||
53 | |||
54 | static inline unsigned int dwc_get_master(struct dma_chan *chan, int master) | ||
55 | { | ||
56 | struct dw_dma *dw = to_dw_dma(chan->device); | ||
57 | struct dw_dma_slave *dws = chan->private; | ||
58 | unsigned int m; | ||
59 | |||
60 | if (master == SRC_MASTER) | ||
61 | m = dwc_get_sms(dws); | ||
62 | else | ||
63 | m = dwc_get_dms(dws); | ||
64 | |||
65 | return min_t(unsigned int, dw->nr_masters - 1, m); | ||
66 | } | ||
67 | |||
50 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ | 68 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ |
51 | struct dw_dma_slave *__slave = (_chan->private); \ | ||
52 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ | 69 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ |
53 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ | 70 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ |
54 | int _dms = dwc_get_dms(__slave); \ | 71 | bool _is_slave = is_slave_direction(_dwc->direction); \ |
55 | int _sms = dwc_get_sms(__slave); \ | 72 | int _dms = dwc_get_master(_chan, DST_MASTER); \ |
56 | u8 _smsize = __slave ? _sconfig->src_maxburst : \ | 73 | int _sms = dwc_get_master(_chan, SRC_MASTER); \ |
74 | u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ | ||
57 | DW_DMA_MSIZE_16; \ | 75 | DW_DMA_MSIZE_16; \ |
58 | u8 _dmsize = __slave ? _sconfig->dst_maxburst : \ | 76 | u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ |
59 | DW_DMA_MSIZE_16; \ | 77 | DW_DMA_MSIZE_16; \ |
60 | \ | 78 | \ |
61 | (DWC_CTLL_DST_MSIZE(_dmsize) \ | 79 | (DWC_CTLL_DST_MSIZE(_dmsize) \ |
@@ -73,15 +91,14 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) | |||
73 | */ | 91 | */ |
74 | #define NR_DESCS_PER_CHANNEL 64 | 92 | #define NR_DESCS_PER_CHANNEL 64 |
75 | 93 | ||
76 | /*----------------------------------------------------------------------*/ | 94 | static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master) |
95 | { | ||
96 | struct dw_dma *dw = to_dw_dma(chan->device); | ||
77 | 97 | ||
78 | /* | 98 | return dw->data_width[dwc_get_master(chan, master)]; |
79 | * Because we're not relying on writeback from the controller (it may not | 99 | } |
80 | * even be configured into the core!) we don't need to use dma_pool. These | 100 | |
81 | * descriptors -- and associated data -- are cacheable. We do need to make | 101 | /*----------------------------------------------------------------------*/ |
82 | * sure their dcache entries are written back before handing them off to | ||
83 | * the controller, though. | ||
84 | */ | ||
85 | 102 | ||
86 | static struct device *chan2dev(struct dma_chan *chan) | 103 | static struct device *chan2dev(struct dma_chan *chan) |
87 | { | 104 | { |
@@ -94,7 +111,7 @@ static struct device *chan2parent(struct dma_chan *chan) | |||
94 | 111 | ||
95 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) | 112 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) |
96 | { | 113 | { |
97 | return list_entry(dwc->active_list.next, struct dw_desc, desc_node); | 114 | return to_dw_desc(dwc->active_list.next); |
98 | } | 115 | } |
99 | 116 | ||
100 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) | 117 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) |
@@ -121,19 +138,6 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) | |||
121 | return ret; | 138 | return ret; |
122 | } | 139 | } |
123 | 140 | ||
124 | static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) | ||
125 | { | ||
126 | struct dw_desc *child; | ||
127 | |||
128 | list_for_each_entry(child, &desc->tx_list, desc_node) | ||
129 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), | ||
130 | child->txd.phys, sizeof(child->lli), | ||
131 | DMA_TO_DEVICE); | ||
132 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), | ||
133 | desc->txd.phys, sizeof(desc->lli), | ||
134 | DMA_TO_DEVICE); | ||
135 | } | ||
136 | |||
137 | /* | 141 | /* |
138 | * Move a descriptor, including any children, to the free list. | 142 | * Move a descriptor, including any children, to the free list. |
139 | * `desc' must not be on any lists. | 143 | * `desc' must not be on any lists. |
@@ -145,8 +149,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
145 | if (desc) { | 149 | if (desc) { |
146 | struct dw_desc *child; | 150 | struct dw_desc *child; |
147 | 151 | ||
148 | dwc_sync_desc_for_cpu(dwc, desc); | ||
149 | |||
150 | spin_lock_irqsave(&dwc->lock, flags); | 152 | spin_lock_irqsave(&dwc->lock, flags); |
151 | list_for_each_entry(child, &desc->tx_list, desc_node) | 153 | list_for_each_entry(child, &desc->tx_list, desc_node) |
152 | dev_vdbg(chan2dev(&dwc->chan), | 154 | dev_vdbg(chan2dev(&dwc->chan), |
@@ -179,9 +181,9 @@ static void dwc_initialize(struct dw_dma_chan *dwc) | |||
179 | cfghi = dws->cfg_hi; | 181 | cfghi = dws->cfg_hi; |
180 | cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; | 182 | cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; |
181 | } else { | 183 | } else { |
182 | if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) | 184 | if (dwc->direction == DMA_MEM_TO_DEV) |
183 | cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id); | 185 | cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id); |
184 | else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) | 186 | else if (dwc->direction == DMA_DEV_TO_MEM) |
185 | cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id); | 187 | cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id); |
186 | } | 188 | } |
187 | 189 | ||
@@ -223,7 +225,6 @@ static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) | |||
223 | channel_readl(dwc, CTL_LO)); | 225 | channel_readl(dwc, CTL_LO)); |
224 | } | 226 | } |
225 | 227 | ||
226 | |||
227 | static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) | 228 | static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) |
228 | { | 229 | { |
229 | channel_clear_bit(dw, CH_EN, dwc->mask); | 230 | channel_clear_bit(dw, CH_EN, dwc->mask); |
@@ -249,6 +250,9 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc, | |||
249 | channel_writel(dwc, CTL_LO, ctllo); | 250 | channel_writel(dwc, CTL_LO, ctllo); |
250 | channel_writel(dwc, CTL_HI, desc->lli.ctlhi); | 251 | channel_writel(dwc, CTL_HI, desc->lli.ctlhi); |
251 | channel_set_bit(dw, CH_EN, dwc->mask); | 252 | channel_set_bit(dw, CH_EN, dwc->mask); |
253 | |||
254 | /* Move pointer to next descriptor */ | ||
255 | dwc->tx_node_active = dwc->tx_node_active->next; | ||
252 | } | 256 | } |
253 | 257 | ||
254 | /* Called with dwc->lock held and bh disabled */ | 258 | /* Called with dwc->lock held and bh disabled */ |
@@ -279,9 +283,10 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |||
279 | 283 | ||
280 | dwc_initialize(dwc); | 284 | dwc_initialize(dwc); |
281 | 285 | ||
282 | dwc->tx_list = &first->tx_list; | 286 | dwc->residue = first->total_len; |
283 | dwc->tx_node_active = first->tx_list.next; | 287 | dwc->tx_node_active = &first->tx_list; |
284 | 288 | ||
289 | /* Submit first block */ | ||
285 | dwc_do_single_block(dwc, first); | 290 | dwc_do_single_block(dwc, first); |
286 | 291 | ||
287 | return; | 292 | return; |
@@ -317,8 +322,6 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, | |||
317 | param = txd->callback_param; | 322 | param = txd->callback_param; |
318 | } | 323 | } |
319 | 324 | ||
320 | dwc_sync_desc_for_cpu(dwc, desc); | ||
321 | |||
322 | /* async_tx_ack */ | 325 | /* async_tx_ack */ |
323 | list_for_each_entry(child, &desc->tx_list, desc_node) | 326 | list_for_each_entry(child, &desc->tx_list, desc_node) |
324 | async_tx_ack(&child->txd); | 327 | async_tx_ack(&child->txd); |
@@ -327,29 +330,29 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, | |||
327 | list_splice_init(&desc->tx_list, &dwc->free_list); | 330 | list_splice_init(&desc->tx_list, &dwc->free_list); |
328 | list_move(&desc->desc_node, &dwc->free_list); | 331 | list_move(&desc->desc_node, &dwc->free_list); |
329 | 332 | ||
330 | if (!dwc->chan.private) { | 333 | if (!is_slave_direction(dwc->direction)) { |
331 | struct device *parent = chan2parent(&dwc->chan); | 334 | struct device *parent = chan2parent(&dwc->chan); |
332 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | 335 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
333 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) | 336 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) |
334 | dma_unmap_single(parent, desc->lli.dar, | 337 | dma_unmap_single(parent, desc->lli.dar, |
335 | desc->len, DMA_FROM_DEVICE); | 338 | desc->total_len, DMA_FROM_DEVICE); |
336 | else | 339 | else |
337 | dma_unmap_page(parent, desc->lli.dar, | 340 | dma_unmap_page(parent, desc->lli.dar, |
338 | desc->len, DMA_FROM_DEVICE); | 341 | desc->total_len, DMA_FROM_DEVICE); |
339 | } | 342 | } |
340 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | 343 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { |
341 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) | 344 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) |
342 | dma_unmap_single(parent, desc->lli.sar, | 345 | dma_unmap_single(parent, desc->lli.sar, |
343 | desc->len, DMA_TO_DEVICE); | 346 | desc->total_len, DMA_TO_DEVICE); |
344 | else | 347 | else |
345 | dma_unmap_page(parent, desc->lli.sar, | 348 | dma_unmap_page(parent, desc->lli.sar, |
346 | desc->len, DMA_TO_DEVICE); | 349 | desc->total_len, DMA_TO_DEVICE); |
347 | } | 350 | } |
348 | } | 351 | } |
349 | 352 | ||
350 | spin_unlock_irqrestore(&dwc->lock, flags); | 353 | spin_unlock_irqrestore(&dwc->lock, flags); |
351 | 354 | ||
352 | if (callback_required && callback) | 355 | if (callback) |
353 | callback(param); | 356 | callback(param); |
354 | } | 357 | } |
355 | 358 | ||
@@ -384,6 +387,15 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
384 | dwc_descriptor_complete(dwc, desc, true); | 387 | dwc_descriptor_complete(dwc, desc, true); |
385 | } | 388 | } |
386 | 389 | ||
390 | /* Returns how many bytes were already received from source */ | ||
391 | static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) | ||
392 | { | ||
393 | u32 ctlhi = channel_readl(dwc, CTL_HI); | ||
394 | u32 ctllo = channel_readl(dwc, CTL_LO); | ||
395 | |||
396 | return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7)); | ||
397 | } | ||
398 | |||
387 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | 399 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) |
388 | { | 400 | { |
389 | dma_addr_t llp; | 401 | dma_addr_t llp; |
@@ -399,6 +411,39 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
399 | if (status_xfer & dwc->mask) { | 411 | if (status_xfer & dwc->mask) { |
400 | /* Everything we've submitted is done */ | 412 | /* Everything we've submitted is done */ |
401 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 413 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
414 | |||
415 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { | ||
416 | struct list_head *head, *active = dwc->tx_node_active; | ||
417 | |||
418 | /* | ||
419 | * We are inside first active descriptor. | ||
420 | * Otherwise something is really wrong. | ||
421 | */ | ||
422 | desc = dwc_first_active(dwc); | ||
423 | |||
424 | head = &desc->tx_list; | ||
425 | if (active != head) { | ||
426 | /* Update desc to reflect last sent one */ | ||
427 | if (active != head->next) | ||
428 | desc = to_dw_desc(active->prev); | ||
429 | |||
430 | dwc->residue -= desc->len; | ||
431 | |||
432 | child = to_dw_desc(active); | ||
433 | |||
434 | /* Submit next block */ | ||
435 | dwc_do_single_block(dwc, child); | ||
436 | |||
437 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
438 | return; | ||
439 | } | ||
440 | |||
441 | /* We are done here */ | ||
442 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); | ||
443 | } | ||
444 | |||
445 | dwc->residue = 0; | ||
446 | |||
402 | spin_unlock_irqrestore(&dwc->lock, flags); | 447 | spin_unlock_irqrestore(&dwc->lock, flags); |
403 | 448 | ||
404 | dwc_complete_all(dw, dwc); | 449 | dwc_complete_all(dw, dwc); |
@@ -406,6 +451,13 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
406 | } | 451 | } |
407 | 452 | ||
408 | if (list_empty(&dwc->active_list)) { | 453 | if (list_empty(&dwc->active_list)) { |
454 | dwc->residue = 0; | ||
455 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
456 | return; | ||
457 | } | ||
458 | |||
459 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { | ||
460 | dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__); | ||
409 | spin_unlock_irqrestore(&dwc->lock, flags); | 461 | spin_unlock_irqrestore(&dwc->lock, flags); |
410 | return; | 462 | return; |
411 | } | 463 | } |
@@ -414,6 +466,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
414 | (unsigned long long)llp); | 466 | (unsigned long long)llp); |
415 | 467 | ||
416 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | 468 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { |
469 | /* initial residue value */ | ||
470 | dwc->residue = desc->total_len; | ||
471 | |||
417 | /* check first descriptors addr */ | 472 | /* check first descriptors addr */ |
418 | if (desc->txd.phys == llp) { | 473 | if (desc->txd.phys == llp) { |
419 | spin_unlock_irqrestore(&dwc->lock, flags); | 474 | spin_unlock_irqrestore(&dwc->lock, flags); |
@@ -423,16 +478,21 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
423 | /* check first descriptors llp */ | 478 | /* check first descriptors llp */ |
424 | if (desc->lli.llp == llp) { | 479 | if (desc->lli.llp == llp) { |
425 | /* This one is currently in progress */ | 480 | /* This one is currently in progress */ |
481 | dwc->residue -= dwc_get_sent(dwc); | ||
426 | spin_unlock_irqrestore(&dwc->lock, flags); | 482 | spin_unlock_irqrestore(&dwc->lock, flags); |
427 | return; | 483 | return; |
428 | } | 484 | } |
429 | 485 | ||
430 | list_for_each_entry(child, &desc->tx_list, desc_node) | 486 | dwc->residue -= desc->len; |
487 | list_for_each_entry(child, &desc->tx_list, desc_node) { | ||
431 | if (child->lli.llp == llp) { | 488 | if (child->lli.llp == llp) { |
432 | /* Currently in progress */ | 489 | /* Currently in progress */ |
490 | dwc->residue -= dwc_get_sent(dwc); | ||
433 | spin_unlock_irqrestore(&dwc->lock, flags); | 491 | spin_unlock_irqrestore(&dwc->lock, flags); |
434 | return; | 492 | return; |
435 | } | 493 | } |
494 | dwc->residue -= child->len; | ||
495 | } | ||
436 | 496 | ||
437 | /* | 497 | /* |
438 | * No descriptors so far seem to be in progress, i.e. | 498 | * No descriptors so far seem to be in progress, i.e. |
@@ -458,9 +518,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
458 | 518 | ||
459 | static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) | 519 | static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) |
460 | { | 520 | { |
461 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), | 521 | dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", |
462 | " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", | 522 | lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); |
463 | lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); | ||
464 | } | 523 | } |
465 | 524 | ||
466 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | 525 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) |
@@ -488,16 +547,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
488 | dwc_dostart(dwc, dwc_first_active(dwc)); | 547 | dwc_dostart(dwc, dwc_first_active(dwc)); |
489 | 548 | ||
490 | /* | 549 | /* |
491 | * KERN_CRITICAL may seem harsh, but since this only happens | 550 | * WARN may seem harsh, but since this only happens |
492 | * when someone submits a bad physical address in a | 551 | * when someone submits a bad physical address in a |
493 | * descriptor, we should consider ourselves lucky that the | 552 | * descriptor, we should consider ourselves lucky that the |
494 | * controller flagged an error instead of scribbling over | 553 | * controller flagged an error instead of scribbling over |
495 | * random memory locations. | 554 | * random memory locations. |
496 | */ | 555 | */ |
497 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), | 556 | dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" |
498 | "Bad descriptor submitted for DMA!\n"); | 557 | " cookie: %d\n", bad_desc->txd.cookie); |
499 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), | ||
500 | " cookie: %d\n", bad_desc->txd.cookie); | ||
501 | dwc_dump_lli(dwc, &bad_desc->lli); | 558 | dwc_dump_lli(dwc, &bad_desc->lli); |
502 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) | 559 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
503 | dwc_dump_lli(dwc, &child->lli); | 560 | dwc_dump_lli(dwc, &child->lli); |
@@ -598,36 +655,8 @@ static void dw_dma_tasklet(unsigned long data) | |||
598 | dwc_handle_cyclic(dw, dwc, status_err, status_xfer); | 655 | dwc_handle_cyclic(dw, dwc, status_err, status_xfer); |
599 | else if (status_err & (1 << i)) | 656 | else if (status_err & (1 << i)) |
600 | dwc_handle_error(dw, dwc); | 657 | dwc_handle_error(dw, dwc); |
601 | else if (status_xfer & (1 << i)) { | 658 | else if (status_xfer & (1 << i)) |
602 | unsigned long flags; | ||
603 | |||
604 | spin_lock_irqsave(&dwc->lock, flags); | ||
605 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { | ||
606 | if (dwc->tx_node_active != dwc->tx_list) { | ||
607 | struct dw_desc *desc = | ||
608 | list_entry(dwc->tx_node_active, | ||
609 | struct dw_desc, | ||
610 | desc_node); | ||
611 | |||
612 | dma_writel(dw, CLEAR.XFER, dwc->mask); | ||
613 | |||
614 | /* move pointer to next descriptor */ | ||
615 | dwc->tx_node_active = | ||
616 | dwc->tx_node_active->next; | ||
617 | |||
618 | dwc_do_single_block(dwc, desc); | ||
619 | |||
620 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
621 | continue; | ||
622 | } else { | ||
623 | /* we are done here */ | ||
624 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); | ||
625 | } | ||
626 | } | ||
627 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
628 | |||
629 | dwc_scan_descriptors(dw, dwc); | 659 | dwc_scan_descriptors(dw, dwc); |
630 | } | ||
631 | } | 660 | } |
632 | 661 | ||
633 | /* | 662 | /* |
@@ -709,7 +738,6 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
709 | size_t len, unsigned long flags) | 738 | size_t len, unsigned long flags) |
710 | { | 739 | { |
711 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 740 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
712 | struct dw_dma_slave *dws = chan->private; | ||
713 | struct dw_desc *desc; | 741 | struct dw_desc *desc; |
714 | struct dw_desc *first; | 742 | struct dw_desc *first; |
715 | struct dw_desc *prev; | 743 | struct dw_desc *prev; |
@@ -730,8 +758,10 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
730 | return NULL; | 758 | return NULL; |
731 | } | 759 | } |
732 | 760 | ||
733 | data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)], | 761 | dwc->direction = DMA_MEM_TO_MEM; |
734 | dwc->dw->data_width[dwc_get_dms(dws)]); | 762 | |
763 | data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER), | ||
764 | dwc_get_data_width(chan, DST_MASTER)); | ||
735 | 765 | ||
736 | src_width = dst_width = min_t(unsigned int, data_width, | 766 | src_width = dst_width = min_t(unsigned int, data_width, |
737 | dwc_fast_fls(src | dest | len)); | 767 | dwc_fast_fls(src | dest | len)); |
@@ -756,32 +786,25 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
756 | desc->lli.dar = dest + offset; | 786 | desc->lli.dar = dest + offset; |
757 | desc->lli.ctllo = ctllo; | 787 | desc->lli.ctllo = ctllo; |
758 | desc->lli.ctlhi = xfer_count; | 788 | desc->lli.ctlhi = xfer_count; |
789 | desc->len = xfer_count << src_width; | ||
759 | 790 | ||
760 | if (!first) { | 791 | if (!first) { |
761 | first = desc; | 792 | first = desc; |
762 | } else { | 793 | } else { |
763 | prev->lli.llp = desc->txd.phys; | 794 | prev->lli.llp = desc->txd.phys; |
764 | dma_sync_single_for_device(chan2parent(chan), | ||
765 | prev->txd.phys, sizeof(prev->lli), | ||
766 | DMA_TO_DEVICE); | ||
767 | list_add_tail(&desc->desc_node, | 795 | list_add_tail(&desc->desc_node, |
768 | &first->tx_list); | 796 | &first->tx_list); |
769 | } | 797 | } |
770 | prev = desc; | 798 | prev = desc; |
771 | } | 799 | } |
772 | 800 | ||
773 | |||
774 | if (flags & DMA_PREP_INTERRUPT) | 801 | if (flags & DMA_PREP_INTERRUPT) |
775 | /* Trigger interrupt after last block */ | 802 | /* Trigger interrupt after last block */ |
776 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | 803 | prev->lli.ctllo |= DWC_CTLL_INT_EN; |
777 | 804 | ||
778 | prev->lli.llp = 0; | 805 | prev->lli.llp = 0; |
779 | dma_sync_single_for_device(chan2parent(chan), | ||
780 | prev->txd.phys, sizeof(prev->lli), | ||
781 | DMA_TO_DEVICE); | ||
782 | |||
783 | first->txd.flags = flags; | 806 | first->txd.flags = flags; |
784 | first->len = len; | 807 | first->total_len = len; |
785 | 808 | ||
786 | return &first->txd; | 809 | return &first->txd; |
787 | 810 | ||
@@ -796,7 +819,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
796 | unsigned long flags, void *context) | 819 | unsigned long flags, void *context) |
797 | { | 820 | { |
798 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 821 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
799 | struct dw_dma_slave *dws = chan->private; | ||
800 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; | 822 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; |
801 | struct dw_desc *prev; | 823 | struct dw_desc *prev; |
802 | struct dw_desc *first; | 824 | struct dw_desc *first; |
@@ -811,9 +833,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
811 | 833 | ||
812 | dev_vdbg(chan2dev(chan), "%s\n", __func__); | 834 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
813 | 835 | ||
814 | if (unlikely(!dws || !sg_len)) | 836 | if (unlikely(!is_slave_direction(direction) || !sg_len)) |
815 | return NULL; | 837 | return NULL; |
816 | 838 | ||
839 | dwc->direction = direction; | ||
840 | |||
817 | prev = first = NULL; | 841 | prev = first = NULL; |
818 | 842 | ||
819 | switch (direction) { | 843 | switch (direction) { |
@@ -828,7 +852,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
828 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | 852 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : |
829 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | 853 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); |
830 | 854 | ||
831 | data_width = dwc->dw->data_width[dwc_get_sms(dws)]; | 855 | data_width = dwc_get_data_width(chan, SRC_MASTER); |
832 | 856 | ||
833 | for_each_sg(sgl, sg, sg_len, i) { | 857 | for_each_sg(sgl, sg, sg_len, i) { |
834 | struct dw_desc *desc; | 858 | struct dw_desc *desc; |
@@ -861,15 +885,12 @@ slave_sg_todev_fill_desc: | |||
861 | } | 885 | } |
862 | 886 | ||
863 | desc->lli.ctlhi = dlen >> mem_width; | 887 | desc->lli.ctlhi = dlen >> mem_width; |
888 | desc->len = dlen; | ||
864 | 889 | ||
865 | if (!first) { | 890 | if (!first) { |
866 | first = desc; | 891 | first = desc; |
867 | } else { | 892 | } else { |
868 | prev->lli.llp = desc->txd.phys; | 893 | prev->lli.llp = desc->txd.phys; |
869 | dma_sync_single_for_device(chan2parent(chan), | ||
870 | prev->txd.phys, | ||
871 | sizeof(prev->lli), | ||
872 | DMA_TO_DEVICE); | ||
873 | list_add_tail(&desc->desc_node, | 894 | list_add_tail(&desc->desc_node, |
874 | &first->tx_list); | 895 | &first->tx_list); |
875 | } | 896 | } |
@@ -891,7 +912,7 @@ slave_sg_todev_fill_desc: | |||
891 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | 912 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : |
892 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | 913 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); |
893 | 914 | ||
894 | data_width = dwc->dw->data_width[dwc_get_dms(dws)]; | 915 | data_width = dwc_get_data_width(chan, DST_MASTER); |
895 | 916 | ||
896 | for_each_sg(sgl, sg, sg_len, i) { | 917 | for_each_sg(sgl, sg, sg_len, i) { |
897 | struct dw_desc *desc; | 918 | struct dw_desc *desc; |
@@ -923,15 +944,12 @@ slave_sg_fromdev_fill_desc: | |||
923 | len = 0; | 944 | len = 0; |
924 | } | 945 | } |
925 | desc->lli.ctlhi = dlen >> reg_width; | 946 | desc->lli.ctlhi = dlen >> reg_width; |
947 | desc->len = dlen; | ||
926 | 948 | ||
927 | if (!first) { | 949 | if (!first) { |
928 | first = desc; | 950 | first = desc; |
929 | } else { | 951 | } else { |
930 | prev->lli.llp = desc->txd.phys; | 952 | prev->lli.llp = desc->txd.phys; |
931 | dma_sync_single_for_device(chan2parent(chan), | ||
932 | prev->txd.phys, | ||
933 | sizeof(prev->lli), | ||
934 | DMA_TO_DEVICE); | ||
935 | list_add_tail(&desc->desc_node, | 953 | list_add_tail(&desc->desc_node, |
936 | &first->tx_list); | 954 | &first->tx_list); |
937 | } | 955 | } |
@@ -951,11 +969,7 @@ slave_sg_fromdev_fill_desc: | |||
951 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | 969 | prev->lli.ctllo |= DWC_CTLL_INT_EN; |
952 | 970 | ||
953 | prev->lli.llp = 0; | 971 | prev->lli.llp = 0; |
954 | dma_sync_single_for_device(chan2parent(chan), | 972 | first->total_len = total_len; |
955 | prev->txd.phys, sizeof(prev->lli), | ||
956 | DMA_TO_DEVICE); | ||
957 | |||
958 | first->len = total_len; | ||
959 | 973 | ||
960 | return &first->txd; | 974 | return &first->txd; |
961 | 975 | ||
@@ -985,11 +999,12 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | |||
985 | { | 999 | { |
986 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1000 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
987 | 1001 | ||
988 | /* Check if it is chan is configured for slave transfers */ | 1002 | /* Check if chan will be configured for slave transfers */ |
989 | if (!chan->private) | 1003 | if (!is_slave_direction(sconfig->direction)) |
990 | return -EINVAL; | 1004 | return -EINVAL; |
991 | 1005 | ||
992 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); | 1006 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); |
1007 | dwc->direction = sconfig->direction; | ||
993 | 1008 | ||
994 | convert_burst(&dwc->dma_sconfig.src_maxburst); | 1009 | convert_burst(&dwc->dma_sconfig.src_maxburst); |
995 | convert_burst(&dwc->dma_sconfig.dst_maxburst); | 1010 | convert_burst(&dwc->dma_sconfig.dst_maxburst); |
@@ -997,6 +1012,26 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | |||
997 | return 0; | 1012 | return 0; |
998 | } | 1013 | } |
999 | 1014 | ||
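With the hunks above, preparing a slave transfer no longer depends on chan->private: the direction handed to DMA_SLAVE_CONFIG and to the prep call is validated with is_slave_direction() and cached in dwc->direction. A rough client-side sequence under that assumption; the FIFO address, bus width and burst size are placeholders.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_start_tx(struct dma_chan *chan, struct scatterlist *sgl,
                            unsigned int sg_len, dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV, /* must be a slave direction */
                .dst_addr       = fifo_addr,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 8,
        };
        struct dma_async_tx_descriptor *desc;
        int ret;

        ret = dmaengine_slave_config(chan, &cfg);       /* -> set_runtime_config() */
        if (ret)
                return ret;

        desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -ENOMEM;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}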
1015 | static inline void dwc_chan_pause(struct dw_dma_chan *dwc) | ||
1016 | { | ||
1017 | u32 cfglo = channel_readl(dwc, CFG_LO); | ||
1018 | |||
1019 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); | ||
1020 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY)) | ||
1021 | cpu_relax(); | ||
1022 | |||
1023 | dwc->paused = true; | ||
1024 | } | ||
1025 | |||
1026 | static inline void dwc_chan_resume(struct dw_dma_chan *dwc) | ||
1027 | { | ||
1028 | u32 cfglo = channel_readl(dwc, CFG_LO); | ||
1029 | |||
1030 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); | ||
1031 | |||
1032 | dwc->paused = false; | ||
1033 | } | ||
1034 | |||
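The two helpers above factor out the suspend/resume sequence that dwc_control() previously open-coded for DMA_PAUSE and DMA_RESUME. From a client they are reached through the standard wrappers; a trivial sketch (the pause window and its purpose are hypothetical):

#include <linux/dmaengine.h>

static void example_pause_window(struct dma_chan *chan)
{
        /* DMA_PAUSE -> dwc_chan_pause(): set CH_SUSP, wait for FIFO empty */
        if (dmaengine_pause(chan))
                return;

        /* ... reprogram the peripheral while the channel is held ... */

        /* DMA_RESUME -> dwc_chan_resume(): clear CH_SUSP */
        dmaengine_resume(chan);
}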
1000 | static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 1035 | static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
1001 | unsigned long arg) | 1036 | unsigned long arg) |
1002 | { | 1037 | { |
@@ -1004,18 +1039,13 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1004 | struct dw_dma *dw = to_dw_dma(chan->device); | 1039 | struct dw_dma *dw = to_dw_dma(chan->device); |
1005 | struct dw_desc *desc, *_desc; | 1040 | struct dw_desc *desc, *_desc; |
1006 | unsigned long flags; | 1041 | unsigned long flags; |
1007 | u32 cfglo; | ||
1008 | LIST_HEAD(list); | 1042 | LIST_HEAD(list); |
1009 | 1043 | ||
1010 | if (cmd == DMA_PAUSE) { | 1044 | if (cmd == DMA_PAUSE) { |
1011 | spin_lock_irqsave(&dwc->lock, flags); | 1045 | spin_lock_irqsave(&dwc->lock, flags); |
1012 | 1046 | ||
1013 | cfglo = channel_readl(dwc, CFG_LO); | 1047 | dwc_chan_pause(dwc); |
1014 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); | ||
1015 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY)) | ||
1016 | cpu_relax(); | ||
1017 | 1048 | ||
1018 | dwc->paused = true; | ||
1019 | spin_unlock_irqrestore(&dwc->lock, flags); | 1049 | spin_unlock_irqrestore(&dwc->lock, flags); |
1020 | } else if (cmd == DMA_RESUME) { | 1050 | } else if (cmd == DMA_RESUME) { |
1021 | if (!dwc->paused) | 1051 | if (!dwc->paused) |
@@ -1023,9 +1053,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1023 | 1053 | ||
1024 | spin_lock_irqsave(&dwc->lock, flags); | 1054 | spin_lock_irqsave(&dwc->lock, flags); |
1025 | 1055 | ||
1026 | cfglo = channel_readl(dwc, CFG_LO); | 1056 | dwc_chan_resume(dwc); |
1027 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); | ||
1028 | dwc->paused = false; | ||
1029 | 1057 | ||
1030 | spin_unlock_irqrestore(&dwc->lock, flags); | 1058 | spin_unlock_irqrestore(&dwc->lock, flags); |
1031 | } else if (cmd == DMA_TERMINATE_ALL) { | 1059 | } else if (cmd == DMA_TERMINATE_ALL) { |
@@ -1035,7 +1063,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1035 | 1063 | ||
1036 | dwc_chan_disable(dw, dwc); | 1064 | dwc_chan_disable(dw, dwc); |
1037 | 1065 | ||
1038 | dwc->paused = false; | 1066 | dwc_chan_resume(dwc); |
1039 | 1067 | ||
1040 | /* active_list entries will end up before queued entries */ | 1068 | /* active_list entries will end up before queued entries */ |
1041 | list_splice_init(&dwc->queue, &list); | 1069 | list_splice_init(&dwc->queue, &list); |
@@ -1055,6 +1083,21 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1055 | return 0; | 1083 | return 0; |
1056 | } | 1084 | } |
1057 | 1085 | ||
1086 | static inline u32 dwc_get_residue(struct dw_dma_chan *dwc) | ||
1087 | { | ||
1088 | unsigned long flags; | ||
1089 | u32 residue; | ||
1090 | |||
1091 | spin_lock_irqsave(&dwc->lock, flags); | ||
1092 | |||
1093 | residue = dwc->residue; | ||
1094 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) | ||
1095 | residue -= dwc_get_sent(dwc); | ||
1096 | |||
1097 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
1098 | return residue; | ||
1099 | } | ||
1100 | |||
1058 | static enum dma_status | 1101 | static enum dma_status |
1059 | dwc_tx_status(struct dma_chan *chan, | 1102 | dwc_tx_status(struct dma_chan *chan, |
1060 | dma_cookie_t cookie, | 1103 | dma_cookie_t cookie, |
@@ -1071,7 +1114,7 @@ dwc_tx_status(struct dma_chan *chan, | |||
1071 | } | 1114 | } |
1072 | 1115 | ||
1073 | if (ret != DMA_SUCCESS) | 1116 | if (ret != DMA_SUCCESS) |
1074 | dma_set_residue(txstate, dwc_first_active(dwc)->len); | 1117 | dma_set_residue(txstate, dwc_get_residue(dwc)); |
1075 | 1118 | ||
1076 | if (dwc->paused) | 1119 | if (dwc->paused) |
1077 | return DMA_PAUSED; | 1120 | return DMA_PAUSED; |
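dwc_get_residue() above, combined with the per-descriptor len fields added earlier, lets dwc_tx_status() report the bytes still outstanding rather than the total transfer length. A small sketch of a client reading that value; the helper name is illustrative.

#include <linux/dmaengine.h>

static size_t example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;

        if (dmaengine_tx_status(chan, cookie, &state) == DMA_SUCCESS)
                return 0;

        return state.residue;   /* filled in via dma_set_residue() above */
}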
@@ -1114,22 +1157,22 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
1114 | spin_lock_irqsave(&dwc->lock, flags); | 1157 | spin_lock_irqsave(&dwc->lock, flags); |
1115 | i = dwc->descs_allocated; | 1158 | i = dwc->descs_allocated; |
1116 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { | 1159 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { |
1160 | dma_addr_t phys; | ||
1161 | |||
1117 | spin_unlock_irqrestore(&dwc->lock, flags); | 1162 | spin_unlock_irqrestore(&dwc->lock, flags); |
1118 | 1163 | ||
1119 | desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); | 1164 | desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys); |
1120 | if (!desc) { | 1165 | if (!desc) |
1121 | dev_info(chan2dev(chan), | 1166 | goto err_desc_alloc; |
1122 | "only allocated %d descriptors\n", i); | 1167 | |
1123 | spin_lock_irqsave(&dwc->lock, flags); | 1168 | memset(desc, 0, sizeof(struct dw_desc)); |
1124 | break; | ||
1125 | } | ||
1126 | 1169 | ||
1127 | INIT_LIST_HEAD(&desc->tx_list); | 1170 | INIT_LIST_HEAD(&desc->tx_list); |
1128 | dma_async_tx_descriptor_init(&desc->txd, chan); | 1171 | dma_async_tx_descriptor_init(&desc->txd, chan); |
1129 | desc->txd.tx_submit = dwc_tx_submit; | 1172 | desc->txd.tx_submit = dwc_tx_submit; |
1130 | desc->txd.flags = DMA_CTRL_ACK; | 1173 | desc->txd.flags = DMA_CTRL_ACK; |
1131 | desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli, | 1174 | desc->txd.phys = phys; |
1132 | sizeof(desc->lli), DMA_TO_DEVICE); | 1175 | |
1133 | dwc_desc_put(dwc, desc); | 1176 | dwc_desc_put(dwc, desc); |
1134 | 1177 | ||
1135 | spin_lock_irqsave(&dwc->lock, flags); | 1178 | spin_lock_irqsave(&dwc->lock, flags); |
@@ -1141,6 +1184,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
1141 | dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); | 1184 | dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); |
1142 | 1185 | ||
1143 | return i; | 1186 | return i; |
1187 | |||
1188 | err_desc_alloc: | ||
1189 | dev_info(chan2dev(chan), "only allocated %d descriptors\n", i); | ||
1190 | |||
1191 | return i; | ||
1144 | } | 1192 | } |
1145 | 1193 | ||
1146 | static void dwc_free_chan_resources(struct dma_chan *chan) | 1194 | static void dwc_free_chan_resources(struct dma_chan *chan) |
@@ -1172,14 +1220,56 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1172 | 1220 | ||
1173 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { | 1221 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { |
1174 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); | 1222 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); |
1175 | dma_unmap_single(chan2parent(chan), desc->txd.phys, | 1223 | dma_pool_free(dw->desc_pool, desc, desc->txd.phys); |
1176 | sizeof(desc->lli), DMA_TO_DEVICE); | ||
1177 | kfree(desc); | ||
1178 | } | 1224 | } |
1179 | 1225 | ||
1180 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); | 1226 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); |
1181 | } | 1227 | } |
1182 | 1228 | ||
1229 | bool dw_dma_generic_filter(struct dma_chan *chan, void *param) | ||
1230 | { | ||
1231 | struct dw_dma *dw = to_dw_dma(chan->device); | ||
1232 | static struct dw_dma *last_dw; | ||
1233 | static char *last_bus_id; | ||
1234 | int i = -1; | ||
1235 | |||
1236 | /* | ||
1237 | * The dmaengine framework calls this routine for every channel of every | ||
1238 | * DMA controller until true is returned. If the 'param' bus_id is not | ||
1239 | * registered with a given controller (dw), there is no need to run this | ||
1240 | * routine for the remaining channels of that dw. | ||
1241 | * | ||
1242 | * This is done by caching the parameters of the last failed match: if | ||
1243 | * dw and param are the same as before, i.e. another channel of the same | ||
1244 | * dw is being tried, return false immediately. | ||
1245 | */ | ||
1246 | if ((last_dw == dw) && (last_bus_id == param)) | ||
1247 | return false; | ||
1248 | /* | ||
1249 | * Return true: | ||
1250 | * - If dw_dma's platform data is not filled with slave info, then all | ||
1251 | * dma controllers are fine for transfer. | ||
1252 | * - Or if param is NULL | ||
1253 | */ | ||
1254 | if (!dw->sd || !param) | ||
1255 | return true; | ||
1256 | |||
1257 | while (++i < dw->sd_count) { | ||
1258 | if (!strcmp(dw->sd[i].bus_id, param)) { | ||
1259 | chan->private = &dw->sd[i]; | ||
1260 | last_dw = NULL; | ||
1261 | last_bus_id = NULL; | ||
1262 | |||
1263 | return true; | ||
1264 | } | ||
1265 | } | ||
1266 | |||
1267 | last_dw = dw; | ||
1268 | last_bus_id = param; | ||
1269 | return false; | ||
1270 | } | ||
1271 | EXPORT_SYMBOL(dw_dma_generic_filter); | ||
1272 | |||
1183 | /* --------------------- Cyclic DMA API extensions -------------------- */ | 1273 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
1184 | 1274 | ||
1185 | /** | 1275 | /** |
@@ -1299,6 +1389,11 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1299 | 1389 | ||
1300 | retval = ERR_PTR(-EINVAL); | 1390 | retval = ERR_PTR(-EINVAL); |
1301 | 1391 | ||
1392 | if (unlikely(!is_slave_direction(direction))) | ||
1393 | goto out_err; | ||
1394 | |||
1395 | dwc->direction = direction; | ||
1396 | |||
1302 | if (direction == DMA_MEM_TO_DEV) | 1397 | if (direction == DMA_MEM_TO_DEV) |
1303 | reg_width = __ffs(sconfig->dst_addr_width); | 1398 | reg_width = __ffs(sconfig->dst_addr_width); |
1304 | else | 1399 | else |
@@ -1313,8 +1408,6 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1313 | goto out_err; | 1408 | goto out_err; |
1314 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) | 1409 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) |
1315 | goto out_err; | 1410 | goto out_err; |
1316 | if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM)))) | ||
1317 | goto out_err; | ||
1318 | 1411 | ||
1319 | retval = ERR_PTR(-ENOMEM); | 1412 | retval = ERR_PTR(-ENOMEM); |
1320 | 1413 | ||
@@ -1372,20 +1465,14 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1372 | desc->lli.ctlhi = (period_len >> reg_width); | 1465 | desc->lli.ctlhi = (period_len >> reg_width); |
1373 | cdesc->desc[i] = desc; | 1466 | cdesc->desc[i] = desc; |
1374 | 1467 | ||
1375 | if (last) { | 1468 | if (last) |
1376 | last->lli.llp = desc->txd.phys; | 1469 | last->lli.llp = desc->txd.phys; |
1377 | dma_sync_single_for_device(chan2parent(chan), | ||
1378 | last->txd.phys, sizeof(last->lli), | ||
1379 | DMA_TO_DEVICE); | ||
1380 | } | ||
1381 | 1470 | ||
1382 | last = desc; | 1471 | last = desc; |
1383 | } | 1472 | } |
1384 | 1473 | ||
1385 | /* let's make a cyclic list */ | 1474 | /* let's make a cyclic list */ |
1386 | last->lli.llp = cdesc->desc[0]->txd.phys; | 1475 | last->lli.llp = cdesc->desc[0]->txd.phys; |
1387 | dma_sync_single_for_device(chan2parent(chan), last->txd.phys, | ||
1388 | sizeof(last->lli), DMA_TO_DEVICE); | ||
1389 | 1476 | ||
1390 | dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " | 1477 | dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " |
1391 | "period %zu periods %d\n", (unsigned long long)buf_addr, | 1478 | "period %zu periods %d\n", (unsigned long long)buf_addr, |
@@ -1463,6 +1550,91 @@ static void dw_dma_off(struct dw_dma *dw) | |||
1463 | dw->chan[i].initialized = false; | 1550 | dw->chan[i].initialized = false; |
1464 | } | 1551 | } |
1465 | 1552 | ||
1553 | #ifdef CONFIG_OF | ||
1554 | static struct dw_dma_platform_data * | ||
1555 | dw_dma_parse_dt(struct platform_device *pdev) | ||
1556 | { | ||
1557 | struct device_node *sn, *cn, *np = pdev->dev.of_node; | ||
1558 | struct dw_dma_platform_data *pdata; | ||
1559 | struct dw_dma_slave *sd; | ||
1560 | u32 tmp, arr[4]; | ||
1561 | |||
1562 | if (!np) { | ||
1563 | dev_err(&pdev->dev, "Missing DT data\n"); | ||
1564 | return NULL; | ||
1565 | } | ||
1566 | |||
1567 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | ||
1568 | if (!pdata) | ||
1569 | return NULL; | ||
1570 | |||
1571 | if (of_property_read_u32(np, "nr_channels", &pdata->nr_channels)) | ||
1572 | return NULL; | ||
1573 | |||
1574 | if (of_property_read_bool(np, "is_private")) | ||
1575 | pdata->is_private = true; | ||
1576 | |||
1577 | if (!of_property_read_u32(np, "chan_allocation_order", &tmp)) | ||
1578 | pdata->chan_allocation_order = (unsigned char)tmp; | ||
1579 | |||
1580 | if (!of_property_read_u32(np, "chan_priority", &tmp)) | ||
1581 | pdata->chan_priority = tmp; | ||
1582 | |||
1583 | if (!of_property_read_u32(np, "block_size", &tmp)) | ||
1584 | pdata->block_size = tmp; | ||
1585 | |||
1586 | if (!of_property_read_u32(np, "nr_masters", &tmp)) { | ||
1587 | if (tmp > 4) | ||
1588 | return NULL; | ||
1589 | |||
1590 | pdata->nr_masters = tmp; | ||
1591 | } | ||
1592 | |||
1593 | if (!of_property_read_u32_array(np, "data_width", arr, | ||
1594 | pdata->nr_masters)) | ||
1595 | for (tmp = 0; tmp < pdata->nr_masters; tmp++) | ||
1596 | pdata->data_width[tmp] = arr[tmp]; | ||
1597 | |||
1598 | /* parse slave data */ | ||
1599 | sn = of_find_node_by_name(np, "slave_info"); | ||
1600 | if (!sn) | ||
1601 | return pdata; | ||
1602 | |||
1603 | /* calculate number of slaves */ | ||
1604 | tmp = of_get_child_count(sn); | ||
1605 | if (!tmp) | ||
1606 | return NULL; | ||
1607 | |||
1608 | sd = devm_kzalloc(&pdev->dev, sizeof(*sd) * tmp, GFP_KERNEL); | ||
1609 | if (!sd) | ||
1610 | return NULL; | ||
1611 | |||
1612 | pdata->sd = sd; | ||
1613 | pdata->sd_count = tmp; | ||
1614 | |||
1615 | for_each_child_of_node(sn, cn) { | ||
1616 | sd->dma_dev = &pdev->dev; | ||
1617 | of_property_read_string(cn, "bus_id", &sd->bus_id); | ||
1618 | of_property_read_u32(cn, "cfg_hi", &sd->cfg_hi); | ||
1619 | of_property_read_u32(cn, "cfg_lo", &sd->cfg_lo); | ||
1620 | if (!of_property_read_u32(cn, "src_master", &tmp)) | ||
1621 | sd->src_master = tmp; | ||
1622 | |||
1623 | if (!of_property_read_u32(cn, "dst_master", &tmp)) | ||
1624 | sd->dst_master = tmp; | ||
1625 | sd++; | ||
1626 | } | ||
1627 | |||
1628 | return pdata; | ||
1629 | } | ||
1630 | #else | ||
1631 | static inline struct dw_dma_platform_data * | ||
1632 | dw_dma_parse_dt(struct platform_device *pdev) | ||
1633 | { | ||
1634 | return NULL; | ||
1635 | } | ||
1636 | #endif | ||
1637 | |||
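dw_dma_parse_dt() above builds a dw_dma_platform_data from device tree properties. For comparison, a legacy (non-DT) board file would hand the driver the same structure directly; all values below are illustrative, not taken from real hardware.

#include <linux/kernel.h>
#include <linux/dw_dmac.h>

static struct dw_dma_slave example_slaves[] = {
        {
                .bus_id     = "uart0-tx",       /* matched by dw_dma_generic_filter() */
                .cfg_hi     = 0,
                .cfg_lo     = 0,
                .src_master = 0,
                .dst_master = 1,
        },
};

static struct dw_dma_platform_data example_pdata = {
        .nr_channels           = 8,
        .is_private            = true,
        .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
        .chan_priority         = CHAN_PRIORITY_ASCENDING,
        .block_size            = 0xfff,
        .nr_masters            = 2,
        .data_width            = { 3, 3 },      /* log2(bytes) per master */
        .sd                    = example_slaves,
        .sd_count              = ARRAY_SIZE(example_slaves),
};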
1466 | static int dw_probe(struct platform_device *pdev) | 1638 | static int dw_probe(struct platform_device *pdev) |
1467 | { | 1639 | { |
1468 | struct dw_dma_platform_data *pdata; | 1640 | struct dw_dma_platform_data *pdata; |
@@ -1478,10 +1650,6 @@ static int dw_probe(struct platform_device *pdev) | |||
1478 | int err; | 1650 | int err; |
1479 | int i; | 1651 | int i; |
1480 | 1652 | ||
1481 | pdata = dev_get_platdata(&pdev->dev); | ||
1482 | if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) | ||
1483 | return -EINVAL; | ||
1484 | |||
1485 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1653 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1486 | if (!io) | 1654 | if (!io) |
1487 | return -EINVAL; | 1655 | return -EINVAL; |
@@ -1494,9 +1662,33 @@ static int dw_probe(struct platform_device *pdev) | |||
1494 | if (IS_ERR(regs)) | 1662 | if (IS_ERR(regs)) |
1495 | return PTR_ERR(regs); | 1663 | return PTR_ERR(regs); |
1496 | 1664 | ||
1665 | /* Apply default dma_mask if needed */ | ||
1666 | if (!pdev->dev.dma_mask) { | ||
1667 | pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; | ||
1668 | pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
1669 | } | ||
1670 | |||
1497 | dw_params = dma_read_byaddr(regs, DW_PARAMS); | 1671 | dw_params = dma_read_byaddr(regs, DW_PARAMS); |
1498 | autocfg = dw_params >> DW_PARAMS_EN & 0x1; | 1672 | autocfg = dw_params >> DW_PARAMS_EN & 0x1; |
1499 | 1673 | ||
1674 | dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params); | ||
1675 | |||
1676 | pdata = dev_get_platdata(&pdev->dev); | ||
1677 | if (!pdata) | ||
1678 | pdata = dw_dma_parse_dt(pdev); | ||
1679 | |||
1680 | if (!pdata && autocfg) { | ||
1681 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | ||
1682 | if (!pdata) | ||
1683 | return -ENOMEM; | ||
1684 | |||
1685 | /* Fill platform data with the default values */ | ||
1686 | pdata->is_private = true; | ||
1687 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; | ||
1688 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; | ||
1689 | } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) | ||
1690 | return -EINVAL; | ||
1691 | |||
1500 | if (autocfg) | 1692 | if (autocfg) |
1501 | nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; | 1693 | nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; |
1502 | else | 1694 | else |
@@ -1513,6 +1705,8 @@ static int dw_probe(struct platform_device *pdev) | |||
1513 | clk_prepare_enable(dw->clk); | 1705 | clk_prepare_enable(dw->clk); |
1514 | 1706 | ||
1515 | dw->regs = regs; | 1707 | dw->regs = regs; |
1708 | dw->sd = pdata->sd; | ||
1709 | dw->sd_count = pdata->sd_count; | ||
1516 | 1710 | ||
1517 | /* get hardware configuration parameters */ | 1711 | /* get hardware configuration parameters */ |
1518 | if (autocfg) { | 1712 | if (autocfg) { |
@@ -1544,6 +1738,14 @@ static int dw_probe(struct platform_device *pdev) | |||
1544 | 1738 | ||
1545 | platform_set_drvdata(pdev, dw); | 1739 | platform_set_drvdata(pdev, dw); |
1546 | 1740 | ||
1741 | /* create a pool of consistent memory blocks for hardware descriptors */ | ||
1742 | dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev, | ||
1743 | sizeof(struct dw_desc), 4, 0); | ||
1744 | if (!dw->desc_pool) { | ||
1745 | dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); | ||
1746 | return -ENOMEM; | ||
1747 | } | ||
1748 | |||
1547 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); | 1749 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); |
1548 | 1750 | ||
1549 | INIT_LIST_HEAD(&dw->dma.channels); | 1751 | INIT_LIST_HEAD(&dw->dma.channels); |
@@ -1575,7 +1777,7 @@ static int dw_probe(struct platform_device *pdev) | |||
1575 | 1777 | ||
1576 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1778 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1577 | 1779 | ||
1578 | dwc->dw = dw; | 1780 | dwc->direction = DMA_TRANS_NONE; |
1579 | 1781 | ||
1580 | /* hardware configuration */ | 1782 | /* hardware configuration */ |
1581 | if (autocfg) { | 1783 | if (autocfg) { |
@@ -1584,6 +1786,9 @@ static int dw_probe(struct platform_device *pdev) | |||
1584 | dwc_params = dma_read_byaddr(regs + r * sizeof(u32), | 1786 | dwc_params = dma_read_byaddr(regs + r * sizeof(u32), |
1585 | DWC_PARAMS); | 1787 | DWC_PARAMS); |
1586 | 1788 | ||
1789 | dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, | ||
1790 | dwc_params); | ||
1791 | |||
1587 | /* Decode maximum block size for given channel. The | 1792 | /* Decode maximum block size for given channel. The |
1588 | * stored 4 bit value represents blocks from 0x00 for 3 | 1793 | * stored 4 bit value represents blocks from 0x00 for 3 |
1589 | * up to 0x0a for 4095. */ | 1794 | * up to 0x0a for 4095. */ |
@@ -1627,8 +1832,8 @@ static int dw_probe(struct platform_device *pdev) | |||
1627 | 1832 | ||
1628 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | 1833 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
1629 | 1834 | ||
1630 | printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", | 1835 | dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n", |
1631 | dev_name(&pdev->dev), nr_channels); | 1836 | nr_channels); |
1632 | 1837 | ||
1633 | dma_async_device_register(&dw->dma); | 1838 | dma_async_device_register(&dw->dma); |
1634 | 1839 | ||
@@ -1658,7 +1863,7 @@ static void dw_shutdown(struct platform_device *pdev) | |||
1658 | { | 1863 | { |
1659 | struct dw_dma *dw = platform_get_drvdata(pdev); | 1864 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1660 | 1865 | ||
1661 | dw_dma_off(platform_get_drvdata(pdev)); | 1866 | dw_dma_off(dw); |
1662 | clk_disable_unprepare(dw->clk); | 1867 | clk_disable_unprepare(dw->clk); |
1663 | } | 1868 | } |
1664 | 1869 | ||
@@ -1667,7 +1872,7 @@ static int dw_suspend_noirq(struct device *dev) | |||
1667 | struct platform_device *pdev = to_platform_device(dev); | 1872 | struct platform_device *pdev = to_platform_device(dev); |
1668 | struct dw_dma *dw = platform_get_drvdata(pdev); | 1873 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1669 | 1874 | ||
1670 | dw_dma_off(platform_get_drvdata(pdev)); | 1875 | dw_dma_off(dw); |
1671 | clk_disable_unprepare(dw->clk); | 1876 | clk_disable_unprepare(dw->clk); |
1672 | 1877 | ||
1673 | return 0; | 1878 | return 0; |
@@ -1680,6 +1885,7 @@ static int dw_resume_noirq(struct device *dev) | |||
1680 | 1885 | ||
1681 | clk_prepare_enable(dw->clk); | 1886 | clk_prepare_enable(dw->clk); |
1682 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | 1887 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
1888 | |||
1683 | return 0; | 1889 | return 0; |
1684 | } | 1890 | } |
1685 | 1891 | ||
@@ -1700,7 +1906,13 @@ static const struct of_device_id dw_dma_id_table[] = { | |||
1700 | MODULE_DEVICE_TABLE(of, dw_dma_id_table); | 1906 | MODULE_DEVICE_TABLE(of, dw_dma_id_table); |
1701 | #endif | 1907 | #endif |
1702 | 1908 | ||
1909 | static const struct platform_device_id dw_dma_ids[] = { | ||
1910 | { "INTL9C60", 0 }, | ||
1911 | { } | ||
1912 | }; | ||
1913 | |||
1703 | static struct platform_driver dw_driver = { | 1914 | static struct platform_driver dw_driver = { |
1915 | .probe = dw_probe, | ||
1704 | .remove = dw_remove, | 1916 | .remove = dw_remove, |
1705 | .shutdown = dw_shutdown, | 1917 | .shutdown = dw_shutdown, |
1706 | .driver = { | 1918 | .driver = { |
@@ -1708,11 +1920,12 @@ static struct platform_driver dw_driver = { | |||
1708 | .pm = &dw_dev_pm_ops, | 1920 | .pm = &dw_dev_pm_ops, |
1709 | .of_match_table = of_match_ptr(dw_dma_id_table), | 1921 | .of_match_table = of_match_ptr(dw_dma_id_table), |
1710 | }, | 1922 | }, |
1923 | .id_table = dw_dma_ids, | ||
1711 | }; | 1924 | }; |
1712 | 1925 | ||
1713 | static int __init dw_init(void) | 1926 | static int __init dw_init(void) |
1714 | { | 1927 | { |
1715 | return platform_driver_probe(&dw_driver, dw_probe); | 1928 | return platform_driver_register(&dw_driver); |
1716 | } | 1929 | } |
1717 | subsys_initcall(dw_init); | 1930 | subsys_initcall(dw_init); |
1718 | 1931 | ||
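Taken together, the dw_dmac.c changes above move descriptor memory into a managed DMA pool created at probe time, which is why the per-descriptor dma_map_single()/dma_sync_single_for_device() calls could be dropped: pool memory is coherent and its DMA address is known at allocation. The pattern, reduced to its essentials (struct example_desc stands in for struct dw_desc):

#include <linux/types.h>
#include <linux/dmapool.h>

struct example_desc { u32 words[8]; };

static int example_pool_usage(struct device *dev)
{
        struct dma_pool *pool;
        struct example_desc *desc;
        dma_addr_t phys;

        /* freed automatically through devres, like dw->desc_pool above */
        pool = dmam_pool_create("example_desc_pool", dev,
                                sizeof(struct example_desc), 4, 0);
        if (!pool)
                return -ENOMEM;

        desc = dma_pool_alloc(pool, GFP_ATOMIC, &phys);
        if (!desc)
                return -ENOMEM;

        /* phys is what would go into txd.phys and the lli.llp chain */
        dma_pool_free(pool, desc, phys);
        return 0;
}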
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index 88965597b7d0..88dd8eb31957 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h | |||
@@ -9,6 +9,7 @@ | |||
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/dmaengine.h> | ||
12 | #include <linux/dw_dmac.h> | 13 | #include <linux/dw_dmac.h> |
13 | 14 | ||
14 | #define DW_DMA_MAX_NR_CHANNELS 8 | 15 | #define DW_DMA_MAX_NR_CHANNELS 8 |
@@ -184,15 +185,15 @@ enum dw_dmac_flags { | |||
184 | }; | 185 | }; |
185 | 186 | ||
186 | struct dw_dma_chan { | 187 | struct dw_dma_chan { |
187 | struct dma_chan chan; | 188 | struct dma_chan chan; |
188 | void __iomem *ch_regs; | 189 | void __iomem *ch_regs; |
189 | u8 mask; | 190 | u8 mask; |
190 | u8 priority; | 191 | u8 priority; |
191 | bool paused; | 192 | enum dma_transfer_direction direction; |
192 | bool initialized; | 193 | bool paused; |
194 | bool initialized; | ||
193 | 195 | ||
194 | /* software emulation of the LLP transfers */ | 196 | /* software emulation of the LLP transfers */ |
195 | struct list_head *tx_list; | ||
196 | struct list_head *tx_node_active; | 197 | struct list_head *tx_node_active; |
197 | 198 | ||
198 | spinlock_t lock; | 199 | spinlock_t lock; |
@@ -202,6 +203,7 @@ struct dw_dma_chan { | |||
202 | struct list_head active_list; | 203 | struct list_head active_list; |
203 | struct list_head queue; | 204 | struct list_head queue; |
204 | struct list_head free_list; | 205 | struct list_head free_list; |
206 | u32 residue; | ||
205 | struct dw_cyclic_desc *cdesc; | 207 | struct dw_cyclic_desc *cdesc; |
206 | 208 | ||
207 | unsigned int descs_allocated; | 209 | unsigned int descs_allocated; |
@@ -212,9 +214,6 @@ struct dw_dma_chan { | |||
212 | 214 | ||
213 | /* configuration passed via DMA_SLAVE_CONFIG */ | 215 | /* configuration passed via DMA_SLAVE_CONFIG */ |
214 | struct dma_slave_config dma_sconfig; | 216 | struct dma_slave_config dma_sconfig; |
215 | |||
216 | /* backlink to dw_dma */ | ||
217 | struct dw_dma *dw; | ||
218 | }; | 217 | }; |
219 | 218 | ||
220 | static inline struct dw_dma_chan_regs __iomem * | 219 | static inline struct dw_dma_chan_regs __iomem * |
@@ -236,9 +235,14 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) | |||
236 | struct dw_dma { | 235 | struct dw_dma { |
237 | struct dma_device dma; | 236 | struct dma_device dma; |
238 | void __iomem *regs; | 237 | void __iomem *regs; |
238 | struct dma_pool *desc_pool; | ||
239 | struct tasklet_struct tasklet; | 239 | struct tasklet_struct tasklet; |
240 | struct clk *clk; | 240 | struct clk *clk; |
241 | 241 | ||
242 | /* slave information */ | ||
243 | struct dw_dma_slave *sd; | ||
244 | unsigned int sd_count; | ||
245 | |||
242 | u8 all_chan_mask; | 246 | u8 all_chan_mask; |
243 | 247 | ||
244 | /* hardware configuration */ | 248 | /* hardware configuration */ |
@@ -293,8 +297,11 @@ struct dw_desc { | |||
293 | struct list_head tx_list; | 297 | struct list_head tx_list; |
294 | struct dma_async_tx_descriptor txd; | 298 | struct dma_async_tx_descriptor txd; |
295 | size_t len; | 299 | size_t len; |
300 | size_t total_len; | ||
296 | }; | 301 | }; |
297 | 302 | ||
303 | #define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node) | ||
304 | |||
298 | static inline struct dw_desc * | 305 | static inline struct dw_desc * |
299 | txd_to_dw_desc(struct dma_async_tx_descriptor *txd) | 306 | txd_to_dw_desc(struct dma_async_tx_descriptor *txd) |
300 | { | 307 | { |
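The new to_dw_desc() macro is plain list_entry(), i.e. container_of() applied to the embedded desc_node member; it lets the soft-LLP code turn the tx_node_active pointer back into its descriptor. A self-contained illustration of the idiom with an example structure:

#include <linux/list.h>

struct example_item {
        int payload;
        struct list_head node;
};

/* equivalent of to_dw_desc(): recover the container from its list_head */
static struct example_item *node_to_item(struct list_head *n)
{
        return list_entry(n, struct example_item, node);
}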
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index f424298f1ac5..cd7e3280fadd 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -69,9 +69,7 @@ struct edma_chan { | |||
69 | int ch_num; | 69 | int ch_num; |
70 | bool alloced; | 70 | bool alloced; |
71 | int slot[EDMA_MAX_SLOTS]; | 71 | int slot[EDMA_MAX_SLOTS]; |
72 | dma_addr_t addr; | 72 | struct dma_slave_config cfg; |
73 | int addr_width; | ||
74 | int maxburst; | ||
75 | }; | 73 | }; |
76 | 74 | ||
77 | struct edma_cc { | 75 | struct edma_cc { |
@@ -178,29 +176,14 @@ static int edma_terminate_all(struct edma_chan *echan) | |||
178 | return 0; | 176 | return 0; |
179 | } | 177 | } |
180 | 178 | ||
181 | |||
182 | static int edma_slave_config(struct edma_chan *echan, | 179 | static int edma_slave_config(struct edma_chan *echan, |
183 | struct dma_slave_config *config) | 180 | struct dma_slave_config *cfg) |
184 | { | 181 | { |
185 | if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) || | 182 | if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || |
186 | (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) | 183 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) |
187 | return -EINVAL; | 184 | return -EINVAL; |
188 | 185 | ||
189 | if (config->direction == DMA_MEM_TO_DEV) { | 186 | memcpy(&echan->cfg, cfg, sizeof(echan->cfg)); |
190 | if (config->dst_addr) | ||
191 | echan->addr = config->dst_addr; | ||
192 | if (config->dst_addr_width) | ||
193 | echan->addr_width = config->dst_addr_width; | ||
194 | if (config->dst_maxburst) | ||
195 | echan->maxburst = config->dst_maxburst; | ||
196 | } else if (config->direction == DMA_DEV_TO_MEM) { | ||
197 | if (config->src_addr) | ||
198 | echan->addr = config->src_addr; | ||
199 | if (config->src_addr_width) | ||
200 | echan->addr_width = config->src_addr_width; | ||
201 | if (config->src_maxburst) | ||
202 | echan->maxburst = config->src_maxburst; | ||
203 | } | ||
204 | 187 | ||
205 | return 0; | 188 | return 0; |
206 | } | 189 | } |
@@ -235,6 +218,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
235 | struct edma_chan *echan = to_edma_chan(chan); | 218 | struct edma_chan *echan = to_edma_chan(chan); |
236 | struct device *dev = chan->device->dev; | 219 | struct device *dev = chan->device->dev; |
237 | struct edma_desc *edesc; | 220 | struct edma_desc *edesc; |
221 | dma_addr_t dev_addr; | ||
222 | enum dma_slave_buswidth dev_width; | ||
223 | u32 burst; | ||
238 | struct scatterlist *sg; | 224 | struct scatterlist *sg; |
239 | int i; | 225 | int i; |
240 | int acnt, bcnt, ccnt, src, dst, cidx; | 226 | int acnt, bcnt, ccnt, src, dst, cidx; |
@@ -243,7 +229,20 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
243 | if (unlikely(!echan || !sgl || !sg_len)) | 229 | if (unlikely(!echan || !sgl || !sg_len)) |
244 | return NULL; | 230 | return NULL; |
245 | 231 | ||
246 | if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { | 232 | if (direction == DMA_DEV_TO_MEM) { |
233 | dev_addr = echan->cfg.src_addr; | ||
234 | dev_width = echan->cfg.src_addr_width; | ||
235 | burst = echan->cfg.src_maxburst; | ||
236 | } else if (direction == DMA_MEM_TO_DEV) { | ||
237 | dev_addr = echan->cfg.dst_addr; | ||
238 | dev_width = echan->cfg.dst_addr_width; | ||
239 | burst = echan->cfg.dst_maxburst; | ||
240 | } else { | ||
241 | dev_err(dev, "%s: bad direction?\n", __func__); | ||
242 | return NULL; | ||
243 | } | ||
244 | |||
245 | if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { | ||
247 | dev_err(dev, "Undefined slave buswidth\n"); | 246 | dev_err(dev, "Undefined slave buswidth\n"); |
248 | return NULL; | 247 | return NULL; |
249 | } | 248 | } |
@@ -275,14 +274,14 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
275 | } | 274 | } |
276 | } | 275 | } |
277 | 276 | ||
278 | acnt = echan->addr_width; | 277 | acnt = dev_width; |
279 | 278 | ||
280 | /* | 279 | /* |
281 | * If the maxburst is equal to the fifo width, use | 280 | * If the maxburst is equal to the fifo width, use |
282 | * A-synced transfers. This allows for large contiguous | 281 | * A-synced transfers. This allows for large contiguous |
283 | * buffer transfers using only one PaRAM set. | 282 | * buffer transfers using only one PaRAM set. |
284 | */ | 283 | */ |
285 | if (echan->maxburst == 1) { | 284 | if (burst == 1) { |
286 | edesc->absync = false; | 285 | edesc->absync = false; |
287 | ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); | 286 | ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); |
288 | bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); | 287 | bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); |
@@ -302,7 +301,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
302 | */ | 301 | */ |
303 | } else { | 302 | } else { |
304 | edesc->absync = true; | 303 | edesc->absync = true; |
305 | bcnt = echan->maxburst; | 304 | bcnt = burst; |
306 | ccnt = sg_dma_len(sg) / (acnt * bcnt); | 305 | ccnt = sg_dma_len(sg) / (acnt * bcnt); |
307 | if (ccnt > (SZ_64K - 1)) { | 306 | if (ccnt > (SZ_64K - 1)) { |
308 | dev_err(dev, "Exceeded max SG segment size\n"); | 307 | dev_err(dev, "Exceeded max SG segment size\n"); |
@@ -313,13 +312,13 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
313 | 312 | ||
314 | if (direction == DMA_MEM_TO_DEV) { | 313 | if (direction == DMA_MEM_TO_DEV) { |
315 | src = sg_dma_address(sg); | 314 | src = sg_dma_address(sg); |
316 | dst = echan->addr; | 315 | dst = dev_addr; |
317 | src_bidx = acnt; | 316 | src_bidx = acnt; |
318 | src_cidx = cidx; | 317 | src_cidx = cidx; |
319 | dst_bidx = 0; | 318 | dst_bidx = 0; |
320 | dst_cidx = 0; | 319 | dst_cidx = 0; |
321 | } else { | 320 | } else { |
322 | src = echan->addr; | 321 | src = dev_addr; |
323 | dst = sg_dma_address(sg); | 322 | dst = sg_dma_address(sg); |
324 | src_bidx = 0; | 323 | src_bidx = 0; |
325 | src_cidx = 0; | 324 | src_cidx = 0; |
@@ -621,13 +620,11 @@ static struct platform_device *pdev0, *pdev1; | |||
621 | static const struct platform_device_info edma_dev_info0 = { | 620 | static const struct platform_device_info edma_dev_info0 = { |
622 | .name = "edma-dma-engine", | 621 | .name = "edma-dma-engine", |
623 | .id = 0, | 622 | .id = 0, |
624 | .dma_mask = DMA_BIT_MASK(32), | ||
625 | }; | 623 | }; |
626 | 624 | ||
627 | static const struct platform_device_info edma_dev_info1 = { | 625 | static const struct platform_device_info edma_dev_info1 = { |
628 | .name = "edma-dma-engine", | 626 | .name = "edma-dma-engine", |
629 | .id = 1, | 627 | .id = 1, |
630 | .dma_mask = DMA_BIT_MASK(32), | ||
631 | }; | 628 | }; |
632 | 629 | ||
633 | static int edma_init(void) | 630 | static int edma_init(void) |
@@ -641,6 +638,8 @@ static int edma_init(void) | |||
641 | ret = PTR_ERR(pdev0); | 638 | ret = PTR_ERR(pdev0); |
642 | goto out; | 639 | goto out; |
643 | } | 640 | } |
641 | pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask; | ||
642 | pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
644 | } | 643 | } |
645 | 644 | ||
646 | if (EDMA_CTLRS == 2) { | 645 | if (EDMA_CTLRS == 2) { |
@@ -650,6 +649,8 @@ static int edma_init(void) | |||
650 | platform_device_unregister(pdev0); | 649 | platform_device_unregister(pdev0); |
651 | ret = PTR_ERR(pdev1); | 650 | ret = PTR_ERR(pdev1); |
652 | } | 651 | } |
652 | pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask; | ||
653 | pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
653 | } | 654 | } |
654 | 655 | ||
655 | out: | 656 | out: |
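In edma_init() above, the dma_mask is no longer passed through platform_device_info; instead the creator points dev.dma_mask at coherent_dma_mask once the device exists, the same fix-up dw_probe() applies for DT-created devices. The pattern in isolation (the device name is a placeholder):

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

static struct platform_device *example_register(void)
{
        struct platform_device_info info = {
                .name = "example-dma",
                .id   = 0,
        };
        struct platform_device *pdev = platform_device_register_full(&info);

        if (!IS_ERR(pdev)) {
                pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        }
        return pdev;
}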
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index bcfde400904f..f2bf8c0c4675 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c | |||
@@ -903,8 +903,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) | |||
903 | switch (data->port) { | 903 | switch (data->port) { |
904 | case EP93XX_DMA_SSP: | 904 | case EP93XX_DMA_SSP: |
905 | case EP93XX_DMA_IDE: | 905 | case EP93XX_DMA_IDE: |
906 | if (data->direction != DMA_MEM_TO_DEV && | 906 | if (!is_slave_direction(data->direction)) |
907 | data->direction != DMA_DEV_TO_MEM) | ||
908 | return -EINVAL; | 907 | return -EINVAL; |
909 | break; | 908 | break; |
910 | default: | 909 | default: |
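is_slave_direction(), used in the ep93xx and dw_dmac changes above, is a dmaengine.h helper equivalent to the two-way comparison it replaces:

#include <linux/dmaengine.h>

/* same test as the open-coded check removed above */
static bool example_is_slave_direction(enum dma_transfer_direction dir)
{
        return dir == DMA_MEM_TO_DEV || dir == DMA_DEV_TO_MEM;
}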
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 1a68a8ba87e6..1879a5942bfc 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
@@ -833,14 +833,14 @@ int ioat_dma_self_test(struct ioatdma_device *device) | |||
833 | 833 | ||
834 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | 834 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); |
835 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); | 835 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); |
836 | flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE | | 836 | flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | |
837 | DMA_PREP_INTERRUPT; | 837 | DMA_PREP_INTERRUPT; |
838 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, | 838 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, |
839 | IOAT_TEST_SIZE, flags); | 839 | IOAT_TEST_SIZE, flags); |
840 | if (!tx) { | 840 | if (!tx) { |
841 | dev_err(dev, "Self-test prep failed, disabling\n"); | 841 | dev_err(dev, "Self-test prep failed, disabling\n"); |
842 | err = -ENODEV; | 842 | err = -ENODEV; |
843 | goto free_resources; | 843 | goto unmap_dma; |
844 | } | 844 | } |
845 | 845 | ||
846 | async_tx_ack(tx); | 846 | async_tx_ack(tx); |
@@ -851,7 +851,7 @@ int ioat_dma_self_test(struct ioatdma_device *device) | |||
851 | if (cookie < 0) { | 851 | if (cookie < 0) { |
852 | dev_err(dev, "Self-test setup failed, disabling\n"); | 852 | dev_err(dev, "Self-test setup failed, disabling\n"); |
853 | err = -ENODEV; | 853 | err = -ENODEV; |
854 | goto free_resources; | 854 | goto unmap_dma; |
855 | } | 855 | } |
856 | dma->device_issue_pending(dma_chan); | 856 | dma->device_issue_pending(dma_chan); |
857 | 857 | ||
@@ -862,7 +862,7 @@ int ioat_dma_self_test(struct ioatdma_device *device) | |||
862 | != DMA_SUCCESS) { | 862 | != DMA_SUCCESS) { |
863 | dev_err(dev, "Self-test copy timed out, disabling\n"); | 863 | dev_err(dev, "Self-test copy timed out, disabling\n"); |
864 | err = -ENODEV; | 864 | err = -ENODEV; |
865 | goto free_resources; | 865 | goto unmap_dma; |
866 | } | 866 | } |
867 | if (memcmp(src, dest, IOAT_TEST_SIZE)) { | 867 | if (memcmp(src, dest, IOAT_TEST_SIZE)) { |
868 | dev_err(dev, "Self-test copy failed compare, disabling\n"); | 868 | dev_err(dev, "Self-test copy failed compare, disabling\n"); |
@@ -870,6 +870,9 @@ int ioat_dma_self_test(struct ioatdma_device *device) | |||
870 | goto free_resources; | 870 | goto free_resources; |
871 | } | 871 | } |
872 | 872 | ||
873 | unmap_dma: | ||
874 | dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | ||
875 | dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); | ||
873 | free_resources: | 876 | free_resources: |
874 | dma->device_free_chan_resources(dma_chan); | 877 | dma->device_free_chan_resources(dma_chan); |
875 | out: | 878 | out: |
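The self-test above now sets DMA_COMPL_SKIP_SRC_UNMAP/DMA_COMPL_SKIP_DEST_UNMAP and releases the mappings itself through the new unmap_dma label, so every exit path after the prep call unmaps. Reduced to a generic sketch (the wait step is abbreviated and the function name is illustrative):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int example_one_copy(struct device *dev, struct dma_chan *chan,
                            void *src, void *dst, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_src, dma_dst;
        int err = 0;

        dma_src = dma_map_single(dev, src, len, DMA_TO_DEVICE);
        dma_dst = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);

        tx = chan->device->device_prep_dma_memcpy(chan, dma_dst, dma_src, len,
                        DMA_PREP_INTERRUPT |
                        DMA_COMPL_SKIP_SRC_UNMAP |
                        DMA_COMPL_SKIP_DEST_UNMAP);
        if (!tx) {
                err = -ENODEV;
                goto unmap;             /* mirrors the unmap_dma label above */
        }

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        /* ... wait for completion before touching the buffers ... */

unmap:
        dma_unmap_single(dev, dma_src, len, DMA_TO_DEVICE);
        dma_unmap_single(dev, dma_dst, len, DMA_FROM_DEVICE);
        return err;
}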
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 087935f1565f..53a4cbb78f47 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h | |||
@@ -97,6 +97,7 @@ struct ioat_chan_common { | |||
97 | #define IOAT_KOBJ_INIT_FAIL 3 | 97 | #define IOAT_KOBJ_INIT_FAIL 3 |
98 | #define IOAT_RESHAPE_PENDING 4 | 98 | #define IOAT_RESHAPE_PENDING 4 |
99 | #define IOAT_RUN 5 | 99 | #define IOAT_RUN 5 |
100 | #define IOAT_CHAN_ACTIVE 6 | ||
100 | struct timer_list timer; | 101 | struct timer_list timer; |
101 | #define COMPLETION_TIMEOUT msecs_to_jiffies(100) | 102 | #define COMPLETION_TIMEOUT msecs_to_jiffies(100) |
102 | #define IDLE_TIMEOUT msecs_to_jiffies(2000) | 103 | #define IDLE_TIMEOUT msecs_to_jiffies(2000) |
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 82d4e306c32e..b925e1b1d139 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c | |||
@@ -269,61 +269,22 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) | |||
269 | __ioat2_restart_chan(ioat); | 269 | __ioat2_restart_chan(ioat); |
270 | } | 270 | } |
271 | 271 | ||
272 | void ioat2_timer_event(unsigned long data) | 272 | static void check_active(struct ioat2_dma_chan *ioat) |
273 | { | 273 | { |
274 | struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); | ||
275 | struct ioat_chan_common *chan = &ioat->base; | 274 | struct ioat_chan_common *chan = &ioat->base; |
276 | 275 | ||
277 | if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { | 276 | if (ioat2_ring_active(ioat)) { |
278 | dma_addr_t phys_complete; | 277 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); |
279 | u64 status; | 278 | return; |
280 | 279 | } | |
281 | status = ioat_chansts(chan); | ||
282 | |||
283 | /* when halted due to errors check for channel | ||
284 | * programming errors before advancing the completion state | ||
285 | */ | ||
286 | if (is_ioat_halted(status)) { | ||
287 | u32 chanerr; | ||
288 | |||
289 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
290 | dev_err(to_dev(chan), "%s: Channel halted (%x)\n", | ||
291 | __func__, chanerr); | ||
292 | if (test_bit(IOAT_RUN, &chan->state)) | ||
293 | BUG_ON(is_ioat_bug(chanerr)); | ||
294 | else /* we never got off the ground */ | ||
295 | return; | ||
296 | } | ||
297 | |||
298 | /* if we haven't made progress and we have already | ||
299 | * acknowledged a pending completion once, then be more | ||
300 | * forceful with a restart | ||
301 | */ | ||
302 | spin_lock_bh(&chan->cleanup_lock); | ||
303 | if (ioat_cleanup_preamble(chan, &phys_complete)) { | ||
304 | __cleanup(ioat, phys_complete); | ||
305 | } else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) { | ||
306 | spin_lock_bh(&ioat->prep_lock); | ||
307 | ioat2_restart_channel(ioat); | ||
308 | spin_unlock_bh(&ioat->prep_lock); | ||
309 | } else { | ||
310 | set_bit(IOAT_COMPLETION_ACK, &chan->state); | ||
311 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
312 | } | ||
313 | spin_unlock_bh(&chan->cleanup_lock); | ||
314 | } else { | ||
315 | u16 active; | ||
316 | 280 | ||
281 | if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state)) | ||
282 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); | ||
283 | else if (ioat->alloc_order > ioat_get_alloc_order()) { | ||
317 | /* if the ring is idle, empty, and oversized try to step | 284 | /* if the ring is idle, empty, and oversized try to step |
318 | * down the size | 285 | * down the size |
319 | */ | 286 | */ |
320 | spin_lock_bh(&chan->cleanup_lock); | 287 | reshape_ring(ioat, ioat->alloc_order - 1); |
321 | spin_lock_bh(&ioat->prep_lock); | ||
322 | active = ioat2_ring_active(ioat); | ||
323 | if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) | ||
324 | reshape_ring(ioat, ioat->alloc_order-1); | ||
325 | spin_unlock_bh(&ioat->prep_lock); | ||
326 | spin_unlock_bh(&chan->cleanup_lock); | ||
327 | 288 | ||
328 | /* keep shrinking until we get back to our minimum | 289 | /* keep shrinking until we get back to our minimum |
329 | * default size | 290 | * default size |
@@ -331,6 +292,60 @@ void ioat2_timer_event(unsigned long data) | |||
331 | if (ioat->alloc_order > ioat_get_alloc_order()) | 292 | if (ioat->alloc_order > ioat_get_alloc_order()) |
332 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); | 293 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); |
333 | } | 294 | } |
295 | |||
296 | } | ||
297 | |||
298 | void ioat2_timer_event(unsigned long data) | ||
299 | { | ||
300 | struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); | ||
301 | struct ioat_chan_common *chan = &ioat->base; | ||
302 | dma_addr_t phys_complete; | ||
303 | u64 status; | ||
304 | |||
305 | status = ioat_chansts(chan); | ||
306 | |||
307 | /* when halted due to errors check for channel | ||
308 | * programming errors before advancing the completion state | ||
309 | */ | ||
310 | if (is_ioat_halted(status)) { | ||
311 | u32 chanerr; | ||
312 | |||
313 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
314 | dev_err(to_dev(chan), "%s: Channel halted (%x)\n", | ||
315 | __func__, chanerr); | ||
316 | if (test_bit(IOAT_RUN, &chan->state)) | ||
317 | BUG_ON(is_ioat_bug(chanerr)); | ||
318 | else /* we never got off the ground */ | ||
319 | return; | ||
320 | } | ||
321 | |||
322 | /* if we haven't made progress and we have already | ||
323 | * acknowledged a pending completion once, then be more | ||
324 | * forceful with a restart | ||
325 | */ | ||
326 | spin_lock_bh(&chan->cleanup_lock); | ||
327 | if (ioat_cleanup_preamble(chan, &phys_complete)) | ||
328 | __cleanup(ioat, phys_complete); | ||
329 | else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) { | ||
330 | spin_lock_bh(&ioat->prep_lock); | ||
331 | ioat2_restart_channel(ioat); | ||
332 | spin_unlock_bh(&ioat->prep_lock); | ||
333 | spin_unlock_bh(&chan->cleanup_lock); | ||
334 | return; | ||
335 | } else { | ||
336 | set_bit(IOAT_COMPLETION_ACK, &chan->state); | ||
337 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
338 | } | ||
339 | |||
340 | |||
341 | if (ioat2_ring_active(ioat)) | ||
342 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
343 | else { | ||
344 | spin_lock_bh(&ioat->prep_lock); | ||
345 | check_active(ioat); | ||
346 | spin_unlock_bh(&ioat->prep_lock); | ||
347 | } | ||
348 | spin_unlock_bh(&chan->cleanup_lock); | ||
334 | } | 349 | } |
335 | 350 | ||
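check_active() and the rewritten ioat2_timer_event() above keep the driver's established lock nesting: cleanup_lock is taken first and prep_lock is nested inside it for ring bookkeeping, with the restart path dropping both before returning. Stripped of the surrounding logic, the ordering looks like this (a sketch within dma_v2.c; the function name is illustrative):

static void example_cleanup_then_reshape(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;

        spin_lock_bh(&chan->cleanup_lock);
        /* ... completion cleanup under cleanup_lock ... */
        spin_lock_bh(&ioat->prep_lock);
        /* ... check_active()/reshape_ring() under prep_lock ... */
        spin_unlock_bh(&ioat->prep_lock);
        spin_unlock_bh(&chan->cleanup_lock);
}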
336 | static int ioat2_reset_hw(struct ioat_chan_common *chan) | 351 | static int ioat2_reset_hw(struct ioat_chan_common *chan) |
@@ -404,7 +419,7 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) | |||
404 | cookie = dma_cookie_assign(tx); | 419 | cookie = dma_cookie_assign(tx); |
405 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); | 420 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); |
406 | 421 | ||
407 | if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) | 422 | if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state)) |
408 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | 423 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); |
409 | 424 | ||
410 | /* make descriptor updates visible before advancing ioat->head, | 425 | /* make descriptor updates visible before advancing ioat->head, |
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 3e9d66920eb3..e8336cce360b 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c | |||
@@ -342,61 +342,22 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) | |||
342 | __ioat2_restart_chan(ioat); | 342 | __ioat2_restart_chan(ioat); |
343 | } | 343 | } |
344 | 344 | ||
345 | static void ioat3_timer_event(unsigned long data) | 345 | static void check_active(struct ioat2_dma_chan *ioat) |
346 | { | 346 | { |
347 | struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); | ||
348 | struct ioat_chan_common *chan = &ioat->base; | 347 | struct ioat_chan_common *chan = &ioat->base; |
349 | 348 | ||
350 | if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { | 349 | if (ioat2_ring_active(ioat)) { |
351 | dma_addr_t phys_complete; | 350 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); |
352 | u64 status; | 351 | return; |
353 | 352 | } | |
354 | status = ioat_chansts(chan); | ||
355 | |||
356 | /* when halted due to errors check for channel | ||
357 | * programming errors before advancing the completion state | ||
358 | */ | ||
359 | if (is_ioat_halted(status)) { | ||
360 | u32 chanerr; | ||
361 | |||
362 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
363 | dev_err(to_dev(chan), "%s: Channel halted (%x)\n", | ||
364 | __func__, chanerr); | ||
365 | if (test_bit(IOAT_RUN, &chan->state)) | ||
366 | BUG_ON(is_ioat_bug(chanerr)); | ||
367 | else /* we never got off the ground */ | ||
368 | return; | ||
369 | } | ||
370 | |||
371 | /* if we haven't made progress and we have already | ||
372 | * acknowledged a pending completion once, then be more | ||
373 | * forceful with a restart | ||
374 | */ | ||
375 | spin_lock_bh(&chan->cleanup_lock); | ||
376 | if (ioat_cleanup_preamble(chan, &phys_complete)) | ||
377 | __cleanup(ioat, phys_complete); | ||
378 | else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) { | ||
379 | spin_lock_bh(&ioat->prep_lock); | ||
380 | ioat3_restart_channel(ioat); | ||
381 | spin_unlock_bh(&ioat->prep_lock); | ||
382 | } else { | ||
383 | set_bit(IOAT_COMPLETION_ACK, &chan->state); | ||
384 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
385 | } | ||
386 | spin_unlock_bh(&chan->cleanup_lock); | ||
387 | } else { | ||
388 | u16 active; | ||
389 | 353 | ||
354 | if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state)) | ||
355 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); | ||
356 | else if (ioat->alloc_order > ioat_get_alloc_order()) { | ||
390 | /* if the ring is idle, empty, and oversized try to step | 357 | /* if the ring is idle, empty, and oversized try to step |
391 | * down the size | 358 | * down the size |
392 | */ | 359 | */ |
393 | spin_lock_bh(&chan->cleanup_lock); | 360 | reshape_ring(ioat, ioat->alloc_order - 1); |
394 | spin_lock_bh(&ioat->prep_lock); | ||
395 | active = ioat2_ring_active(ioat); | ||
396 | if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) | ||
397 | reshape_ring(ioat, ioat->alloc_order-1); | ||
398 | spin_unlock_bh(&ioat->prep_lock); | ||
399 | spin_unlock_bh(&chan->cleanup_lock); | ||
400 | 361 | ||
401 | /* keep shrinking until we get back to our minimum | 362 | /* keep shrinking until we get back to our minimum |
402 | * default size | 363 | * default size |
@@ -404,6 +365,60 @@ static void ioat3_timer_event(unsigned long data) | |||
404 | if (ioat->alloc_order > ioat_get_alloc_order()) | 365 | if (ioat->alloc_order > ioat_get_alloc_order()) |
405 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); | 366 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); |
406 | } | 367 | } |
368 | |||
369 | } | ||
370 | |||
371 | static void ioat3_timer_event(unsigned long data) | ||
372 | { | ||
373 | struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); | ||
374 | struct ioat_chan_common *chan = &ioat->base; | ||
375 | dma_addr_t phys_complete; | ||
376 | u64 status; | ||
377 | |||
378 | status = ioat_chansts(chan); | ||
379 | |||
380 | /* when halted due to errors check for channel | ||
381 | * programming errors before advancing the completion state | ||
382 | */ | ||
383 | if (is_ioat_halted(status)) { | ||
384 | u32 chanerr; | ||
385 | |||
386 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
387 | dev_err(to_dev(chan), "%s: Channel halted (%x)\n", | ||
388 | __func__, chanerr); | ||
389 | if (test_bit(IOAT_RUN, &chan->state)) | ||
390 | BUG_ON(is_ioat_bug(chanerr)); | ||
391 | else /* we never got off the ground */ | ||
392 | return; | ||
393 | } | ||
394 | |||
395 | /* if we haven't made progress and we have already | ||
396 | * acknowledged a pending completion once, then be more | ||
397 | * forceful with a restart | ||
398 | */ | ||
399 | spin_lock_bh(&chan->cleanup_lock); | ||
400 | if (ioat_cleanup_preamble(chan, &phys_complete)) | ||
401 | __cleanup(ioat, phys_complete); | ||
402 | else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) { | ||
403 | spin_lock_bh(&ioat->prep_lock); | ||
404 | ioat3_restart_channel(ioat); | ||
405 | spin_unlock_bh(&ioat->prep_lock); | ||
406 | spin_unlock_bh(&chan->cleanup_lock); | ||
407 | return; | ||
408 | } else { | ||
409 | set_bit(IOAT_COMPLETION_ACK, &chan->state); | ||
410 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
411 | } | ||
412 | |||
413 | |||
414 | if (ioat2_ring_active(ioat)) | ||
415 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
416 | else { | ||
417 | spin_lock_bh(&ioat->prep_lock); | ||
418 | check_active(ioat); | ||
419 | spin_unlock_bh(&ioat->prep_lock); | ||
420 | } | ||
421 | spin_unlock_bh(&chan->cleanup_lock); | ||
407 | } | 422 | } |
408 | 423 | ||
409 | static enum dma_status | 424 | static enum dma_status |
@@ -863,6 +878,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
863 | unsigned long tmo; | 878 | unsigned long tmo; |
864 | struct device *dev = &device->pdev->dev; | 879 | struct device *dev = &device->pdev->dev; |
865 | struct dma_device *dma = &device->common; | 880 | struct dma_device *dma = &device->common; |
881 | u8 op = 0; | ||
866 | 882 | ||
867 | dev_dbg(dev, "%s\n", __func__); | 883 | dev_dbg(dev, "%s\n", __func__); |
868 | 884 | ||
@@ -908,18 +924,22 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
908 | } | 924 | } |
909 | 925 | ||
910 | /* test xor */ | 926 | /* test xor */ |
927 | op = IOAT_OP_XOR; | ||
928 | |||
911 | dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); | 929 | dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); |
912 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | 930 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) |
913 | dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, | 931 | dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, |
914 | DMA_TO_DEVICE); | 932 | DMA_TO_DEVICE); |
915 | tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, | 933 | tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, |
916 | IOAT_NUM_SRC_TEST, PAGE_SIZE, | 934 | IOAT_NUM_SRC_TEST, PAGE_SIZE, |
917 | DMA_PREP_INTERRUPT); | 935 | DMA_PREP_INTERRUPT | |
936 | DMA_COMPL_SKIP_SRC_UNMAP | | ||
937 | DMA_COMPL_SKIP_DEST_UNMAP); | ||
918 | 938 | ||
919 | if (!tx) { | 939 | if (!tx) { |
920 | dev_err(dev, "Self-test xor prep failed\n"); | 940 | dev_err(dev, "Self-test xor prep failed\n"); |
921 | err = -ENODEV; | 941 | err = -ENODEV; |
922 | goto free_resources; | 942 | goto dma_unmap; |
923 | } | 943 | } |
924 | 944 | ||
925 | async_tx_ack(tx); | 945 | async_tx_ack(tx); |
@@ -930,7 +950,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
930 | if (cookie < 0) { | 950 | if (cookie < 0) { |
931 | dev_err(dev, "Self-test xor setup failed\n"); | 951 | dev_err(dev, "Self-test xor setup failed\n"); |
932 | err = -ENODEV; | 952 | err = -ENODEV; |
933 | goto free_resources; | 953 | goto dma_unmap; |
934 | } | 954 | } |
935 | dma->device_issue_pending(dma_chan); | 955 | dma->device_issue_pending(dma_chan); |
936 | 956 | ||
@@ -939,9 +959,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
939 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 959 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { |
940 | dev_err(dev, "Self-test xor timed out\n"); | 960 | dev_err(dev, "Self-test xor timed out\n"); |
941 | err = -ENODEV; | 961 | err = -ENODEV; |
942 | goto free_resources; | 962 | goto dma_unmap; |
943 | } | 963 | } |
944 | 964 | ||
965 | dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); | ||
966 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | ||
967 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); | ||
968 | |||
945 | dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); | 969 | dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); |
946 | for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { | 970 | for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { |
947 | u32 *ptr = page_address(dest); | 971 | u32 *ptr = page_address(dest); |
@@ -957,6 +981,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
957 | if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) | 981 | if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) |
958 | goto free_resources; | 982 | goto free_resources; |
959 | 983 | ||
984 | op = IOAT_OP_XOR_VAL; | ||
985 | |||
960 | /* validate the sources with the destination page */ | 986 | /* validate the sources with the destination page */ |
961 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | 987 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) |
962 | xor_val_srcs[i] = xor_srcs[i]; | 988 | xor_val_srcs[i] = xor_srcs[i]; |
@@ -969,11 +995,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
969 | DMA_TO_DEVICE); | 995 | DMA_TO_DEVICE); |
970 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | 996 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, |
971 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | 997 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, |
972 | &xor_val_result, DMA_PREP_INTERRUPT); | 998 | &xor_val_result, DMA_PREP_INTERRUPT | |
999 | DMA_COMPL_SKIP_SRC_UNMAP | | ||
1000 | DMA_COMPL_SKIP_DEST_UNMAP); | ||
973 | if (!tx) { | 1001 | if (!tx) { |
974 | dev_err(dev, "Self-test zero prep failed\n"); | 1002 | dev_err(dev, "Self-test zero prep failed\n"); |
975 | err = -ENODEV; | 1003 | err = -ENODEV; |
976 | goto free_resources; | 1004 | goto dma_unmap; |
977 | } | 1005 | } |
978 | 1006 | ||
979 | async_tx_ack(tx); | 1007 | async_tx_ack(tx); |
@@ -984,7 +1012,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
984 | if (cookie < 0) { | 1012 | if (cookie < 0) { |
985 | dev_err(dev, "Self-test zero setup failed\n"); | 1013 | dev_err(dev, "Self-test zero setup failed\n"); |
986 | err = -ENODEV; | 1014 | err = -ENODEV; |
987 | goto free_resources; | 1015 | goto dma_unmap; |
988 | } | 1016 | } |
989 | dma->device_issue_pending(dma_chan); | 1017 | dma->device_issue_pending(dma_chan); |
990 | 1018 | ||
@@ -993,9 +1021,12 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
993 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1021 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { |
994 | dev_err(dev, "Self-test validate timed out\n"); | 1022 | dev_err(dev, "Self-test validate timed out\n"); |
995 | err = -ENODEV; | 1023 | err = -ENODEV; |
996 | goto free_resources; | 1024 | goto dma_unmap; |
997 | } | 1025 | } |
998 | 1026 | ||
1027 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
1028 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); | ||
1029 | |||
999 | if (xor_val_result != 0) { | 1030 | if (xor_val_result != 0) { |
1000 | dev_err(dev, "Self-test validate failed compare\n"); | 1031 | dev_err(dev, "Self-test validate failed compare\n"); |
1001 | err = -ENODEV; | 1032 | err = -ENODEV; |
@@ -1007,14 +1038,18 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1007 | goto free_resources; | 1038 | goto free_resources; |
1008 | 1039 | ||
1009 | /* test memset */ | 1040 | /* test memset */ |
1041 | op = IOAT_OP_FILL; | ||
1042 | |||
1010 | dma_addr = dma_map_page(dev, dest, 0, | 1043 | dma_addr = dma_map_page(dev, dest, 0, |
1011 | PAGE_SIZE, DMA_FROM_DEVICE); | 1044 | PAGE_SIZE, DMA_FROM_DEVICE); |
1012 | tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, | 1045 | tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, |
1013 | DMA_PREP_INTERRUPT); | 1046 | DMA_PREP_INTERRUPT | |
1047 | DMA_COMPL_SKIP_SRC_UNMAP | | ||
1048 | DMA_COMPL_SKIP_DEST_UNMAP); | ||
1014 | if (!tx) { | 1049 | if (!tx) { |
1015 | dev_err(dev, "Self-test memset prep failed\n"); | 1050 | dev_err(dev, "Self-test memset prep failed\n"); |
1016 | err = -ENODEV; | 1051 | err = -ENODEV; |
1017 | goto free_resources; | 1052 | goto dma_unmap; |
1018 | } | 1053 | } |
1019 | 1054 | ||
1020 | async_tx_ack(tx); | 1055 | async_tx_ack(tx); |
@@ -1025,7 +1060,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1025 | if (cookie < 0) { | 1060 | if (cookie < 0) { |
1026 | dev_err(dev, "Self-test memset setup failed\n"); | 1061 | dev_err(dev, "Self-test memset setup failed\n"); |
1027 | err = -ENODEV; | 1062 | err = -ENODEV; |
1028 | goto free_resources; | 1063 | goto dma_unmap; |
1029 | } | 1064 | } |
1030 | dma->device_issue_pending(dma_chan); | 1065 | dma->device_issue_pending(dma_chan); |
1031 | 1066 | ||
@@ -1034,9 +1069,11 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1034 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1069 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { |
1035 | dev_err(dev, "Self-test memset timed out\n"); | 1070 | dev_err(dev, "Self-test memset timed out\n"); |
1036 | err = -ENODEV; | 1071 | err = -ENODEV; |
1037 | goto free_resources; | 1072 | goto dma_unmap; |
1038 | } | 1073 | } |
1039 | 1074 | ||
1075 | dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); | ||
1076 | |||
1040 | for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { | 1077 | for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { |
1041 | u32 *ptr = page_address(dest); | 1078 | u32 *ptr = page_address(dest); |
1042 | if (ptr[i]) { | 1079 | if (ptr[i]) { |
@@ -1047,17 +1084,21 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1047 | } | 1084 | } |
1048 | 1085 | ||
1049 | /* test for non-zero parity sum */ | 1086 | /* test for non-zero parity sum */ |
1087 | op = IOAT_OP_XOR_VAL; | ||
1088 | |||
1050 | xor_val_result = 0; | 1089 | xor_val_result = 0; |
1051 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | 1090 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) |
1052 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, | 1091 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, |
1053 | DMA_TO_DEVICE); | 1092 | DMA_TO_DEVICE); |
1054 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | 1093 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, |
1055 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | 1094 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, |
1056 | &xor_val_result, DMA_PREP_INTERRUPT); | 1095 | &xor_val_result, DMA_PREP_INTERRUPT | |
1096 | DMA_COMPL_SKIP_SRC_UNMAP | | ||
1097 | DMA_COMPL_SKIP_DEST_UNMAP); | ||
1057 | if (!tx) { | 1098 | if (!tx) { |
1058 | dev_err(dev, "Self-test 2nd zero prep failed\n"); | 1099 | dev_err(dev, "Self-test 2nd zero prep failed\n"); |
1059 | err = -ENODEV; | 1100 | err = -ENODEV; |
1060 | goto free_resources; | 1101 | goto dma_unmap; |
1061 | } | 1102 | } |
1062 | 1103 | ||
1063 | async_tx_ack(tx); | 1104 | async_tx_ack(tx); |
@@ -1068,7 +1109,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1068 | if (cookie < 0) { | 1109 | if (cookie < 0) { |
1069 | dev_err(dev, "Self-test 2nd zero setup failed\n"); | 1110 | dev_err(dev, "Self-test 2nd zero setup failed\n"); |
1070 | err = -ENODEV; | 1111 | err = -ENODEV; |
1071 | goto free_resources; | 1112 | goto dma_unmap; |
1072 | } | 1113 | } |
1073 | dma->device_issue_pending(dma_chan); | 1114 | dma->device_issue_pending(dma_chan); |
1074 | 1115 | ||
@@ -1077,15 +1118,31 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1077 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1118 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { |
1078 | dev_err(dev, "Self-test 2nd validate timed out\n"); | 1119 | dev_err(dev, "Self-test 2nd validate timed out\n"); |
1079 | err = -ENODEV; | 1120 | err = -ENODEV; |
1080 | goto free_resources; | 1121 | goto dma_unmap; |
1081 | } | 1122 | } |
1082 | 1123 | ||
1083 | if (xor_val_result != SUM_CHECK_P_RESULT) { | 1124 | if (xor_val_result != SUM_CHECK_P_RESULT) { |
1084 | dev_err(dev, "Self-test validate failed compare\n"); | 1125 | dev_err(dev, "Self-test validate failed compare\n"); |
1085 | err = -ENODEV; | 1126 | err = -ENODEV; |
1086 | goto free_resources; | 1127 | goto dma_unmap; |
1087 | } | 1128 | } |
1088 | 1129 | ||
1130 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
1131 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); | ||
1132 | |||
1133 | goto free_resources; | ||
1134 | dma_unmap: | ||
1135 | if (op == IOAT_OP_XOR) { | ||
1136 | dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); | ||
1137 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | ||
1138 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, | ||
1139 | DMA_TO_DEVICE); | ||
1140 | } else if (op == IOAT_OP_XOR_VAL) { | ||
1141 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | ||
1142 | dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, | ||
1143 | DMA_TO_DEVICE); | ||
1144 | } else if (op == IOAT_OP_FILL) | ||
1145 | dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); | ||
1089 | free_resources: | 1146 | free_resources: |
1090 | dma->device_free_chan_resources(dma_chan); | 1147 | dma->device_free_chan_resources(dma_chan); |
1091 | out: | 1148 | out: |
@@ -1126,12 +1183,7 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan) | |||
1126 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | 1183 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); |
1127 | writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | 1184 | writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); |
1128 | 1185 | ||
1129 | /* -= IOAT ver.3 workarounds =- */ | 1186 | /* clear any pending errors */ |
1130 | /* Write CHANERRMSK_INT with 3E07h to mask out the errors | ||
1131 | * that can cause stability issues for IOAT ver.3, and clear any | ||
1132 | * pending errors | ||
1133 | */ | ||
1134 | pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); | ||
1135 | err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); | 1187 | err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); |
1136 | if (err) { | 1188 | if (err) { |
1137 | dev_err(&pdev->dev, "channel error register unreachable\n"); | 1189 | dev_err(&pdev->dev, "channel error register unreachable\n"); |
@@ -1187,6 +1239,26 @@ static bool is_snb_ioat(struct pci_dev *pdev) | |||
1187 | } | 1239 | } |
1188 | } | 1240 | } |
1189 | 1241 | ||
1242 | static bool is_ivb_ioat(struct pci_dev *pdev) | ||
1243 | { | ||
1244 | switch (pdev->device) { | ||
1245 | case PCI_DEVICE_ID_INTEL_IOAT_IVB0: | ||
1246 | case PCI_DEVICE_ID_INTEL_IOAT_IVB1: | ||
1247 | case PCI_DEVICE_ID_INTEL_IOAT_IVB2: | ||
1248 | case PCI_DEVICE_ID_INTEL_IOAT_IVB3: | ||
1249 | case PCI_DEVICE_ID_INTEL_IOAT_IVB4: | ||
1250 | case PCI_DEVICE_ID_INTEL_IOAT_IVB5: | ||
1251 | case PCI_DEVICE_ID_INTEL_IOAT_IVB6: | ||
1252 | case PCI_DEVICE_ID_INTEL_IOAT_IVB7: | ||
1253 | case PCI_DEVICE_ID_INTEL_IOAT_IVB8: | ||
1254 | case PCI_DEVICE_ID_INTEL_IOAT_IVB9: | ||
1255 | return true; | ||
1256 | default: | ||
1257 | return false; | ||
1258 | } | ||
1259 | |||
1260 | } | ||
1261 | |||
1190 | int ioat3_dma_probe(struct ioatdma_device *device, int dca) | 1262 | int ioat3_dma_probe(struct ioatdma_device *device, int dca) |
1191 | { | 1263 | { |
1192 | struct pci_dev *pdev = device->pdev; | 1264 | struct pci_dev *pdev = device->pdev; |
@@ -1207,7 +1279,7 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
1207 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; | 1279 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; |
1208 | dma->device_free_chan_resources = ioat2_free_chan_resources; | 1280 | dma->device_free_chan_resources = ioat2_free_chan_resources; |
1209 | 1281 | ||
1210 | if (is_jf_ioat(pdev) || is_snb_ioat(pdev)) | 1282 | if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev)) |
1211 | dma->copy_align = 6; | 1283 | dma->copy_align = 6; |
1212 | 1284 | ||
1213 | dma_cap_set(DMA_INTERRUPT, dma->cap_mask); | 1285 | dma_cap_set(DMA_INTERRUPT, dma->cap_mask); |
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index d2ff3fda0b18..7cb74c62c719 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h | |||
@@ -35,6 +35,17 @@ | |||
35 | #define IOAT_VER_3_0 0x30 /* Version 3.0 */ | 35 | #define IOAT_VER_3_0 0x30 /* Version 3.0 */ |
36 | #define IOAT_VER_3_2 0x32 /* Version 3.2 */ | 36 | #define IOAT_VER_3_2 0x32 /* Version 3.2 */ |
37 | 37 | ||
38 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20 | ||
39 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21 | ||
40 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22 | ||
41 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23 | ||
42 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24 | ||
43 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25 | ||
44 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26 | ||
45 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27 | ||
46 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e | ||
47 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f | ||
48 | |||
38 | int system_has_dca_enabled(struct pci_dev *pdev); | 49 | int system_has_dca_enabled(struct pci_dev *pdev); |
39 | 50 | ||
40 | struct ioat_dma_descriptor { | 51 | struct ioat_dma_descriptor { |
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 4f686c527ab6..71c7ecd80fac 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c | |||
@@ -40,17 +40,6 @@ MODULE_VERSION(IOAT_DMA_VERSION); | |||
40 | MODULE_LICENSE("Dual BSD/GPL"); | 40 | MODULE_LICENSE("Dual BSD/GPL"); |
41 | MODULE_AUTHOR("Intel Corporation"); | 41 | MODULE_AUTHOR("Intel Corporation"); |
42 | 42 | ||
43 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20 | ||
44 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21 | ||
45 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22 | ||
46 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23 | ||
47 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24 | ||
48 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25 | ||
49 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26 | ||
50 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27 | ||
51 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e | ||
52 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f | ||
53 | |||
54 | static struct pci_device_id ioat_pci_tbl[] = { | 43 | static struct pci_device_id ioat_pci_tbl[] = { |
55 | /* I/OAT v1 platforms */ | 44 | /* I/OAT v1 platforms */ |
56 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, | 45 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, |
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index eacb8be99812..7dafb9f3785f 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -936,7 +936,7 @@ static irqreturn_t iop_adma_err_handler(int irq, void *data) | |||
936 | struct iop_adma_chan *chan = data; | 936 | struct iop_adma_chan *chan = data; |
937 | unsigned long status = iop_chan_get_status(chan); | 937 | unsigned long status = iop_chan_get_status(chan); |
938 | 938 | ||
939 | dev_printk(KERN_ERR, chan->device->common.dev, | 939 | dev_err(chan->device->common.dev, |
940 | "error ( %s%s%s%s%s%s%s)\n", | 940 | "error ( %s%s%s%s%s%s%s)\n", |
941 | iop_is_err_int_parity(status, chan) ? "int_parity " : "", | 941 | iop_is_err_int_parity(status, chan) ? "int_parity " : "", |
942 | iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "", | 942 | iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "", |
@@ -1017,7 +1017,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device) | |||
1017 | 1017 | ||
1018 | if (iop_adma_status(dma_chan, cookie, NULL) != | 1018 | if (iop_adma_status(dma_chan, cookie, NULL) != |
1019 | DMA_SUCCESS) { | 1019 | DMA_SUCCESS) { |
1020 | dev_printk(KERN_ERR, dma_chan->device->dev, | 1020 | dev_err(dma_chan->device->dev, |
1021 | "Self-test copy timed out, disabling\n"); | 1021 | "Self-test copy timed out, disabling\n"); |
1022 | err = -ENODEV; | 1022 | err = -ENODEV; |
1023 | goto free_resources; | 1023 | goto free_resources; |
@@ -1027,7 +1027,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device) | |||
1027 | dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma, | 1027 | dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma, |
1028 | IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE); | 1028 | IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE); |
1029 | if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) { | 1029 | if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) { |
1030 | dev_printk(KERN_ERR, dma_chan->device->dev, | 1030 | dev_err(dma_chan->device->dev, |
1031 | "Self-test copy failed compare, disabling\n"); | 1031 | "Self-test copy failed compare, disabling\n"); |
1032 | err = -ENODEV; | 1032 | err = -ENODEV; |
1033 | goto free_resources; | 1033 | goto free_resources; |
@@ -1117,7 +1117,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) | |||
1117 | 1117 | ||
1118 | if (iop_adma_status(dma_chan, cookie, NULL) != | 1118 | if (iop_adma_status(dma_chan, cookie, NULL) != |
1119 | DMA_SUCCESS) { | 1119 | DMA_SUCCESS) { |
1120 | dev_printk(KERN_ERR, dma_chan->device->dev, | 1120 | dev_err(dma_chan->device->dev, |
1121 | "Self-test xor timed out, disabling\n"); | 1121 | "Self-test xor timed out, disabling\n"); |
1122 | err = -ENODEV; | 1122 | err = -ENODEV; |
1123 | goto free_resources; | 1123 | goto free_resources; |
@@ -1129,7 +1129,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) | |||
1129 | for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { | 1129 | for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { |
1130 | u32 *ptr = page_address(dest); | 1130 | u32 *ptr = page_address(dest); |
1131 | if (ptr[i] != cmp_word) { | 1131 | if (ptr[i] != cmp_word) { |
1132 | dev_printk(KERN_ERR, dma_chan->device->dev, | 1132 | dev_err(dma_chan->device->dev, |
1133 | "Self-test xor failed compare, disabling\n"); | 1133 | "Self-test xor failed compare, disabling\n"); |
1134 | err = -ENODEV; | 1134 | err = -ENODEV; |
1135 | goto free_resources; | 1135 | goto free_resources; |
@@ -1163,14 +1163,14 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) | |||
1163 | msleep(8); | 1163 | msleep(8); |
1164 | 1164 | ||
1165 | if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1165 | if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { |
1166 | dev_printk(KERN_ERR, dma_chan->device->dev, | 1166 | dev_err(dma_chan->device->dev, |
1167 | "Self-test zero sum timed out, disabling\n"); | 1167 | "Self-test zero sum timed out, disabling\n"); |
1168 | err = -ENODEV; | 1168 | err = -ENODEV; |
1169 | goto free_resources; | 1169 | goto free_resources; |
1170 | } | 1170 | } |
1171 | 1171 | ||
1172 | if (zero_sum_result != 0) { | 1172 | if (zero_sum_result != 0) { |
1173 | dev_printk(KERN_ERR, dma_chan->device->dev, | 1173 | dev_err(dma_chan->device->dev, |
1174 | "Self-test zero sum failed compare, disabling\n"); | 1174 | "Self-test zero sum failed compare, disabling\n"); |
1175 | err = -ENODEV; | 1175 | err = -ENODEV; |
1176 | goto free_resources; | 1176 | goto free_resources; |
@@ -1187,7 +1187,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) | |||
1187 | msleep(8); | 1187 | msleep(8); |
1188 | 1188 | ||
1189 | if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1189 | if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { |
1190 | dev_printk(KERN_ERR, dma_chan->device->dev, | 1190 | dev_err(dma_chan->device->dev, |
1191 | "Self-test memset timed out, disabling\n"); | 1191 | "Self-test memset timed out, disabling\n"); |
1192 | err = -ENODEV; | 1192 | err = -ENODEV; |
1193 | goto free_resources; | 1193 | goto free_resources; |
@@ -1196,7 +1196,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) | |||
1196 | for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { | 1196 | for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { |
1197 | u32 *ptr = page_address(dest); | 1197 | u32 *ptr = page_address(dest); |
1198 | if (ptr[i]) { | 1198 | if (ptr[i]) { |
1199 | dev_printk(KERN_ERR, dma_chan->device->dev, | 1199 | dev_err(dma_chan->device->dev, |
1200 | "Self-test memset failed compare, disabling\n"); | 1200 | "Self-test memset failed compare, disabling\n"); |
1201 | err = -ENODEV; | 1201 | err = -ENODEV; |
1202 | goto free_resources; | 1202 | goto free_resources; |
@@ -1219,14 +1219,14 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) | |||
1219 | msleep(8); | 1219 | msleep(8); |
1220 | 1220 | ||
1221 | if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1221 | if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { |
1222 | dev_printk(KERN_ERR, dma_chan->device->dev, | 1222 | dev_err(dma_chan->device->dev, |
1223 | "Self-test non-zero sum timed out, disabling\n"); | 1223 | "Self-test non-zero sum timed out, disabling\n"); |
1224 | err = -ENODEV; | 1224 | err = -ENODEV; |
1225 | goto free_resources; | 1225 | goto free_resources; |
1226 | } | 1226 | } |
1227 | 1227 | ||
1228 | if (zero_sum_result != 1) { | 1228 | if (zero_sum_result != 1) { |
1229 | dev_printk(KERN_ERR, dma_chan->device->dev, | 1229 | dev_err(dma_chan->device->dev, |
1230 | "Self-test non-zero sum failed compare, disabling\n"); | 1230 | "Self-test non-zero sum failed compare, disabling\n"); |
1231 | err = -ENODEV; | 1231 | err = -ENODEV; |
1232 | goto free_resources; | 1232 | goto free_resources; |
@@ -1579,15 +1579,14 @@ static int iop_adma_probe(struct platform_device *pdev) | |||
1579 | goto err_free_iop_chan; | 1579 | goto err_free_iop_chan; |
1580 | } | 1580 | } |
1581 | 1581 | ||
1582 | dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: " | 1582 | dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s%s)\n", |
1583 | "( %s%s%s%s%s%s%s)\n", | 1583 | dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", |
1584 | dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", | 1584 | dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", |
1585 | dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", | 1585 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", |
1586 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", | 1586 | dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "", |
1587 | dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "", | 1587 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", |
1588 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", | 1588 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", |
1589 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", | 1589 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); |
1590 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); | ||
1591 | 1590 | ||
1592 | dma_async_device_register(dma_dev); | 1591 | dma_async_device_register(dma_dev); |
1593 | goto out; | 1592 | goto out; |
@@ -1651,8 +1650,8 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) | |||
1651 | /* run the descriptor */ | 1650 | /* run the descriptor */ |
1652 | iop_chan_enable(iop_chan); | 1651 | iop_chan_enable(iop_chan); |
1653 | } else | 1652 | } else |
1654 | dev_printk(KERN_ERR, iop_chan->device->common.dev, | 1653 | dev_err(iop_chan->device->common.dev, |
1655 | "failed to allocate null descriptor\n"); | 1654 | "failed to allocate null descriptor\n"); |
1656 | spin_unlock_bh(&iop_chan->lock); | 1655 | spin_unlock_bh(&iop_chan->lock); |
1657 | } | 1656 | } |
1658 | 1657 | ||
@@ -1704,7 +1703,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) | |||
1704 | /* run the descriptor */ | 1703 | /* run the descriptor */ |
1705 | iop_chan_enable(iop_chan); | 1704 | iop_chan_enable(iop_chan); |
1706 | } else | 1705 | } else |
1707 | dev_printk(KERN_ERR, iop_chan->device->common.dev, | 1706 | dev_err(iop_chan->device->common.dev, |
1708 | "failed to allocate null descriptor\n"); | 1707 | "failed to allocate null descriptor\n"); |
1709 | spin_unlock_bh(&iop_chan->lock); | 1708 | spin_unlock_bh(&iop_chan->lock); |
1710 | } | 1709 | } |
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 65855373cee6..8c61d17a86bf 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -1347,7 +1347,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan | |||
1347 | chan->chan_id != IDMAC_IC_7) | 1347 | chan->chan_id != IDMAC_IC_7) |
1348 | return NULL; | 1348 | return NULL; |
1349 | 1349 | ||
1350 | if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) { | 1350 | if (!is_slave_direction(direction)) { |
1351 | dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); | 1351 | dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); |
1352 | return NULL; | 1352 | return NULL; |
1353 | } | 1353 | } |
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index a5ee37d5320f..2e284a4438bc 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c | |||
@@ -44,7 +44,6 @@ static void ipu_write_reg(struct ipu *ipu, u32 value, unsigned long reg) | |||
44 | struct ipu_irq_bank { | 44 | struct ipu_irq_bank { |
45 | unsigned int control; | 45 | unsigned int control; |
46 | unsigned int status; | 46 | unsigned int status; |
47 | spinlock_t lock; | ||
48 | struct ipu *ipu; | 47 | struct ipu *ipu; |
49 | }; | 48 | }; |
50 | 49 | ||
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index dc7466563507..c26699f9c4df 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c | |||
@@ -618,10 +618,8 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | |||
618 | else if (maxburst == 32) | 618 | else if (maxburst == 32) |
619 | chan->dcmd |= DCMD_BURST32; | 619 | chan->dcmd |= DCMD_BURST32; |
620 | 620 | ||
621 | if (cfg) { | 621 | chan->dir = cfg->direction; |
622 | chan->dir = cfg->direction; | 622 | chan->drcmr = cfg->slave_id; |
623 | chan->drcmr = cfg->slave_id; | ||
624 | } | ||
625 | chan->dev_addr = addr; | 623 | chan->dev_addr = addr; |
626 | break; | 624 | break; |
627 | default: | 625 | default: |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index e17fad03cb80..d64ae14f2706 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -210,7 +210,7 @@ static void mv_set_mode(struct mv_xor_chan *chan, | |||
210 | break; | 210 | break; |
211 | default: | 211 | default: |
212 | dev_err(mv_chan_to_devp(chan), | 212 | dev_err(mv_chan_to_devp(chan), |
213 | "error: unsupported operation %d.\n", | 213 | "error: unsupported operation %d\n", |
214 | type); | 214 | type); |
215 | BUG(); | 215 | BUG(); |
216 | return; | 216 | return; |
@@ -828,28 +828,22 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan) | |||
828 | u32 val; | 828 | u32 val; |
829 | 829 | ||
830 | val = __raw_readl(XOR_CONFIG(chan)); | 830 | val = __raw_readl(XOR_CONFIG(chan)); |
831 | dev_err(mv_chan_to_devp(chan), | 831 | dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val); |
832 | "config 0x%08x.\n", val); | ||
833 | 832 | ||
834 | val = __raw_readl(XOR_ACTIVATION(chan)); | 833 | val = __raw_readl(XOR_ACTIVATION(chan)); |
835 | dev_err(mv_chan_to_devp(chan), | 834 | dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val); |
836 | "activation 0x%08x.\n", val); | ||
837 | 835 | ||
838 | val = __raw_readl(XOR_INTR_CAUSE(chan)); | 836 | val = __raw_readl(XOR_INTR_CAUSE(chan)); |
839 | dev_err(mv_chan_to_devp(chan), | 837 | dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val); |
840 | "intr cause 0x%08x.\n", val); | ||
841 | 838 | ||
842 | val = __raw_readl(XOR_INTR_MASK(chan)); | 839 | val = __raw_readl(XOR_INTR_MASK(chan)); |
843 | dev_err(mv_chan_to_devp(chan), | 840 | dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val); |
844 | "intr mask 0x%08x.\n", val); | ||
845 | 841 | ||
846 | val = __raw_readl(XOR_ERROR_CAUSE(chan)); | 842 | val = __raw_readl(XOR_ERROR_CAUSE(chan)); |
847 | dev_err(mv_chan_to_devp(chan), | 843 | dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val); |
848 | "error cause 0x%08x.\n", val); | ||
849 | 844 | ||
850 | val = __raw_readl(XOR_ERROR_ADDR(chan)); | 845 | val = __raw_readl(XOR_ERROR_ADDR(chan)); |
851 | dev_err(mv_chan_to_devp(chan), | 846 | dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val); |
852 | "error addr 0x%08x.\n", val); | ||
853 | } | 847 | } |
854 | 848 | ||
855 | static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, | 849 | static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, |
@@ -862,7 +856,7 @@ static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, | |||
862 | } | 856 | } |
863 | 857 | ||
864 | dev_err(mv_chan_to_devp(chan), | 858 | dev_err(mv_chan_to_devp(chan), |
865 | "error on chan %d. intr cause 0x%08x.\n", | 859 | "error on chan %d. intr cause 0x%08x\n", |
866 | chan->idx, intr_cause); | 860 | chan->idx, intr_cause); |
867 | 861 | ||
868 | mv_dump_xor_regs(chan); | 862 | mv_dump_xor_regs(chan); |
@@ -1052,9 +1046,8 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) | |||
1052 | u32 *ptr = page_address(dest); | 1046 | u32 *ptr = page_address(dest); |
1053 | if (ptr[i] != cmp_word) { | 1047 | if (ptr[i] != cmp_word) { |
1054 | dev_err(dma_chan->device->dev, | 1048 | dev_err(dma_chan->device->dev, |
1055 | "Self-test xor failed compare, disabling." | 1049 | "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n", |
1056 | " index %d, data %x, expected %x\n", i, | 1050 | i, ptr[i], cmp_word); |
1057 | ptr[i], cmp_word); | ||
1058 | err = -ENODEV; | 1051 | err = -ENODEV; |
1059 | goto free_resources; | 1052 | goto free_resources; |
1060 | } | 1053 | } |
@@ -1194,12 +1187,11 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1194 | goto err_free_irq; | 1187 | goto err_free_irq; |
1195 | } | 1188 | } |
1196 | 1189 | ||
1197 | dev_info(&pdev->dev, "Marvell XOR: " | 1190 | dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n", |
1198 | "( %s%s%s%s)\n", | 1191 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", |
1199 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", | 1192 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", |
1200 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", | 1193 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", |
1201 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", | 1194 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); |
1202 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); | ||
1203 | 1195 | ||
1204 | dma_async_device_register(dma_dev); | 1196 | dma_async_device_register(dma_dev); |
1205 | return mv_chan; | 1197 | return mv_chan; |
@@ -1253,7 +1245,7 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1253 | struct resource *res; | 1245 | struct resource *res; |
1254 | int i, ret; | 1246 | int i, ret; |
1255 | 1247 | ||
1256 | dev_notice(&pdev->dev, "Marvell XOR driver\n"); | 1248 | dev_notice(&pdev->dev, "Marvell shared XOR driver\n"); |
1257 | 1249 | ||
1258 | xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL); | 1250 | xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL); |
1259 | if (!xordev) | 1251 | if (!xordev) |
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 9f02e794b12b..8f6d30d37c45 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -109,7 +109,7 @@ struct mxs_dma_chan { | |||
109 | struct dma_chan chan; | 109 | struct dma_chan chan; |
110 | struct dma_async_tx_descriptor desc; | 110 | struct dma_async_tx_descriptor desc; |
111 | struct tasklet_struct tasklet; | 111 | struct tasklet_struct tasklet; |
112 | int chan_irq; | 112 | unsigned int chan_irq; |
113 | struct mxs_dma_ccw *ccw; | 113 | struct mxs_dma_ccw *ccw; |
114 | dma_addr_t ccw_phys; | 114 | dma_addr_t ccw_phys; |
115 | int desc_count; | 115 | int desc_count; |
@@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
441 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 441 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
442 | struct mxs_dma_ccw *ccw; | 442 | struct mxs_dma_ccw *ccw; |
443 | struct scatterlist *sg; | 443 | struct scatterlist *sg; |
444 | int i, j; | 444 | u32 i, j; |
445 | u32 *pio; | 445 | u32 *pio; |
446 | bool append = flags & DMA_PREP_INTERRUPT; | 446 | bool append = flags & DMA_PREP_INTERRUPT; |
447 | int idx = append ? mxs_chan->desc_count : 0; | 447 | int idx = append ? mxs_chan->desc_count : 0; |
@@ -537,8 +537,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | |||
537 | { | 537 | { |
538 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 538 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
539 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 539 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
540 | int num_periods = buf_len / period_len; | 540 | u32 num_periods = buf_len / period_len; |
541 | int i = 0, buf = 0; | 541 | u32 i = 0, buf = 0; |
542 | 542 | ||
543 | if (mxs_chan->status == DMA_IN_PROGRESS) | 543 | if (mxs_chan->status == DMA_IN_PROGRESS) |
544 | return NULL; | 544 | return NULL; |
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c new file mode 100644 index 000000000000..69d04d28b1ef --- /dev/null +++ b/drivers/dma/of-dma.c | |||
@@ -0,0 +1,267 @@ | |||
1 | /* | ||
2 | * Device tree helpers for DMA request / controller | ||
3 | * | ||
4 | * Based on of_gpio.c | ||
5 | * | ||
6 | * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/device.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/rculist.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/of.h> | ||
19 | #include <linux/of_dma.h> | ||
20 | |||
21 | static LIST_HEAD(of_dma_list); | ||
22 | static DEFINE_SPINLOCK(of_dma_lock); | ||
23 | |||
24 | /** | ||
25 | * of_dma_get_controller - Get a DMA controller in DT DMA helpers list | ||
26 | * @dma_spec: pointer to DMA specifier as found in the device tree | ||
27 | * | ||
28 | * Finds a DMA controller with a matching device node and number of dma cells | ||
29 | * in a list of registered DMA controllers. If a match is found, the use_count | ||
30 | * variable is incremented and a valid pointer to the stored DMA data is returned. | ||
31 | * A NULL pointer is returned if no match is found. | ||
32 | */ | ||
33 | static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec) | ||
34 | { | ||
35 | struct of_dma *ofdma; | ||
36 | |||
37 | spin_lock(&of_dma_lock); | ||
38 | |||
39 | if (list_empty(&of_dma_list)) { | ||
40 | spin_unlock(&of_dma_lock); | ||
41 | return NULL; | ||
42 | } | ||
43 | |||
44 | list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) | ||
45 | if ((ofdma->of_node == dma_spec->np) && | ||
46 | (ofdma->of_dma_nbcells == dma_spec->args_count)) { | ||
47 | ofdma->use_count++; | ||
48 | spin_unlock(&of_dma_lock); | ||
49 | return ofdma; | ||
50 | } | ||
51 | |||
52 | spin_unlock(&of_dma_lock); | ||
53 | |||
54 | pr_debug("%s: can't find DMA controller %s\n", __func__, | ||
55 | dma_spec->np->full_name); | ||
56 | |||
57 | return NULL; | ||
58 | } | ||
59 | |||
60 | /** | ||
61 | * of_dma_put_controller - Decrement use count for a registered DMA controller | ||
62 | * @of_dma: pointer to DMA controller data | ||
63 | * | ||
64 | * Decrements the use_count variable in the DMA data structure. This function | ||
65 | * should be called only when a valid pointer is returned from | ||
66 | * of_dma_get_controller() and no further accesses to data referenced by that | ||
67 | * pointer are needed. | ||
68 | */ | ||
69 | static void of_dma_put_controller(struct of_dma *ofdma) | ||
70 | { | ||
71 | spin_lock(&of_dma_lock); | ||
72 | ofdma->use_count--; | ||
73 | spin_unlock(&of_dma_lock); | ||
74 | } | ||
75 | |||
76 | /** | ||
77 | * of_dma_controller_register - Register a DMA controller to DT DMA helpers | ||
78 | * @np: device node of DMA controller | ||
79 | * @of_dma_xlate: translation function which converts a phandle | ||
80 | * arguments list into a dma_chan structure | ||
81 | * @data: pointer to controller specific data to be used by | ||
82 | * translation function | ||
83 | * | ||
84 | * Returns 0 on success or appropriate errno value on error. | ||
85 | * | ||
86 | * Allocated memory should be freed with an appropriate of_dma_controller_free() | ||
87 | * call. | ||
88 | */ | ||
89 | int of_dma_controller_register(struct device_node *np, | ||
90 | struct dma_chan *(*of_dma_xlate) | ||
91 | (struct of_phandle_args *, struct of_dma *), | ||
92 | void *data) | ||
93 | { | ||
94 | struct of_dma *ofdma; | ||
95 | int nbcells; | ||
96 | |||
97 | if (!np || !of_dma_xlate) { | ||
98 | pr_err("%s: not enough information provided\n", __func__); | ||
99 | return -EINVAL; | ||
100 | } | ||
101 | |||
102 | ofdma = kzalloc(sizeof(*ofdma), GFP_KERNEL); | ||
103 | if (!ofdma) | ||
104 | return -ENOMEM; | ||
105 | |||
106 | nbcells = be32_to_cpup(of_get_property(np, "#dma-cells", NULL)); | ||
107 | if (!nbcells) { | ||
108 | pr_err("%s: #dma-cells property is missing or invalid\n", | ||
109 | __func__); | ||
110 | kfree(ofdma); | ||
111 | return -EINVAL; | ||
112 | } | ||
113 | |||
114 | ofdma->of_node = np; | ||
115 | ofdma->of_dma_nbcells = nbcells; | ||
116 | ofdma->of_dma_xlate = of_dma_xlate; | ||
117 | ofdma->of_dma_data = data; | ||
118 | ofdma->use_count = 0; | ||
119 | |||
120 | /* Now queue of_dma controller structure in list */ | ||
121 | spin_lock(&of_dma_lock); | ||
122 | list_add_tail(&ofdma->of_dma_controllers, &of_dma_list); | ||
123 | spin_unlock(&of_dma_lock); | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | EXPORT_SYMBOL_GPL(of_dma_controller_register); | ||
128 | |||
129 | /** | ||
130 | * of_dma_controller_free - Remove a DMA controller from DT DMA helpers list | ||
131 | * @np: device node of DMA controller | ||
132 | * | ||
133 | * Memory allocated by of_dma_controller_register() is freed here. | ||
134 | */ | ||
135 | int of_dma_controller_free(struct device_node *np) | ||
136 | { | ||
137 | struct of_dma *ofdma; | ||
138 | |||
139 | spin_lock(&of_dma_lock); | ||
140 | |||
141 | if (list_empty(&of_dma_list)) { | ||
142 | spin_unlock(&of_dma_lock); | ||
143 | return -ENODEV; | ||
144 | } | ||
145 | |||
146 | list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) | ||
147 | if (ofdma->of_node == np) { | ||
148 | if (ofdma->use_count) { | ||
149 | spin_unlock(&of_dma_lock); | ||
150 | return -EBUSY; | ||
151 | } | ||
152 | |||
153 | list_del(&ofdma->of_dma_controllers); | ||
154 | spin_unlock(&of_dma_lock); | ||
155 | kfree(ofdma); | ||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | spin_unlock(&of_dma_lock); | ||
160 | return -ENODEV; | ||
161 | } | ||
162 | EXPORT_SYMBOL_GPL(of_dma_controller_free); | ||
163 | |||
164 | /** | ||
165 | * of_dma_match_channel - Check if a DMA specifier matches name | ||
166 | * @np: device node to look for DMA channels | ||
167 | * @name: channel name to be matched | ||
168 | * @index: index of DMA specifier in list of DMA specifiers | ||
169 | * @dma_spec: pointer to DMA specifier as found in the device tree | ||
170 | * | ||
171 | * Check if the DMA specifier pointed to by the index in a list of DMA | ||
172 | * specifiers matches the name provided. Returns 0 if the name matches and | ||
173 | * a valid pointer to the DMA specifier is found. Otherwise returns -ENODEV. | ||
174 | */ | ||
175 | static int of_dma_match_channel(struct device_node *np, char *name, int index, | ||
176 | struct of_phandle_args *dma_spec) | ||
177 | { | ||
178 | const char *s; | ||
179 | |||
180 | if (of_property_read_string_index(np, "dma-names", index, &s)) | ||
181 | return -ENODEV; | ||
182 | |||
183 | if (strcmp(name, s)) | ||
184 | return -ENODEV; | ||
185 | |||
186 | if (of_parse_phandle_with_args(np, "dmas", "#dma-cells", index, | ||
187 | dma_spec)) | ||
188 | return -ENODEV; | ||
189 | |||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | /** | ||
194 | * of_dma_request_slave_channel - Get the DMA slave channel | ||
195 | * @np: device node to get DMA request from | ||
196 | * @name: name of desired channel | ||
197 | * | ||
198 | * Returns pointer to appropriate dma channel on success or NULL on error. | ||
199 | */ | ||
200 | struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | ||
201 | char *name) | ||
202 | { | ||
203 | struct of_phandle_args dma_spec; | ||
204 | struct of_dma *ofdma; | ||
205 | struct dma_chan *chan; | ||
206 | int count, i; | ||
207 | |||
208 | if (!np || !name) { | ||
209 | pr_err("%s: not enough information provided\n", __func__); | ||
210 | return NULL; | ||
211 | } | ||
212 | |||
213 | count = of_property_count_strings(np, "dma-names"); | ||
214 | if (count < 0) { | ||
215 | pr_err("%s: dma-names property missing or empty\n", __func__); | ||
216 | return NULL; | ||
217 | } | ||
218 | |||
219 | for (i = 0; i < count; i++) { | ||
220 | if (of_dma_match_channel(np, name, i, &dma_spec)) | ||
221 | continue; | ||
222 | |||
223 | ofdma = of_dma_get_controller(&dma_spec); | ||
224 | |||
225 | if (!ofdma) | ||
226 | continue; | ||
227 | |||
228 | chan = ofdma->of_dma_xlate(&dma_spec, ofdma); | ||
229 | |||
230 | of_dma_put_controller(ofdma); | ||
231 | |||
232 | of_node_put(dma_spec.np); | ||
233 | |||
234 | if (chan) | ||
235 | return chan; | ||
236 | } | ||
237 | |||
238 | return NULL; | ||
239 | } | ||
240 | |||
241 | /** | ||
242 | * of_dma_simple_xlate - Simple DMA engine translation function | ||
243 | * @dma_spec: pointer to DMA specifier as found in the device tree | ||
244 | * @of_dma: pointer to DMA controller data | ||
245 | * | ||
246 | * A simple translation function for devices that use a 32-bit value for the | ||
247 | * filter_param when calling the DMA engine dma_request_channel() function. | ||
248 | * Note that this translation function requires that #dma-cells is equal to 1 | ||
249 | * and the argument of the dma specifier is the 32-bit filter_param. Returns | ||
250 | * pointer to appropriate dma channel on success or NULL on error. | ||
251 | */ | ||
252 | struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, | ||
253 | struct of_dma *ofdma) | ||
254 | { | ||
255 | int count = dma_spec->args_count; | ||
256 | struct of_dma_filter_info *info = ofdma->of_dma_data; | ||
257 | |||
258 | if (!info || !info->filter_fn) | ||
259 | return NULL; | ||
260 | |||
261 | if (count != 1) | ||
262 | return NULL; | ||
263 | |||
264 | return dma_request_channel(info->dma_cap, info->filter_fn, | ||
265 | &dma_spec->args[0]); | ||
266 | } | ||
267 | EXPORT_SYMBOL_GPL(of_dma_simple_xlate); | ||
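For orientation, a minimal sketch (not part of the diff above) of how a DMA controller driver could plug into these helpers when #dma-cells is 1, using of_dma_simple_xlate() together with struct of_dma_filter_info. The my_dmac_* names and the chan_id comparison are assumptions for illustration, not taken from any in-tree driver:

#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_dma.h>

/* Assumed filter: match a channel against the 32-bit request id that
 * of_dma_simple_xlate() forwards from the single dma-cells argument. */
static bool my_dmac_filter(struct dma_chan *chan, void *param)
{
	u32 req = *(u32 *)param;

	return chan->chan_id == req;
}

static struct of_dma_filter_info my_dmac_info;

static int my_dmac_of_register(struct device_node *np)
{
	dma_cap_zero(my_dmac_info.dma_cap);
	dma_cap_set(DMA_SLAVE, my_dmac_info.dma_cap);
	my_dmac_info.filter_fn = my_dmac_filter;

	/* of_dma_simple_xlate() requires #dma-cells = <1> */
	return of_dma_controller_register(np, of_dma_simple_xlate,
					  &my_dmac_info);
}

static void my_dmac_of_unregister(struct device_node *np)
{
	of_dma_controller_free(np);
}

Note that of_dma_controller_free() returns -EBUSY while the controller entry is still in use, so a real driver would check its return value before tearing anything down.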
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 3f2617255ef2..d01faeb0f27c 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -1029,18 +1029,7 @@ static struct pci_driver pch_dma_driver = { | |||
1029 | #endif | 1029 | #endif |
1030 | }; | 1030 | }; |
1031 | 1031 | ||
1032 | static int __init pch_dma_init(void) | 1032 | module_pci_driver(pch_dma_driver); |
1033 | { | ||
1034 | return pci_register_driver(&pch_dma_driver); | ||
1035 | } | ||
1036 | |||
1037 | static void __exit pch_dma_exit(void) | ||
1038 | { | ||
1039 | pci_unregister_driver(&pch_dma_driver); | ||
1040 | } | ||
1041 | |||
1042 | module_init(pch_dma_init); | ||
1043 | module_exit(pch_dma_exit); | ||
1044 | 1033 | ||
1045 | MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH " | 1034 | MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH " |
1046 | "DMA controller driver"); | 1035 | "DMA controller driver"); |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 80680eee0171..718153122759 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/amba/pl330.h> | 25 | #include <linux/amba/pl330.h> |
26 | #include <linux/scatterlist.h> | 26 | #include <linux/scatterlist.h> |
27 | #include <linux/of.h> | 27 | #include <linux/of.h> |
28 | #include <linux/of_dma.h> | ||
28 | 29 | ||
29 | #include "dmaengine.h" | 30 | #include "dmaengine.h" |
30 | #define PL330_MAX_CHAN 8 | 31 | #define PL330_MAX_CHAN 8 |
@@ -606,6 +607,11 @@ struct dma_pl330_desc { | |||
606 | struct dma_pl330_chan *pchan; | 607 | struct dma_pl330_chan *pchan; |
607 | }; | 608 | }; |
608 | 609 | ||
610 | struct dma_pl330_filter_args { | ||
611 | struct dma_pl330_dmac *pdmac; | ||
612 | unsigned int chan_id; | ||
613 | }; | ||
614 | |||
609 | static inline void _callback(struct pl330_req *r, enum pl330_op_err err) | 615 | static inline void _callback(struct pl330_req *r, enum pl330_op_err err) |
610 | { | 616 | { |
611 | if (r && r->xfer_cb) | 617 | if (r && r->xfer_cb) |
@@ -2352,6 +2358,16 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err) | |||
2352 | tasklet_schedule(&pch->task); | 2358 | tasklet_schedule(&pch->task); |
2353 | } | 2359 | } |
2354 | 2360 | ||
2361 | static bool pl330_dt_filter(struct dma_chan *chan, void *param) | ||
2362 | { | ||
2363 | struct dma_pl330_filter_args *fargs = param; | ||
2364 | |||
2365 | if (chan->device != &fargs->pdmac->ddma) | ||
2366 | return false; | ||
2367 | |||
2368 | return (chan->chan_id == fargs->chan_id); | ||
2369 | } | ||
2370 | |||
2355 | bool pl330_filter(struct dma_chan *chan, void *param) | 2371 | bool pl330_filter(struct dma_chan *chan, void *param) |
2356 | { | 2372 | { |
2357 | u8 *peri_id; | 2373 | u8 *peri_id; |
@@ -2359,25 +2375,35 @@ bool pl330_filter(struct dma_chan *chan, void *param) | |||
2359 | if (chan->device->dev->driver != &pl330_driver.drv) | 2375 | if (chan->device->dev->driver != &pl330_driver.drv) |
2360 | return false; | 2376 | return false; |
2361 | 2377 | ||
2362 | #ifdef CONFIG_OF | ||
2363 | if (chan->device->dev->of_node) { | ||
2364 | const __be32 *prop_value; | ||
2365 | phandle phandle; | ||
2366 | struct device_node *node; | ||
2367 | |||
2368 | prop_value = ((struct property *)param)->value; | ||
2369 | phandle = be32_to_cpup(prop_value++); | ||
2370 | node = of_find_node_by_phandle(phandle); | ||
2371 | return ((chan->private == node) && | ||
2372 | (chan->chan_id == be32_to_cpup(prop_value))); | ||
2373 | } | ||
2374 | #endif | ||
2375 | |||
2376 | peri_id = chan->private; | 2378 | peri_id = chan->private; |
2377 | return *peri_id == (unsigned)param; | 2379 | return *peri_id == (unsigned)param; |
2378 | } | 2380 | } |
2379 | EXPORT_SYMBOL(pl330_filter); | 2381 | EXPORT_SYMBOL(pl330_filter); |
2380 | 2382 | ||
2383 | static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec, | ||
2384 | struct of_dma *ofdma) | ||
2385 | { | ||
2386 | int count = dma_spec->args_count; | ||
2387 | struct dma_pl330_dmac *pdmac = ofdma->of_dma_data; | ||
2388 | struct dma_pl330_filter_args fargs; | ||
2389 | dma_cap_mask_t cap; | ||
2390 | |||
2391 | if (!pdmac) | ||
2392 | return NULL; | ||
2393 | |||
2394 | if (count != 1) | ||
2395 | return NULL; | ||
2396 | |||
2397 | fargs.pdmac = pdmac; | ||
2398 | fargs.chan_id = dma_spec->args[0]; | ||
2399 | |||
2400 | dma_cap_zero(cap); | ||
2401 | dma_cap_set(DMA_SLAVE, cap); | ||
2402 | dma_cap_set(DMA_CYCLIC, cap); | ||
2403 | |||
2404 | return dma_request_channel(cap, pl330_dt_filter, &fargs); | ||
2405 | } | ||
2406 | |||
2381 | static int pl330_alloc_chan_resources(struct dma_chan *chan) | 2407 | static int pl330_alloc_chan_resources(struct dma_chan *chan) |
2382 | { | 2408 | { |
2383 | struct dma_pl330_chan *pch = to_pchan(chan); | 2409 | struct dma_pl330_chan *pch = to_pchan(chan); |
@@ -2866,7 +2892,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2866 | pdat = adev->dev.platform_data; | 2892 | pdat = adev->dev.platform_data; |
2867 | 2893 | ||
2868 | /* Allocate a new DMAC and its Channels */ | 2894 | /* Allocate a new DMAC and its Channels */ |
2869 | pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL); | 2895 | pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL); |
2870 | if (!pdmac) { | 2896 | if (!pdmac) { |
2871 | dev_err(&adev->dev, "unable to allocate mem\n"); | 2897 | dev_err(&adev->dev, "unable to allocate mem\n"); |
2872 | return -ENOMEM; | 2898 | return -ENOMEM; |
@@ -2878,13 +2904,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2878 | pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0; | 2904 | pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0; |
2879 | 2905 | ||
2880 | res = &adev->res; | 2906 | res = &adev->res; |
2881 | request_mem_region(res->start, resource_size(res), "dma-pl330"); | 2907 | pi->base = devm_request_and_ioremap(&adev->dev, res); |
2882 | 2908 | if (!pi->base) | |
2883 | pi->base = ioremap(res->start, resource_size(res)); | 2909 | return -ENXIO; |
2884 | if (!pi->base) { | ||
2885 | ret = -ENXIO; | ||
2886 | goto probe_err1; | ||
2887 | } | ||
2888 | 2910 | ||
2889 | amba_set_drvdata(adev, pdmac); | 2911 | amba_set_drvdata(adev, pdmac); |
2890 | 2912 | ||
@@ -2892,11 +2914,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2892 | ret = request_irq(irq, pl330_irq_handler, 0, | 2914 | ret = request_irq(irq, pl330_irq_handler, 0, |
2893 | dev_name(&adev->dev), pi); | 2915 | dev_name(&adev->dev), pi); |
2894 | if (ret) | 2916 | if (ret) |
2895 | goto probe_err2; | 2917 | return ret; |
2896 | 2918 | ||
2897 | ret = pl330_add(pi); | 2919 | ret = pl330_add(pi); |
2898 | if (ret) | 2920 | if (ret) |
2899 | goto probe_err3; | 2921 | goto probe_err1; |
2900 | 2922 | ||
2901 | INIT_LIST_HEAD(&pdmac->desc_pool); | 2923 | INIT_LIST_HEAD(&pdmac->desc_pool); |
2902 | spin_lock_init(&pdmac->pool_lock); | 2924 | spin_lock_init(&pdmac->pool_lock); |
@@ -2918,7 +2940,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2918 | if (!pdmac->peripherals) { | 2940 | if (!pdmac->peripherals) { |
2919 | ret = -ENOMEM; | 2941 | ret = -ENOMEM; |
2920 | dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); | 2942 | dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); |
2921 | goto probe_err4; | 2943 | goto probe_err2; |
2922 | } | 2944 | } |
2923 | 2945 | ||
2924 | for (i = 0; i < num_chan; i++) { | 2946 | for (i = 0; i < num_chan; i++) { |
@@ -2962,7 +2984,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2962 | ret = dma_async_device_register(pd); | 2984 | ret = dma_async_device_register(pd); |
2963 | if (ret) { | 2985 | if (ret) { |
2964 | dev_err(&adev->dev, "unable to register DMAC\n"); | 2986 | dev_err(&adev->dev, "unable to register DMAC\n"); |
2965 | goto probe_err4; | 2987 | goto probe_err2; |
2966 | } | 2988 | } |
2967 | 2989 | ||
2968 | dev_info(&adev->dev, | 2990 | dev_info(&adev->dev, |
@@ -2973,17 +2995,20 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2973 | pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, | 2995 | pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, |
2974 | pi->pcfg.num_peri, pi->pcfg.num_events); | 2996 | pi->pcfg.num_peri, pi->pcfg.num_events); |
2975 | 2997 | ||
2998 | ret = of_dma_controller_register(adev->dev.of_node, | ||
2999 | of_dma_pl330_xlate, pdmac); | ||
3000 | if (ret) { | ||
3001 | dev_err(&adev->dev, | ||
3002 | "unable to register DMA to the generic DT DMA helpers\n"); | ||
3003 | goto probe_err2; | ||
3004 | } | ||
3005 | |||
2976 | return 0; | 3006 | return 0; |
2977 | 3007 | ||
2978 | probe_err4: | ||
2979 | pl330_del(pi); | ||
2980 | probe_err3: | ||
2981 | free_irq(irq, pi); | ||
2982 | probe_err2: | 3008 | probe_err2: |
2983 | iounmap(pi->base); | 3009 | pl330_del(pi); |
2984 | probe_err1: | 3010 | probe_err1: |
2985 | release_mem_region(res->start, resource_size(res)); | 3011 | free_irq(irq, pi); |
2986 | kfree(pdmac); | ||
2987 | 3012 | ||
2988 | return ret; | 3013 | return ret; |
2989 | } | 3014 | } |
@@ -2993,12 +3018,13 @@ static int pl330_remove(struct amba_device *adev) | |||
2993 | struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); | 3018 | struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); |
2994 | struct dma_pl330_chan *pch, *_p; | 3019 | struct dma_pl330_chan *pch, *_p; |
2995 | struct pl330_info *pi; | 3020 | struct pl330_info *pi; |
2996 | struct resource *res; | ||
2997 | int irq; | 3021 | int irq; |
2998 | 3022 | ||
2999 | if (!pdmac) | 3023 | if (!pdmac) |
3000 | return 0; | 3024 | return 0; |
3001 | 3025 | ||
3026 | of_dma_controller_free(adev->dev.of_node); | ||
3027 | |||
3002 | amba_set_drvdata(adev, NULL); | 3028 | amba_set_drvdata(adev, NULL); |
3003 | 3029 | ||
3004 | /* Idle the DMAC */ | 3030 | /* Idle the DMAC */ |
@@ -3020,13 +3046,6 @@ static int pl330_remove(struct amba_device *adev) | |||
3020 | irq = adev->irq[0]; | 3046 | irq = adev->irq[0]; |
3021 | free_irq(irq, pi); | 3047 | free_irq(irq, pi); |
3022 | 3048 | ||
3023 | iounmap(pi->base); | ||
3024 | |||
3025 | res = &adev->res; | ||
3026 | release_mem_region(res->start, resource_size(res)); | ||
3027 | |||
3028 | kfree(pdmac); | ||
3029 | |||
3030 | return 0; | 3049 | return 0; |
3031 | } | 3050 | } |
3032 | 3051 | ||
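With pl330 now registering of_dma_pl330_xlate(), a slave driver can resolve its channel from the dmas/dma-names properties instead of open-coding pl330_filter(). A rough sketch, assuming the client node names its transmit channel "tx" (the helper call pattern and the name are illustrative only):

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_dma.h>

/* Assumed client probe fragment: ask the DT helpers for the channel
 * listed under the "tx" entry of this device's dma-names property. */
static struct dma_chan *my_client_request_tx(struct device *dev)
{
	struct dma_chan *chan;

	chan = of_dma_request_slave_channel(dev->of_node, "tx");
	if (!chan)
		dev_warn(dev, "no usable \"tx\" dma channel\n");

	return chan;
}

of_dma_request_slave_channel() walks the dma-names list, matches the requested name, and hands the corresponding specifier to the controller's xlate callback, so the client never touches the filter function directly.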
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index f4cd946d259d..4acb85a10250 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c | |||
@@ -638,9 +638,6 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
638 | unsigned long flags; | 638 | unsigned long flags; |
639 | int ret; | 639 | int ret; |
640 | 640 | ||
641 | if (!chan) | ||
642 | return -EINVAL; | ||
643 | |||
644 | switch (cmd) { | 641 | switch (cmd) { |
645 | case DMA_TERMINATE_ALL: | 642 | case DMA_TERMINATE_ALL: |
646 | spin_lock_irqsave(&schan->chan_lock, flags); | 643 | spin_lock_irqsave(&schan->chan_lock, flags); |
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c index 3315e4be9b85..b70709b030d8 100644 --- a/drivers/dma/sh/shdma.c +++ b/drivers/dma/sh/shdma.c | |||
@@ -326,7 +326,7 @@ static int sh_dmae_set_slave(struct shdma_chan *schan, | |||
326 | shdma_chan); | 326 | shdma_chan); |
327 | const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id); | 327 | const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id); |
328 | if (!cfg) | 328 | if (!cfg) |
329 | return -ENODEV; | 329 | return -ENXIO; |
330 | 330 | ||
331 | if (!try) | 331 | if (!try) |
332 | sh_chan->config = cfg; | 332 | sh_chan->config = cfg; |
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 94674a96c646..1d627e2391f4 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c | |||
@@ -32,7 +32,9 @@ | |||
32 | #define SIRFSOC_DMA_CH_VALID 0x140 | 32 | #define SIRFSOC_DMA_CH_VALID 0x140 |
33 | #define SIRFSOC_DMA_CH_INT 0x144 | 33 | #define SIRFSOC_DMA_CH_INT 0x144 |
34 | #define SIRFSOC_DMA_INT_EN 0x148 | 34 | #define SIRFSOC_DMA_INT_EN 0x148 |
35 | #define SIRFSOC_DMA_INT_EN_CLR 0x14C | ||
35 | #define SIRFSOC_DMA_CH_LOOP_CTRL 0x150 | 36 | #define SIRFSOC_DMA_CH_LOOP_CTRL 0x150 |
37 | #define SIRFSOC_DMA_CH_LOOP_CTRL_CLR 0x15C | ||
36 | 38 | ||
37 | #define SIRFSOC_DMA_MODE_CTRL_BIT 4 | 39 | #define SIRFSOC_DMA_MODE_CTRL_BIT 4 |
38 | #define SIRFSOC_DMA_DIR_CTRL_BIT 5 | 40 | #define SIRFSOC_DMA_DIR_CTRL_BIT 5 |
@@ -76,6 +78,7 @@ struct sirfsoc_dma { | |||
76 | struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS]; | 78 | struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS]; |
77 | void __iomem *base; | 79 | void __iomem *base; |
78 | int irq; | 80 | int irq; |
81 | bool is_marco; | ||
79 | }; | 82 | }; |
80 | 83 | ||
81 | #define DRV_NAME "sirfsoc_dma" | 84 | #define DRV_NAME "sirfsoc_dma" |
@@ -288,17 +291,67 @@ static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan) | |||
288 | int cid = schan->chan.chan_id; | 291 | int cid = schan->chan.chan_id; |
289 | unsigned long flags; | 292 | unsigned long flags; |
290 | 293 | ||
291 | writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) & | 294 | spin_lock_irqsave(&schan->lock, flags); |
292 | ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); | ||
293 | writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); | ||
294 | 295 | ||
295 | writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) | 296 | if (!sdma->is_marco) { |
296 | & ~((1 << cid) | 1 << (cid + 16)), | 297 | writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) & |
298 | ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); | ||
299 | writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) | ||
300 | & ~((1 << cid) | 1 << (cid + 16)), | ||
297 | sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); | 301 | sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); |
302 | } else { | ||
303 | writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR); | ||
304 | writel_relaxed((1 << cid) | 1 << (cid + 16), | ||
305 | sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR); | ||
306 | } | ||
307 | |||
308 | writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); | ||
298 | 309 | ||
299 | spin_lock_irqsave(&schan->lock, flags); | ||
300 | list_splice_tail_init(&schan->active, &schan->free); | 310 | list_splice_tail_init(&schan->active, &schan->free); |
301 | list_splice_tail_init(&schan->queued, &schan->free); | 311 | list_splice_tail_init(&schan->queued, &schan->free); |
312 | |||
313 | spin_unlock_irqrestore(&schan->lock, flags); | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan) | ||
319 | { | ||
320 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); | ||
321 | int cid = schan->chan.chan_id; | ||
322 | unsigned long flags; | ||
323 | |||
324 | spin_lock_irqsave(&schan->lock, flags); | ||
325 | |||
326 | if (!sdma->is_marco) | ||
327 | writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) | ||
328 | & ~((1 << cid) | 1 << (cid + 16)), | ||
329 | sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); | ||
330 | else | ||
331 | writel_relaxed((1 << cid) | 1 << (cid + 16), | ||
332 | sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR); | ||
333 | |||
334 | spin_unlock_irqrestore(&schan->lock, flags); | ||
335 | |||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan) | ||
340 | { | ||
341 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); | ||
342 | int cid = schan->chan.chan_id; | ||
343 | unsigned long flags; | ||
344 | |||
345 | spin_lock_irqsave(&schan->lock, flags); | ||
346 | |||
347 | if (!sdma->is_marco) | ||
348 | writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) | ||
349 | | ((1 << cid) | 1 << (cid + 16)), | ||
350 | sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); | ||
351 | else | ||
352 | writel_relaxed((1 << cid) | 1 << (cid + 16), | ||
353 | sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); | ||
354 | |||
302 | spin_unlock_irqrestore(&schan->lock, flags); | 355 | spin_unlock_irqrestore(&schan->lock, flags); |
303 | 356 | ||
304 | return 0; | 357 | return 0; |
@@ -311,6 +364,10 @@ static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
311 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | 364 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); |
312 | 365 | ||
313 | switch (cmd) { | 366 | switch (cmd) { |
367 | case DMA_PAUSE: | ||
368 | return sirfsoc_dma_pause_chan(schan); | ||
369 | case DMA_RESUME: | ||
370 | return sirfsoc_dma_resume_chan(schan); | ||
314 | case DMA_TERMINATE_ALL: | 371 | case DMA_TERMINATE_ALL: |
315 | return sirfsoc_dma_terminate_all(schan); | 372 | return sirfsoc_dma_terminate_all(schan); |
316 | case DMA_SLAVE_CONFIG: | 373 | case DMA_SLAVE_CONFIG: |
@@ -568,6 +625,9 @@ static int sirfsoc_dma_probe(struct platform_device *op) | |||
568 | return -ENOMEM; | 625 | return -ENOMEM; |
569 | } | 626 | } |
570 | 627 | ||
628 | if (of_device_is_compatible(dn, "sirf,marco-dmac")) | ||
629 | sdma->is_marco = true; | ||
630 | |||
571 | if (of_property_read_u32(dn, "cell-index", &id)) { | 631 | if (of_property_read_u32(dn, "cell-index", &id)) { |
572 | dev_err(dev, "Fail to get DMAC index\n"); | 632 | dev_err(dev, "Fail to get DMAC index\n"); |
573 | return -ENODEV; | 633 | return -ENODEV; |
@@ -668,6 +728,7 @@ static int sirfsoc_dma_remove(struct platform_device *op) | |||
668 | 728 | ||
669 | static struct of_device_id sirfsoc_dma_match[] = { | 729 | static struct of_device_id sirfsoc_dma_match[] = { |
670 | { .compatible = "sirf,prima2-dmac", }, | 730 | { .compatible = "sirf,prima2-dmac", }, |
731 | { .compatible = "sirf,marco-dmac", }, | ||
671 | {}, | 732 | {}, |
672 | }; | 733 | }; |
673 | 734 | ||
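For illustration, the Marco variant adds dedicated write-one-to-clear registers (SIRFSOC_DMA_INT_EN_CLR, SIRFSOC_DMA_CH_LOOP_CTRL_CLR), so clearing a channel's bit no longer needs a read-modify-write on the shared enable register. A minimal sketch of the two access patterns used in the hunks above, with a hypothetical helper name:

    /* Sketch only: base, cid and is_marco mirror the driver's fields. */
    static void chan_int_disable(void __iomem *base, int cid, bool is_marco)
    {
    	if (!is_marco)
    		/* prima2: clear the bit via read-modify-write */
    		writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) &
    			       ~(1 << cid), base + SIRFSOC_DMA_INT_EN);
    	else
    		/* marco: a single write to the dedicated clear register */
    		writel_relaxed(1 << cid, base + SIRFSOC_DMA_INT_EN_CLR);
    }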
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 23c5573e62dd..1734feec47b1 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -53,6 +53,8 @@ | |||
53 | #define D40_ALLOC_PHY (1 << 30) | 53 | #define D40_ALLOC_PHY (1 << 30) |
54 | #define D40_ALLOC_LOG_FREE 0 | 54 | #define D40_ALLOC_LOG_FREE 0 |
55 | 55 | ||
56 | #define MAX(a, b) (((a) < (b)) ? (b) : (a)) | ||
57 | |||
56 | /** | 58 | /** |
57 | * enum 40_command - The different commands and/or statuses. | 59 | * enum 40_command - The different commands and/or statuses. |
58 | * | 60 | * |
@@ -100,8 +102,19 @@ static u32 d40_backup_regs[] = { | |||
100 | 102 | ||
101 | #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs) | 103 | #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs) |
102 | 104 | ||
103 | /* TODO: Check if all these registers have to be saved/restored on dma40 v3 */ | 105 | /* |
104 | static u32 d40_backup_regs_v3[] = { | 106 | * since 9540 and 8540 have the same HW revision | ||
107 | use v4a for 9540 or earlier | ||
108 | * use v4b for 8540 or later | ||
109 | * HW revision: | ||
110 | * DB8500ed has revision 0 | ||
111 | * DB8500v1 has revision 2 | ||
112 | * DB8500v2 has revision 3 | ||
113 | * AP9540v1 has revision 4 | ||
114 | * DB8540v1 has revision 4 | ||
115 | * TODO: Check if all these registers have to be saved/restored on dma40 v4a | ||
116 | */ | ||
117 | static u32 d40_backup_regs_v4a[] = { | ||
105 | D40_DREG_PSEG1, | 118 | D40_DREG_PSEG1, |
106 | D40_DREG_PSEG2, | 119 | D40_DREG_PSEG2, |
107 | D40_DREG_PSEG3, | 120 | D40_DREG_PSEG3, |
@@ -120,7 +133,32 @@ static u32 d40_backup_regs_v3[] = { | |||
120 | D40_DREG_RCEG4, | 133 | D40_DREG_RCEG4, |
121 | }; | 134 | }; |
122 | 135 | ||
123 | #define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3) | 136 | #define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a) |
137 | |||
138 | static u32 d40_backup_regs_v4b[] = { | ||
139 | D40_DREG_CPSEG1, | ||
140 | D40_DREG_CPSEG2, | ||
141 | D40_DREG_CPSEG3, | ||
142 | D40_DREG_CPSEG4, | ||
143 | D40_DREG_CPSEG5, | ||
144 | D40_DREG_CPCEG1, | ||
145 | D40_DREG_CPCEG2, | ||
146 | D40_DREG_CPCEG3, | ||
147 | D40_DREG_CPCEG4, | ||
148 | D40_DREG_CPCEG5, | ||
149 | D40_DREG_CRSEG1, | ||
150 | D40_DREG_CRSEG2, | ||
151 | D40_DREG_CRSEG3, | ||
152 | D40_DREG_CRSEG4, | ||
153 | D40_DREG_CRSEG5, | ||
154 | D40_DREG_CRCEG1, | ||
155 | D40_DREG_CRCEG2, | ||
156 | D40_DREG_CRCEG3, | ||
157 | D40_DREG_CRCEG4, | ||
158 | D40_DREG_CRCEG5, | ||
159 | }; | ||
160 | |||
161 | #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b) | ||
124 | 162 | ||
125 | static u32 d40_backup_regs_chan[] = { | 163 | static u32 d40_backup_regs_chan[] = { |
126 | D40_CHAN_REG_SSCFG, | 164 | D40_CHAN_REG_SSCFG, |
@@ -134,6 +172,102 @@ static u32 d40_backup_regs_chan[] = { | |||
134 | }; | 172 | }; |
135 | 173 | ||
136 | /** | 174 | /** |
175 | * struct d40_interrupt_lookup - lookup table for interrupt handler | ||
176 | * | ||
177 | * @src: Interrupt mask register. | ||
178 | * @clr: Interrupt clear register. | ||
179 | * @is_error: true if this is an error interrupt. | ||
180 | * @offset: start delta in the lookup_log_chans in d40_base. If equal to | ||
181 | * D40_PHY_CHAN, the lookup_phy_chans shall be used instead. | ||
182 | */ | ||
183 | struct d40_interrupt_lookup { | ||
184 | u32 src; | ||
185 | u32 clr; | ||
186 | bool is_error; | ||
187 | int offset; | ||
188 | }; | ||
189 | |||
190 | |||
191 | static struct d40_interrupt_lookup il_v4a[] = { | ||
192 | {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0}, | ||
193 | {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32}, | ||
194 | {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64}, | ||
195 | {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96}, | ||
196 | {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0}, | ||
197 | {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32}, | ||
198 | {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64}, | ||
199 | {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96}, | ||
200 | {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN}, | ||
201 | {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN}, | ||
202 | }; | ||
203 | |||
204 | static struct d40_interrupt_lookup il_v4b[] = { | ||
205 | {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0}, | ||
206 | {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32}, | ||
207 | {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64}, | ||
208 | {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96}, | ||
209 | {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128}, | ||
210 | {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0}, | ||
211 | {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32}, | ||
212 | {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64}, | ||
213 | {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96}, | ||
214 | {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128}, | ||
215 | {D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN}, | ||
216 | {D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN}, | ||
217 | }; | ||
218 | |||
219 | /** | ||
220 | * struct d40_reg_val - simple lookup struct | ||
221 | * | ||
222 | * @reg: The register. | ||
223 | * @val: The value that belongs to the register in reg. | ||
224 | */ | ||
225 | struct d40_reg_val { | ||
226 | unsigned int reg; | ||
227 | unsigned int val; | ||
228 | }; | ||
229 | |||
230 | static __initdata struct d40_reg_val dma_init_reg_v4a[] = { | ||
231 | /* Clock every part of the DMA block from start */ | ||
232 | { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, | ||
233 | |||
234 | /* Interrupts on all logical channels */ | ||
235 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, | ||
236 | { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, | ||
237 | { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, | ||
238 | { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, | ||
239 | { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, | ||
240 | { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, | ||
241 | { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, | ||
242 | { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, | ||
243 | { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, | ||
244 | { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, | ||
245 | { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, | ||
246 | { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} | ||
247 | }; | ||
248 | static __initdata struct d40_reg_val dma_init_reg_v4b[] = { | ||
249 | /* Clock every part of the DMA block from start */ | ||
250 | { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, | ||
251 | |||
252 | /* Interrupts on all logical channels */ | ||
253 | { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF}, | ||
254 | { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF}, | ||
255 | { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF}, | ||
256 | { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF}, | ||
257 | { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF}, | ||
258 | { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF}, | ||
259 | { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF}, | ||
260 | { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF}, | ||
261 | { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF}, | ||
262 | { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF}, | ||
263 | { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF}, | ||
264 | { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF}, | ||
265 | { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF}, | ||
266 | { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF}, | ||
267 | { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF} | ||
268 | }; | ||
269 | |||
270 | /** | ||
137 | * struct d40_lli_pool - Structure for keeping LLIs in memory | 271 | * struct d40_lli_pool - Structure for keeping LLIs in memory |
138 | * | 272 | * |
139 | * @base: Pointer to memory area when the pre_alloc_lli's are not large | 273 | * @base: Pointer to memory area when the pre_alloc_lli's are not large |
@@ -221,6 +355,7 @@ struct d40_lcla_pool { | |||
221 | * @allocated_dst: Same as for src but is dst. | 355 | * @allocated_dst: Same as for src but is dst. |
222 | * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as | 356 | * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as |
223 | * event line number. | 357 | * event line number. |
358 | * @use_soft_lli: To mark if the linked lists of the channel are managed by SW. | ||
224 | */ | 359 | */ |
225 | struct d40_phy_res { | 360 | struct d40_phy_res { |
226 | spinlock_t lock; | 361 | spinlock_t lock; |
@@ -228,6 +363,7 @@ struct d40_phy_res { | |||
228 | int num; | 363 | int num; |
229 | u32 allocated_src; | 364 | u32 allocated_src; |
230 | u32 allocated_dst; | 365 | u32 allocated_dst; |
366 | bool use_soft_lli; | ||
231 | }; | 367 | }; |
232 | 368 | ||
233 | struct d40_base; | 369 | struct d40_base; |
@@ -248,6 +384,7 @@ struct d40_base; | |||
248 | * @client: Client owned descriptor list. | 384 | * @client: Client owned descriptor list. |
249 | * @pending_queue: Submitted jobs, to be issued by issue_pending() | 385 | * @pending_queue: Submitted jobs, to be issued by issue_pending() |
250 | * @active: Active descriptor. | 386 | * @active: Active descriptor. |
387 | * @done: Completed jobs | ||
251 | * @queue: Queued jobs. | 388 | * @queue: Queued jobs. |
252 | * @prepare_queue: Prepared jobs. | 389 | * @prepare_queue: Prepared jobs. |
253 | * @dma_cfg: The client configuration of this dma channel. | 390 | * @dma_cfg: The client configuration of this dma channel. |
@@ -273,6 +410,7 @@ struct d40_chan { | |||
273 | struct list_head client; | 410 | struct list_head client; |
274 | struct list_head pending_queue; | 411 | struct list_head pending_queue; |
275 | struct list_head active; | 412 | struct list_head active; |
413 | struct list_head done; | ||
276 | struct list_head queue; | 414 | struct list_head queue; |
277 | struct list_head prepare_queue; | 415 | struct list_head prepare_queue; |
278 | struct stedma40_chan_cfg dma_cfg; | 416 | struct stedma40_chan_cfg dma_cfg; |
@@ -289,6 +427,38 @@ struct d40_chan { | |||
289 | }; | 427 | }; |
290 | 428 | ||
291 | /** | 429 | /** |
430 | * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA | ||
431 | * controller | ||
432 | * | ||
433 | * @backup: the pointer to the registers address array for backup | ||
434 | * @backup_size: the size of the registers address array for backup | ||
435 | * @realtime_en: the realtime enable register | ||
436 | * @realtime_clear: the realtime clear register | ||
437 | * @high_prio_en: the high priority enable register | ||
438 | * @high_prio_clear: the high priority clear register | ||
439 | * @interrupt_en: the interrupt enable register | ||
440 | * @interrupt_clear: the interrupt clear register | ||
441 | * @il: the pointer to struct d40_interrupt_lookup | ||
442 | * @il_size: the size of d40_interrupt_lookup array | ||
443 | * @init_reg: the pointer to the struct d40_reg_val | ||
444 | * @init_reg_size: the size of d40_reg_val array | ||
445 | */ | ||
446 | struct d40_gen_dmac { | ||
447 | u32 *backup; | ||
448 | u32 backup_size; | ||
449 | u32 realtime_en; | ||
450 | u32 realtime_clear; | ||
451 | u32 high_prio_en; | ||
452 | u32 high_prio_clear; | ||
453 | u32 interrupt_en; | ||
454 | u32 interrupt_clear; | ||
455 | struct d40_interrupt_lookup *il; | ||
456 | u32 il_size; | ||
457 | struct d40_reg_val *init_reg; | ||
458 | u32 init_reg_size; | ||
459 | }; | ||
460 | |||
461 | /** | ||
292 | * struct d40_base - The big global struct, one for each probe'd instance. | 462 | * struct d40_base - The big global struct, one for each probe'd instance. |
293 | * | 463 | * |
294 | * @interrupt_lock: Lock used to make sure one interrupt is handle a time. | 464 | * @interrupt_lock: Lock used to make sure one interrupt is handle a time. |
@@ -326,11 +496,13 @@ struct d40_chan { | |||
326 | * @desc_slab: cache for descriptors. | 496 | * @desc_slab: cache for descriptors. |
327 | * @reg_val_backup: Here the values of some hardware registers are stored | 497 | * @reg_val_backup: Here the values of some hardware registers are stored |
328 | * before the DMA is powered off. They are restored when the power is back on. | 498 | * before the DMA is powered off. They are restored when the power is back on. |
329 | * @reg_val_backup_v3: Backup of registers that only exists on dma40 v3 and | 499 | * @reg_val_backup_v4: Backup of registers that only exists on dma40 v3 and |
330 | * later. | 500 | * later |
331 | * @reg_val_backup_chan: Backup data for standard channel parameter registers. | 501 | * @reg_val_backup_chan: Backup data for standard channel parameter registers. |
332 | * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. | 502 | * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. |
333 | * @initialized: true if the dma has been initialized | 503 | * @initialized: true if the dma has been initialized |
504 | * @gen_dmac: the struct for generic register values to represent u8500/8540 | ||
505 | * DMA controller | ||
334 | */ | 506 | */ |
335 | struct d40_base { | 507 | struct d40_base { |
336 | spinlock_t interrupt_lock; | 508 | spinlock_t interrupt_lock; |
@@ -344,6 +516,7 @@ struct d40_base { | |||
344 | int irq; | 516 | int irq; |
345 | int num_phy_chans; | 517 | int num_phy_chans; |
346 | int num_log_chans; | 518 | int num_log_chans; |
519 | struct device_dma_parameters dma_parms; | ||
347 | struct dma_device dma_both; | 520 | struct dma_device dma_both; |
348 | struct dma_device dma_slave; | 521 | struct dma_device dma_slave; |
349 | struct dma_device dma_memcpy; | 522 | struct dma_device dma_memcpy; |
@@ -361,37 +534,11 @@ struct d40_base { | |||
361 | resource_size_t lcpa_size; | 534 | resource_size_t lcpa_size; |
362 | struct kmem_cache *desc_slab; | 535 | struct kmem_cache *desc_slab; |
363 | u32 reg_val_backup[BACKUP_REGS_SZ]; | 536 | u32 reg_val_backup[BACKUP_REGS_SZ]; |
364 | u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3]; | 537 | u32 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)]; |
365 | u32 *reg_val_backup_chan; | 538 | u32 *reg_val_backup_chan; |
366 | u16 gcc_pwr_off_mask; | 539 | u16 gcc_pwr_off_mask; |
367 | bool initialized; | 540 | bool initialized; |
368 | }; | 541 | struct d40_gen_dmac gen_dmac; |
369 | |||
370 | /** | ||
371 | * struct d40_interrupt_lookup - lookup table for interrupt handler | ||
372 | * | ||
373 | * @src: Interrupt mask register. | ||
374 | * @clr: Interrupt clear register. | ||
375 | * @is_error: true if this is an error interrupt. | ||
376 | * @offset: start delta in the lookup_log_chans in d40_base. If equals to | ||
377 | * D40_PHY_CHAN, the lookup_phy_chans shall be used instead. | ||
378 | */ | ||
379 | struct d40_interrupt_lookup { | ||
380 | u32 src; | ||
381 | u32 clr; | ||
382 | bool is_error; | ||
383 | int offset; | ||
384 | }; | ||
385 | |||
386 | /** | ||
387 | * struct d40_reg_val - simple lookup struct | ||
388 | * | ||
389 | * @reg: The register. | ||
390 | * @val: The value that belongs to the register in reg. | ||
391 | */ | ||
392 | struct d40_reg_val { | ||
393 | unsigned int reg; | ||
394 | unsigned int val; | ||
395 | }; | 542 | }; |
396 | 543 | ||
397 | static struct device *chan2dev(struct d40_chan *d40c) | 544 | static struct device *chan2dev(struct d40_chan *d40c) |
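The MAX() macro introduced earlier exists so that the single reg_val_backup_v4 buffer is sized at compile time for whichever of the two variant register lists is larger. A compile-time sketch of that sizing, with the two sizes assumed rather than taken from the real lists:

    #define MAX(a, b) (((a) < (b)) ? (b) : (a))

    /* Hypothetical stand-ins for BACKUP_REGS_SZ_V4A / BACKUP_REGS_SZ_V4B */
    #define SZ_V4A 16
    #define SZ_V4B 20

    static u32 reg_val_backup_v4[MAX(SZ_V4A, SZ_V4B)];	/* 20 entries */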
@@ -494,19 +641,18 @@ static int d40_lcla_alloc_one(struct d40_chan *d40c, | |||
494 | unsigned long flags; | 641 | unsigned long flags; |
495 | int i; | 642 | int i; |
496 | int ret = -EINVAL; | 643 | int ret = -EINVAL; |
497 | int p; | ||
498 | 644 | ||
499 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); | 645 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); |
500 | 646 | ||
501 | p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP; | ||
502 | |||
503 | /* | 647 | /* |
504 | * Allocate both src and dst at the same time, therefore the half | 648 | * Allocate both src and dst at the same time, therefore the half |
505 | * start on 1 since 0 can't be used since zero is used as end marker. | 649 | * start on 1 since 0 can't be used since zero is used as end marker. |
506 | */ | 650 | */ |
507 | for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { | 651 | for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { |
508 | if (!d40c->base->lcla_pool.alloc_map[p + i]) { | 652 | int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; |
509 | d40c->base->lcla_pool.alloc_map[p + i] = d40d; | 653 | |
654 | if (!d40c->base->lcla_pool.alloc_map[idx]) { | ||
655 | d40c->base->lcla_pool.alloc_map[idx] = d40d; | ||
510 | d40d->lcla_alloc++; | 656 | d40d->lcla_alloc++; |
511 | ret = i; | 657 | ret = i; |
512 | break; | 658 | break; |
@@ -531,10 +677,10 @@ static int d40_lcla_free_all(struct d40_chan *d40c, | |||
531 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); | 677 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); |
532 | 678 | ||
533 | for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { | 679 | for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { |
534 | if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * | 680 | int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; |
535 | D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) { | 681 | |
536 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * | 682 | if (d40c->base->lcla_pool.alloc_map[idx] == d40d) { |
537 | D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL; | 683 | d40c->base->lcla_pool.alloc_map[idx] = NULL; |
538 | d40d->lcla_alloc--; | 684 | d40d->lcla_alloc--; |
539 | if (d40d->lcla_alloc == 0) { | 685 | if (d40d->lcla_alloc == 0) { |
540 | ret = 0; | 686 | ret = 0; |
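Both loops above now compute the LCLA slot index in one place. The layout assumed by that arithmetic: each physical channel owns D40_LCLA_LINK_PER_EVENT_GRP consecutive entries, and entry 0 of each group is reserved as the end-of-list marker, which is why allocation starts at i = 1. A small sketch of the helper this index math could be factored into (hypothetical name, not part of the patch):

    static int d40_lcla_idx(int phy_num, int i)
    {
    	/* i ranges over [1, D40_LCLA_LINK_PER_EVENT_GRP / 2) */
    	return phy_num * D40_LCLA_LINK_PER_EVENT_GRP + i;
    }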
@@ -611,6 +757,11 @@ static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) | |||
611 | writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); | 757 | writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); |
612 | } | 758 | } |
613 | 759 | ||
760 | static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc) | ||
761 | { | ||
762 | list_add_tail(&desc->node, &d40c->done); | ||
763 | } | ||
764 | |||
614 | static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) | 765 | static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) |
615 | { | 766 | { |
616 | struct d40_lcla_pool *pool = &chan->base->lcla_pool; | 767 | struct d40_lcla_pool *pool = &chan->base->lcla_pool; |
@@ -634,7 +785,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) | |||
634 | * can't link back to the one in LCPA space | 785 | * can't link back to the one in LCPA space |
635 | */ | 786 | */ |
636 | if (linkback || (lli_len - lli_current > 1)) { | 787 | if (linkback || (lli_len - lli_current > 1)) { |
637 | curr_lcla = d40_lcla_alloc_one(chan, desc); | 788 | /* |
789 | * If the channel is expected to use only soft_lli, don't | ||
790 | * allocate an lcla. This is to avoid a HW issue that exists | ||
791 | * in some controllers during a peripheral to memory transfer | ||
792 | * that uses linked lists. | ||
793 | */ | ||
794 | if (!(chan->phy_chan->use_soft_lli && | ||
795 | chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)) | ||
796 | curr_lcla = d40_lcla_alloc_one(chan, desc); | ||
797 | |||
638 | first_lcla = curr_lcla; | 798 | first_lcla = curr_lcla; |
639 | } | 799 | } |
640 | 800 | ||
@@ -771,6 +931,14 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c) | |||
771 | return d; | 931 | return d; |
772 | } | 932 | } |
773 | 933 | ||
934 | static struct d40_desc *d40_first_done(struct d40_chan *d40c) | ||
935 | { | ||
936 | if (list_empty(&d40c->done)) | ||
937 | return NULL; | ||
938 | |||
939 | return list_first_entry(&d40c->done, struct d40_desc, node); | ||
940 | } | ||
941 | |||
774 | static int d40_psize_2_burst_size(bool is_log, int psize) | 942 | static int d40_psize_2_burst_size(bool is_log, int psize) |
775 | { | 943 | { |
776 | if (is_log) { | 944 | if (is_log) { |
@@ -874,11 +1042,11 @@ static void d40_save_restore_registers(struct d40_base *base, bool save) | |||
874 | save); | 1042 | save); |
875 | 1043 | ||
876 | /* Save/Restore registers only existing on dma40 v3 and later */ | 1044 | /* Save/Restore registers only existing on dma40 v3 and later */ |
877 | if (base->rev >= 3) | 1045 | if (base->gen_dmac.backup) |
878 | dma40_backup(base->virtbase, base->reg_val_backup_v3, | 1046 | dma40_backup(base->virtbase, base->reg_val_backup_v4, |
879 | d40_backup_regs_v3, | 1047 | base->gen_dmac.backup, |
880 | ARRAY_SIZE(d40_backup_regs_v3), | 1048 | base->gen_dmac.backup_size, |
881 | save); | 1049 | save); |
882 | } | 1050 | } |
883 | #else | 1051 | #else |
884 | static void d40_save_restore_registers(struct d40_base *base, bool save) | 1052 | static void d40_save_restore_registers(struct d40_base *base, bool save) |
@@ -961,6 +1129,12 @@ static void d40_term_all(struct d40_chan *d40c) | |||
961 | struct d40_desc *d40d; | 1129 | struct d40_desc *d40d; |
962 | struct d40_desc *_d; | 1130 | struct d40_desc *_d; |
963 | 1131 | ||
1132 | /* Release completed descriptors */ | ||
1133 | while ((d40d = d40_first_done(d40c))) { | ||
1134 | d40_desc_remove(d40d); | ||
1135 | d40_desc_free(d40c, d40d); | ||
1136 | } | ||
1137 | |||
964 | /* Release active descriptors */ | 1138 | /* Release active descriptors */ |
965 | while ((d40d = d40_first_active_get(d40c))) { | 1139 | while ((d40d = d40_first_active_get(d40c))) { |
966 | d40_desc_remove(d40d); | 1140 | d40_desc_remove(d40d); |
@@ -1396,6 +1570,9 @@ static void dma_tc_handle(struct d40_chan *d40c) | |||
1396 | d40c->busy = false; | 1570 | d40c->busy = false; |
1397 | pm_runtime_mark_last_busy(d40c->base->dev); | 1571 | pm_runtime_mark_last_busy(d40c->base->dev); |
1398 | pm_runtime_put_autosuspend(d40c->base->dev); | 1572 | pm_runtime_put_autosuspend(d40c->base->dev); |
1573 | |||
1574 | d40_desc_remove(d40d); | ||
1575 | d40_desc_done(d40c, d40d); | ||
1399 | } | 1576 | } |
1400 | 1577 | ||
1401 | d40c->pending_tx++; | 1578 | d40c->pending_tx++; |
@@ -1413,10 +1590,14 @@ static void dma_tasklet(unsigned long data) | |||
1413 | 1590 | ||
1414 | spin_lock_irqsave(&d40c->lock, flags); | 1591 | spin_lock_irqsave(&d40c->lock, flags); |
1415 | 1592 | ||
1416 | /* Get first active entry from list */ | 1593 | /* Get first entry from the done list */ |
1417 | d40d = d40_first_active_get(d40c); | 1594 | d40d = d40_first_done(d40c); |
1418 | if (d40d == NULL) | 1595 | if (d40d == NULL) { |
1419 | goto err; | 1596 | /* Check if we have reached here for cyclic job */ |
1597 | d40d = d40_first_active_get(d40c); | ||
1598 | if (d40d == NULL || !d40d->cyclic) | ||
1599 | goto err; | ||
1600 | } | ||
1420 | 1601 | ||
1421 | if (!d40d->cyclic) | 1602 | if (!d40d->cyclic) |
1422 | dma_cookie_complete(&d40d->txd); | 1603 | dma_cookie_complete(&d40d->txd); |
@@ -1438,13 +1619,11 @@ static void dma_tasklet(unsigned long data) | |||
1438 | if (async_tx_test_ack(&d40d->txd)) { | 1619 | if (async_tx_test_ack(&d40d->txd)) { |
1439 | d40_desc_remove(d40d); | 1620 | d40_desc_remove(d40d); |
1440 | d40_desc_free(d40c, d40d); | 1621 | d40_desc_free(d40c, d40d); |
1441 | } else { | 1622 | } else if (!d40d->is_in_client_list) { |
1442 | if (!d40d->is_in_client_list) { | 1623 | d40_desc_remove(d40d); |
1443 | d40_desc_remove(d40d); | 1624 | d40_lcla_free_all(d40c, d40d); |
1444 | d40_lcla_free_all(d40c, d40d); | 1625 | list_add_tail(&d40d->node, &d40c->client); |
1445 | list_add_tail(&d40d->node, &d40c->client); | 1626 | d40d->is_in_client_list = true; |
1446 | d40d->is_in_client_list = true; | ||
1447 | } | ||
1448 | } | 1627 | } |
1449 | } | 1628 | } |
1450 | 1629 | ||
@@ -1469,53 +1648,51 @@ err: | |||
1469 | 1648 | ||
1470 | static irqreturn_t d40_handle_interrupt(int irq, void *data) | 1649 | static irqreturn_t d40_handle_interrupt(int irq, void *data) |
1471 | { | 1650 | { |
1472 | static const struct d40_interrupt_lookup il[] = { | ||
1473 | {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0}, | ||
1474 | {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32}, | ||
1475 | {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64}, | ||
1476 | {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96}, | ||
1477 | {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0}, | ||
1478 | {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32}, | ||
1479 | {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64}, | ||
1480 | {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96}, | ||
1481 | {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN}, | ||
1482 | {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN}, | ||
1483 | }; | ||
1484 | |||
1485 | int i; | 1651 | int i; |
1486 | u32 regs[ARRAY_SIZE(il)]; | ||
1487 | u32 idx; | 1652 | u32 idx; |
1488 | u32 row; | 1653 | u32 row; |
1489 | long chan = -1; | 1654 | long chan = -1; |
1490 | struct d40_chan *d40c; | 1655 | struct d40_chan *d40c; |
1491 | unsigned long flags; | 1656 | unsigned long flags; |
1492 | struct d40_base *base = data; | 1657 | struct d40_base *base = data; |
1658 | u32 regs[base->gen_dmac.il_size]; | ||
1659 | struct d40_interrupt_lookup *il = base->gen_dmac.il; | ||
1660 | u32 il_size = base->gen_dmac.il_size; | ||
1493 | 1661 | ||
1494 | spin_lock_irqsave(&base->interrupt_lock, flags); | 1662 | spin_lock_irqsave(&base->interrupt_lock, flags); |
1495 | 1663 | ||
1496 | /* Read interrupt status of both logical and physical channels */ | 1664 | /* Read interrupt status of both logical and physical channels */ |
1497 | for (i = 0; i < ARRAY_SIZE(il); i++) | 1665 | for (i = 0; i < il_size; i++) |
1498 | regs[i] = readl(base->virtbase + il[i].src); | 1666 | regs[i] = readl(base->virtbase + il[i].src); |
1499 | 1667 | ||
1500 | for (;;) { | 1668 | for (;;) { |
1501 | 1669 | ||
1502 | chan = find_next_bit((unsigned long *)regs, | 1670 | chan = find_next_bit((unsigned long *)regs, |
1503 | BITS_PER_LONG * ARRAY_SIZE(il), chan + 1); | 1671 | BITS_PER_LONG * il_size, chan + 1); |
1504 | 1672 | ||
1505 | /* No more set bits found? */ | 1673 | /* No more set bits found? */ |
1506 | if (chan == BITS_PER_LONG * ARRAY_SIZE(il)) | 1674 | if (chan == BITS_PER_LONG * il_size) |
1507 | break; | 1675 | break; |
1508 | 1676 | ||
1509 | row = chan / BITS_PER_LONG; | 1677 | row = chan / BITS_PER_LONG; |
1510 | idx = chan & (BITS_PER_LONG - 1); | 1678 | idx = chan & (BITS_PER_LONG - 1); |
1511 | 1679 | ||
1512 | /* ACK interrupt */ | ||
1513 | writel(1 << idx, base->virtbase + il[row].clr); | ||
1514 | |||
1515 | if (il[row].offset == D40_PHY_CHAN) | 1680 | if (il[row].offset == D40_PHY_CHAN) |
1516 | d40c = base->lookup_phy_chans[idx]; | 1681 | d40c = base->lookup_phy_chans[idx]; |
1517 | else | 1682 | else |
1518 | d40c = base->lookup_log_chans[il[row].offset + idx]; | 1683 | d40c = base->lookup_log_chans[il[row].offset + idx]; |
1684 | |||
1685 | if (!d40c) { | ||
1686 | /* | ||
1687 | * No error because this can happen if something else | ||
1688 | * in the system is using the channel. | ||
1689 | */ | ||
1690 | continue; | ||
1691 | } | ||
1692 | |||
1693 | /* ACK interrupt */ | ||
1694 | writel(1 << idx, base->virtbase + il[row].clr); | ||
1695 | |||
1519 | spin_lock(&d40c->lock); | 1696 | spin_lock(&d40c->lock); |
1520 | 1697 | ||
1521 | if (!il[row].is_error) | 1698 | if (!il[row].is_error) |
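The rewritten handler treats the snapshot of all status registers as one long bitmap and walks it with find_next_bit(): the row selects the il[] lookup entry and the bit index selects the channel within it, and the ACK write is now issued only after an owning channel is found. A condensed sketch of that scan, assuming regs[] already holds the snapshot:

    long chan = -1;

    for (;;) {
    	chan = find_next_bit((unsigned long *)regs,
    			     BITS_PER_LONG * il_size, chan + 1);
    	if (chan == BITS_PER_LONG * il_size)
    		break;				/* nothing pending */

    	row = chan / BITS_PER_LONG;		/* which il[] entry */
    	idx = chan & (BITS_PER_LONG - 1);	/* bit within that word */
    	/* look up the channel; skip it if another master owns it,
    	 * otherwise ACK via il[row].clr and service it */
    }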
@@ -1710,10 +1887,12 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) | |||
1710 | int i; | 1887 | int i; |
1711 | int j; | 1888 | int j; |
1712 | int log_num; | 1889 | int log_num; |
1890 | int num_phy_chans; | ||
1713 | bool is_src; | 1891 | bool is_src; |
1714 | bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; | 1892 | bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; |
1715 | 1893 | ||
1716 | phys = d40c->base->phy_res; | 1894 | phys = d40c->base->phy_res; |
1895 | num_phy_chans = d40c->base->num_phy_chans; | ||
1717 | 1896 | ||
1718 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | 1897 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { |
1719 | dev_type = d40c->dma_cfg.src_dev_type; | 1898 | dev_type = d40c->dma_cfg.src_dev_type; |
@@ -1734,12 +1913,19 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) | |||
1734 | if (!is_log) { | 1913 | if (!is_log) { |
1735 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | 1914 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
1736 | /* Find physical half channel */ | 1915 | /* Find physical half channel */ |
1737 | for (i = 0; i < d40c->base->num_phy_chans; i++) { | 1916 | if (d40c->dma_cfg.use_fixed_channel) { |
1738 | 1917 | i = d40c->dma_cfg.phy_channel; | |
1739 | if (d40_alloc_mask_set(&phys[i], is_src, | 1918 | if (d40_alloc_mask_set(&phys[i], is_src, |
1740 | 0, is_log, | 1919 | 0, is_log, |
1741 | first_phy_user)) | 1920 | first_phy_user)) |
1742 | goto found_phy; | 1921 | goto found_phy; |
1922 | } else { | ||
1923 | for (i = 0; i < num_phy_chans; i++) { | ||
1924 | if (d40_alloc_mask_set(&phys[i], is_src, | ||
1925 | 0, is_log, | ||
1926 | first_phy_user)) | ||
1927 | goto found_phy; | ||
1928 | } | ||
1743 | } | 1929 | } |
1744 | } else | 1930 | } else |
1745 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | 1931 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { |
@@ -1954,7 +2140,6 @@ _exit: | |||
1954 | 2140 | ||
1955 | } | 2141 | } |
1956 | 2142 | ||
1957 | |||
1958 | static u32 stedma40_residue(struct dma_chan *chan) | 2143 | static u32 stedma40_residue(struct dma_chan *chan) |
1959 | { | 2144 | { |
1960 | struct d40_chan *d40c = | 2145 | struct d40_chan *d40c = |
@@ -2030,7 +2215,6 @@ d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, | |||
2030 | return ret < 0 ? ret : 0; | 2215 | return ret < 0 ? ret : 0; |
2031 | } | 2216 | } |
2032 | 2217 | ||
2033 | |||
2034 | static struct d40_desc * | 2218 | static struct d40_desc * |
2035 | d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, | 2219 | d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, |
2036 | unsigned int sg_len, unsigned long dma_flags) | 2220 | unsigned int sg_len, unsigned long dma_flags) |
@@ -2056,7 +2240,6 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, | |||
2056 | goto err; | 2240 | goto err; |
2057 | } | 2241 | } |
2058 | 2242 | ||
2059 | |||
2060 | desc->lli_current = 0; | 2243 | desc->lli_current = 0; |
2061 | desc->txd.flags = dma_flags; | 2244 | desc->txd.flags = dma_flags; |
2062 | desc->txd.tx_submit = d40_tx_submit; | 2245 | desc->txd.tx_submit = d40_tx_submit; |
@@ -2105,7 +2288,6 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | |||
2105 | return NULL; | 2288 | return NULL; |
2106 | } | 2289 | } |
2107 | 2290 | ||
2108 | |||
2109 | spin_lock_irqsave(&chan->lock, flags); | 2291 | spin_lock_irqsave(&chan->lock, flags); |
2110 | 2292 | ||
2111 | desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); | 2293 | desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); |
@@ -2179,11 +2361,26 @@ static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) | |||
2179 | { | 2361 | { |
2180 | bool realtime = d40c->dma_cfg.realtime; | 2362 | bool realtime = d40c->dma_cfg.realtime; |
2181 | bool highprio = d40c->dma_cfg.high_priority; | 2363 | bool highprio = d40c->dma_cfg.high_priority; |
2182 | u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1; | 2364 | u32 rtreg; |
2183 | u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1; | ||
2184 | u32 event = D40_TYPE_TO_EVENT(dev_type); | 2365 | u32 event = D40_TYPE_TO_EVENT(dev_type); |
2185 | u32 group = D40_TYPE_TO_GROUP(dev_type); | 2366 | u32 group = D40_TYPE_TO_GROUP(dev_type); |
2186 | u32 bit = 1 << event; | 2367 | u32 bit = 1 << event; |
2368 | u32 prioreg; | ||
2369 | struct d40_gen_dmac *dmac = &d40c->base->gen_dmac; | ||
2370 | |||
2371 | rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear; | ||
2372 | /* | ||
2373 | * Due to a hardware bug, in some cases a logical channel triggered by | ||
2374 | * a high priority destination event line can generate extra packet | ||
2375 | * transactions. | ||
2376 | * | ||
2377 | * The workaround is to not set the high priority level for the | ||
2378 | * destination event lines that trigger logical channels. | ||
2379 | */ | ||
2380 | if (!src && chan_is_logical(d40c)) | ||
2381 | highprio = false; | ||
2382 | |||
2383 | prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear; | ||
2187 | 2384 | ||
2188 | /* Destination event lines are stored in the upper halfword */ | 2385 | /* Destination event lines are stored in the upper halfword */ |
2189 | if (!src) | 2386 | if (!src) |
@@ -2248,11 +2445,11 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
2248 | 2445 | ||
2249 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) | 2446 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) |
2250 | d40c->lcpa = d40c->base->lcpa_base + | 2447 | d40c->lcpa = d40c->base->lcpa_base + |
2251 | d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; | 2448 | d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; |
2252 | else | 2449 | else |
2253 | d40c->lcpa = d40c->base->lcpa_base + | 2450 | d40c->lcpa = d40c->base->lcpa_base + |
2254 | d40c->dma_cfg.dst_dev_type * | 2451 | d40c->dma_cfg.dst_dev_type * |
2255 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; | 2452 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; |
2256 | } | 2453 | } |
2257 | 2454 | ||
2258 | dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", | 2455 | dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", |
@@ -2287,7 +2484,6 @@ static void d40_free_chan_resources(struct dma_chan *chan) | |||
2287 | return; | 2484 | return; |
2288 | } | 2485 | } |
2289 | 2486 | ||
2290 | |||
2291 | spin_lock_irqsave(&d40c->lock, flags); | 2487 | spin_lock_irqsave(&d40c->lock, flags); |
2292 | 2488 | ||
2293 | err = d40_free_dma(d40c); | 2489 | err = d40_free_dma(d40c); |
@@ -2330,14 +2526,12 @@ d40_prep_memcpy_sg(struct dma_chan *chan, | |||
2330 | return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags); | 2526 | return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags); |
2331 | } | 2527 | } |
2332 | 2528 | ||
2333 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | 2529 | static struct dma_async_tx_descriptor * |
2334 | struct scatterlist *sgl, | 2530 | d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
2335 | unsigned int sg_len, | 2531 | unsigned int sg_len, enum dma_transfer_direction direction, |
2336 | enum dma_transfer_direction direction, | 2532 | unsigned long dma_flags, void *context) |
2337 | unsigned long dma_flags, | ||
2338 | void *context) | ||
2339 | { | 2533 | { |
2340 | if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) | 2534 | if (!is_slave_direction(direction)) |
2341 | return NULL; | 2535 | return NULL; |
2342 | 2536 | ||
2343 | return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); | 2537 | return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); |
@@ -2577,6 +2771,14 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2577 | return -EINVAL; | 2771 | return -EINVAL; |
2578 | } | 2772 | } |
2579 | 2773 | ||
2774 | if (src_maxburst > 16) { | ||
2775 | src_maxburst = 16; | ||
2776 | dst_maxburst = src_maxburst * src_addr_width / dst_addr_width; | ||
2777 | } else if (dst_maxburst > 16) { | ||
2778 | dst_maxburst = 16; | ||
2779 | src_maxburst = dst_maxburst * dst_addr_width / src_addr_width; | ||
2780 | } | ||
2781 | |||
2580 | ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, | 2782 | ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, |
2581 | src_addr_width, | 2783 | src_addr_width, |
2582 | src_maxburst); | 2784 | src_maxburst); |
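The new clamp keeps the byte count per burst equal on both halves of the channel when one side hits the 16-element hardware limit. A worked example under assumed widths:

    /* Assumed request: 4-byte source elements, 2-byte destination elements */
    u32 src_addr_width = 4, dst_addr_width = 2;
    u32 src_maxburst = 32, dst_maxburst = 64;

    if (src_maxburst > 16) {
    	src_maxburst = 16;	/* hardware limit */
    	dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
    	/* 16 * 4 / 2 = 32 elements: 64 bytes per burst on both sides */
    }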
@@ -2659,6 +2861,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | |||
2659 | 2861 | ||
2660 | d40c->log_num = D40_PHY_CHAN; | 2862 | d40c->log_num = D40_PHY_CHAN; |
2661 | 2863 | ||
2864 | INIT_LIST_HEAD(&d40c->done); | ||
2662 | INIT_LIST_HEAD(&d40c->active); | 2865 | INIT_LIST_HEAD(&d40c->active); |
2663 | INIT_LIST_HEAD(&d40c->queue); | 2866 | INIT_LIST_HEAD(&d40c->queue); |
2664 | INIT_LIST_HEAD(&d40c->pending_queue); | 2867 | INIT_LIST_HEAD(&d40c->pending_queue); |
@@ -2773,8 +2976,6 @@ static int dma40_pm_suspend(struct device *dev) | |||
2773 | struct platform_device *pdev = to_platform_device(dev); | 2976 | struct platform_device *pdev = to_platform_device(dev); |
2774 | struct d40_base *base = platform_get_drvdata(pdev); | 2977 | struct d40_base *base = platform_get_drvdata(pdev); |
2775 | int ret = 0; | 2978 | int ret = 0; |
2776 | if (!pm_runtime_suspended(dev)) | ||
2777 | return -EBUSY; | ||
2778 | 2979 | ||
2779 | if (base->lcpa_regulator) | 2980 | if (base->lcpa_regulator) |
2780 | ret = regulator_disable(base->lcpa_regulator); | 2981 | ret = regulator_disable(base->lcpa_regulator); |
@@ -2882,6 +3083,13 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2882 | num_phy_chans_avail--; | 3083 | num_phy_chans_avail--; |
2883 | } | 3084 | } |
2884 | 3085 | ||
3086 | /* Mark soft_lli channels */ | ||
3087 | for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) { | ||
3088 | int chan = base->plat_data->soft_lli_chans[i]; | ||
3089 | |||
3090 | base->phy_res[chan].use_soft_lli = true; | ||
3091 | } | ||
3092 | |||
2885 | dev_info(base->dev, "%d of %d physical DMA channels available\n", | 3093 | dev_info(base->dev, "%d of %d physical DMA channels available\n", |
2886 | num_phy_chans_avail, base->num_phy_chans); | 3094 | num_phy_chans_avail, base->num_phy_chans); |
2887 | 3095 | ||
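The new loop consumes two platform-data fields added by this series, soft_lli_chans and num_of_soft_lli_chans, to mark which physical channels must avoid LCLA-based linked lists. A hypothetical board-file fragment showing how such channels might be declared, assuming the platform-data struct is stedma40_platform_data (channel numbers invented for illustration):

    /* Hypothetical: mark physical channels 8 and 9 as soft-LLI only */
    static int dma40_soft_lli_chans[] = { 8, 9 };

    static struct stedma40_platform_data dma40_plat_data = {
    	/* ... other fields ... */
    	.soft_lli_chans		= dma40_soft_lli_chans,
    	.num_of_soft_lli_chans	= ARRAY_SIZE(dma40_soft_lli_chans),
    };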
@@ -2975,14 +3183,21 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2975 | * ? has revision 1 | 3183 | * ? has revision 1 |
2976 | * DB8500v1 has revision 2 | 3184 | * DB8500v1 has revision 2 |
2977 | * DB8500v2 has revision 3 | 3185 | * DB8500v2 has revision 3 |
3186 | * AP9540v1 has revision 4 | ||
3187 | * DB8540v1 has revision 4 | ||
2978 | */ | 3188 | */ |
2979 | rev = AMBA_REV_BITS(pid); | 3189 | rev = AMBA_REV_BITS(pid); |
2980 | 3190 | ||
3191 | plat_data = pdev->dev.platform_data; | ||
3192 | |||
2981 | /* The number of physical channels on this HW */ | 3193 | /* The number of physical channels on this HW */ |
2982 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | 3194 | if (plat_data->num_of_phy_chans) |
3195 | num_phy_chans = plat_data->num_of_phy_chans; | ||
3196 | else | ||
3197 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | ||
2983 | 3198 | ||
2984 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", | 3199 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n", |
2985 | rev, res->start); | 3200 | rev, res->start, num_phy_chans); |
2986 | 3201 | ||
2987 | if (rev < 2) { | 3202 | if (rev < 2) { |
2988 | d40_err(&pdev->dev, "hardware revision: %d is not supported", | 3203 | d40_err(&pdev->dev, "hardware revision: %d is not supported", |
@@ -2990,8 +3205,6 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2990 | goto failure; | 3205 | goto failure; |
2991 | } | 3206 | } |
2992 | 3207 | ||
2993 | plat_data = pdev->dev.platform_data; | ||
2994 | |||
2995 | /* Count the number of logical channels in use */ | 3208 | /* Count the number of logical channels in use */ |
2996 | for (i = 0; i < plat_data->dev_len; i++) | 3209 | for (i = 0; i < plat_data->dev_len; i++) |
2997 | if (plat_data->dev_rx[i] != 0) | 3210 | if (plat_data->dev_rx[i] != 0) |
@@ -3022,6 +3235,36 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
3022 | base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); | 3235 | base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); |
3023 | base->log_chans = &base->phy_chans[num_phy_chans]; | 3236 | base->log_chans = &base->phy_chans[num_phy_chans]; |
3024 | 3237 | ||
3238 | if (base->plat_data->num_of_phy_chans == 14) { | ||
3239 | base->gen_dmac.backup = d40_backup_regs_v4b; | ||
3240 | base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B; | ||
3241 | base->gen_dmac.interrupt_en = D40_DREG_CPCMIS; | ||
3242 | base->gen_dmac.interrupt_clear = D40_DREG_CPCICR; | ||
3243 | base->gen_dmac.realtime_en = D40_DREG_CRSEG1; | ||
3244 | base->gen_dmac.realtime_clear = D40_DREG_CRCEG1; | ||
3245 | base->gen_dmac.high_prio_en = D40_DREG_CPSEG1; | ||
3246 | base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1; | ||
3247 | base->gen_dmac.il = il_v4b; | ||
3248 | base->gen_dmac.il_size = ARRAY_SIZE(il_v4b); | ||
3249 | base->gen_dmac.init_reg = dma_init_reg_v4b; | ||
3250 | base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b); | ||
3251 | } else { | ||
3252 | if (base->rev >= 3) { | ||
3253 | base->gen_dmac.backup = d40_backup_regs_v4a; | ||
3254 | base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A; | ||
3255 | } | ||
3256 | base->gen_dmac.interrupt_en = D40_DREG_PCMIS; | ||
3257 | base->gen_dmac.interrupt_clear = D40_DREG_PCICR; | ||
3258 | base->gen_dmac.realtime_en = D40_DREG_RSEG1; | ||
3259 | base->gen_dmac.realtime_clear = D40_DREG_RCEG1; | ||
3260 | base->gen_dmac.high_prio_en = D40_DREG_PSEG1; | ||
3261 | base->gen_dmac.high_prio_clear = D40_DREG_PCEG1; | ||
3262 | base->gen_dmac.il = il_v4a; | ||
3263 | base->gen_dmac.il_size = ARRAY_SIZE(il_v4a); | ||
3264 | base->gen_dmac.init_reg = dma_init_reg_v4a; | ||
3265 | base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a); | ||
3266 | } | ||
3267 | |||
3025 | base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), | 3268 | base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), |
3026 | GFP_KERNEL); | 3269 | GFP_KERNEL); |
3027 | if (!base->phy_res) | 3270 | if (!base->phy_res) |
@@ -3093,31 +3336,15 @@ failure: | |||
3093 | static void __init d40_hw_init(struct d40_base *base) | 3336 | static void __init d40_hw_init(struct d40_base *base) |
3094 | { | 3337 | { |
3095 | 3338 | ||
3096 | static struct d40_reg_val dma_init_reg[] = { | ||
3097 | /* Clock every part of the DMA block from start */ | ||
3098 | { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, | ||
3099 | |||
3100 | /* Interrupts on all logical channels */ | ||
3101 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, | ||
3102 | { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, | ||
3103 | { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, | ||
3104 | { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, | ||
3105 | { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, | ||
3106 | { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, | ||
3107 | { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, | ||
3108 | { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, | ||
3109 | { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, | ||
3110 | { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, | ||
3111 | { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, | ||
3112 | { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} | ||
3113 | }; | ||
3114 | int i; | 3339 | int i; |
3115 | u32 prmseo[2] = {0, 0}; | 3340 | u32 prmseo[2] = {0, 0}; |
3116 | u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; | 3341 | u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; |
3117 | u32 pcmis = 0; | 3342 | u32 pcmis = 0; |
3118 | u32 pcicr = 0; | 3343 | u32 pcicr = 0; |
3344 | struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg; | ||
3345 | u32 reg_size = base->gen_dmac.init_reg_size; | ||
3119 | 3346 | ||
3120 | for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++) | 3347 | for (i = 0; i < reg_size; i++) |
3121 | writel(dma_init_reg[i].val, | 3348 | writel(dma_init_reg[i].val, |
3122 | base->virtbase + dma_init_reg[i].reg); | 3349 | base->virtbase + dma_init_reg[i].reg); |
3123 | 3350 | ||
@@ -3150,11 +3377,14 @@ static void __init d40_hw_init(struct d40_base *base) | |||
3150 | writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); | 3377 | writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); |
3151 | 3378 | ||
3152 | /* Write which interrupt to enable */ | 3379 | /* Write which interrupt to enable */ |
3153 | writel(pcmis, base->virtbase + D40_DREG_PCMIS); | 3380 | writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en); |
3154 | 3381 | ||
3155 | /* Write which interrupt to clear */ | 3382 | /* Write which interrupt to clear */ |
3156 | writel(pcicr, base->virtbase + D40_DREG_PCICR); | 3383 | writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear); |
3157 | 3384 | ||
3385 | /* These are __initdata and cannot be accessed after init */ | ||
3386 | base->gen_dmac.init_reg = NULL; | ||
3387 | base->gen_dmac.init_reg_size = 0; | ||
3158 | } | 3388 | } |
3159 | 3389 | ||
3160 | static int __init d40_lcla_allocate(struct d40_base *base) | 3390 | static int __init d40_lcla_allocate(struct d40_base *base) |
@@ -3362,6 +3592,13 @@ static int __init d40_probe(struct platform_device *pdev) | |||
3362 | if (err) | 3592 | if (err) |
3363 | goto failure; | 3593 | goto failure; |
3364 | 3594 | ||
3595 | base->dev->dma_parms = &base->dma_parms; | ||
3596 | err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); | ||
3597 | if (err) { | ||
3598 | d40_err(&pdev->dev, "Failed to set dma max seg size\n"); | ||
3599 | goto failure; | ||
3600 | } | ||
3601 | |||
3365 | d40_hw_init(base); | 3602 | d40_hw_init(base); |
3366 | 3603 | ||
3367 | dev_info(base->dev, "initialized\n"); | 3604 | dev_info(base->dev, "initialized\n"); |
@@ -3397,7 +3634,7 @@ failure: | |||
3397 | release_mem_region(base->phy_start, | 3634 | release_mem_region(base->phy_start, |
3398 | base->phy_size); | 3635 | base->phy_size); |
3399 | if (base->clk) { | 3636 | if (base->clk) { |
3400 | clk_disable(base->clk); | 3637 | clk_disable_unprepare(base->clk); |
3401 | clk_put(base->clk); | 3638 | clk_put(base->clk); |
3402 | } | 3639 | } |
3403 | 3640 | ||
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 851ad56e8409..7180e0d41722 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c | |||
@@ -102,17 +102,18 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg, | |||
102 | src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS; | 102 | src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS; |
103 | dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS; | 103 | dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS; |
104 | 104 | ||
105 | /* Set the priority bit to high for the physical channel */ | ||
106 | if (cfg->high_priority) { | ||
107 | src |= 1 << D40_SREG_CFG_PRI_POS; | ||
108 | dst |= 1 << D40_SREG_CFG_PRI_POS; | ||
109 | } | ||
110 | |||
105 | } else { | 111 | } else { |
106 | /* Logical channel */ | 112 | /* Logical channel */ |
107 | dst |= 1 << D40_SREG_CFG_LOG_GIM_POS; | 113 | dst |= 1 << D40_SREG_CFG_LOG_GIM_POS; |
108 | src |= 1 << D40_SREG_CFG_LOG_GIM_POS; | 114 | src |= 1 << D40_SREG_CFG_LOG_GIM_POS; |
109 | } | 115 | } |
110 | 116 | ||
111 | if (cfg->high_priority) { | ||
112 | src |= 1 << D40_SREG_CFG_PRI_POS; | ||
113 | dst |= 1 << D40_SREG_CFG_PRI_POS; | ||
114 | } | ||
115 | |||
116 | if (cfg->src_info.big_endian) | 117 | if (cfg->src_info.big_endian) |
117 | src |= 1 << D40_SREG_CFG_LBE_POS; | 118 | src |= 1 << D40_SREG_CFG_LBE_POS; |
118 | if (cfg->dst_info.big_endian) | 119 | if (cfg->dst_info.big_endian) |
@@ -250,7 +251,7 @@ d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size, | |||
250 | 251 | ||
251 | return lli; | 252 | return lli; |
252 | 253 | ||
253 | err: | 254 | err: |
254 | return NULL; | 255 | return NULL; |
255 | } | 256 | } |
256 | 257 | ||
@@ -331,10 +332,10 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, | |||
331 | { | 332 | { |
332 | d40_log_lli_link(lli_dst, lli_src, next, flags); | 333 | d40_log_lli_link(lli_dst, lli_src, next, flags); |
333 | 334 | ||
334 | writel(lli_src->lcsp02, &lcpa[0].lcsp0); | 335 | writel_relaxed(lli_src->lcsp02, &lcpa[0].lcsp0); |
335 | writel(lli_src->lcsp13, &lcpa[0].lcsp1); | 336 | writel_relaxed(lli_src->lcsp13, &lcpa[0].lcsp1); |
336 | writel(lli_dst->lcsp02, &lcpa[0].lcsp2); | 337 | writel_relaxed(lli_dst->lcsp02, &lcpa[0].lcsp2); |
337 | writel(lli_dst->lcsp13, &lcpa[0].lcsp3); | 338 | writel_relaxed(lli_dst->lcsp13, &lcpa[0].lcsp3); |
338 | } | 339 | } |
339 | 340 | ||
340 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, | 341 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, |
@@ -344,10 +345,10 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla, | |||
344 | { | 345 | { |
345 | d40_log_lli_link(lli_dst, lli_src, next, flags); | 346 | d40_log_lli_link(lli_dst, lli_src, next, flags); |
346 | 347 | ||
347 | writel(lli_src->lcsp02, &lcla[0].lcsp02); | 348 | writel_relaxed(lli_src->lcsp02, &lcla[0].lcsp02); |
348 | writel(lli_src->lcsp13, &lcla[0].lcsp13); | 349 | writel_relaxed(lli_src->lcsp13, &lcla[0].lcsp13); |
349 | writel(lli_dst->lcsp02, &lcla[1].lcsp02); | 350 | writel_relaxed(lli_dst->lcsp02, &lcla[1].lcsp02); |
350 | writel(lli_dst->lcsp13, &lcla[1].lcsp13); | 351 | writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13); |
351 | } | 352 | } |
352 | 353 | ||
353 | static void d40_log_fill_lli(struct d40_log_lli *lli, | 354 | static void d40_log_fill_lli(struct d40_log_lli *lli, |
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 6d47373f3f58..fdde8ef77542 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h | |||
@@ -125,7 +125,7 @@ | |||
125 | #define D40_DREG_GCC 0x000 | 125 | #define D40_DREG_GCC 0x000 |
126 | #define D40_DREG_GCC_ENA 0x1 | 126 | #define D40_DREG_GCC_ENA 0x1 |
127 | /* This assumes that there are only 4 event groups */ | 127 | /* This assumes that there are only 4 event groups */ |
128 | #define D40_DREG_GCC_ENABLE_ALL 0xff01 | 128 | #define D40_DREG_GCC_ENABLE_ALL 0x3ff01 |
129 | #define D40_DREG_GCC_EVTGRP_POS 8 | 129 | #define D40_DREG_GCC_EVTGRP_POS 8 |
130 | #define D40_DREG_GCC_SRC 0 | 130 | #define D40_DREG_GCC_SRC 0 |
131 | #define D40_DREG_GCC_DST 1 | 131 | #define D40_DREG_GCC_DST 1 |
@@ -148,14 +148,31 @@ | |||
148 | 148 | ||
149 | #define D40_DREG_LCPA 0x020 | 149 | #define D40_DREG_LCPA 0x020 |
150 | #define D40_DREG_LCLA 0x024 | 150 | #define D40_DREG_LCLA 0x024 |
151 | |||
152 | #define D40_DREG_SSEG1 0x030 | ||
153 | #define D40_DREG_SSEG2 0x034 | ||
154 | #define D40_DREG_SSEG3 0x038 | ||
155 | #define D40_DREG_SSEG4 0x03C | ||
156 | |||
157 | #define D40_DREG_SCEG1 0x040 | ||
158 | #define D40_DREG_SCEG2 0x044 | ||
159 | #define D40_DREG_SCEG3 0x048 | ||
160 | #define D40_DREG_SCEG4 0x04C | ||
161 | |||
151 | #define D40_DREG_ACTIVE 0x050 | 162 | #define D40_DREG_ACTIVE 0x050 |
152 | #define D40_DREG_ACTIVO 0x054 | 163 | #define D40_DREG_ACTIVO 0x054 |
153 | #define D40_DREG_FSEB1 0x058 | 164 | #define D40_DREG_CIDMOD 0x058 |
154 | #define D40_DREG_FSEB2 0x05C | 165 | #define D40_DREG_TCIDV 0x05C |
155 | #define D40_DREG_PCMIS 0x060 | 166 | #define D40_DREG_PCMIS 0x060 |
156 | #define D40_DREG_PCICR 0x064 | 167 | #define D40_DREG_PCICR 0x064 |
157 | #define D40_DREG_PCTIS 0x068 | 168 | #define D40_DREG_PCTIS 0x068 |
158 | #define D40_DREG_PCEIS 0x06C | 169 | #define D40_DREG_PCEIS 0x06C |
170 | |||
171 | #define D40_DREG_SPCMIS 0x070 | ||
172 | #define D40_DREG_SPCICR 0x074 | ||
173 | #define D40_DREG_SPCTIS 0x078 | ||
174 | #define D40_DREG_SPCEIS 0x07C | ||
175 | |||
159 | #define D40_DREG_LCMIS0 0x080 | 176 | #define D40_DREG_LCMIS0 0x080 |
160 | #define D40_DREG_LCMIS1 0x084 | 177 | #define D40_DREG_LCMIS1 0x084 |
161 | #define D40_DREG_LCMIS2 0x088 | 178 | #define D40_DREG_LCMIS2 0x088 |
@@ -172,6 +189,33 @@ | |||
172 | #define D40_DREG_LCEIS1 0x0B4 | 189 | #define D40_DREG_LCEIS1 0x0B4 |
173 | #define D40_DREG_LCEIS2 0x0B8 | 190 | #define D40_DREG_LCEIS2 0x0B8 |
174 | #define D40_DREG_LCEIS3 0x0BC | 191 | #define D40_DREG_LCEIS3 0x0BC |
192 | |||
193 | #define D40_DREG_SLCMIS1 0x0C0 | ||
194 | #define D40_DREG_SLCMIS2 0x0C4 | ||
195 | #define D40_DREG_SLCMIS3 0x0C8 | ||
196 | #define D40_DREG_SLCMIS4 0x0CC | ||
197 | |||
198 | #define D40_DREG_SLCICR1 0x0D0 | ||
199 | #define D40_DREG_SLCICR2 0x0D4 | ||
200 | #define D40_DREG_SLCICR3 0x0D8 | ||
201 | #define D40_DREG_SLCICR4 0x0DC | ||
202 | |||
203 | #define D40_DREG_SLCTIS1 0x0E0 | ||
204 | #define D40_DREG_SLCTIS2 0x0E4 | ||
205 | #define D40_DREG_SLCTIS3 0x0E8 | ||
206 | #define D40_DREG_SLCTIS4 0x0EC | ||
207 | |||
208 | #define D40_DREG_SLCEIS1 0x0F0 | ||
209 | #define D40_DREG_SLCEIS2 0x0F4 | ||
210 | #define D40_DREG_SLCEIS3 0x0F8 | ||
211 | #define D40_DREG_SLCEIS4 0x0FC | ||
212 | |||
213 | #define D40_DREG_FSESS1 0x100 | ||
214 | #define D40_DREG_FSESS2 0x104 | ||
215 | |||
216 | #define D40_DREG_FSEBS1 0x108 | ||
217 | #define D40_DREG_FSEBS2 0x10C | ||
218 | |||
175 | #define D40_DREG_PSEG1 0x110 | 219 | #define D40_DREG_PSEG1 0x110 |
176 | #define D40_DREG_PSEG2 0x114 | 220 | #define D40_DREG_PSEG2 0x114 |
177 | #define D40_DREG_PSEG3 0x118 | 221 | #define D40_DREG_PSEG3 0x118 |
@@ -188,6 +232,86 @@ | |||
188 | #define D40_DREG_RCEG2 0x144 | 232 | #define D40_DREG_RCEG2 0x144 |
189 | #define D40_DREG_RCEG3 0x148 | 233 | #define D40_DREG_RCEG3 0x148 |
190 | #define D40_DREG_RCEG4 0x14C | 234 | #define D40_DREG_RCEG4 0x14C |
235 | |||
236 | #define D40_DREG_PREFOT 0x15C | ||
237 | #define D40_DREG_EXTCFG 0x160 | ||
238 | |||
239 | #define D40_DREG_CPSEG1 0x200 | ||
240 | #define D40_DREG_CPSEG2 0x204 | ||
241 | #define D40_DREG_CPSEG3 0x208 | ||
242 | #define D40_DREG_CPSEG4 0x20C | ||
243 | #define D40_DREG_CPSEG5 0x210 | ||
244 | |||
245 | #define D40_DREG_CPCEG1 0x220 | ||
246 | #define D40_DREG_CPCEG2 0x224 | ||
247 | #define D40_DREG_CPCEG3 0x228 | ||
248 | #define D40_DREG_CPCEG4 0x22C | ||
249 | #define D40_DREG_CPCEG5 0x230 | ||
250 | |||
251 | #define D40_DREG_CRSEG1 0x240 | ||
252 | #define D40_DREG_CRSEG2 0x244 | ||
253 | #define D40_DREG_CRSEG3 0x248 | ||
254 | #define D40_DREG_CRSEG4 0x24C | ||
255 | #define D40_DREG_CRSEG5 0x250 | ||
256 | |||
257 | #define D40_DREG_CRCEG1 0x260 | ||
258 | #define D40_DREG_CRCEG2 0x264 | ||
259 | #define D40_DREG_CRCEG3 0x268 | ||
260 | #define D40_DREG_CRCEG4 0x26C | ||
261 | #define D40_DREG_CRCEG5 0x270 | ||
262 | |||
263 | #define D40_DREG_CFSESS1 0x280 | ||
264 | #define D40_DREG_CFSESS2 0x284 | ||
265 | #define D40_DREG_CFSESS3 0x288 | ||
266 | |||
267 | #define D40_DREG_CFSEBS1 0x290 | ||
268 | #define D40_DREG_CFSEBS2 0x294 | ||
269 | #define D40_DREG_CFSEBS3 0x298 | ||
270 | |||
271 | #define D40_DREG_CLCMIS1 0x300 | ||
272 | #define D40_DREG_CLCMIS2 0x304 | ||
273 | #define D40_DREG_CLCMIS3 0x308 | ||
274 | #define D40_DREG_CLCMIS4 0x30C | ||
275 | #define D40_DREG_CLCMIS5 0x310 | ||
276 | |||
277 | #define D40_DREG_CLCICR1 0x320 | ||
278 | #define D40_DREG_CLCICR2 0x324 | ||
279 | #define D40_DREG_CLCICR3 0x328 | ||
280 | #define D40_DREG_CLCICR4 0x32C | ||
281 | #define D40_DREG_CLCICR5 0x330 | ||
282 | |||
283 | #define D40_DREG_CLCTIS1 0x340 | ||
284 | #define D40_DREG_CLCTIS2 0x344 | ||
285 | #define D40_DREG_CLCTIS3 0x348 | ||
286 | #define D40_DREG_CLCTIS4 0x34C | ||
287 | #define D40_DREG_CLCTIS5 0x350 | ||
288 | |||
289 | #define D40_DREG_CLCEIS1 0x360 | ||
290 | #define D40_DREG_CLCEIS2 0x364 | ||
291 | #define D40_DREG_CLCEIS3 0x368 | ||
292 | #define D40_DREG_CLCEIS4 0x36C | ||
293 | #define D40_DREG_CLCEIS5 0x370 | ||
294 | |||
295 | #define D40_DREG_CPCMIS 0x380 | ||
296 | #define D40_DREG_CPCICR 0x384 | ||
297 | #define D40_DREG_CPCTIS 0x388 | ||
298 | #define D40_DREG_CPCEIS 0x38C | ||
299 | |||
300 | #define D40_DREG_SCCIDA1 0xE80 | ||
301 | #define D40_DREG_SCCIDA2 0xE90 | ||
302 | #define D40_DREG_SCCIDA3 0xEA0 | ||
303 | #define D40_DREG_SCCIDA4 0xEB0 | ||
304 | #define D40_DREG_SCCIDA5 0xEC0 | ||
305 | |||
306 | #define D40_DREG_SCCIDB1 0xE84 | ||
307 | #define D40_DREG_SCCIDB2 0xE94 | ||
308 | #define D40_DREG_SCCIDB3 0xEA4 | ||
309 | #define D40_DREG_SCCIDB4 0xEB4 | ||
310 | #define D40_DREG_SCCIDB5 0xEC4 | ||
311 | |||
312 | #define D40_DREG_PRSCCIDA 0xF80 | ||
313 | #define D40_DREG_PRSCCIDB 0xF84 | ||
314 | |||
191 | #define D40_DREG_STFU 0xFC8 | 315 | #define D40_DREG_STFU 0xFC8 |
192 | #define D40_DREG_ICFG 0xFCC | 316 | #define D40_DREG_ICFG 0xFCC |
193 | #define D40_DREG_PERIPHID0 0xFE0 | 317 | #define D40_DREG_PERIPHID0 0xFE0 |
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index f6c018f1b453..fcee27eae1f6 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
@@ -63,6 +63,9 @@ | |||
63 | #define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2 | 63 | #define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2 |
64 | #define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC | 64 | #define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC |
65 | 65 | ||
66 | #define TEGRA_APBDMA_CHAN_CSRE 0x00C | ||
67 | #define TEGRA_APBDMA_CHAN_CSRE_PAUSE (1 << 31) | ||
68 | |||
66 | /* AHB memory address */ | 69 | /* AHB memory address */ |
67 | #define TEGRA_APBDMA_CHAN_AHBPTR 0x010 | 70 | #define TEGRA_APBDMA_CHAN_AHBPTR 0x010 |
68 | 71 | ||
@@ -113,10 +116,12 @@ struct tegra_dma; | |||
113 | * tegra_dma_chip_data Tegra chip specific DMA data | 116 | * tegra_dma_chip_data Tegra chip specific DMA data |
114 | * @nr_channels: Number of channels available in the controller. | 117 | * @nr_channels: Number of channels available in the controller. |
115 | * @max_dma_count: Maximum DMA transfer count supported by DMA controller. | 118 | * @max_dma_count: Maximum DMA transfer count supported by DMA controller. |
119 | * @support_channel_pause: Support channel-wise pause of DMA. | ||
116 | */ | 120 | */ |
117 | struct tegra_dma_chip_data { | 121 | struct tegra_dma_chip_data { |
118 | int nr_channels; | 122 | int nr_channels; |
119 | int max_dma_count; | 123 | int max_dma_count; |
124 | bool support_channel_pause; | ||
120 | }; | 125 | }; |
121 | 126 | ||
122 | /* DMA channel registers */ | 127 | /* DMA channel registers */ |
@@ -355,6 +360,32 @@ static void tegra_dma_global_resume(struct tegra_dma_channel *tdc) | |||
355 | spin_unlock(&tdma->global_lock); | 360 | spin_unlock(&tdma->global_lock); |
356 | } | 361 | } |
357 | 362 | ||
363 | static void tegra_dma_pause(struct tegra_dma_channel *tdc, | ||
364 | bool wait_for_burst_complete) | ||
365 | { | ||
366 | struct tegra_dma *tdma = tdc->tdma; | ||
367 | |||
368 | if (tdma->chip_data->support_channel_pause) { | ||
369 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, | ||
370 | TEGRA_APBDMA_CHAN_CSRE_PAUSE); | ||
371 | if (wait_for_burst_complete) | ||
372 | udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); | ||
373 | } else { | ||
374 | tegra_dma_global_pause(tdc, wait_for_burst_complete); | ||
375 | } | ||
376 | } | ||
377 | |||
378 | static void tegra_dma_resume(struct tegra_dma_channel *tdc) | ||
379 | { | ||
380 | struct tegra_dma *tdma = tdc->tdma; | ||
381 | |||
382 | if (tdma->chip_data->support_channel_pause) { | ||
383 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0); | ||
384 | } else { | ||
385 | tegra_dma_global_resume(tdc); | ||
386 | } | ||
387 | } | ||
388 | |||
358 | static void tegra_dma_stop(struct tegra_dma_channel *tdc) | 389 | static void tegra_dma_stop(struct tegra_dma_channel *tdc) |
359 | { | 390 | { |
360 | u32 csr; | 391 | u32 csr; |
@@ -410,7 +441,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, | |||
410 | * If there is already IEC status then the interrupt handler needs to | 441 | * If there is already IEC status then the interrupt handler needs to |
411 | * load new configuration. | 442 | * load new configuration. |
412 | */ | 443 | */ |
413 | tegra_dma_global_pause(tdc, false); | 444 | tegra_dma_pause(tdc, false); |
414 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | 445 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); |
415 | 446 | ||
416 | /* | 447 | /* |
@@ -420,7 +451,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, | |||
420 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | 451 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { |
421 | dev_err(tdc2dev(tdc), | 452 | dev_err(tdc2dev(tdc), |
422 | "Skipping new configuration as interrupt is pending\n"); | 453 | "Skipping new configuration as interrupt is pending\n"); |
423 | tegra_dma_global_resume(tdc); | 454 | tegra_dma_resume(tdc); |
424 | return; | 455 | return; |
425 | } | 456 | } |
426 | 457 | ||
@@ -431,7 +462,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, | |||
431 | nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB); | 462 | nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB); |
432 | nsg_req->configured = true; | 463 | nsg_req->configured = true; |
433 | 464 | ||
434 | tegra_dma_global_resume(tdc); | 465 | tegra_dma_resume(tdc); |
435 | } | 466 | } |
436 | 467 | ||
437 | static void tdc_start_head_req(struct tegra_dma_channel *tdc) | 468 | static void tdc_start_head_req(struct tegra_dma_channel *tdc) |
@@ -692,7 +723,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc) | |||
692 | goto skip_dma_stop; | 723 | goto skip_dma_stop; |
693 | 724 | ||
694 | /* Pause DMA before checking the queue status */ | 725 | /* Pause DMA before checking the queue status */ |
695 | tegra_dma_global_pause(tdc, true); | 726 | tegra_dma_pause(tdc, true); |
696 | 727 | ||
697 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | 728 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); |
698 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | 729 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { |
@@ -710,7 +741,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc) | |||
710 | sgreq->dma_desc->bytes_transferred += | 741 | sgreq->dma_desc->bytes_transferred += |
711 | get_current_xferred_count(tdc, sgreq, status); | 742 | get_current_xferred_count(tdc, sgreq, status); |
712 | } | 743 | } |
713 | tegra_dma_global_resume(tdc); | 744 | tegra_dma_resume(tdc); |
714 | 745 | ||
715 | skip_dma_stop: | 746 | skip_dma_stop: |
716 | tegra_dma_abort_all(tdc); | 747 | tegra_dma_abort_all(tdc); |
@@ -738,7 +769,6 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, | |||
738 | 769 | ||
739 | ret = dma_cookie_status(dc, cookie, txstate); | 770 | ret = dma_cookie_status(dc, cookie, txstate); |
740 | if (ret == DMA_SUCCESS) { | 771 | if (ret == DMA_SUCCESS) { |
741 | dma_set_residue(txstate, 0); | ||
742 | spin_unlock_irqrestore(&tdc->lock, flags); | 772 | spin_unlock_irqrestore(&tdc->lock, flags); |
743 | return ret; | 773 | return ret; |
744 | } | 774 | } |
@@ -1180,6 +1210,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) | |||
1180 | static const struct tegra_dma_chip_data tegra20_dma_chip_data = { | 1210 | static const struct tegra_dma_chip_data tegra20_dma_chip_data = { |
1181 | .nr_channels = 16, | 1211 | .nr_channels = 16, |
1182 | .max_dma_count = 1024UL * 64, | 1212 | .max_dma_count = 1024UL * 64, |
1213 | .support_channel_pause = false, | ||
1183 | }; | 1214 | }; |
1184 | 1215 | ||
1185 | #if defined(CONFIG_OF) | 1216 | #if defined(CONFIG_OF) |
@@ -1187,10 +1218,22 @@ static const struct tegra_dma_chip_data tegra20_dma_chip_data = { | |||
1187 | static const struct tegra_dma_chip_data tegra30_dma_chip_data = { | 1218 | static const struct tegra_dma_chip_data tegra30_dma_chip_data = { |
1188 | .nr_channels = 32, | 1219 | .nr_channels = 32, |
1189 | .max_dma_count = 1024UL * 64, | 1220 | .max_dma_count = 1024UL * 64, |
1221 | .support_channel_pause = false, | ||
1190 | }; | 1222 | }; |
1191 | 1223 | ||
1224 | /* Tegra114 specific DMA controller information */ | ||
1225 | static const struct tegra_dma_chip_data tegra114_dma_chip_data = { | ||
1226 | .nr_channels = 32, | ||
1227 | .max_dma_count = 1024UL * 64, | ||
1228 | .support_channel_pause = true, | ||
1229 | }; | ||
1230 | |||
1231 | |||
1192 | static const struct of_device_id tegra_dma_of_match[] = { | 1232 | static const struct of_device_id tegra_dma_of_match[] = { |
1193 | { | 1233 | { |
1234 | .compatible = "nvidia,tegra114-apbdma", | ||
1235 | .data = &tegra114_dma_chip_data, | ||
1236 | }, { | ||
1194 | .compatible = "nvidia,tegra30-apbdma", | 1237 | .compatible = "nvidia,tegra30-apbdma", |
1195 | .data = &tegra30_dma_chip_data, | 1238 | .data = &tegra30_dma_chip_data, |
1196 | }, { | 1239 | }, { |
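
The new tegra114 entry only takes effect once probe picks it up from the OF match table. A minimal sketch of that lookup, assuming the usual of_match_device() pattern inside this driver (the example_probe name and the dev_info message are illustrative, not from this diff):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        const struct tegra_dma_chip_data *cdata = &tegra20_dma_chip_data;
        const struct of_device_id *match;

        /* When booted from DT, pick the chip data that matches "compatible";
         * otherwise keep the Tegra20 defaults. */
        match = of_match_device(tegra_dma_of_match, &pdev->dev);
        if (match)
                cdata = match->data;

        /* support_channel_pause decides whether tegra_dma_pause() writes the
         * per-channel CSRE register or falls back to the global pause. */
        dev_info(&pdev->dev, "channel pause: %s\n",
                 cdata->support_channel_pause ? "per-channel" : "global");
        return 0;
}
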
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c index eaddfe9db149..736c7714f565 100644 --- a/drivers/misc/carma/carma-fpga-program.c +++ b/drivers/misc/carma/carma-fpga-program.c | |||
@@ -546,7 +546,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv) | |||
546 | goto out_dma_unmap; | 546 | goto out_dma_unmap; |
547 | } | 547 | } |
548 | 548 | ||
549 | dma_async_memcpy_issue_pending(chan); | 549 | dma_async_issue_pending(chan); |
550 | 550 | ||
551 | /* Set the total byte count */ | 551 | /* Set the total byte count */ |
552 | fpga_set_byte_count(priv->regs, priv->bytes); | 552 | fpga_set_byte_count(priv->regs, priv->bytes); |
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c index 8835eabb3b87..7508cafff103 100644 --- a/drivers/misc/carma/carma-fpga.c +++ b/drivers/misc/carma/carma-fpga.c | |||
@@ -631,6 +631,8 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf) | |||
631 | struct dma_async_tx_descriptor *tx; | 631 | struct dma_async_tx_descriptor *tx; |
632 | dma_cookie_t cookie; | 632 | dma_cookie_t cookie; |
633 | dma_addr_t dst, src; | 633 | dma_addr_t dst, src; |
634 | unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP | | ||
635 | DMA_COMPL_SKIP_SRC_UNMAP; | ||
634 | 636 | ||
635 | dst_sg = buf->vb.sglist; | 637 | dst_sg = buf->vb.sglist; |
636 | dst_nents = buf->vb.sglen; | 638 | dst_nents = buf->vb.sglen; |
@@ -666,7 +668,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf) | |||
666 | src = SYS_FPGA_BLOCK; | 668 | src = SYS_FPGA_BLOCK; |
667 | tx = chan->device->device_prep_dma_memcpy(chan, dst, src, | 669 | tx = chan->device->device_prep_dma_memcpy(chan, dst, src, |
668 | REG_BLOCK_SIZE, | 670 | REG_BLOCK_SIZE, |
669 | 0); | 671 | dma_flags); |
670 | if (!tx) { | 672 | if (!tx) { |
671 | dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n"); | 673 | dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n"); |
672 | return -ENOMEM; | 674 | return -ENOMEM; |
@@ -749,7 +751,7 @@ static irqreturn_t data_irq(int irq, void *dev_id) | |||
749 | submitted = true; | 751 | submitted = true; |
750 | 752 | ||
751 | /* Start the DMA Engine */ | 753 | /* Start the DMA Engine */ |
752 | dma_async_memcpy_issue_pending(priv->chan); | 754 | dma_async_issue_pending(priv->chan); |
753 | 755 | ||
754 | out: | 756 | out: |
755 | /* If no DMA was submitted, re-enable interrupts */ | 757 | /* If no DMA was submitted, re-enable interrupts */ |
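
With the dma_async_memcpy_issue_pending() alias removed, a raw memcpy submission on this API level reads roughly as below. This is a sketch only, assuming the caller already owns chan and the dst/src mappings; example_memcpy is not from this diff:

#include <linux/dmaengine.h>

static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
                          dma_addr_t src, size_t len)
{
        /* The caller keeps ownership of the mappings, so tell the engine
         * not to unmap src/dst on completion. */
        unsigned long flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_SRC_UNMAP |
                              DMA_COMPL_SKIP_DEST_UNMAP;
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
        if (!tx)
                return -ENOMEM;

        cookie = dmaengine_submit(tx);
        if (dma_submit_error(cookie))
                return -EIO;

        /* The memcpy-specific alias is gone; use the generic name. */
        dma_async_issue_pending(chan);
        return 0;
}
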
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index 09af555408b7..05ba3f0c2d19 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c | |||
@@ -573,23 +573,22 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, | |||
573 | dma_dev = chan->device; | 573 | dma_dev = chan->device; |
574 | dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); | 574 | dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); |
575 | 575 | ||
576 | flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; | ||
577 | |||
576 | if (direction == DMA_TO_DEVICE) { | 578 | if (direction == DMA_TO_DEVICE) { |
577 | dma_src = dma_addr; | 579 | dma_src = dma_addr; |
578 | dma_dst = host->data_pa; | 580 | dma_dst = host->data_pa; |
579 | flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP; | ||
580 | } else { | 581 | } else { |
581 | dma_src = host->data_pa; | 582 | dma_src = host->data_pa; |
582 | dma_dst = dma_addr; | 583 | dma_dst = dma_addr; |
583 | flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP; | ||
584 | } | 584 | } |
585 | 585 | ||
586 | tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src, | 586 | tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src, |
587 | len, flags); | 587 | len, flags); |
588 | |||
589 | if (!tx) { | 588 | if (!tx) { |
590 | dev_err(host->dev, "device_prep_dma_memcpy error\n"); | 589 | dev_err(host->dev, "device_prep_dma_memcpy error\n"); |
591 | dma_unmap_single(dma_dev->dev, dma_addr, len, direction); | 590 | ret = -EIO; |
592 | return -EIO; | 591 | goto unmap_dma; |
593 | } | 592 | } |
594 | 593 | ||
595 | tx->callback = dma_complete; | 594 | tx->callback = dma_complete; |
@@ -599,7 +598,7 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, | |||
599 | ret = dma_submit_error(cookie); | 598 | ret = dma_submit_error(cookie); |
600 | if (ret) { | 599 | if (ret) { |
601 | dev_err(host->dev, "dma_submit_error %d\n", cookie); | 600 | dev_err(host->dev, "dma_submit_error %d\n", cookie); |
602 | return ret; | 601 | goto unmap_dma; |
603 | } | 602 | } |
604 | 603 | ||
605 | dma_async_issue_pending(chan); | 604 | dma_async_issue_pending(chan); |
@@ -610,10 +609,17 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, | |||
610 | if (ret <= 0) { | 609 | if (ret <= 0) { |
611 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); | 610 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); |
612 | dev_err(host->dev, "wait_for_completion_timeout\n"); | 611 | dev_err(host->dev, "wait_for_completion_timeout\n"); |
613 | return ret ? ret : -ETIMEDOUT; | 612 | if (!ret) |
613 | ret = -ETIMEDOUT; | ||
614 | goto unmap_dma; | ||
614 | } | 615 | } |
615 | 616 | ||
616 | return 0; | 617 | ret = 0; |
618 | |||
619 | unmap_dma: | ||
620 | dma_unmap_single(dma_dev->dev, dma_addr, len, direction); | ||
621 | |||
622 | return ret; | ||
617 | } | 623 | } |
618 | 624 | ||
619 | /* | 625 | /* |
diff --git a/arch/arm/include/asm/hardware/pl080.h b/include/linux/amba/pl080.h index 4eea2107214b..3e7b62fbefbd 100644 --- a/arch/arm/include/asm/hardware/pl080.h +++ b/include/linux/amba/pl080.h | |||
@@ -1,4 +1,4 @@ | |||
1 | /* arch/arm/include/asm/hardware/pl080.h | 1 | /* include/linux/amba/pl080.h |
2 | * | 2 | * |
3 | * Copyright 2008 Openmoko, Inc. | 3 | * Copyright 2008 Openmoko, Inc. |
4 | * Copyright 2008 Simtec Electronics | 4 | * Copyright 2008 Simtec Electronics |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index d3201e438d16..f5939999cb65 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -608,7 +608,10 @@ static inline int dmaengine_device_control(struct dma_chan *chan, | |||
608 | enum dma_ctrl_cmd cmd, | 608 | enum dma_ctrl_cmd cmd, |
609 | unsigned long arg) | 609 | unsigned long arg) |
610 | { | 610 | { |
611 | return chan->device->device_control(chan, cmd, arg); | 611 | if (chan->device->device_control) |
612 | return chan->device->device_control(chan, cmd, arg); | ||
613 | |||
614 | return -ENOSYS; | ||
612 | } | 615 | } |
613 | 616 | ||
614 | static inline int dmaengine_slave_config(struct dma_chan *chan, | 617 | static inline int dmaengine_slave_config(struct dma_chan *chan, |
@@ -618,6 +621,11 @@ static inline int dmaengine_slave_config(struct dma_chan *chan, | |||
618 | (unsigned long)config); | 621 | (unsigned long)config); |
619 | } | 622 | } |
620 | 623 | ||
624 | static inline bool is_slave_direction(enum dma_transfer_direction direction) | ||
625 | { | ||
626 | return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM); | ||
627 | } | ||
628 | |||
621 | static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( | 629 | static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( |
622 | struct dma_chan *chan, dma_addr_t buf, size_t len, | 630 | struct dma_chan *chan, dma_addr_t buf, size_t len, |
623 | enum dma_transfer_direction dir, unsigned long flags) | 631 | enum dma_transfer_direction dir, unsigned long flags) |
@@ -660,6 +668,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( | |||
660 | period_len, dir, flags, NULL); | 668 | period_len, dir, flags, NULL); |
661 | } | 669 | } |
662 | 670 | ||
671 | static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( | ||
672 | struct dma_chan *chan, struct dma_interleaved_template *xt, | ||
673 | unsigned long flags) | ||
674 | { | ||
675 | return chan->device->device_prep_interleaved_dma(chan, xt, flags); | ||
676 | } | ||
677 | |||
663 | static inline int dmaengine_terminate_all(struct dma_chan *chan) | 678 | static inline int dmaengine_terminate_all(struct dma_chan *chan) |
664 | { | 679 | { |
665 | return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); | 680 | return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); |
@@ -849,20 +864,6 @@ static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) | |||
849 | return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; | 864 | return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; |
850 | } | 865 | } |
851 | 866 | ||
852 | #define first_dma_cap(mask) __first_dma_cap(&(mask)) | ||
853 | static inline int __first_dma_cap(const dma_cap_mask_t *srcp) | ||
854 | { | ||
855 | return min_t(int, DMA_TX_TYPE_END, | ||
856 | find_first_bit(srcp->bits, DMA_TX_TYPE_END)); | ||
857 | } | ||
858 | |||
859 | #define next_dma_cap(n, mask) __next_dma_cap((n), &(mask)) | ||
860 | static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp) | ||
861 | { | ||
862 | return min_t(int, DMA_TX_TYPE_END, | ||
863 | find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1)); | ||
864 | } | ||
865 | |||
866 | #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask)) | 867 | #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask)) |
867 | static inline void | 868 | static inline void |
868 | __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) | 869 | __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) |
@@ -891,9 +892,7 @@ __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) | |||
891 | } | 892 | } |
892 | 893 | ||
893 | #define for_each_dma_cap_mask(cap, mask) \ | 894 | #define for_each_dma_cap_mask(cap, mask) \ |
894 | for ((cap) = first_dma_cap(mask); \ | 895 | for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END) |
895 | (cap) < DMA_TX_TYPE_END; \ | ||
896 | (cap) = next_dma_cap((cap), (mask))) | ||
897 | 896 | ||
898 | /** | 897 | /** |
899 | * dma_async_issue_pending - flush pending transactions to HW | 898 | * dma_async_issue_pending - flush pending transactions to HW |
@@ -907,8 +906,6 @@ static inline void dma_async_issue_pending(struct dma_chan *chan) | |||
907 | chan->device->device_issue_pending(chan); | 906 | chan->device->device_issue_pending(chan); |
908 | } | 907 | } |
909 | 908 | ||
910 | #define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan) | ||
911 | |||
912 | /** | 909 | /** |
913 | * dma_async_is_tx_complete - poll for transaction completion | 910 | * dma_async_is_tx_complete - poll for transaction completion |
914 | * @chan: DMA channel | 911 | * @chan: DMA channel |
@@ -934,16 +931,13 @@ static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, | |||
934 | return status; | 931 | return status; |
935 | } | 932 | } |
936 | 933 | ||
937 | #define dma_async_memcpy_complete(chan, cookie, last, used)\ | ||
938 | dma_async_is_tx_complete(chan, cookie, last, used) | ||
939 | |||
940 | /** | 934 | /** |
941 | * dma_async_is_complete - test a cookie against chan state | 935 | * dma_async_is_complete - test a cookie against chan state |
942 | * @cookie: transaction identifier to test status of | 936 | * @cookie: transaction identifier to test status of |
943 | * @last_complete: last known completed transaction | 937 | * @last_complete: last known completed transaction |
944 | * @last_used: last cookie value handed out | 938 | * @last_used: last cookie value handed out |
945 | * | 939 | * |
946 | * dma_async_is_complete() is used in dma_async_memcpy_complete() | 940 | * dma_async_is_complete() is used in dma_async_is_tx_complete() |
947 | * the test logic is separated for lightweight testing of multiple cookies | 941 | * the test logic is separated for lightweight testing of multiple cookies |
948 | */ | 942 | */ |
949 | static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, | 943 | static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, |
@@ -974,6 +968,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); | |||
974 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); | 968 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); |
975 | void dma_issue_pending_all(void); | 969 | void dma_issue_pending_all(void); |
976 | struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); | 970 | struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); |
971 | struct dma_chan *dma_request_slave_channel(struct device *dev, char *name); | ||
977 | void dma_release_channel(struct dma_chan *chan); | 972 | void dma_release_channel(struct dma_chan *chan); |
978 | #else | 973 | #else |
979 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | 974 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) |
@@ -988,6 +983,11 @@ static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, | |||
988 | { | 983 | { |
989 | return NULL; | 984 | return NULL; |
990 | } | 985 | } |
986 | static inline struct dma_chan *dma_request_slave_channel(struct device *dev, | ||
987 | char *name) | ||
988 | { | ||
989 | return NULL; | ||
990 | } | ||
991 | static inline void dma_release_channel(struct dma_chan *chan) | 991 | static inline void dma_release_channel(struct dma_chan *chan) |
992 | { | 992 | { |
993 | } | 993 | } |
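
Taken together, the new dma_request_slave_channel() and is_slave_direction() helpers let a client request and program a channel by name. A hedged sketch, assuming "tx"/"rx" dma-names and 32-bit device accesses; none of the names or values below come from this diff:

#include <linux/dmaengine.h>

static int example_start_slave(struct device *dev, dma_addr_t dev_addr,
                               dma_addr_t buf, size_t len,
                               enum dma_transfer_direction dir)
{
        struct dma_slave_config cfg = { .direction = dir };
        struct dma_async_tx_descriptor *desc;
        struct dma_chan *chan;

        /* New helper: reject anything that is not dev<->mem up front. */
        if (!is_slave_direction(dir))
                return -EINVAL;

        if (dir == DMA_MEM_TO_DEV) {
                cfg.dst_addr = dev_addr;
                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        } else {
                cfg.src_addr = dev_addr;
                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        }

        /* Channels are looked up by the names listed in dma-names instead of
         * open-coding a filter function in every client. */
        chan = dma_request_slave_channel(dev, dir == DMA_MEM_TO_DEV ? "tx" : "rx");
        if (!chan)
                return -ENODEV;

        dmaengine_slave_config(chan, &cfg);

        desc = dmaengine_prep_slave_single(chan, buf, len, dir,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dma_release_channel(chan);
                return -ENOMEM;
        }

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}
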
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index e1c8c9e919ac..41766de66e33 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h | |||
@@ -15,14 +15,38 @@ | |||
15 | #include <linux/dmaengine.h> | 15 | #include <linux/dmaengine.h> |
16 | 16 | ||
17 | /** | 17 | /** |
18 | * struct dw_dma_slave - Controller-specific information about a slave | ||
19 | * | ||
20 | * @dma_dev: required DMA master device. Deprecated. | ||
21 | * @bus_id: name of this device channel, not just a device name since | ||
22 | * devices may have more than one channel e.g. "foo_tx" | ||
23 | * @cfg_hi: Platform-specific initializer for the CFG_HI register | ||
24 | * @cfg_lo: Platform-specific initializer for the CFG_LO register | ||
25 | * @src_master: src master for transfers on allocated channel. | ||
26 | * @dst_master: dest master for transfers on allocated channel. | ||
27 | */ | ||
28 | struct dw_dma_slave { | ||
29 | struct device *dma_dev; | ||
30 | const char *bus_id; | ||
31 | u32 cfg_hi; | ||
32 | u32 cfg_lo; | ||
33 | u8 src_master; | ||
34 | u8 dst_master; | ||
35 | }; | ||
36 | |||
37 | /** | ||
18 | * struct dw_dma_platform_data - Controller configuration parameters | 38 | * struct dw_dma_platform_data - Controller configuration parameters |
19 | * @nr_channels: Number of channels supported by hardware (max 8) | 39 | * @nr_channels: Number of channels supported by hardware (max 8) |
20 | * @is_private: The device channels should be marked as private and not for use | 40 | * @is_private: The device channels should be marked as private and not for use |
21 | * by the general purpose DMA channel allocator. | 41 | * by the general purpose DMA channel allocator. |
42 | * @chan_allocation_order: Allocate channels starting from 0 or 7 | ||
43 | * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. | ||
22 | * @block_size: Maximum block size supported by the controller | 44 | * @block_size: Maximum block size supported by the controller |
23 | * @nr_masters: Number of AHB masters supported by the controller | 45 | * @nr_masters: Number of AHB masters supported by the controller |
24 | * @data_width: Maximum data width supported by hardware per AHB master | 46 | * @data_width: Maximum data width supported by hardware per AHB master |
25 | * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) | 47 | * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) |
48 | * @sd: slave specific data. Used for configuring channels | ||
49 | * @sd_count: count of slave data structures passed. | ||
26 | */ | 50 | */ |
27 | struct dw_dma_platform_data { | 51 | struct dw_dma_platform_data { |
28 | unsigned int nr_channels; | 52 | unsigned int nr_channels; |
@@ -36,6 +60,9 @@ struct dw_dma_platform_data { | |||
36 | unsigned short block_size; | 60 | unsigned short block_size; |
37 | unsigned char nr_masters; | 61 | unsigned char nr_masters; |
38 | unsigned char data_width[4]; | 62 | unsigned char data_width[4]; |
63 | |||
64 | struct dw_dma_slave *sd; | ||
65 | unsigned int sd_count; | ||
39 | }; | 66 | }; |
40 | 67 | ||
41 | /* bursts size */ | 68 | /* bursts size */ |
@@ -50,23 +77,6 @@ enum dw_dma_msize { | |||
50 | DW_DMA_MSIZE_256, | 77 | DW_DMA_MSIZE_256, |
51 | }; | 78 | }; |
52 | 79 | ||
53 | /** | ||
54 | * struct dw_dma_slave - Controller-specific information about a slave | ||
55 | * | ||
56 | * @dma_dev: required DMA master device | ||
57 | * @cfg_hi: Platform-specific initializer for the CFG_HI register | ||
58 | * @cfg_lo: Platform-specific initializer for the CFG_LO register | ||
59 | * @src_master: src master for transfers on allocated channel. | ||
60 | * @dst_master: dest master for transfers on allocated channel. | ||
61 | */ | ||
62 | struct dw_dma_slave { | ||
63 | struct device *dma_dev; | ||
64 | u32 cfg_hi; | ||
65 | u32 cfg_lo; | ||
66 | u8 src_master; | ||
67 | u8 dst_master; | ||
68 | }; | ||
69 | |||
70 | /* Platform-configurable bits in CFG_HI */ | 80 | /* Platform-configurable bits in CFG_HI */ |
71 | #define DWC_CFGH_FCMODE (1 << 0) | 81 | #define DWC_CFGH_FCMODE (1 << 0) |
72 | #define DWC_CFGH_FIFO_MODE (1 << 1) | 82 | #define DWC_CFGH_FIFO_MODE (1 << 1) |
@@ -104,5 +114,6 @@ void dw_dma_cyclic_stop(struct dma_chan *chan); | |||
104 | dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); | 114 | dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); |
105 | 115 | ||
106 | dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); | 116 | dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); |
117 | bool dw_dma_generic_filter(struct dma_chan *chan, void *param); | ||
107 | 118 | ||
108 | #endif /* DW_DMAC_H */ | 119 | #endif /* DW_DMAC_H */ |
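
Since dw_dma_slave can now be handed to the driver through the platform data's sd/sd_count fields, a board file could wire it up roughly as follows. Every value below is made up for illustration; only the field names come from the header:

#include <linux/dw_dmac.h>

static struct dw_dma_slave example_dw_slaves[] = {
        {
                .bus_id     = "uart0_tx",       /* channel name, not just a device name */
                .cfg_hi     = DWC_CFGH_FIFO_MODE,
                .cfg_lo     = 0,
                .src_master = 0,
                .dst_master = 1,
        },
};

static struct dw_dma_platform_data example_dw_pdata = {
        .nr_channels = 8,
        .block_size  = 4095,
        .nr_masters  = 2,
        .data_width  = { 3, 3 },        /* two 64-bit AHB masters */
        /* New: per-slave channel configuration handed to the driver. */
        .sd          = example_dw_slaves,
        .sd_count    = ARRAY_SIZE(example_dw_slaves),
};
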
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h new file mode 100644 index 000000000000..d15073e080dd --- /dev/null +++ b/include/linux/of_dma.h | |||
@@ -0,0 +1,74 @@ | |||
1 | /* | ||
2 | * OF helpers for DMA request / controller | ||
3 | * | ||
4 | * Based on of_gpio.h | ||
5 | * | ||
6 | * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #ifndef __LINUX_OF_DMA_H | ||
14 | #define __LINUX_OF_DMA_H | ||
15 | |||
16 | #include <linux/of.h> | ||
17 | #include <linux/dmaengine.h> | ||
18 | |||
19 | struct device_node; | ||
20 | |||
21 | struct of_dma { | ||
22 | struct list_head of_dma_controllers; | ||
23 | struct device_node *of_node; | ||
24 | int of_dma_nbcells; | ||
25 | struct dma_chan *(*of_dma_xlate) | ||
26 | (struct of_phandle_args *, struct of_dma *); | ||
27 | void *of_dma_data; | ||
28 | int use_count; | ||
29 | }; | ||
30 | |||
31 | struct of_dma_filter_info { | ||
32 | dma_cap_mask_t dma_cap; | ||
33 | dma_filter_fn filter_fn; | ||
34 | }; | ||
35 | |||
36 | #ifdef CONFIG_OF | ||
37 | extern int of_dma_controller_register(struct device_node *np, | ||
38 | struct dma_chan *(*of_dma_xlate) | ||
39 | (struct of_phandle_args *, struct of_dma *), | ||
40 | void *data); | ||
41 | extern int of_dma_controller_free(struct device_node *np); | ||
42 | extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | ||
43 | char *name); | ||
44 | extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, | ||
45 | struct of_dma *ofdma); | ||
46 | #else | ||
47 | static inline int of_dma_controller_register(struct device_node *np, | ||
48 | struct dma_chan *(*of_dma_xlate) | ||
49 | (struct of_phandle_args *, struct of_dma *), | ||
50 | void *data) | ||
51 | { | ||
52 | return -ENODEV; | ||
53 | } | ||
54 | |||
55 | static inline int of_dma_controller_free(struct device_node *np) | ||
56 | { | ||
57 | return -ENODEV; | ||
58 | } | ||
59 | |||
60 | static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | ||
61 | char *name) | ||
62 | { | ||
63 | return NULL; | ||
64 | } | ||
65 | |||
66 | static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, | ||
67 | struct of_dma *ofdma) | ||
68 | { | ||
69 | return NULL; | ||
70 | } | ||
71 | |||
72 | #endif | ||
73 | |||
74 | #endif /* __LINUX_OF_DMA_H */ | ||
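
A DMA controller driver with no custom translation is expected to pair of_dma_filter_info with of_dma_simple_xlate(). A minimal sketch, assuming the driver passes in its existing filter function (the example names are not from this diff):

#include <linux/of_dma.h>
#include <linux/platform_device.h>

static struct of_dma_filter_info example_filter_info;

static int example_register_of_dma(struct platform_device *pdev,
                                   dma_filter_fn fn)
{
        /* Advertise slave capability and reuse the driver's existing filter
         * function through the generic of_dma_simple_xlate() translator. */
        dma_cap_zero(example_filter_info.dma_cap);
        dma_cap_set(DMA_SLAVE, example_filter_info.dma_cap);
        example_filter_info.filter_fn = fn;

        return of_dma_controller_register(pdev->dev.of_node,
                                          of_dma_simple_xlate,
                                          &example_filter_info);
}
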
diff --git a/include/linux/platform_data/dma-ste-dma40.h b/include/linux/platform_data/dma-ste-dma40.h index 9ff93b065686..4b781014b0a0 100644 --- a/include/linux/platform_data/dma-ste-dma40.h +++ b/include/linux/platform_data/dma-ste-dma40.h | |||
@@ -147,6 +147,16 @@ struct stedma40_chan_cfg { | |||
147 | * @memcpy_conf_log: default configuration of logical channel memcpy | 147 | * @memcpy_conf_log: default configuration of logical channel memcpy |
148 | * @disabled_channels: A vector, ending with -1, that marks physical channels | 148 | * @disabled_channels: A vector, ending with -1, that marks physical channels |
149 | * that are for different reasons not available for the driver. | 149 | * that are for different reasons not available for the driver. |
150 | * @soft_lli_chans: A vector that marks the physical channels that will use | ||
151 | * LLI by SW, which avoids a HW bug present in some versions of the controller. | ||
152 | * SoftLLI introduces relink overhead that could impact performance for | ||
153 | * certain use cases. | ||
154 | * @num_of_soft_lli_chans: The number of channels that need to be configured | ||
155 | * to use SoftLLI. | ||
156 | * @use_esram_lcla: flag for mapping the lcla into esram region | ||
157 | * @num_of_phy_chans: The number of physical channels implemented in HW. | ||
158 | * 0 means the number of channels is read from the DMA HW, but this is only | ||
159 | * valid for 'multiple of 4' channel counts, like 8. | ||
150 | */ | 160 | */ |
151 | struct stedma40_platform_data { | 161 | struct stedma40_platform_data { |
152 | u32 dev_len; | 162 | u32 dev_len; |
@@ -157,7 +167,10 @@ struct stedma40_platform_data { | |||
157 | struct stedma40_chan_cfg *memcpy_conf_phy; | 167 | struct stedma40_chan_cfg *memcpy_conf_phy; |
158 | struct stedma40_chan_cfg *memcpy_conf_log; | 168 | struct stedma40_chan_cfg *memcpy_conf_log; |
159 | int disabled_channels[STEDMA40_MAX_PHYS]; | 169 | int disabled_channels[STEDMA40_MAX_PHYS]; |
170 | int *soft_lli_chans; | ||
171 | int num_of_soft_lli_chans; | ||
160 | bool use_esram_lcla; | 172 | bool use_esram_lcla; |
173 | int num_of_phy_chans; | ||
161 | }; | 174 | }; |
162 | 175 | ||
163 | #ifdef CONFIG_STE_DMA40 | 176 | #ifdef CONFIG_STE_DMA40 |
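
A platform could opt channels into SoftLLI through the new fields roughly as shown below; the channel numbers are invented for illustration:

#include <linux/platform_data/dma-ste-dma40.h>

static int example_soft_lli_chans[] = { 4, 5 };

static struct stedma40_platform_data example_dma40_pdata = {
        /* Channels 4 and 5 relink their LLIs in software to avoid the HW bug;
         * all other channels keep using HW LLIs. */
        .soft_lli_chans        = example_soft_lli_chans,
        .num_of_soft_lli_chans = ARRAY_SIZE(example_soft_lli_chans),
        /* 0 = read the channel count from the HW registers; only valid when
         * the count is a multiple of 4, otherwise state it explicitly. */
        .num_of_phy_chans      = 0,
        .use_esram_lcla        = false,
};
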
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 7a5ba48c2cc9..47e854fcae24 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1409,10 +1409,10 @@ static void tcp_service_net_dma(struct sock *sk, bool wait) | |||
1409 | return; | 1409 | return; |
1410 | 1410 | ||
1411 | last_issued = tp->ucopy.dma_cookie; | 1411 | last_issued = tp->ucopy.dma_cookie; |
1412 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | 1412 | dma_async_issue_pending(tp->ucopy.dma_chan); |
1413 | 1413 | ||
1414 | do { | 1414 | do { |
1415 | if (dma_async_memcpy_complete(tp->ucopy.dma_chan, | 1415 | if (dma_async_is_tx_complete(tp->ucopy.dma_chan, |
1416 | last_issued, &done, | 1416 | last_issued, &done, |
1417 | &used) == DMA_SUCCESS) { | 1417 | &used) == DMA_SUCCESS) { |
1418 | /* Safe to free early-copied skbs now */ | 1418 | /* Safe to free early-copied skbs now */ |
@@ -1754,7 +1754,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1754 | tcp_service_net_dma(sk, true); | 1754 | tcp_service_net_dma(sk, true); |
1755 | tcp_cleanup_rbuf(sk, copied); | 1755 | tcp_cleanup_rbuf(sk, copied); |
1756 | } else | 1756 | } else |
1757 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | 1757 | dma_async_issue_pending(tp->ucopy.dma_chan); |
1758 | } | 1758 | } |
1759 | #endif | 1759 | #endif |
1760 | if (copied >= target) { | 1760 | if (copied >= target) { |
@@ -1847,7 +1847,7 @@ do_prequeue: | |||
1847 | break; | 1847 | break; |
1848 | } | 1848 | } |
1849 | 1849 | ||
1850 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | 1850 | dma_async_issue_pending(tp->ucopy.dma_chan); |
1851 | 1851 | ||
1852 | if ((offset + used) == skb->len) | 1852 | if ((offset + used) == skb->len) |
1853 | copied_early = true; | 1853 | copied_early = true; |