author		Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 23:42:45 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 23:42:45 -0500
commit		e3842cbfe0976b014288147b130551d8bf52b96c
tree		686501f0eb80076240c5f38b34d1acbb105a190b
parent		4d98ead183a2be77bfea425d5243e32629eaaeb1
parent		4625d2a513d60ca9c3e8cae42c8f3d9efc1b4211
Merge tag 'dmaengine-4.10-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
"Fairly routine update this time around with all changes specific to
drivers:
- New driver for STMicroelectronics FDMA
- Memory-to-memory transfers on dw dmac
- Support for slave maps on pl08x devices
- Bunch of driver fixes to use dma_pool_zalloc
- Bunch of compile and warning fixes spread across drivers"
[ The ST FDMA driver already came in earlier through the remoteproc tree ]
* tag 'dmaengine-4.10-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (68 commits)
dmaengine: sirf-dma: remove unused 'sdesc'
dmaengine: pl330: remove unused 'regs'
dmaengine: s3c24xx: remove unused 'cdata'
dmaengine: stm32-dma: remove unused 'src_addr'
dmaengine: stm32-dma: remove unused 'dst_addr'
dmaengine: stm32-dma: remove unused 'sfcr'
dmaengine: pch_dma: remove unused 'cookie'
dmaengine: mic_x100_dma: remove unused 'data'
dmaengine: img-mdc: remove unused 'prev_phys'
dmaengine: usb-dmac: remove unused 'uchan'
dmaengine: ioat: remove unused 'res'
dmaengine: ioat: remove unused 'ioat_dma'
dmaengine: ioat: remove unused 'is_raid_device'
dmaengine: pl330: do not generate unaligned access
dmaengine: k3dma: move to dma_pool_zalloc
dmaengine: at_hdmac: move to dma_pool_zalloc
dmaengine: at_xdmac: don't restore unsaved status
dmaengine: ioat: set error code on failures
dmaengine: ioat: set error code on failures
dmaengine: DW DMAC: add multi-block property to device tree
...
53 files changed, 868 insertions, 349 deletions
diff --git a/Documentation/devicetree/bindings/dma/nbpfaxi.txt b/Documentation/devicetree/bindings/dma/nbpfaxi.txt
index d5e2522b9ec1..d2e1e62e346a 100644
--- a/Documentation/devicetree/bindings/dma/nbpfaxi.txt
+++ b/Documentation/devicetree/bindings/dma/nbpfaxi.txt
@@ -23,6 +23,14 @@ Required properties | |||
23 | #define NBPF_SLAVE_RQ_LEVEL 4 | 23 | #define NBPF_SLAVE_RQ_LEVEL 4 |
24 | 24 | ||
25 | Optional properties: | 25 | Optional properties: |
26 | - max-burst-mem-read: limit burst size for memory reads | ||
27 | (DMA_MEM_TO_MEM/DMA_MEM_TO_DEV) to this value, specified in bytes, rather | ||
28 | than using the maximum burst size allowed by the hardware's buffer size. | ||
29 | - max-burst-mem-write: limit burst size for memory writes | ||
30 | (DMA_DEV_TO_MEM/DMA_MEM_TO_MEM) to this value, specified in bytes, rather | ||
31 | than using the maximum burst size allowed by the hardware's buffer size. | ||
32 | If both max-burst-mem-read and max-burst-mem-write are set, DMA_MEM_TO_MEM | ||
33 | will use the lower value. | ||
26 | 34 | ||
27 | You can use dma-channels and dma-requests as described in dma.txt, although they | 35 | You can use dma-channels and dma-requests as described in dma.txt, although they |
28 | won't be used, this information is derived from the compatibility string. | 36 | won't be used, this information is derived from the compatibility string. |
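For context, these burst-limit properties are meant to be picked up by the DMA controller driver at probe time. A minimal sketch of such parsing; the helper name and surrounding structure are purely illustrative, only the property names come from the binding text above:

```c
#include <linux/of.h>

/* Hypothetical probe-time helper: pick up the optional burst limits. */
static void example_read_burst_limits(struct device_node *np,
				      u32 *max_read, u32 *max_write)
{
	/* Both properties are optional; 0 means "use the hardware default". */
	*max_read = 0;
	*max_write = 0;
	of_property_read_u32(np, "max-burst-mem-read", max_read);
	of_property_read_u32(np, "max-burst-mem-write", max_write);
}
```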
diff --git a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
index fd5618bd8fbc..55492c264d17 100644
--- a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
@@ -5,13 +5,13 @@ memcpy and memset capabilities. It has been designed for virtualized | |||
5 | environments. | 5 | environments. |
6 | 6 | ||
7 | Each HIDMA HW instance consists of multiple DMA channels. These channels | 7 | Each HIDMA HW instance consists of multiple DMA channels. These channels |
8 | share the same bandwidth. The bandwidth utilization can be parititioned | 8 | share the same bandwidth. The bandwidth utilization can be partitioned |
9 | among channels based on the priority and weight assignments. | 9 | among channels based on the priority and weight assignments. |
10 | 10 | ||
11 | There are only two priority levels and 15 weigh assignments possible. | 11 | There are only two priority levels and 15 weigh assignments possible. |
12 | 12 | ||
13 | Other parameters here determine how much of the system bus this HIDMA | 13 | Other parameters here determine how much of the system bus this HIDMA |
14 | instance can use like maximum read/write request and and number of bytes to | 14 | instance can use like maximum read/write request and number of bytes to |
15 | read/write in a single burst. | 15 | read/write in a single burst. |
16 | 16 | ||
17 | Main node required properties: | 17 | Main node required properties: |
@@ -47,12 +47,18 @@ When the OS is not in control of the management interface (i.e. it's a guest), | |||
47 | the channel nodes appear on their own, not under a management node. | 47 | the channel nodes appear on their own, not under a management node. |
48 | 48 | ||
49 | Required properties: | 49 | Required properties: |
50 | - compatible: must contain "qcom,hidma-1.0" | 50 | - compatible: must contain "qcom,hidma-1.0" for initial HW or "qcom,hidma-1.1" |
51 | for MSI capable HW. | ||
51 | - reg: Addresses for the transfer and event channel | 52 | - reg: Addresses for the transfer and event channel |
52 | - interrupts: Should contain the event interrupt | 53 | - interrupts: Should contain the event interrupt |
53 | - desc-count: Number of asynchronous requests this channel can handle | 54 | - desc-count: Number of asynchronous requests this channel can handle |
54 | - iommus: required a iommu node | 55 | - iommus: required a iommu node |
55 | 56 | ||
57 | Optional properties for MSI: | ||
58 | - msi-parent : See the generic MSI binding described in | ||
59 | devicetree/bindings/interrupt-controller/msi.txt for a description of the | ||
60 | msi-parent property. | ||
61 | |||
56 | Example: | 62 | Example: |
57 | 63 | ||
58 | Hypervisor OS configuration: | 64 | Hypervisor OS configuration: |
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index 5f2ce669789a..3316a9c2e638 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -24,6 +24,7 @@ Required Properties: | |||
24 | - "renesas,dmac-r8a7793" (R-Car M2-N) | 24 | - "renesas,dmac-r8a7793" (R-Car M2-N) |
25 | - "renesas,dmac-r8a7794" (R-Car E2) | 25 | - "renesas,dmac-r8a7794" (R-Car E2) |
26 | - "renesas,dmac-r8a7795" (R-Car H3) | 26 | - "renesas,dmac-r8a7795" (R-Car H3) |
27 | - "renesas,dmac-r8a7796" (R-Car M3-W) | ||
27 | 28 | ||
28 | - reg: base address and length of the registers block for the DMAC | 29 | - reg: base address and length of the registers block for the DMAC |
29 | 30 | ||
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index 0f5583293c9c..4775c66f4508 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -27,6 +27,8 @@ Optional properties: | |||
27 | that services interrupts for this device | 27 | that services interrupts for this device |
28 | - is_private: The device channels should be marked as private and not for by the | 28 | - is_private: The device channels should be marked as private and not for by the |
29 | general purpose DMA channel allocator. False if not passed. | 29 | general purpose DMA channel allocator. False if not passed. |
30 | - multi-block: Multi block transfers supported by hardware. Array property with | ||
31 | one cell per channel. 0: not supported, 1 (default): supported. | ||
30 | 32 | ||
31 | Example: | 33 | Example: |
32 | 34 | ||
diff --git a/Documentation/dmaengine/client.txt b/Documentation/dmaengine/client.txt
index 9e33189745f0..c72b4563de10 100644
--- a/Documentation/dmaengine/client.txt
+++ b/Documentation/dmaengine/client.txt
@@ -37,8 +37,8 @@ The slave DMA usage consists of following steps: | |||
37 | 2. Set slave and controller specific parameters | 37 | 2. Set slave and controller specific parameters |
38 | 38 | ||
39 | Next step is always to pass some specific information to the DMA | 39 | Next step is always to pass some specific information to the DMA |
40 | driver. Most of the generic information which a slave DMA can use | 40 | driver. Most of the generic information which a slave DMA can use |
41 | is in struct dma_slave_config. This allows the clients to specify | 41 | is in struct dma_slave_config. This allows the clients to specify |
42 | DMA direction, DMA addresses, bus widths, DMA burst lengths etc | 42 | DMA direction, DMA addresses, bus widths, DMA burst lengths etc |
43 | for the peripheral. | 43 | for the peripheral. |
44 | 44 | ||
@@ -52,7 +52,7 @@ The slave DMA usage consists of following steps: | |||
52 | struct dma_slave_config *config) | 52 | struct dma_slave_config *config) |
53 | 53 | ||
54 | Please see the dma_slave_config structure definition in dmaengine.h | 54 | Please see the dma_slave_config structure definition in dmaengine.h |
55 | for a detailed explanation of the struct members. Please note | 55 | for a detailed explanation of the struct members. Please note |
56 | that the 'direction' member will be going away as it duplicates the | 56 | that the 'direction' member will be going away as it duplicates the |
57 | direction given in the prepare call. | 57 | direction given in the prepare call. |
58 | 58 | ||
@@ -101,7 +101,7 @@ The slave DMA usage consists of following steps: | |||
101 | desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, direction, flags); | 101 | desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, direction, flags); |
102 | 102 | ||
103 | Once a descriptor has been obtained, the callback information can be | 103 | Once a descriptor has been obtained, the callback information can be |
104 | added and the descriptor must then be submitted. Some DMA engine | 104 | added and the descriptor must then be submitted. Some DMA engine |
105 | drivers may hold a spinlock between a successful preparation and | 105 | drivers may hold a spinlock between a successful preparation and |
106 | submission so it is important that these two operations are closely | 106 | submission so it is important that these two operations are closely |
107 | paired. | 107 | paired. |
@@ -138,7 +138,7 @@ The slave DMA usage consists of following steps: | |||
138 | activity via other DMA engine calls not covered in this document. | 138 | activity via other DMA engine calls not covered in this document. |
139 | 139 | ||
140 | dmaengine_submit() will not start the DMA operation, it merely adds | 140 | dmaengine_submit() will not start the DMA operation, it merely adds |
141 | it to the pending queue. For this, see step 5, dma_async_issue_pending. | 141 | it to the pending queue. For this, see step 5, dma_async_issue_pending. |
142 | 142 | ||
143 | 5. Issue pending DMA requests and wait for callback notification | 143 | 5. Issue pending DMA requests and wait for callback notification |
144 | 144 | ||
@@ -184,13 +184,13 @@ Further APIs: | |||
184 | 184 | ||
185 | 3. int dmaengine_resume(struct dma_chan *chan) | 185 | 3. int dmaengine_resume(struct dma_chan *chan) |
186 | 186 | ||
187 | Resume a previously paused DMA channel. It is invalid to resume a | 187 | Resume a previously paused DMA channel. It is invalid to resume a |
188 | channel which is not currently paused. | 188 | channel which is not currently paused. |
189 | 189 | ||
190 | 4. enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, | 190 | 4. enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, |
191 | dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) | 191 | dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) |
192 | 192 | ||
193 | This can be used to check the status of the channel. Please see | 193 | This can be used to check the status of the channel. Please see |
194 | the documentation in include/linux/dmaengine.h for a more complete | 194 | the documentation in include/linux/dmaengine.h for a more complete |
195 | description of this API. | 195 | description of this API. |
196 | 196 | ||
@@ -200,7 +200,7 @@ Further APIs: | |||
200 | 200 | ||
201 | Note: | 201 | Note: |
202 | Not all DMA engine drivers can return reliable information for | 202 | Not all DMA engine drivers can return reliable information for |
203 | a running DMA channel. It is recommended that DMA engine users | 203 | a running DMA channel. It is recommended that DMA engine users |
204 | pause or stop (via dmaengine_terminate_all()) the channel before | 204 | pause or stop (via dmaengine_terminate_all()) the channel before |
205 | using this API. | 205 | using this API. |
206 | 206 | ||
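Taken together, the steps excerpted above correspond to a short client-side sequence. A minimal sketch, assuming the channel, scatterlist and completion callback are obtained elsewhere; the function and variable names here are illustrative, while the calls (dmaengine_slave_config, dmaengine_prep_slave_sg, dmaengine_submit, dma_async_issue_pending) are the documented dmaengine client API:

```c
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Illustrative client flow: configure, prepare, submit, issue. */
static int example_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int nr_sg, dma_addr_t dev_addr,
			    dma_async_tx_callback done, void *done_arg)
{
	struct dma_slave_config cfg = {
		.src_addr = dev_addr,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 8,
	};
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	/* Step 2: pass slave specific parameters to the DMA driver. */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* Step 3: get a descriptor for the transaction. */
	desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = done;
	desc->callback_param = done_arg;

	/* Step 4: submit; step 5: actually kick off the pending work. */
	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		return ret;

	dma_async_issue_pending(chan);
	return 0;
}
```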
diff --git a/Documentation/dmaengine/dmatest.txt b/Documentation/dmaengine/dmatest.txt
index dd77a81bdb80..fb683c72dea8 100644
--- a/Documentation/dmaengine/dmatest.txt
+++ b/Documentation/dmaengine/dmatest.txt
@@ -34,7 +34,7 @@ command: | |||
34 | % ls -1 /sys/class/dma/ | 34 | % ls -1 /sys/class/dma/ |
35 | 35 | ||
36 | Once started a message like "dmatest: Started 1 threads using dma0chan0" is | 36 | Once started a message like "dmatest: Started 1 threads using dma0chan0" is |
37 | emitted. After that only test failure messages are reported until the test | 37 | emitted. After that only test failure messages are reported until the test |
38 | stops. | 38 | stops. |
39 | 39 | ||
40 | Note that running a new test will not stop any in progress test. | 40 | Note that running a new test will not stop any in progress test. |
@@ -43,11 +43,11 @@ The following command returns the state of the test. | |||
43 | % cat /sys/module/dmatest/parameters/run | 43 | % cat /sys/module/dmatest/parameters/run |
44 | 44 | ||
45 | To wait for test completion userpace can poll 'run' until it is false, or use | 45 | To wait for test completion userpace can poll 'run' until it is false, or use |
46 | the wait parameter. Specifying 'wait=1' when loading the module causes module | 46 | the wait parameter. Specifying 'wait=1' when loading the module causes module |
47 | initialization to pause until a test run has completed, while reading | 47 | initialization to pause until a test run has completed, while reading |
48 | /sys/module/dmatest/parameters/wait waits for any running test to complete | 48 | /sys/module/dmatest/parameters/wait waits for any running test to complete |
49 | before returning. For example, the following scripts wait for 42 tests | 49 | before returning. For example, the following scripts wait for 42 tests |
50 | to complete before exiting. Note that if 'iterations' is set to 'infinite' then | 50 | to complete before exiting. Note that if 'iterations' is set to 'infinite' then |
51 | waiting is disabled. | 51 | waiting is disabled. |
52 | 52 | ||
53 | Example: | 53 | Example: |
@@ -81,7 +81,7 @@ Example of output: | |||
81 | 81 | ||
82 | The message format is unified across the different types of errors. A number in | 82 | The message format is unified across the different types of errors. A number in |
83 | the parens represents additional information, e.g. error code, error counter, | 83 | the parens represents additional information, e.g. error code, error counter, |
84 | or status. A test thread also emits a summary line at completion listing the | 84 | or status. A test thread also emits a summary line at completion listing the |
85 | number of tests executed, number that failed, and a result code. | 85 | number of tests executed, number that failed, and a result code. |
86 | 86 | ||
87 | Example: | 87 | Example: |
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index c4fd47540b31..e33bc1c8ed2c 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -384,7 +384,7 @@ where to put them) | |||
384 | - The descriptor should be prepared for reuse by invoking | 384 | - The descriptor should be prepared for reuse by invoking |
385 | dmaengine_desc_set_reuse() which will set DMA_CTRL_REUSE. | 385 | dmaengine_desc_set_reuse() which will set DMA_CTRL_REUSE. |
386 | - dmaengine_desc_set_reuse() will succeed only when channel support | 386 | - dmaengine_desc_set_reuse() will succeed only when channel support |
387 | reusable descriptor as exhibited by capablities | 387 | reusable descriptor as exhibited by capabilities |
388 | - As a consequence, if a device driver wants to skip the dma_map_sg() and | 388 | - As a consequence, if a device driver wants to skip the dma_map_sg() and |
389 | dma_unmap_sg() in between 2 transfers, because the DMA'd data wasn't used, | 389 | dma_unmap_sg() in between 2 transfers, because the DMA'd data wasn't used, |
390 | it can resubmit the transfer right after its completion. | 390 | it can resubmit the transfer right after its completion. |
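The reuse flow mentioned above boils down to two client-side calls; a minimal, illustrative sketch (the wrapper names are hypothetical, the dmaengine helpers are real):

```c
#include <linux/dmaengine.h>

/* Mark a prepared descriptor as reusable; fails when the channel does not
 * advertise reusable descriptors in its capabilities. */
static int example_make_reusable(struct dma_async_tx_descriptor *desc)
{
	return dmaengine_desc_set_reuse(desc);
}

/* A reusable descriptor is not freed on completion; the client must release
 * it explicitly once it is done with it. */
static int example_release(struct dma_async_tx_descriptor *desc)
{
	return dmaengine_desc_free(desc);
}
```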
diff --git a/Documentation/dmaengine/pxa_dma.txt b/Documentation/dmaengine/pxa_dma.txt
index 413ef9cfaa4d..0736d44b5438 100644
--- a/Documentation/dmaengine/pxa_dma.txt
+++ b/Documentation/dmaengine/pxa_dma.txt
@@ -29,7 +29,7 @@ Constraints | |||
29 | 29 | ||
30 | d) Bandwidth guarantee | 30 | d) Bandwidth guarantee |
31 | The PXA architecture has 4 levels of DMAs priorities : high, normal, low. | 31 | The PXA architecture has 4 levels of DMAs priorities : high, normal, low. |
32 | The high prorities get twice as much bandwidth as the normal, which get twice | 32 | The high priorities get twice as much bandwidth as the normal, which get twice |
33 | as much as the low priorities. | 33 | as much as the low priorities. |
34 | A driver should be able to request a priority, especially the real-time | 34 | A driver should be able to request a priority, especially the real-time |
35 | ones such as pxa_camera with (big) throughputs. | 35 | ones such as pxa_camera with (big) throughputs. |
diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi
index de53f5c3251c..3121536b25a3 100644
--- a/arch/arc/boot/dts/abilis_tb10x.dtsi
+++ b/arch/arc/boot/dts/abilis_tb10x.dtsi
@@ -129,6 +129,7 @@ | |||
129 | data-width = <4>; | 129 | data-width = <4>; |
130 | clocks = <&ahb_clk>; | 130 | clocks = <&ahb_clk>; |
131 | clock-names = "hclk"; | 131 | clock-names = "hclk"; |
132 | multi-block = <1 1 1 1 1 1>; | ||
132 | }; | 133 | }; |
133 | 134 | ||
134 | i2c0: i2c@FF120000 { | 135 | i2c0: i2c@FF120000 { |
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 449acf0d8272..17ea0abcdbd7 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -118,6 +118,7 @@ | |||
118 | block_size = <0xfff>; | 118 | block_size = <0xfff>; |
119 | dma-masters = <2>; | 119 | dma-masters = <2>; |
120 | data-width = <8 8>; | 120 | data-width = <8 8>; |
121 | multi-block = <1 1 1 1 1 1 1 1>; | ||
121 | }; | 122 | }; |
122 | 123 | ||
123 | dma@eb000000 { | 124 | dma@eb000000 { |
@@ -134,6 +135,7 @@ | |||
134 | chan_priority = <1>; | 135 | chan_priority = <1>; |
135 | block_size = <0xfff>; | 136 | block_size = <0xfff>; |
136 | data-width = <8 8>; | 137 | data-width = <8 8>; |
138 | multi-block = <1 1 1 1 1 1 1 1>; | ||
137 | }; | 139 | }; |
138 | 140 | ||
139 | fsmc: flash@b0000000 { | 141 | fsmc: flash@b0000000 { |
diff --git a/arch/arm/mach-s3c64xx/pl080.c b/arch/arm/mach-s3c64xx/pl080.c
index 89c5a62830a7..261820a855ec 100644
--- a/arch/arm/mach-s3c64xx/pl080.c
+++ b/arch/arm/mach-s3c64xx/pl080.c
@@ -117,6 +117,25 @@ static struct pl08x_channel_data s3c64xx_dma0_info[] = { | |||
117 | } | 117 | } |
118 | }; | 118 | }; |
119 | 119 | ||
120 | static const struct dma_slave_map s3c64xx_dma0_slave_map[] = { | ||
121 | { "s3c6400-uart.0", "tx", &s3c64xx_dma0_info[0] }, | ||
122 | { "s3c6400-uart.0", "rx", &s3c64xx_dma0_info[1] }, | ||
123 | { "s3c6400-uart.1", "tx", &s3c64xx_dma0_info[2] }, | ||
124 | { "s3c6400-uart.1", "rx", &s3c64xx_dma0_info[3] }, | ||
125 | { "s3c6400-uart.2", "tx", &s3c64xx_dma0_info[4] }, | ||
126 | { "s3c6400-uart.2", "rx", &s3c64xx_dma0_info[5] }, | ||
127 | { "s3c6400-uart.3", "tx", &s3c64xx_dma0_info[6] }, | ||
128 | { "s3c6400-uart.3", "rx", &s3c64xx_dma0_info[7] }, | ||
129 | { "samsung-pcm.0", "tx", &s3c64xx_dma0_info[8] }, | ||
130 | { "samsung-pcm.0", "rx", &s3c64xx_dma0_info[9] }, | ||
131 | { "samsung-i2s.0", "tx", &s3c64xx_dma0_info[10] }, | ||
132 | { "samsung-i2s.0", "rx", &s3c64xx_dma0_info[11] }, | ||
133 | { "s3c6410-spi.0", "tx", &s3c64xx_dma0_info[12] }, | ||
134 | { "s3c6410-spi.0", "rx", &s3c64xx_dma0_info[13] }, | ||
135 | { "samsung-i2s.2", "tx", &s3c64xx_dma0_info[14] }, | ||
136 | { "samsung-i2s.2", "rx", &s3c64xx_dma0_info[15] }, | ||
137 | }; | ||
138 | |||
120 | struct pl08x_platform_data s3c64xx_dma0_plat_data = { | 139 | struct pl08x_platform_data s3c64xx_dma0_plat_data = { |
121 | .memcpy_channel = { | 140 | .memcpy_channel = { |
122 | .bus_id = "memcpy", | 141 | .bus_id = "memcpy", |
@@ -134,6 +153,8 @@ struct pl08x_platform_data s3c64xx_dma0_plat_data = { | |||
134 | .put_xfer_signal = pl08x_put_xfer_signal, | 153 | .put_xfer_signal = pl08x_put_xfer_signal, |
135 | .slave_channels = s3c64xx_dma0_info, | 154 | .slave_channels = s3c64xx_dma0_info, |
136 | .num_slave_channels = ARRAY_SIZE(s3c64xx_dma0_info), | 155 | .num_slave_channels = ARRAY_SIZE(s3c64xx_dma0_info), |
156 | .slave_map = s3c64xx_dma0_slave_map, | ||
157 | .slave_map_len = ARRAY_SIZE(s3c64xx_dma0_slave_map), | ||
137 | }; | 158 | }; |
138 | 159 | ||
139 | static AMBA_AHB_DEVICE(s3c64xx_dma0, "dma-pl080s.0", 0, | 160 | static AMBA_AHB_DEVICE(s3c64xx_dma0, "dma-pl080s.0", 0, |
@@ -207,6 +228,15 @@ static struct pl08x_channel_data s3c64xx_dma1_info[] = { | |||
207 | }, | 228 | }, |
208 | }; | 229 | }; |
209 | 230 | ||
231 | static const struct dma_slave_map s3c64xx_dma1_slave_map[] = { | ||
232 | { "samsung-pcm.1", "tx", &s3c64xx_dma1_info[0] }, | ||
233 | { "samsung-pcm.1", "rx", &s3c64xx_dma1_info[1] }, | ||
234 | { "samsung-i2s.1", "tx", &s3c64xx_dma1_info[2] }, | ||
235 | { "samsung-i2s.1", "rx", &s3c64xx_dma1_info[3] }, | ||
236 | { "s3c6410-spi.1", "tx", &s3c64xx_dma1_info[4] }, | ||
237 | { "s3c6410-spi.1", "rx", &s3c64xx_dma1_info[5] }, | ||
238 | }; | ||
239 | |||
210 | struct pl08x_platform_data s3c64xx_dma1_plat_data = { | 240 | struct pl08x_platform_data s3c64xx_dma1_plat_data = { |
211 | .memcpy_channel = { | 241 | .memcpy_channel = { |
212 | .bus_id = "memcpy", | 242 | .bus_id = "memcpy", |
@@ -224,6 +254,8 @@ struct pl08x_platform_data s3c64xx_dma1_plat_data = { | |||
224 | .put_xfer_signal = pl08x_put_xfer_signal, | 254 | .put_xfer_signal = pl08x_put_xfer_signal, |
225 | .slave_channels = s3c64xx_dma1_info, | 255 | .slave_channels = s3c64xx_dma1_info, |
226 | .num_slave_channels = ARRAY_SIZE(s3c64xx_dma1_info), | 256 | .num_slave_channels = ARRAY_SIZE(s3c64xx_dma1_info), |
257 | .slave_map = s3c64xx_dma1_slave_map, | ||
258 | .slave_map_len = ARRAY_SIZE(s3c64xx_dma1_slave_map), | ||
227 | }; | 259 | }; |
228 | 260 | ||
229 | static AMBA_AHB_DEVICE(s3c64xx_dma1, "dma-pl080s.1", 0, | 261 | static AMBA_AHB_DEVICE(s3c64xx_dma1, "dma-pl080s.1", 0, |
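With these slave maps registered through the pl08x platform data, a client driver on these boards can obtain its channel via the generic lookup path instead of carrying a filter function and channel data around. An illustrative sketch (the wrapper is hypothetical; dma_request_chan() is the standard dmaengine call that consults the slave map on non-DT platforms):

```c
#include <linux/dmaengine.h>
#include <linux/err.h>

/* Request the "tx" channel for this device; the dmaengine core matches the
 * device name and channel name against the registered slave map. */
static struct dma_chan *example_request_tx(struct device *dev)
{
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return NULL;	/* or propagate PTR_ERR(chan) */

	return chan;
}
```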
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index e93aa6734147..cf7b95fddbb3 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -1124,15 +1124,6 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, | |||
1124 | pd.num_cs = num_cs; | 1124 | pd.num_cs = num_cs; |
1125 | pd.src_clk_nr = src_clk_nr; | 1125 | pd.src_clk_nr = src_clk_nr; |
1126 | pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio; | 1126 | pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio; |
1127 | pd.dma_tx = (void *)DMACH_SPI0_TX; | ||
1128 | pd.dma_rx = (void *)DMACH_SPI0_RX; | ||
1129 | #if defined(CONFIG_PL330_DMA) | ||
1130 | pd.filter = pl330_filter; | ||
1131 | #elif defined(CONFIG_S3C64XX_PL080) | ||
1132 | pd.filter = pl08x_filter_id; | ||
1133 | #elif defined(CONFIG_S3C24XX_DMAC) | ||
1134 | pd.filter = s3c24xx_dma_filter; | ||
1135 | #endif | ||
1136 | 1127 | ||
1137 | s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0); | 1128 | s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0); |
1138 | } | 1129 | } |
@@ -1169,14 +1160,6 @@ void __init s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, | |||
1169 | pd.num_cs = num_cs; | 1160 | pd.num_cs = num_cs; |
1170 | pd.src_clk_nr = src_clk_nr; | 1161 | pd.src_clk_nr = src_clk_nr; |
1171 | pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio; | 1162 | pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio; |
1172 | pd.dma_tx = (void *)DMACH_SPI1_TX; | ||
1173 | pd.dma_rx = (void *)DMACH_SPI1_RX; | ||
1174 | #if defined(CONFIG_PL330_DMA) | ||
1175 | pd.filter = pl330_filter; | ||
1176 | #elif defined(CONFIG_S3C64XX_PL080) | ||
1177 | pd.filter = pl08x_filter_id; | ||
1178 | #endif | ||
1179 | |||
1180 | 1163 | ||
1181 | s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1); | 1164 | s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1); |
1182 | } | 1165 | } |
@@ -1213,13 +1196,6 @@ void __init s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, | |||
1213 | pd.num_cs = num_cs; | 1196 | pd.num_cs = num_cs; |
1214 | pd.src_clk_nr = src_clk_nr; | 1197 | pd.src_clk_nr = src_clk_nr; |
1215 | pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio; | 1198 | pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio; |
1216 | pd.dma_tx = (void *)DMACH_SPI2_TX; | ||
1217 | pd.dma_rx = (void *)DMACH_SPI2_RX; | ||
1218 | #if defined(CONFIG_PL330_DMA) | ||
1219 | pd.filter = pl330_filter; | ||
1220 | #elif defined(CONFIG_S3C64XX_PL080) | ||
1221 | pd.filter = pl08x_filter_id; | ||
1222 | #endif | ||
1223 | 1199 | ||
1224 | s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2); | 1200 | s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2); |
1225 | } | 1201 | } |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 2154ea3c5d1c..263495d0adbd 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -494,7 +494,7 @@ config TEGRA20_APB_DMA | |||
494 | or vice versa. It does not support memory to memory data transfer. | 494 | or vice versa. It does not support memory to memory data transfer. |
495 | 495 | ||
496 | config TEGRA210_ADMA | 496 | config TEGRA210_ADMA |
497 | bool "NVIDIA Tegra210 ADMA support" | 497 | tristate "NVIDIA Tegra210 ADMA support" |
498 | depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK | 498 | depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK |
499 | select DMA_ENGINE | 499 | select DMA_ENGINE |
500 | select DMA_VIRTUAL_CHANNELS | 500 | select DMA_VIRTUAL_CHANNELS |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 939a7c31f760..0b7c6ce629a6 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1793,6 +1793,13 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) | |||
1793 | } | 1793 | } |
1794 | EXPORT_SYMBOL_GPL(pl08x_filter_id); | 1794 | EXPORT_SYMBOL_GPL(pl08x_filter_id); |
1795 | 1795 | ||
1796 | static bool pl08x_filter_fn(struct dma_chan *chan, void *chan_id) | ||
1797 | { | ||
1798 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | ||
1799 | |||
1800 | return plchan->cd == chan_id; | ||
1801 | } | ||
1802 | |||
1796 | /* | 1803 | /* |
1797 | * Just check that the device is there and active | 1804 | * Just check that the device is there and active |
1798 | * TODO: turn this bit on/off depending on the number of physical channels | 1805 | * TODO: turn this bit on/off depending on the number of physical channels |
@@ -2307,6 +2314,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
2307 | ret = -EINVAL; | 2314 | ret = -EINVAL; |
2308 | goto out_no_platdata; | 2315 | goto out_no_platdata; |
2309 | } | 2316 | } |
2317 | } else { | ||
2318 | pl08x->slave.filter.map = pl08x->pd->slave_map; | ||
2319 | pl08x->slave.filter.mapcnt = pl08x->pd->slave_map_len; | ||
2320 | pl08x->slave.filter.fn = pl08x_filter_fn; | ||
2310 | } | 2321 | } |
2311 | 2322 | ||
2312 | /* By default, AHB1 only. If dualmaster, from platform */ | 2323 | /* By default, AHB1 only. If dualmaster, from platform */ |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index a4c8f80db29d..1baf3404a365 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -111,9 +111,8 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan, | |||
111 | struct at_dma *atdma = to_at_dma(chan->device); | 111 | struct at_dma *atdma = to_at_dma(chan->device); |
112 | dma_addr_t phys; | 112 | dma_addr_t phys; |
113 | 113 | ||
114 | desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys); | 114 | desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys); |
115 | if (desc) { | 115 | if (desc) { |
116 | memset(desc, 0, sizeof(struct at_desc)); | ||
117 | INIT_LIST_HEAD(&desc->tx_list); | 116 | INIT_LIST_HEAD(&desc->tx_list); |
118 | dma_async_tx_descriptor_init(&desc->txd, chan); | 117 | dma_async_tx_descriptor_init(&desc->txd, chan); |
119 | /* txd.flags will be overwritten in prep functions */ | 118 | /* txd.flags will be overwritten in prep functions */ |
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b7d7f2d443a1..7d4e0bcda9af 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -221,7 +221,6 @@ struct at_xdmac { | |||
221 | int irq; | 221 | int irq; |
222 | struct clk *clk; | 222 | struct clk *clk; |
223 | u32 save_gim; | 223 | u32 save_gim; |
224 | u32 save_gs; | ||
225 | struct dma_pool *at_xdmac_desc_pool; | 224 | struct dma_pool *at_xdmac_desc_pool; |
226 | struct at_xdmac_chan chan[0]; | 225 | struct at_xdmac_chan chan[0]; |
227 | }; | 226 | }; |
@@ -444,9 +443,8 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan, | |||
444 | struct at_xdmac *atxdmac = to_at_xdmac(chan->device); | 443 | struct at_xdmac *atxdmac = to_at_xdmac(chan->device); |
445 | dma_addr_t phys; | 444 | dma_addr_t phys; |
446 | 445 | ||
447 | desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys); | 446 | desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys); |
448 | if (desc) { | 447 | if (desc) { |
449 | memset(desc, 0, sizeof(*desc)); | ||
450 | INIT_LIST_HEAD(&desc->descs_list); | 448 | INIT_LIST_HEAD(&desc->descs_list); |
451 | dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan); | 449 | dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan); |
452 | desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit; | 450 | desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit; |
@@ -1896,7 +1894,6 @@ static int atmel_xdmac_resume(struct device *dev) | |||
1896 | } | 1894 | } |
1897 | 1895 | ||
1898 | at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim); | 1896 | at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim); |
1899 | at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs); | ||
1900 | list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { | 1897 | list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { |
1901 | atchan = to_at_xdmac_chan(chan); | 1898 | atchan = to_at_xdmac_chan(chan); |
1902 | at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc); | 1899 | at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc); |
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index cf76fc6149e5..451f899f74e4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -164,7 +164,9 @@ struct dmatest_thread { | |||
164 | struct task_struct *task; | 164 | struct task_struct *task; |
165 | struct dma_chan *chan; | 165 | struct dma_chan *chan; |
166 | u8 **srcs; | 166 | u8 **srcs; |
167 | u8 **usrcs; | ||
167 | u8 **dsts; | 168 | u8 **dsts; |
169 | u8 **udsts; | ||
168 | enum dma_transaction_type type; | 170 | enum dma_transaction_type type; |
169 | bool done; | 171 | bool done; |
170 | }; | 172 | }; |
@@ -431,6 +433,7 @@ static int dmatest_func(void *data) | |||
431 | ktime_t comparetime = ktime_set(0, 0); | 433 | ktime_t comparetime = ktime_set(0, 0); |
432 | s64 runtime = 0; | 434 | s64 runtime = 0; |
433 | unsigned long long total_len = 0; | 435 | unsigned long long total_len = 0; |
436 | u8 align = 0; | ||
434 | 437 | ||
435 | set_freezable(); | 438 | set_freezable(); |
436 | 439 | ||
@@ -441,20 +444,24 @@ static int dmatest_func(void *data) | |||
441 | params = &info->params; | 444 | params = &info->params; |
442 | chan = thread->chan; | 445 | chan = thread->chan; |
443 | dev = chan->device; | 446 | dev = chan->device; |
444 | if (thread->type == DMA_MEMCPY) | 447 | if (thread->type == DMA_MEMCPY) { |
448 | align = dev->copy_align; | ||
445 | src_cnt = dst_cnt = 1; | 449 | src_cnt = dst_cnt = 1; |
446 | else if (thread->type == DMA_SG) | 450 | } else if (thread->type == DMA_SG) { |
451 | align = dev->copy_align; | ||
447 | src_cnt = dst_cnt = sg_buffers; | 452 | src_cnt = dst_cnt = sg_buffers; |
448 | else if (thread->type == DMA_XOR) { | 453 | } else if (thread->type == DMA_XOR) { |
449 | /* force odd to ensure dst = src */ | 454 | /* force odd to ensure dst = src */ |
450 | src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); | 455 | src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); |
451 | dst_cnt = 1; | 456 | dst_cnt = 1; |
457 | align = dev->xor_align; | ||
452 | } else if (thread->type == DMA_PQ) { | 458 | } else if (thread->type == DMA_PQ) { |
453 | /* force odd to ensure dst = src */ | 459 | /* force odd to ensure dst = src */ |
454 | src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); | 460 | src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); |
455 | dst_cnt = 2; | 461 | dst_cnt = 2; |
462 | align = dev->pq_align; | ||
456 | 463 | ||
457 | pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL); | 464 | pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL); |
458 | if (!pq_coefs) | 465 | if (!pq_coefs) |
459 | goto err_thread_type; | 466 | goto err_thread_type; |
460 | 467 | ||
@@ -463,23 +470,47 @@ static int dmatest_func(void *data) | |||
463 | } else | 470 | } else |
464 | goto err_thread_type; | 471 | goto err_thread_type; |
465 | 472 | ||
466 | thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); | 473 | thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL); |
467 | if (!thread->srcs) | 474 | if (!thread->srcs) |
468 | goto err_srcs; | 475 | goto err_srcs; |
476 | |||
477 | thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL); | ||
478 | if (!thread->usrcs) | ||
479 | goto err_usrcs; | ||
480 | |||
469 | for (i = 0; i < src_cnt; i++) { | 481 | for (i = 0; i < src_cnt; i++) { |
470 | thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL); | 482 | thread->usrcs[i] = kmalloc(params->buf_size + align, |
471 | if (!thread->srcs[i]) | 483 | GFP_KERNEL); |
484 | if (!thread->usrcs[i]) | ||
472 | goto err_srcbuf; | 485 | goto err_srcbuf; |
486 | |||
487 | /* align srcs to alignment restriction */ | ||
488 | if (align) | ||
489 | thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align); | ||
490 | else | ||
491 | thread->srcs[i] = thread->usrcs[i]; | ||
473 | } | 492 | } |
474 | thread->srcs[i] = NULL; | 493 | thread->srcs[i] = NULL; |
475 | 494 | ||
476 | thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL); | 495 | thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL); |
477 | if (!thread->dsts) | 496 | if (!thread->dsts) |
478 | goto err_dsts; | 497 | goto err_dsts; |
498 | |||
499 | thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL); | ||
500 | if (!thread->udsts) | ||
501 | goto err_udsts; | ||
502 | |||
479 | for (i = 0; i < dst_cnt; i++) { | 503 | for (i = 0; i < dst_cnt; i++) { |
480 | thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL); | 504 | thread->udsts[i] = kmalloc(params->buf_size + align, |
481 | if (!thread->dsts[i]) | 505 | GFP_KERNEL); |
506 | if (!thread->udsts[i]) | ||
482 | goto err_dstbuf; | 507 | goto err_dstbuf; |
508 | |||
509 | /* align dsts to alignment restriction */ | ||
510 | if (align) | ||
511 | thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align); | ||
512 | else | ||
513 | thread->dsts[i] = thread->udsts[i]; | ||
483 | } | 514 | } |
484 | thread->dsts[i] = NULL; | 515 | thread->dsts[i] = NULL; |
485 | 516 | ||
@@ -498,20 +529,11 @@ static int dmatest_func(void *data) | |||
498 | dma_addr_t srcs[src_cnt]; | 529 | dma_addr_t srcs[src_cnt]; |
499 | dma_addr_t *dsts; | 530 | dma_addr_t *dsts; |
500 | unsigned int src_off, dst_off, len; | 531 | unsigned int src_off, dst_off, len; |
501 | u8 align = 0; | ||
502 | struct scatterlist tx_sg[src_cnt]; | 532 | struct scatterlist tx_sg[src_cnt]; |
503 | struct scatterlist rx_sg[src_cnt]; | 533 | struct scatterlist rx_sg[src_cnt]; |
504 | 534 | ||
505 | total_tests++; | 535 | total_tests++; |
506 | 536 | ||
507 | /* honor alignment restrictions */ | ||
508 | if (thread->type == DMA_MEMCPY || thread->type == DMA_SG) | ||
509 | align = dev->copy_align; | ||
510 | else if (thread->type == DMA_XOR) | ||
511 | align = dev->xor_align; | ||
512 | else if (thread->type == DMA_PQ) | ||
513 | align = dev->pq_align; | ||
514 | |||
515 | if (1 << align > params->buf_size) { | 537 | if (1 << align > params->buf_size) { |
516 | pr_err("%u-byte buffer too small for %d-byte alignment\n", | 538 | pr_err("%u-byte buffer too small for %d-byte alignment\n", |
517 | params->buf_size, 1 << align); | 539 | params->buf_size, 1 << align); |
@@ -549,7 +571,7 @@ static int dmatest_func(void *data) | |||
549 | filltime = ktime_add(filltime, diff); | 571 | filltime = ktime_add(filltime, diff); |
550 | } | 572 | } |
551 | 573 | ||
552 | um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt, | 574 | um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt, |
553 | GFP_KERNEL); | 575 | GFP_KERNEL); |
554 | if (!um) { | 576 | if (!um) { |
555 | failed_tests++; | 577 | failed_tests++; |
@@ -729,13 +751,17 @@ static int dmatest_func(void *data) | |||
729 | 751 | ||
730 | ret = 0; | 752 | ret = 0; |
731 | err_dstbuf: | 753 | err_dstbuf: |
732 | for (i = 0; thread->dsts[i]; i++) | 754 | for (i = 0; thread->udsts[i]; i++) |
733 | kfree(thread->dsts[i]); | 755 | kfree(thread->udsts[i]); |
756 | kfree(thread->udsts); | ||
757 | err_udsts: | ||
734 | kfree(thread->dsts); | 758 | kfree(thread->dsts); |
735 | err_dsts: | 759 | err_dsts: |
736 | err_srcbuf: | 760 | err_srcbuf: |
737 | for (i = 0; thread->srcs[i]; i++) | 761 | for (i = 0; thread->usrcs[i]; i++) |
738 | kfree(thread->srcs[i]); | 762 | kfree(thread->usrcs[i]); |
763 | kfree(thread->usrcs); | ||
764 | err_usrcs: | ||
739 | kfree(thread->srcs); | 765 | kfree(thread->srcs); |
740 | err_srcs: | 766 | err_srcs: |
741 | kfree(pq_coefs); | 767 | kfree(pq_coefs); |
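The dmatest change above over-allocates each buffer and keeps both the raw pointer (usrcs/udsts, for kfree) and the aligned pointer (srcs/dsts, handed to the engine). The pattern in isolation, as a hedged sketch; the helper name is hypothetical and "align" is treated here as a byte alignment derived from the device's copy/xor/pq alignment capability:

```c
#include <linux/kernel.h>
#include <linux/slab.h>

/* Over-allocate, keep the raw pointer for kfree(), return an aligned view. */
static u8 *example_alloc_aligned(size_t len, unsigned long align, u8 **raw)
{
	*raw = kmalloc(len + align, GFP_KERNEL);
	if (!*raw)
		return NULL;

	return align ? PTR_ALIGN(*raw, align) : *raw;
}
```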
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index c2c0a613cb7a..e5adf5d1c34f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1569,7 +1569,7 @@ int dw_dma_probe(struct dw_dma_chip *chip) | |||
1569 | (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; | 1569 | (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; |
1570 | } else { | 1570 | } else { |
1571 | dwc->block_size = pdata->block_size; | 1571 | dwc->block_size = pdata->block_size; |
1572 | dwc->nollp = pdata->is_nollp; | 1572 | dwc->nollp = !pdata->multi_block[i]; |
1573 | } | 1573 | } |
1574 | } | 1574 | } |
1575 | 1575 | ||
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 5bda0eb9f393..b1655e40cfa2 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -102,7 +102,7 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
102 | { | 102 | { |
103 | struct device_node *np = pdev->dev.of_node; | 103 | struct device_node *np = pdev->dev.of_node; |
104 | struct dw_dma_platform_data *pdata; | 104 | struct dw_dma_platform_data *pdata; |
105 | u32 tmp, arr[DW_DMA_MAX_NR_MASTERS]; | 105 | u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS]; |
106 | u32 nr_masters; | 106 | u32 nr_masters; |
107 | u32 nr_channels; | 107 | u32 nr_channels; |
108 | 108 | ||
@@ -118,6 +118,8 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
118 | 118 | ||
119 | if (of_property_read_u32(np, "dma-channels", &nr_channels)) | 119 | if (of_property_read_u32(np, "dma-channels", &nr_channels)) |
120 | return NULL; | 120 | return NULL; |
121 | if (nr_channels > DW_DMA_MAX_NR_CHANNELS) | ||
122 | return NULL; | ||
121 | 123 | ||
122 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | 124 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); |
123 | if (!pdata) | 125 | if (!pdata) |
@@ -129,6 +131,12 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
129 | if (of_property_read_bool(np, "is_private")) | 131 | if (of_property_read_bool(np, "is_private")) |
130 | pdata->is_private = true; | 132 | pdata->is_private = true; |
131 | 133 | ||
134 | /* | ||
135 | * All known devices, which use DT for configuration, support | ||
136 | * memory-to-memory transfers. So enable it by default. | ||
137 | */ | ||
138 | pdata->is_memcpy = true; | ||
139 | |||
132 | if (!of_property_read_u32(np, "chan_allocation_order", &tmp)) | 140 | if (!of_property_read_u32(np, "chan_allocation_order", &tmp)) |
133 | pdata->chan_allocation_order = (unsigned char)tmp; | 141 | pdata->chan_allocation_order = (unsigned char)tmp; |
134 | 142 | ||
@@ -146,6 +154,14 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
146 | pdata->data_width[tmp] = BIT(arr[tmp] & 0x07); | 154 | pdata->data_width[tmp] = BIT(arr[tmp] & 0x07); |
147 | } | 155 | } |
148 | 156 | ||
157 | if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) { | ||
158 | for (tmp = 0; tmp < nr_channels; tmp++) | ||
159 | pdata->multi_block[tmp] = mb[tmp]; | ||
160 | } else { | ||
161 | for (tmp = 0; tmp < nr_channels; tmp++) | ||
162 | pdata->multi_block[tmp] = 1; | ||
163 | } | ||
164 | |||
149 | return pdata; | 165 | return pdata; |
150 | } | 166 | } |
151 | #else | 167 | #else |
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index f65dd104479f..4e0128c62704 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -12,7 +12,8 @@ | |||
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/dmaengine.h> | 13 | #include <linux/dmaengine.h> |
14 | 14 | ||
15 | #define DW_DMA_MAX_NR_CHANNELS 8 | 15 | #include "internal.h" |
16 | |||
16 | #define DW_DMA_MAX_NR_REQUESTS 16 | 17 | #define DW_DMA_MAX_NR_REQUESTS 16 |
17 | 18 | ||
18 | /* flow controller */ | 19 | /* flow controller */ |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 77242b37ef87..3879f80a4815 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -2451,6 +2451,9 @@ static int edma_pm_resume(struct device *dev) | |||
2451 | int i; | 2451 | int i; |
2452 | s8 (*queue_priority_mapping)[2]; | 2452 | s8 (*queue_priority_mapping)[2]; |
2453 | 2453 | ||
2454 | /* re initialize dummy slot to dummy param set */ | ||
2455 | edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset); | ||
2456 | |||
2454 | queue_priority_mapping = ecc->info->queue_priority_mapping; | 2457 | queue_priority_mapping = ecc->info->queue_priority_mapping; |
2455 | 2458 | ||
2456 | /* Event queue priority mapping */ | 2459 | /* Event queue priority mapping */ |
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index db2f9e1653a2..90d29f90acfb 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -881,6 +881,7 @@ static struct of_device_id fsl_re_ids[] = { | |||
881 | { .compatible = "fsl,raideng-v1.0", }, | 881 | { .compatible = "fsl,raideng-v1.0", }, |
882 | {} | 882 | {} |
883 | }; | 883 | }; |
884 | MODULE_DEVICE_TABLE(of, fsl_re_ids); | ||
884 | 885 | ||
885 | static struct platform_driver fsl_re_driver = { | 886 | static struct platform_driver fsl_re_driver = { |
886 | .driver = { | 887 | .driver = { |
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index b51639f045ed..4875fa428e81 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -77,13 +77,15 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
77 | if (!chip) | 77 | if (!chip) |
78 | return -ENOMEM; | 78 | return -ENOMEM; |
79 | 79 | ||
80 | ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); | ||
81 | if (ret < 0) | ||
82 | return ret; | ||
83 | |||
80 | chip->dev = &pdev->dev; | 84 | chip->dev = &pdev->dev; |
81 | chip->regs = pcim_iomap_table(pdev)[0]; | 85 | chip->regs = pcim_iomap_table(pdev)[0]; |
82 | chip->length = pci_resource_len(pdev, 0); | 86 | chip->length = pci_resource_len(pdev, 0); |
83 | chip->offset = HSU_PCI_CHAN_OFFSET; | 87 | chip->offset = HSU_PCI_CHAN_OFFSET; |
84 | chip->irq = pdev->irq; | 88 | chip->irq = pci_irq_vector(pdev, 0); |
85 | |||
86 | pci_enable_msi(pdev); | ||
87 | 89 | ||
88 | ret = hsu_dma_probe(chip); | 90 | ret = hsu_dma_probe(chip); |
89 | if (ret) | 91 | if (ret) |
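The hsu/pci.c change above replaces the explicit pci_enable_msi() call with pci_alloc_irq_vectors(), which lets the PCI core pick whichever vector type is available (MSI-X, MSI or legacy INTx), paired with pci_irq_vector() to look up the Linux IRQ number instead of reading pdev->irq directly. A minimal sketch of the same pattern, with the wrapper function being illustrative:

```c
#include <linux/pci.h>

/* Allocate exactly one interrupt vector of any supported type and return the
 * Linux IRQ number for vector 0 (or a negative errno on failure). */
static int example_setup_irq(struct pci_dev *pdev)
{
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		return ret;

	return pci_irq_vector(pdev, 0);
}
```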
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 624f1e1e9c55..54db1411ce73 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -292,7 +292,7 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy( | |||
292 | struct mdc_dma *mdma = mchan->mdma; | 292 | struct mdc_dma *mdma = mchan->mdma; |
293 | struct mdc_tx_desc *mdesc; | 293 | struct mdc_tx_desc *mdesc; |
294 | struct mdc_hw_list_desc *curr, *prev = NULL; | 294 | struct mdc_hw_list_desc *curr, *prev = NULL; |
295 | dma_addr_t curr_phys, prev_phys; | 295 | dma_addr_t curr_phys; |
296 | 296 | ||
297 | if (!len) | 297 | if (!len) |
298 | return NULL; | 298 | return NULL; |
@@ -324,7 +324,6 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy( | |||
324 | xfer_size); | 324 | xfer_size); |
325 | 325 | ||
326 | prev = curr; | 326 | prev = curr; |
327 | prev_phys = curr_phys; | ||
328 | 327 | ||
329 | mdesc->list_len++; | 328 | mdesc->list_len++; |
330 | src += xfer_size; | 329 | src += xfer_size; |
@@ -375,7 +374,7 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic( | |||
375 | struct mdc_dma *mdma = mchan->mdma; | 374 | struct mdc_dma *mdma = mchan->mdma; |
376 | struct mdc_tx_desc *mdesc; | 375 | struct mdc_tx_desc *mdesc; |
377 | struct mdc_hw_list_desc *curr, *prev = NULL; | 376 | struct mdc_hw_list_desc *curr, *prev = NULL; |
378 | dma_addr_t curr_phys, prev_phys; | 377 | dma_addr_t curr_phys; |
379 | 378 | ||
380 | if (!buf_len && !period_len) | 379 | if (!buf_len && !period_len) |
381 | return NULL; | 380 | return NULL; |
@@ -430,7 +429,6 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic( | |||
430 | } | 429 | } |
431 | 430 | ||
432 | prev = curr; | 431 | prev = curr; |
433 | prev_phys = curr_phys; | ||
434 | 432 | ||
435 | mdesc->list_len++; | 433 | mdesc->list_len++; |
436 | buf_addr += xfer_size; | 434 | buf_addr += xfer_size; |
@@ -458,7 +456,7 @@ static struct dma_async_tx_descriptor *mdc_prep_slave_sg( | |||
458 | struct mdc_tx_desc *mdesc; | 456 | struct mdc_tx_desc *mdesc; |
459 | struct scatterlist *sg; | 457 | struct scatterlist *sg; |
460 | struct mdc_hw_list_desc *curr, *prev = NULL; | 458 | struct mdc_hw_list_desc *curr, *prev = NULL; |
461 | dma_addr_t curr_phys, prev_phys; | 459 | dma_addr_t curr_phys; |
462 | unsigned int i; | 460 | unsigned int i; |
463 | 461 | ||
464 | if (!sgl) | 462 | if (!sgl) |
@@ -509,7 +507,6 @@ static struct dma_async_tx_descriptor *mdc_prep_slave_sg( | |||
509 | } | 507 | } |
510 | 508 | ||
511 | prev = curr; | 509 | prev = curr; |
512 | prev_phys = curr_phys; | ||
513 | 510 | ||
514 | mdesc->list_len++; | 511 | mdesc->list_len++; |
515 | mdesc->list_xfer_size += xfer_size; | 512 | mdesc->list_xfer_size += xfer_size; |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index b9629b2bfc05..d1651a50c349 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -298,6 +298,7 @@ struct sdma_engine; | |||
298 | * @event_id1 for channels that use 2 events | 298 | * @event_id1 for channels that use 2 events |
299 | * @word_size peripheral access size | 299 | * @word_size peripheral access size |
300 | * @buf_tail ID of the buffer that was processed | 300 | * @buf_tail ID of the buffer that was processed |
301 | * @buf_ptail ID of the previous buffer that was processed | ||
301 | * @num_bd max NUM_BD. number of descriptors currently handling | 302 | * @num_bd max NUM_BD. number of descriptors currently handling |
302 | */ | 303 | */ |
303 | struct sdma_channel { | 304 | struct sdma_channel { |
@@ -309,6 +310,7 @@ struct sdma_channel { | |||
309 | unsigned int event_id1; | 310 | unsigned int event_id1; |
310 | enum dma_slave_buswidth word_size; | 311 | enum dma_slave_buswidth word_size; |
311 | unsigned int buf_tail; | 312 | unsigned int buf_tail; |
313 | unsigned int buf_ptail; | ||
312 | unsigned int num_bd; | 314 | unsigned int num_bd; |
313 | unsigned int period_len; | 315 | unsigned int period_len; |
314 | struct sdma_buffer_descriptor *bd; | 316 | struct sdma_buffer_descriptor *bd; |
@@ -700,6 +702,8 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac) | |||
700 | sdmac->chn_real_count = bd->mode.count; | 702 | sdmac->chn_real_count = bd->mode.count; |
701 | bd->mode.status |= BD_DONE; | 703 | bd->mode.status |= BD_DONE; |
702 | bd->mode.count = sdmac->period_len; | 704 | bd->mode.count = sdmac->period_len; |
705 | sdmac->buf_ptail = sdmac->buf_tail; | ||
706 | sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd; | ||
703 | 707 | ||
704 | /* | 708 | /* |
705 | * The callback is called from the interrupt context in order | 709 | * The callback is called from the interrupt context in order |
@@ -710,9 +714,6 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac) | |||
710 | 714 | ||
711 | dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL); | 715 | dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL); |
712 | 716 | ||
713 | sdmac->buf_tail++; | ||
714 | sdmac->buf_tail %= sdmac->num_bd; | ||
715 | |||
716 | if (error) | 717 | if (error) |
717 | sdmac->status = old_status; | 718 | sdmac->status = old_status; |
718 | } | 719 | } |
@@ -1186,6 +1187,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
1186 | sdmac->flags = 0; | 1187 | sdmac->flags = 0; |
1187 | 1188 | ||
1188 | sdmac->buf_tail = 0; | 1189 | sdmac->buf_tail = 0; |
1190 | sdmac->buf_ptail = 0; | ||
1191 | sdmac->chn_real_count = 0; | ||
1189 | 1192 | ||
1190 | dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", | 1193 | dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", |
1191 | sg_len, channel); | 1194 | sg_len, channel); |
@@ -1288,6 +1291,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | |||
1288 | sdmac->status = DMA_IN_PROGRESS; | 1291 | sdmac->status = DMA_IN_PROGRESS; |
1289 | 1292 | ||
1290 | sdmac->buf_tail = 0; | 1293 | sdmac->buf_tail = 0; |
1294 | sdmac->buf_ptail = 0; | ||
1295 | sdmac->chn_real_count = 0; | ||
1291 | sdmac->period_len = period_len; | 1296 | sdmac->period_len = period_len; |
1292 | 1297 | ||
1293 | sdmac->flags |= IMX_DMA_SG_LOOP; | 1298 | sdmac->flags |= IMX_DMA_SG_LOOP; |
@@ -1385,7 +1390,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, | |||
1385 | u32 residue; | 1390 | u32 residue; |
1386 | 1391 | ||
1387 | if (sdmac->flags & IMX_DMA_SG_LOOP) | 1392 | if (sdmac->flags & IMX_DMA_SG_LOOP) |
1388 | residue = (sdmac->num_bd - sdmac->buf_tail) * | 1393 | residue = (sdmac->num_bd - sdmac->buf_ptail) * |
1389 | sdmac->period_len - sdmac->chn_real_count; | 1394 | sdmac->period_len - sdmac->chn_real_count; |
1390 | else | 1395 | else |
1391 | residue = sdmac->chn_count - sdmac->chn_real_count; | 1396 | residue = sdmac->chn_count - sdmac->chn_real_count; |
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 49386ce04bf5..a371b07a0981 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -39,6 +39,7 @@ | |||
39 | #include "../dmaengine.h" | 39 | #include "../dmaengine.h" |
40 | 40 | ||
41 | static char *chanerr_str[] = { | 41 | static char *chanerr_str[] = { |
42 | "DMA Transfer Source Address Error", | ||
42 | "DMA Transfer Destination Address Error", | 43 | "DMA Transfer Destination Address Error", |
43 | "Next Descriptor Address Error", | 44 | "Next Descriptor Address Error", |
44 | "Descriptor Error", | 45 | "Descriptor Error", |
@@ -66,7 +67,6 @@ static char *chanerr_str[] = { | |||
66 | "Result Guard Tag verification Error", | 67 | "Result Guard Tag verification Error", |
67 | "Result Application Tag verification Error", | 68 | "Result Application Tag verification Error", |
68 | "Result Reference Tag verification Error", | 69 | "Result Reference Tag verification Error", |
69 | NULL | ||
70 | }; | 70 | }; |
71 | 71 | ||
72 | static void ioat_eh(struct ioatdma_chan *ioat_chan); | 72 | static void ioat_eh(struct ioatdma_chan *ioat_chan); |
@@ -75,13 +75,10 @@ static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr) | |||
75 | { | 75 | { |
76 | int i; | 76 | int i; |
77 | 77 | ||
78 | for (i = 0; i < 32; i++) { | 78 | for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) { |
79 | if ((chanerr >> i) & 1) { | 79 | if ((chanerr >> i) & 1) { |
80 | if (chanerr_str[i]) { | 80 | dev_err(to_dev(ioat_chan), "Err(%d): %s\n", |
81 | dev_err(to_dev(ioat_chan), "Err(%d): %s\n", | 81 | i, chanerr_str[i]); |
82 | i, chanerr_str[i]); | ||
83 | } else | ||
84 | break; | ||
85 | } | 82 | } |
86 | } | 83 | } |
87 | } | 84 | } |
@@ -341,15 +338,12 @@ ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags) | |||
341 | { | 338 | { |
342 | struct ioat_dma_descriptor *hw; | 339 | struct ioat_dma_descriptor *hw; |
343 | struct ioat_ring_ent *desc; | 340 | struct ioat_ring_ent *desc; |
344 | struct ioatdma_device *ioat_dma; | ||
345 | struct ioatdma_chan *ioat_chan = to_ioat_chan(chan); | 341 | struct ioatdma_chan *ioat_chan = to_ioat_chan(chan); |
346 | int chunk; | 342 | int chunk; |
347 | dma_addr_t phys; | 343 | dma_addr_t phys; |
348 | u8 *pos; | 344 | u8 *pos; |
349 | off_t offs; | 345 | off_t offs; |
350 | 346 | ||
351 | ioat_dma = to_ioatdma_device(chan->device); | ||
352 | |||
353 | chunk = idx / IOAT_DESCS_PER_2M; | 347 | chunk = idx / IOAT_DESCS_PER_2M; |
354 | idx &= (IOAT_DESCS_PER_2M - 1); | 348 | idx &= (IOAT_DESCS_PER_2M - 1); |
355 | offs = idx * IOAT_DESC_SZ; | 349 | offs = idx * IOAT_DESC_SZ; |
@@ -614,11 +608,8 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) | |||
614 | 608 | ||
615 | tx = &desc->txd; | 609 | tx = &desc->txd; |
616 | if (tx->cookie) { | 610 | if (tx->cookie) { |
617 | struct dmaengine_result res; | ||
618 | |||
619 | dma_cookie_complete(tx); | 611 | dma_cookie_complete(tx); |
620 | dma_descriptor_unmap(tx); | 612 | dma_descriptor_unmap(tx); |
621 | res.result = DMA_TRANS_NOERROR; | ||
622 | dmaengine_desc_get_callback_invoke(tx, NULL); | 613 | dmaengine_desc_get_callback_invoke(tx, NULL); |
623 | tx->callback = NULL; | 614 | tx->callback = NULL; |
624 | tx->callback_result = NULL; | 615 | tx->callback_result = NULL; |
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 015f7110b96d..90eddd9f07e4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -340,11 +340,13 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma) | |||
340 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | 340 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); |
341 | if (dma_mapping_error(dev, dma_src)) { | 341 | if (dma_mapping_error(dev, dma_src)) { |
342 | dev_err(dev, "mapping src buffer failed\n"); | 342 | dev_err(dev, "mapping src buffer failed\n"); |
343 | err = -ENOMEM; | ||
343 | goto free_resources; | 344 | goto free_resources; |
344 | } | 345 | } |
345 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); | 346 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); |
346 | if (dma_mapping_error(dev, dma_dest)) { | 347 | if (dma_mapping_error(dev, dma_dest)) { |
347 | dev_err(dev, "mapping dest buffer failed\n"); | 348 | dev_err(dev, "mapping dest buffer failed\n"); |
349 | err = -ENOMEM; | ||
348 | goto unmap_src; | 350 | goto unmap_src; |
349 | } | 351 | } |
350 | flags = DMA_PREP_INTERRUPT; | 352 | flags = DMA_PREP_INTERRUPT; |
@@ -827,16 +829,20 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) | |||
827 | op = IOAT_OP_XOR; | 829 | op = IOAT_OP_XOR; |
828 | 830 | ||
829 | dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); | 831 | dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); |
830 | if (dma_mapping_error(dev, dest_dma)) | 832 | if (dma_mapping_error(dev, dest_dma)) { |
833 | err = -ENOMEM; | ||
831 | goto free_resources; | 834 | goto free_resources; |
835 | } | ||
832 | 836 | ||
833 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | 837 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) |
834 | dma_srcs[i] = DMA_ERROR_CODE; | 838 | dma_srcs[i] = DMA_ERROR_CODE; |
835 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) { | 839 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) { |
836 | dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, | 840 | dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, |
837 | DMA_TO_DEVICE); | 841 | DMA_TO_DEVICE); |
838 | if (dma_mapping_error(dev, dma_srcs[i])) | 842 | if (dma_mapping_error(dev, dma_srcs[i])) { |
843 | err = -ENOMEM; | ||
839 | goto dma_unmap; | 844 | goto dma_unmap; |
845 | } | ||
840 | } | 846 | } |
841 | tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, | 847 | tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, |
842 | IOAT_NUM_SRC_TEST, PAGE_SIZE, | 848 | IOAT_NUM_SRC_TEST, PAGE_SIZE, |
@@ -904,8 +910,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) | |||
904 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { | 910 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { |
905 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, | 911 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, |
906 | DMA_TO_DEVICE); | 912 | DMA_TO_DEVICE); |
907 | if (dma_mapping_error(dev, dma_srcs[i])) | 913 | if (dma_mapping_error(dev, dma_srcs[i])) { |
914 | err = -ENOMEM; | ||
908 | goto dma_unmap; | 915 | goto dma_unmap; |
916 | } | ||
909 | } | 917 | } |
910 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | 918 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, |
911 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | 919 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, |
@@ -957,8 +965,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) | |||
957 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { | 965 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { |
958 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, | 966 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, |
959 | DMA_TO_DEVICE); | 967 | DMA_TO_DEVICE); |
960 | if (dma_mapping_error(dev, dma_srcs[i])) | 968 | if (dma_mapping_error(dev, dma_srcs[i])) { |
969 | err = -ENOMEM; | ||
961 | goto dma_unmap; | 970 | goto dma_unmap; |
971 | } | ||
962 | } | 972 | } |
963 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | 973 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, |
964 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | 974 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, |
@@ -1071,7 +1081,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) | |||
1071 | struct dma_device *dma; | 1081 | struct dma_device *dma; |
1072 | struct dma_chan *c; | 1082 | struct dma_chan *c; |
1073 | struct ioatdma_chan *ioat_chan; | 1083 | struct ioatdma_chan *ioat_chan; |
1074 | bool is_raid_device = false; | ||
1075 | int err; | 1084 | int err; |
1076 | u16 val16; | 1085 | u16 val16; |
1077 | 1086 | ||
@@ -1095,7 +1104,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) | |||
1095 | ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); | 1104 | ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); |
1096 | 1105 | ||
1097 | if (ioat_dma->cap & IOAT_CAP_XOR) { | 1106 | if (ioat_dma->cap & IOAT_CAP_XOR) { |
1098 | is_raid_device = true; | ||
1099 | dma->max_xor = 8; | 1107 | dma->max_xor = 8; |
1100 | 1108 | ||
1101 | dma_cap_set(DMA_XOR, dma->cap_mask); | 1109 | dma_cap_set(DMA_XOR, dma->cap_mask); |
@@ -1106,7 +1114,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) | |||
1106 | } | 1114 | } |
1107 | 1115 | ||
1108 | if (ioat_dma->cap & IOAT_CAP_PQ) { | 1116 | if (ioat_dma->cap & IOAT_CAP_PQ) { |
1109 | is_raid_device = true; | ||
1110 | 1117 | ||
1111 | dma->device_prep_dma_pq = ioat_prep_pq; | 1118 | dma->device_prep_dma_pq = ioat_prep_pq; |
1112 | dma->device_prep_dma_pq_val = ioat_prep_pq_val; | 1119 | dma->device_prep_dma_pq_val = ioat_prep_pq_val; |
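The init.c hunks above all apply the same fix: the self tests jumped to a cleanup label without setting err, so a dma_map_single()/dma_map_page() failure could be reported as success. A minimal sketch of the intended idiom (map_buf()/unmap_buf()/mapping_error() are hypothetical stand-ins, not the ioat helpers):

/* Error paths set a meaningful code before jumping to the shared
 * cleanup label, instead of returning whatever 'err' last held. */
static int self_test(struct device *dev, void *src, void *dst, size_t len)
{
	dma_addr_t dma_src, dma_dst;
	int err = 0;

	dma_src = map_buf(dev, src, len);
	if (mapping_error(dev, dma_src)) {
		err = -ENOMEM;			/* the fix: report the failure */
		goto out;
	}
	dma_dst = map_buf(dev, dst, len);
	if (mapping_error(dev, dma_dst)) {
		err = -ENOMEM;
		goto unmap_src;
	}
	/* ... issue the copy and verify dst here, setting err on mismatch ... */
	unmap_buf(dev, dma_dst, len);
unmap_src:
	unmap_buf(dev, dma_src, len);
out:
	return err;
}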
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index aabcb7934b05..01e25c68dd5a 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c | |||
@@ -458,13 +458,12 @@ static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num, | |||
458 | if (!ds) | 458 | if (!ds) |
459 | return NULL; | 459 | return NULL; |
460 | 460 | ||
461 | ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli); | 461 | ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli); |
462 | if (!ds->desc_hw) { | 462 | if (!ds->desc_hw) { |
463 | dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc); | 463 | dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc); |
464 | kfree(ds); | 464 | kfree(ds); |
465 | return NULL; | 465 | return NULL; |
466 | } | 466 | } |
467 | memset(ds->desc_hw, 0, sizeof(struct k3_desc_hw) * num); | ||
468 | ds->desc_num = num; | 467 | ds->desc_num = num; |
469 | return ds; | 468 | return ds; |
470 | } | 469 | } |
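dma_pool_zalloc() returns memory that is already cleared, so the explicit memset() after dma_pool_alloc() becomes redundant; the same conversion appears again below for pch_dma with pci_pool_zalloc(). Roughly, the two forms compare as:

/* Before: allocate a pool element, then clear it by hand. */
ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
if (ds->desc_hw)
	memset(ds->desc_hw, 0, sizeof(struct k3_desc_hw) * num);

/* After: one call that hands back a zeroed pool element (the pool was
 * created large enough to hold the 'num' hardware descriptors). */
ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);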
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c index 818255844a3c..5ba5714d0b7c 100644 --- a/drivers/dma/mic_x100_dma.c +++ b/drivers/dma/mic_x100_dma.c | |||
@@ -554,9 +554,7 @@ static int mic_dma_init(struct mic_dma_device *mic_dma_dev, | |||
554 | int ret; | 554 | int ret; |
555 | 555 | ||
556 | for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) { | 556 | for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) { |
557 | unsigned long data; | ||
558 | ch = &mic_dma_dev->mic_ch[i]; | 557 | ch = &mic_dma_dev->mic_ch[i]; |
559 | data = (unsigned long)ch; | ||
560 | ch->ch_num = i; | 558 | ch->ch_num = i; |
561 | ch->owner = owner; | 559 | ch->owner = owner; |
562 | spin_lock_init(&ch->cleanup_lock); | 560 | spin_lock_init(&ch->cleanup_lock); |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 23f75285a4d9..0cb951b743a6 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -68,6 +68,36 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, | |||
68 | hw_desc->byte_count = byte_count; | 68 | hw_desc->byte_count = byte_count; |
69 | } | 69 | } |
70 | 70 | ||
71 | /* Populate the descriptor */ | ||
72 | static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc, | ||
73 | dma_addr_t dma_src, dma_addr_t dma_dst, | ||
74 | u32 len, struct mv_xor_desc_slot *prev) | ||
75 | { | ||
76 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
77 | |||
78 | hw_desc->status = XOR_DESC_DMA_OWNED; | ||
79 | hw_desc->phy_next_desc = 0; | ||
80 | /* Configure for XOR with only one src address -> MEMCPY */ | ||
81 | hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0); | ||
82 | hw_desc->phy_dest_addr = dma_dst; | ||
83 | hw_desc->phy_src_addr[0] = dma_src; | ||
84 | hw_desc->byte_count = len; | ||
85 | |||
86 | if (prev) { | ||
87 | struct mv_xor_desc *hw_prev = prev->hw_desc; | ||
88 | |||
89 | hw_prev->phy_next_desc = desc->async_tx.phys; | ||
90 | } | ||
91 | } | ||
92 | |||
93 | static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc) | ||
94 | { | ||
95 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
96 | |||
97 | /* Enable end-of-descriptor interrupt */ | ||
98 | hw_desc->desc_command |= XOR_DESC_EOD_INT_EN; | ||
99 | } | ||
100 | |||
71 | static void mv_desc_set_mode(struct mv_xor_desc_slot *desc) | 101 | static void mv_desc_set_mode(struct mv_xor_desc_slot *desc) |
72 | { | 102 | { |
73 | struct mv_xor_desc *hw_desc = desc->hw_desc; | 103 | struct mv_xor_desc *hw_desc = desc->hw_desc; |
@@ -228,8 +258,13 @@ mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan) | |||
228 | list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, | 258 | list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, |
229 | node) { | 259 | node) { |
230 | 260 | ||
231 | if (async_tx_test_ack(&iter->async_tx)) | 261 | if (async_tx_test_ack(&iter->async_tx)) { |
232 | list_move_tail(&iter->node, &mv_chan->free_slots); | 262 | list_move_tail(&iter->node, &mv_chan->free_slots); |
263 | if (!list_empty(&iter->sg_tx_list)) { | ||
264 | list_splice_tail_init(&iter->sg_tx_list, | ||
265 | &mv_chan->free_slots); | ||
266 | } | ||
267 | } | ||
233 | } | 268 | } |
234 | return 0; | 269 | return 0; |
235 | } | 270 | } |
@@ -244,11 +279,20 @@ mv_desc_clean_slot(struct mv_xor_desc_slot *desc, | |||
244 | /* the client is allowed to attach dependent operations | 279 | /* the client is allowed to attach dependent operations |
245 | * until 'ack' is set | 280 | * until 'ack' is set |
246 | */ | 281 | */ |
247 | if (!async_tx_test_ack(&desc->async_tx)) | 282 | if (!async_tx_test_ack(&desc->async_tx)) { |
248 | /* move this slot to the completed_slots */ | 283 | /* move this slot to the completed_slots */ |
249 | list_move_tail(&desc->node, &mv_chan->completed_slots); | 284 | list_move_tail(&desc->node, &mv_chan->completed_slots); |
250 | else | 285 | if (!list_empty(&desc->sg_tx_list)) { |
286 | list_splice_tail_init(&desc->sg_tx_list, | ||
287 | &mv_chan->completed_slots); | ||
288 | } | ||
289 | } else { | ||
251 | list_move_tail(&desc->node, &mv_chan->free_slots); | 290 | list_move_tail(&desc->node, &mv_chan->free_slots); |
291 | if (!list_empty(&desc->sg_tx_list)) { | ||
292 | list_splice_tail_init(&desc->sg_tx_list, | ||
293 | &mv_chan->free_slots); | ||
294 | } | ||
295 | } | ||
252 | 296 | ||
253 | return 0; | 297 | return 0; |
254 | } | 298 | } |
@@ -450,6 +494,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) | |||
450 | dma_async_tx_descriptor_init(&slot->async_tx, chan); | 494 | dma_async_tx_descriptor_init(&slot->async_tx, chan); |
451 | slot->async_tx.tx_submit = mv_xor_tx_submit; | 495 | slot->async_tx.tx_submit = mv_xor_tx_submit; |
452 | INIT_LIST_HEAD(&slot->node); | 496 | INIT_LIST_HEAD(&slot->node); |
497 | INIT_LIST_HEAD(&slot->sg_tx_list); | ||
453 | dma_desc = mv_chan->dma_desc_pool; | 498 | dma_desc = mv_chan->dma_desc_pool; |
454 | slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE; | 499 | slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE; |
455 | slot->idx = idx++; | 500 | slot->idx = idx++; |
@@ -617,6 +662,132 @@ mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) | |||
617 | return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags); | 662 | return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags); |
618 | } | 663 | } |
619 | 664 | ||
665 | /** | ||
666 | * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction | ||
667 | * @chan: DMA channel | ||
668 | * @dst_sg: Destination scatter list | ||
669 | * @dst_sg_len: Number of entries in destination scatter list | ||
670 | * @src_sg: Source scatter list | ||
671 | * @src_sg_len: Number of entries in source scatter list | ||
672 | * @flags: transfer ack flags | ||
673 | * | ||
674 | * Return: Async transaction descriptor on success and NULL on failure | ||
675 | */ | ||
676 | static struct dma_async_tx_descriptor * | ||
677 | mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg, | ||
678 | unsigned int dst_sg_len, struct scatterlist *src_sg, | ||
679 | unsigned int src_sg_len, unsigned long flags) | ||
680 | { | ||
681 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | ||
682 | struct mv_xor_desc_slot *new; | ||
683 | struct mv_xor_desc_slot *first = NULL; | ||
684 | struct mv_xor_desc_slot *prev = NULL; | ||
685 | size_t len, dst_avail, src_avail; | ||
686 | dma_addr_t dma_dst, dma_src; | ||
687 | int desc_cnt = 0; | ||
688 | int ret; | ||
689 | |||
690 | dev_dbg(mv_chan_to_devp(mv_chan), | ||
691 | "%s dst_sg_len: %d src_sg_len: %d flags: %ld\n", | ||
692 | __func__, dst_sg_len, src_sg_len, flags); | ||
693 | |||
694 | dst_avail = sg_dma_len(dst_sg); | ||
695 | src_avail = sg_dma_len(src_sg); | ||
696 | |||
697 | /* Run until we are out of scatterlist entries */ | ||
698 | while (true) { | ||
699 | /* Allocate and populate the descriptor */ | ||
700 | desc_cnt++; | ||
701 | new = mv_chan_alloc_slot(mv_chan); | ||
702 | if (!new) { | ||
703 | dev_err(mv_chan_to_devp(mv_chan), | ||
704 | "Out of descriptors (desc_cnt=%d)!\n", | ||
705 | desc_cnt); | ||
706 | goto err; | ||
707 | } | ||
708 | |||
709 | len = min_t(size_t, src_avail, dst_avail); | ||
710 | len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT); | ||
711 | if (len == 0) | ||
712 | goto fetch; | ||
713 | |||
714 | if (len < MV_XOR_MIN_BYTE_COUNT) { | ||
715 | dev_err(mv_chan_to_devp(mv_chan), | ||
716 | "Transfer size of %zu too small!\n", len); | ||
717 | goto err; | ||
718 | } | ||
719 | |||
720 | dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - | ||
721 | dst_avail; | ||
722 | dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - | ||
723 | src_avail; | ||
724 | |||
725 | /* Check if a new window needs to get added for 'dst' */ | ||
726 | ret = mv_xor_add_io_win(mv_chan, dma_dst); | ||
727 | if (ret) | ||
728 | goto err; | ||
729 | |||
730 | /* Check if a new window needs to get added for 'src' */ | ||
731 | ret = mv_xor_add_io_win(mv_chan, dma_src); | ||
732 | if (ret) | ||
733 | goto err; | ||
734 | |||
735 | /* Populate the descriptor */ | ||
736 | mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev); | ||
737 | prev = new; | ||
738 | dst_avail -= len; | ||
739 | src_avail -= len; | ||
740 | |||
741 | if (!first) | ||
742 | first = new; | ||
743 | else | ||
744 | list_move_tail(&new->node, &first->sg_tx_list); | ||
745 | |||
746 | fetch: | ||
747 | /* Fetch the next dst scatterlist entry */ | ||
748 | if (dst_avail == 0) { | ||
749 | if (dst_sg_len == 0) | ||
750 | break; | ||
751 | |||
752 | /* Fetch the next entry: if there are no more: done */ | ||
753 | dst_sg = sg_next(dst_sg); | ||
754 | if (dst_sg == NULL) | ||
755 | break; | ||
756 | |||
757 | dst_sg_len--; | ||
758 | dst_avail = sg_dma_len(dst_sg); | ||
759 | } | ||
760 | |||
761 | /* Fetch the next src scatterlist entry */ | ||
762 | if (src_avail == 0) { | ||
763 | if (src_sg_len == 0) | ||
764 | break; | ||
765 | |||
766 | /* Fetch the next entry: if there are no more: done */ | ||
767 | src_sg = sg_next(src_sg); | ||
768 | if (src_sg == NULL) | ||
769 | break; | ||
770 | |||
771 | src_sg_len--; | ||
772 | src_avail = sg_dma_len(src_sg); | ||
773 | } | ||
774 | } | ||
775 | |||
776 | /* Set the EOD flag in the last descriptor */ | ||
777 | mv_xor_desc_config_eod(new); | ||
778 | first->async_tx.flags = flags; | ||
779 | |||
780 | return &first->async_tx; | ||
781 | |||
782 | err: | ||
783 | /* Cleanup: Move all descriptors back into the free list */ | ||
784 | spin_lock_bh(&mv_chan->lock); | ||
785 | mv_desc_clean_slot(first, mv_chan); | ||
786 | spin_unlock_bh(&mv_chan->lock); | ||
787 | |||
788 | return NULL; | ||
789 | } | ||
790 | |||
620 | static void mv_xor_free_chan_resources(struct dma_chan *chan) | 791 | static void mv_xor_free_chan_resources(struct dma_chan *chan) |
621 | { | 792 | { |
622 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | 793 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); |
@@ -1083,6 +1254,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1083 | dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt; | 1254 | dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt; |
1084 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) | 1255 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) |
1085 | dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; | 1256 | dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; |
1257 | if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) | ||
1258 | dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg; | ||
1086 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | 1259 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { |
1087 | dma_dev->max_xor = 8; | 1260 | dma_dev->max_xor = 8; |
1088 | dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; | 1261 | dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; |
@@ -1132,10 +1305,11 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1132 | goto err_free_irq; | 1305 | goto err_free_irq; |
1133 | } | 1306 | } |
1134 | 1307 | ||
1135 | dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n", | 1308 | dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n", |
1136 | mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode", | 1309 | mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode", |
1137 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", | 1310 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", |
1138 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", | 1311 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", |
1312 | dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "", | ||
1139 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); | 1313 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); |
1140 | 1314 | ||
1141 | dma_async_device_register(dma_dev); | 1315 | dma_async_device_register(dma_dev); |
@@ -1378,6 +1552,7 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1378 | 1552 | ||
1379 | dma_cap_zero(cap_mask); | 1553 | dma_cap_zero(cap_mask); |
1380 | dma_cap_set(DMA_MEMCPY, cap_mask); | 1554 | dma_cap_set(DMA_MEMCPY, cap_mask); |
1555 | dma_cap_set(DMA_SG, cap_mask); | ||
1381 | dma_cap_set(DMA_XOR, cap_mask); | 1556 | dma_cap_set(DMA_XOR, cap_mask); |
1382 | dma_cap_set(DMA_INTERRUPT, cap_mask); | 1557 | dma_cap_set(DMA_INTERRUPT, cap_mask); |
1383 | 1558 | ||
@@ -1455,12 +1630,7 @@ static struct platform_driver mv_xor_driver = { | |||
1455 | }, | 1630 | }, |
1456 | }; | 1631 | }; |
1457 | 1632 | ||
1458 | 1633 | builtin_platform_driver(mv_xor_driver); | |
1459 | static int __init mv_xor_init(void) | ||
1460 | { | ||
1461 | return platform_driver_register(&mv_xor_driver); | ||
1462 | } | ||
1463 | device_initcall(mv_xor_init); | ||
1464 | 1634 | ||
1465 | /* | 1635 | /* |
1466 | MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>"); | 1636 | MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>"); |
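mv_xor_prep_dma_sg() above walks the source and destination scatterlists in lockstep, emitting one hardware descriptor per chunk of min(src_avail, dst_avail, MV_XOR_MAX_BYTE_COUNT) and then advancing whichever entry ran dry. A plain-C sketch of the same walk, with 'struct seg' as a hypothetical stand-in for a mapped scatterlist entry (sg_dma_address()/sg_dma_len()):

struct seg { unsigned long addr; size_t len; };

static int sg_walk(struct seg *dst, int ndst, struct seg *src, int nsrc,
		   size_t max_chunk)
{
	size_t dst_avail = dst->len, src_avail = src->len;
	int chunks = 0;

	for (;;) {
		size_t len = src_avail < dst_avail ? src_avail : dst_avail;

		if (len > max_chunk)
			len = max_chunk;
		if (len) {
			/* one hardware descriptor would be built here, copying
			 * 'len' bytes from src->addr + (src->len - src_avail)
			 * to dst->addr + (dst->len - dst_avail) */
			src_avail -= len;
			dst_avail -= len;
			chunks++;
		}
		if (!dst_avail) {		/* current dst entry consumed */
			if (--ndst == 0)
				break;
			dst++;
			dst_avail = dst->len;
		}
		if (!src_avail) {		/* current src entry consumed */
			if (--nsrc == 0)
				break;
			src++;
			src_avail = src->len;
		}
	}
	return chunks;
}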
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index 88eeab222a23..cf921dd6af73 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h | |||
@@ -148,6 +148,7 @@ struct mv_xor_chan { | |||
148 | */ | 148 | */ |
149 | struct mv_xor_desc_slot { | 149 | struct mv_xor_desc_slot { |
150 | struct list_head node; | 150 | struct list_head node; |
151 | struct list_head sg_tx_list; | ||
151 | enum dma_transaction_type type; | 152 | enum dma_transaction_type type; |
152 | void *hw_desc; | 153 | void *hw_desc; |
153 | u16 idx; | 154 | u16 idx; |
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 09de71519d37..3f45b9bdf201 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c | |||
@@ -225,6 +225,8 @@ struct nbpf_channel { | |||
225 | struct nbpf_device { | 225 | struct nbpf_device { |
226 | struct dma_device dma_dev; | 226 | struct dma_device dma_dev; |
227 | void __iomem *base; | 227 | void __iomem *base; |
228 | u32 max_burst_mem_read; | ||
229 | u32 max_burst_mem_write; | ||
228 | struct clk *clk; | 230 | struct clk *clk; |
229 | const struct nbpf_config *config; | 231 | const struct nbpf_config *config; |
230 | unsigned int eirq; | 232 | unsigned int eirq; |
@@ -425,10 +427,33 @@ static void nbpf_chan_configure(struct nbpf_channel *chan) | |||
425 | nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg); | 427 | nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg); |
426 | } | 428 | } |
427 | 429 | ||
428 | static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size) | 430 | static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size, |
431 | enum dma_transfer_direction direction) | ||
429 | { | 432 | { |
433 | int max_burst = nbpf->config->buffer_size * 8; | ||
434 | |||
435 | if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) { | ||
436 | switch (direction) { | ||
437 | case DMA_MEM_TO_MEM: | ||
438 | max_burst = min_not_zero(nbpf->max_burst_mem_read, | ||
439 | nbpf->max_burst_mem_write); | ||
440 | break; | ||
441 | case DMA_MEM_TO_DEV: | ||
442 | if (nbpf->max_burst_mem_read) | ||
443 | max_burst = nbpf->max_burst_mem_read; | ||
444 | break; | ||
445 | case DMA_DEV_TO_MEM: | ||
446 | if (nbpf->max_burst_mem_write) | ||
447 | max_burst = nbpf->max_burst_mem_write; | ||
448 | break; | ||
449 | case DMA_DEV_TO_DEV: | ||
450 | default: | ||
451 | break; | ||
452 | } | ||
453 | } | ||
454 | |||
430 | /* Maximum supported bursts depend on the buffer size */ | 455 | /* Maximum supported bursts depend on the buffer size */ |
431 | return min_t(int, __ffs(size), ilog2(nbpf->config->buffer_size * 8)); | 456 | return min_t(int, __ffs(size), ilog2(max_burst)); |
432 | } | 457 | } |
433 | 458 | ||
434 | static size_t nbpf_xfer_size(struct nbpf_device *nbpf, | 459 | static size_t nbpf_xfer_size(struct nbpf_device *nbpf, |
@@ -458,7 +483,7 @@ static size_t nbpf_xfer_size(struct nbpf_device *nbpf, | |||
458 | size = burst; | 483 | size = burst; |
459 | } | 484 | } |
460 | 485 | ||
461 | return nbpf_xfer_ds(nbpf, size); | 486 | return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE); |
462 | } | 487 | } |
463 | 488 | ||
464 | /* | 489 | /* |
@@ -507,7 +532,7 @@ static int nbpf_prep_one(struct nbpf_link_desc *ldesc, | |||
507 | * transfers we enable the SBE bit and terminate the transfer in our | 532 | * transfers we enable the SBE bit and terminate the transfer in our |
508 | * .device_pause handler. | 533 | * .device_pause handler. |
509 | */ | 534 | */ |
510 | mem_xfer = nbpf_xfer_ds(chan->nbpf, size); | 535 | mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction); |
511 | 536 | ||
512 | switch (direction) { | 537 | switch (direction) { |
513 | case DMA_DEV_TO_MEM: | 538 | case DMA_DEV_TO_MEM: |
@@ -1313,6 +1338,11 @@ static int nbpf_probe(struct platform_device *pdev) | |||
1313 | if (IS_ERR(nbpf->clk)) | 1338 | if (IS_ERR(nbpf->clk)) |
1314 | return PTR_ERR(nbpf->clk); | 1339 | return PTR_ERR(nbpf->clk); |
1315 | 1340 | ||
1341 | of_property_read_u32(np, "max-burst-mem-read", | ||
1342 | &nbpf->max_burst_mem_read); | ||
1343 | of_property_read_u32(np, "max-burst-mem-write", | ||
1344 | &nbpf->max_burst_mem_write); | ||
1345 | |||
1316 | nbpf->config = cfg; | 1346 | nbpf->config = cfg; |
1317 | 1347 | ||
1318 | for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) { | 1348 | for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) { |
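With the new max-burst-mem-read/write properties, nbpf_xfer_ds() above clamps the burst to the smaller of the transfer size and the configured (or hardware) maximum, expressed as a log2 data-size code; for DMA_MEM_TO_MEM the smaller non-zero limit wins via min_not_zero(). A hedged standalone sketch of that arithmetic, using __builtin_ctzl()/__builtin_clz() as stand-ins for the kernel's __ffs()/ilog2() and assuming both arguments are non-zero:

/* The hardware limit is config->buffer_size * 8 bytes; a device-tree
 * property can only lower it. */
static unsigned int xfer_ds(size_t size, unsigned int max_burst)
{
	unsigned int ds_size = __builtin_ctzl(size);	       /* __ffs(size)      */
	unsigned int ds_max  = 31 - __builtin_clz(max_burst); /* ilog2(max_burst) */

	return ds_size < ds_max ? ds_size : ds_max;
}

/* xfer_ds(4096, 128) == 7 -> 128-byte bursts (hardware limit)
 * xfer_ds(4096,  32) == 5 ->  32-byte bursts (capped by the property)
 * xfer_ds(   6, 128) == 1 ->   2-byte bursts (size only 2-byte aligned) */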
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 7ca27d4b1c54..ac68666cd3f4 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
@@ -166,6 +166,9 @@ enum { | |||
166 | CSDP_DST_BURST_16 = 1 << 14, | 166 | CSDP_DST_BURST_16 = 1 << 14, |
167 | CSDP_DST_BURST_32 = 2 << 14, | 167 | CSDP_DST_BURST_32 = 2 << 14, |
168 | CSDP_DST_BURST_64 = 3 << 14, | 168 | CSDP_DST_BURST_64 = 3 << 14, |
169 | CSDP_WRITE_NON_POSTED = 0 << 16, | ||
170 | CSDP_WRITE_POSTED = 1 << 16, | ||
171 | CSDP_WRITE_LAST_NON_POSTED = 2 << 16, | ||
169 | 172 | ||
170 | CICR_TOUT_IE = BIT(0), /* OMAP1 only */ | 173 | CICR_TOUT_IE = BIT(0), /* OMAP1 only */ |
171 | CICR_DROP_IE = BIT(1), | 174 | CICR_DROP_IE = BIT(1), |
@@ -422,7 +425,30 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d) | |||
422 | c->running = true; | 425 | c->running = true; |
423 | } | 426 | } |
424 | 427 | ||
425 | static void omap_dma_stop(struct omap_chan *c) | 428 | static void omap_dma_drain_chan(struct omap_chan *c) |
429 | { | ||
430 | int i; | ||
431 | u32 val; | ||
432 | |||
433 | /* Wait for sDMA FIFO to drain */ | ||
434 | for (i = 0; ; i++) { | ||
435 | val = omap_dma_chan_read(c, CCR); | ||
436 | if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))) | ||
437 | break; | ||
438 | |||
439 | if (i > 100) | ||
440 | break; | ||
441 | |||
442 | udelay(5); | ||
443 | } | ||
444 | |||
445 | if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)) | ||
446 | dev_err(c->vc.chan.device->dev, | ||
447 | "DMA drain did not complete on lch %d\n", | ||
448 | c->dma_ch); | ||
449 | } | ||
450 | |||
451 | static int omap_dma_stop(struct omap_chan *c) | ||
426 | { | 452 | { |
427 | struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); | 453 | struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); |
428 | uint32_t val; | 454 | uint32_t val; |
@@ -435,7 +461,6 @@ static void omap_dma_stop(struct omap_chan *c) | |||
435 | val = omap_dma_chan_read(c, CCR); | 461 | val = omap_dma_chan_read(c, CCR); |
436 | if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) { | 462 | if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) { |
437 | uint32_t sysconfig; | 463 | uint32_t sysconfig; |
438 | unsigned i; | ||
439 | 464 | ||
440 | sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG); | 465 | sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG); |
441 | val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK; | 466 | val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK; |
@@ -446,27 +471,19 @@ static void omap_dma_stop(struct omap_chan *c) | |||
446 | val &= ~CCR_ENABLE; | 471 | val &= ~CCR_ENABLE; |
447 | omap_dma_chan_write(c, CCR, val); | 472 | omap_dma_chan_write(c, CCR, val); |
448 | 473 | ||
449 | /* Wait for sDMA FIFO to drain */ | 474 | if (!(c->ccr & CCR_BUFFERING_DISABLE)) |
450 | for (i = 0; ; i++) { | 475 | omap_dma_drain_chan(c); |
451 | val = omap_dma_chan_read(c, CCR); | ||
452 | if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))) | ||
453 | break; | ||
454 | |||
455 | if (i > 100) | ||
456 | break; | ||
457 | |||
458 | udelay(5); | ||
459 | } | ||
460 | |||
461 | if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)) | ||
462 | dev_err(c->vc.chan.device->dev, | ||
463 | "DMA drain did not complete on lch %d\n", | ||
464 | c->dma_ch); | ||
465 | 476 | ||
466 | omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig); | 477 | omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig); |
467 | } else { | 478 | } else { |
479 | if (!(val & CCR_ENABLE)) | ||
480 | return -EINVAL; | ||
481 | |||
468 | val &= ~CCR_ENABLE; | 482 | val &= ~CCR_ENABLE; |
469 | omap_dma_chan_write(c, CCR, val); | 483 | omap_dma_chan_write(c, CCR, val); |
484 | |||
485 | if (!(c->ccr & CCR_BUFFERING_DISABLE)) | ||
486 | omap_dma_drain_chan(c); | ||
470 | } | 487 | } |
471 | 488 | ||
472 | mb(); | 489 | mb(); |
@@ -481,8 +498,8 @@ static void omap_dma_stop(struct omap_chan *c) | |||
481 | 498 | ||
482 | omap_dma_chan_write(c, CLNK_CTRL, val); | 499 | omap_dma_chan_write(c, CLNK_CTRL, val); |
483 | } | 500 | } |
484 | |||
485 | c->running = false; | 501 | c->running = false; |
502 | return 0; | ||
486 | } | 503 | } |
487 | 504 | ||
488 | static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d) | 505 | static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d) |
@@ -836,6 +853,8 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan, | |||
836 | } else { | 853 | } else { |
837 | txstate->residue = 0; | 854 | txstate->residue = 0; |
838 | } | 855 | } |
856 | if (ret == DMA_IN_PROGRESS && c->paused) | ||
857 | ret = DMA_PAUSED; | ||
839 | spin_unlock_irqrestore(&c->vc.lock, flags); | 858 | spin_unlock_irqrestore(&c->vc.lock, flags); |
840 | 859 | ||
841 | return ret; | 860 | return ret; |
@@ -865,15 +884,18 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( | |||
865 | unsigned i, es, en, frame_bytes; | 884 | unsigned i, es, en, frame_bytes; |
866 | bool ll_failed = false; | 885 | bool ll_failed = false; |
867 | u32 burst; | 886 | u32 burst; |
887 | u32 port_window, port_window_bytes; | ||
868 | 888 | ||
869 | if (dir == DMA_DEV_TO_MEM) { | 889 | if (dir == DMA_DEV_TO_MEM) { |
870 | dev_addr = c->cfg.src_addr; | 890 | dev_addr = c->cfg.src_addr; |
871 | dev_width = c->cfg.src_addr_width; | 891 | dev_width = c->cfg.src_addr_width; |
872 | burst = c->cfg.src_maxburst; | 892 | burst = c->cfg.src_maxburst; |
893 | port_window = c->cfg.src_port_window_size; | ||
873 | } else if (dir == DMA_MEM_TO_DEV) { | 894 | } else if (dir == DMA_MEM_TO_DEV) { |
874 | dev_addr = c->cfg.dst_addr; | 895 | dev_addr = c->cfg.dst_addr; |
875 | dev_width = c->cfg.dst_addr_width; | 896 | dev_width = c->cfg.dst_addr_width; |
876 | burst = c->cfg.dst_maxburst; | 897 | burst = c->cfg.dst_maxburst; |
898 | port_window = c->cfg.dst_port_window_size; | ||
877 | } else { | 899 | } else { |
878 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); | 900 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); |
879 | return NULL; | 901 | return NULL; |
@@ -894,6 +916,12 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( | |||
894 | return NULL; | 916 | return NULL; |
895 | } | 917 | } |
896 | 918 | ||
919 | /* When the port_window is used, one frame must cover the window */ | ||
920 | if (port_window) { | ||
921 | burst = port_window; | ||
922 | port_window_bytes = port_window * es_bytes[es]; | ||
923 | } | ||
924 | |||
897 | /* Now allocate and setup the descriptor. */ | 925 | /* Now allocate and setup the descriptor. */ |
898 | d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC); | 926 | d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC); |
899 | if (!d) | 927 | if (!d) |
@@ -905,11 +933,45 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( | |||
905 | 933 | ||
906 | d->ccr = c->ccr | CCR_SYNC_FRAME; | 934 | d->ccr = c->ccr | CCR_SYNC_FRAME; |
907 | if (dir == DMA_DEV_TO_MEM) { | 935 | if (dir == DMA_DEV_TO_MEM) { |
908 | d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT; | ||
909 | d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED; | 936 | d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED; |
937 | |||
938 | d->ccr |= CCR_DST_AMODE_POSTINC; | ||
939 | if (port_window) { | ||
940 | d->ccr |= CCR_SRC_AMODE_DBLIDX; | ||
941 | d->ei = 1; | ||
942 | /* | ||
943 | * One frame covers the port_window and by configuring | ||
944 | * the source frame index to be -1 * (port_window_bytes - 1) | ||
945 | * we instruct the sDMA that after a frame is processed | ||
946 | * it should move back to the start of the window. | ||
947 | */ | ||
948 | d->fi = -(port_window_bytes - 1); | ||
949 | |||
950 | if (port_window_bytes >= 64) | ||
951 | d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED; | ||
952 | else if (port_window_bytes >= 32) | ||
953 | d->csdp = CSDP_SRC_BURST_32 | CSDP_SRC_PACKED; | ||
954 | else if (port_window_bytes >= 16) | ||
955 | d->csdp = CSDP_SRC_BURST_16 | CSDP_SRC_PACKED; | ||
956 | } else { | ||
957 | d->ccr |= CCR_SRC_AMODE_CONSTANT; | ||
958 | } | ||
910 | } else { | 959 | } else { |
911 | d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC; | ||
912 | d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED; | 960 | d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED; |
961 | |||
962 | d->ccr |= CCR_SRC_AMODE_POSTINC; | ||
963 | if (port_window) { | ||
964 | d->ccr |= CCR_DST_AMODE_DBLIDX; | ||
965 | |||
966 | if (port_window_bytes >= 64) | ||
967 | d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED; | ||
968 | else if (port_window_bytes >= 32) | ||
969 | d->csdp = CSDP_DST_BURST_32 | CSDP_DST_PACKED; | ||
970 | else if (port_window_bytes >= 16) | ||
971 | d->csdp = CSDP_DST_BURST_16 | CSDP_DST_PACKED; | ||
972 | } else { | ||
973 | d->ccr |= CCR_DST_AMODE_CONSTANT; | ||
974 | } | ||
913 | } | 975 | } |
914 | 976 | ||
915 | d->cicr = CICR_DROP_IE | CICR_BLOCK_IE; | 977 | d->cicr = CICR_DROP_IE | CICR_BLOCK_IE; |
@@ -927,6 +989,9 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( | |||
927 | d->ccr |= CCR_TRIGGER_SRC; | 989 | d->ccr |= CCR_TRIGGER_SRC; |
928 | 990 | ||
929 | d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; | 991 | d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; |
992 | |||
993 | if (port_window) | ||
994 | d->csdp |= CSDP_WRITE_LAST_NON_POSTED; | ||
930 | } | 995 | } |
931 | if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS) | 996 | if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS) |
932 | d->clnk_ctrl = c->dma_ch; | 997 | d->clnk_ctrl = c->dma_ch; |
@@ -952,6 +1017,16 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( | |||
952 | osg->addr = sg_dma_address(sgent); | 1017 | osg->addr = sg_dma_address(sgent); |
953 | osg->en = en; | 1018 | osg->en = en; |
954 | osg->fn = sg_dma_len(sgent) / frame_bytes; | 1019 | osg->fn = sg_dma_len(sgent) / frame_bytes; |
1020 | if (port_window && dir == DMA_MEM_TO_DEV) { | ||
1021 | osg->ei = 1; | ||
1022 | /* | ||
1023 | * One frame covers the port_window and by configuring | ||
1024 | * the source frame index to be -1 * (port_window_bytes - 1) | ||
1025 | * we instruct the sDMA that after a frame is processed | ||
1026 | * it should move back to the start of the window. | ||
1027 | */ | ||
1028 | osg->fi = -(port_window_bytes - 1); | ||
1029 | } | ||
955 | 1030 | ||
956 | if (d->using_ll) { | 1031 | if (d->using_ll) { |
957 | osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC, | 1032 | osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC, |
@@ -1247,10 +1322,8 @@ static int omap_dma_terminate_all(struct dma_chan *chan) | |||
1247 | omap_dma_stop(c); | 1322 | omap_dma_stop(c); |
1248 | } | 1323 | } |
1249 | 1324 | ||
1250 | if (c->cyclic) { | 1325 | c->cyclic = false; |
1251 | c->cyclic = false; | 1326 | c->paused = false; |
1252 | c->paused = false; | ||
1253 | } | ||
1254 | 1327 | ||
1255 | vchan_get_all_descriptors(&c->vc, &head); | 1328 | vchan_get_all_descriptors(&c->vc, &head); |
1256 | spin_unlock_irqrestore(&c->vc.lock, flags); | 1329 | spin_unlock_irqrestore(&c->vc.lock, flags); |
@@ -1269,28 +1342,66 @@ static void omap_dma_synchronize(struct dma_chan *chan) | |||
1269 | static int omap_dma_pause(struct dma_chan *chan) | 1342 | static int omap_dma_pause(struct dma_chan *chan) |
1270 | { | 1343 | { |
1271 | struct omap_chan *c = to_omap_dma_chan(chan); | 1344 | struct omap_chan *c = to_omap_dma_chan(chan); |
1345 | struct omap_dmadev *od = to_omap_dma_dev(chan->device); | ||
1346 | unsigned long flags; | ||
1347 | int ret = -EINVAL; | ||
1348 | bool can_pause = false; | ||
1272 | 1349 | ||
1273 | /* Pause/Resume only allowed with cyclic mode */ | 1350 | spin_lock_irqsave(&od->irq_lock, flags); |
1274 | if (!c->cyclic) | ||
1275 | return -EINVAL; | ||
1276 | 1351 | ||
1277 | if (!c->paused) { | 1352 | if (!c->desc) |
1278 | omap_dma_stop(c); | 1353 | goto out; |
1279 | c->paused = true; | 1354 | |
1355 | if (c->cyclic) | ||
1356 | can_pause = true; | ||
1357 | |||
1358 | /* | ||
1359 | * We do not allow DMA_MEM_TO_DEV transfers to be paused. | ||
1360 | * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer: | ||
1361 | * "When a channel is disabled during a transfer, the channel undergoes | ||
1362 | * an abort, unless it is hardware-source-synchronized …". | ||
1363 | * A source-synchronised channel is one where the fetching of data is | ||
1364 | * under control of the device. In other words, a device-to-memory | ||
1365 | * transfer. So, a destination-synchronised channel (which would be a | ||
1366 | * memory-to-device transfer) undergoes an abort if the CCR_ENABLE | ||
1367 | * bit is cleared. | ||
1368 | * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel | ||
1369 | * aborts immediately after completion of current read/write | ||
1370 | * transactions and then the FIFO is cleaned up." The term "cleaned up" | ||
1371 | * is not defined. TI recommends checking that RD_ACTIVE and WR_ACTIVE | ||
1372 | * are both clear _before_ disabling the channel, otherwise data loss | ||
1373 | * will occur. | ||
1374 | * The problem is that if the channel is active, then device activity | ||
1375 | * can result in DMA activity starting between reading those as both | ||
1376 | * clear and the write to DMA_CCR to clear the enable bit hitting the | ||
1377 | * hardware. If the DMA hardware can't drain the data in its FIFO to the | ||
1378 | * destination, then data loss "might" occur (say if we write to a UART | ||
1379 | * and the UART is not accepting any further data). | ||
1380 | */ | ||
1381 | else if (c->desc->dir == DMA_DEV_TO_MEM) | ||
1382 | can_pause = true; | ||
1383 | |||
1384 | if (can_pause && !c->paused) { | ||
1385 | ret = omap_dma_stop(c); | ||
1386 | if (!ret) | ||
1387 | c->paused = true; | ||
1280 | } | 1388 | } |
1389 | out: | ||
1390 | spin_unlock_irqrestore(&od->irq_lock, flags); | ||
1281 | 1391 | ||
1282 | return 0; | 1392 | return ret; |
1283 | } | 1393 | } |
1284 | 1394 | ||
1285 | static int omap_dma_resume(struct dma_chan *chan) | 1395 | static int omap_dma_resume(struct dma_chan *chan) |
1286 | { | 1396 | { |
1287 | struct omap_chan *c = to_omap_dma_chan(chan); | 1397 | struct omap_chan *c = to_omap_dma_chan(chan); |
1398 | struct omap_dmadev *od = to_omap_dma_dev(chan->device); | ||
1399 | unsigned long flags; | ||
1400 | int ret = -EINVAL; | ||
1288 | 1401 | ||
1289 | /* Pause/Resume only allowed with cyclic mode */ | 1402 | spin_lock_irqsave(&od->irq_lock, flags); |
1290 | if (!c->cyclic) | ||
1291 | return -EINVAL; | ||
1292 | 1403 | ||
1293 | if (c->paused) { | 1404 | if (c->paused && c->desc) { |
1294 | mb(); | 1405 | mb(); |
1295 | 1406 | ||
1296 | /* Restore channel link register */ | 1407 | /* Restore channel link register */ |
@@ -1298,9 +1409,11 @@ static int omap_dma_resume(struct dma_chan *chan) | |||
1298 | 1409 | ||
1299 | omap_dma_start(c, c->desc); | 1410 | omap_dma_start(c, c->desc); |
1300 | c->paused = false; | 1411 | c->paused = false; |
1412 | ret = 0; | ||
1301 | } | 1413 | } |
1414 | spin_unlock_irqrestore(&od->irq_lock, flags); | ||
1302 | 1415 | ||
1303 | return 0; | 1416 | return ret; |
1304 | } | 1417 | } |
1305 | 1418 | ||
1306 | static int omap_dma_chan_init(struct omap_dmadev *od) | 1419 | static int omap_dma_chan_init(struct omap_dmadev *od) |
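For the port_window support added above, the negative frame index is what rewinds the device-side address back to the start of the window after every frame. Illustrative numbers (not taken from any particular board):

/* A window of 4 elements accessed 32 bits at a time: */
unsigned int port_window       = 4;                       /* elements          */
unsigned int es_bytes          = 4;                       /* bytes per element */
unsigned int port_window_bytes = port_window * es_bytes;  /* 16 bytes          */
int fi = -(int)(port_window_bytes - 1);                   /* -15               */

/* Each frame walks forward through the 16-byte window; the -15 frame
 * index then pulls the address back to the window's first byte before
 * the next frame starts, as the driver comments above describe.  With
 * 16 bytes the driver also selects CSDP_*_BURST_16 | CSDP_*_PACKED. */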
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index df95727dc2fb..f9028e9d0dfc 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -417,10 +417,8 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) | |||
417 | { | 417 | { |
418 | struct pch_dma_desc *desc = to_pd_desc(txd); | 418 | struct pch_dma_desc *desc = to_pd_desc(txd); |
419 | struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan); | 419 | struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan); |
420 | dma_cookie_t cookie; | ||
421 | 420 | ||
422 | spin_lock(&pd_chan->lock); | 421 | spin_lock(&pd_chan->lock); |
423 | cookie = dma_cookie_assign(txd); | ||
424 | 422 | ||
425 | if (list_empty(&pd_chan->active_list)) { | 423 | if (list_empty(&pd_chan->active_list)) { |
426 | list_add_tail(&desc->desc_node, &pd_chan->active_list); | 424 | list_add_tail(&desc->desc_node, &pd_chan->active_list); |
@@ -439,9 +437,8 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags) | |||
439 | struct pch_dma *pd = to_pd(chan->device); | 437 | struct pch_dma *pd = to_pd(chan->device); |
440 | dma_addr_t addr; | 438 | dma_addr_t addr; |
441 | 439 | ||
442 | desc = pci_pool_alloc(pd->pool, flags, &addr); | 440 | desc = pci_pool_zalloc(pd->pool, flags, &addr); |
443 | if (desc) { | 441 | if (desc) { |
444 | memset(desc, 0, sizeof(struct pch_dma_desc)); | ||
445 | INIT_LIST_HEAD(&desc->tx_list); | 442 | INIT_LIST_HEAD(&desc->tx_list); |
446 | dma_async_tx_descriptor_init(&desc->txd, chan); | 443 | dma_async_tx_descriptor_init(&desc->txd, chan); |
447 | desc->txd.tx_submit = pd_tx_submit; | 444 | desc->txd.tx_submit = pd_tx_submit; |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 030fe05ed43b..87fd01539fcb 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -570,7 +570,8 @@ static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[], | |||
570 | 570 | ||
571 | buf[0] = CMD_DMAADDH; | 571 | buf[0] = CMD_DMAADDH; |
572 | buf[0] |= (da << 1); | 572 | buf[0] |= (da << 1); |
573 | *((__le16 *)&buf[1]) = cpu_to_le16(val); | 573 | buf[1] = val; |
574 | buf[2] = val >> 8; | ||
574 | 575 | ||
575 | PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n", | 576 | PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n", |
576 | da == 1 ? "DA" : "SA", val); | 577 | da == 1 ? "DA" : "SA", val); |
@@ -724,7 +725,10 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[], | |||
724 | 725 | ||
725 | buf[0] = CMD_DMAMOV; | 726 | buf[0] = CMD_DMAMOV; |
726 | buf[1] = dst; | 727 | buf[1] = dst; |
727 | *((__le32 *)&buf[2]) = cpu_to_le32(val); | 728 | buf[2] = val; |
729 | buf[3] = val >> 8; | ||
730 | buf[4] = val >> 16; | ||
731 | buf[5] = val >> 24; | ||
728 | 732 | ||
729 | PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n", | 733 | PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n", |
730 | dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val); | 734 | dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val); |
@@ -899,10 +903,11 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[], | |||
899 | 903 | ||
900 | buf[0] = CMD_DMAGO; | 904 | buf[0] = CMD_DMAGO; |
901 | buf[0] |= (ns << 1); | 905 | buf[0] |= (ns << 1); |
902 | |||
903 | buf[1] = chan & 0x7; | 906 | buf[1] = chan & 0x7; |
904 | 907 | buf[2] = addr; | |
905 | *((__le32 *)&buf[2]) = cpu_to_le32(addr); | 908 | buf[3] = addr >> 8; |
909 | buf[4] = addr >> 16; | ||
910 | buf[5] = addr >> 24; | ||
906 | 911 | ||
907 | return SZ_DMAGO; | 912 | return SZ_DMAGO; |
908 | } | 913 | } |
@@ -1883,11 +1888,8 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330) | |||
1883 | 1888 | ||
1884 | static int pl330_add(struct pl330_dmac *pl330) | 1889 | static int pl330_add(struct pl330_dmac *pl330) |
1885 | { | 1890 | { |
1886 | void __iomem *regs; | ||
1887 | int i, ret; | 1891 | int i, ret; |
1888 | 1892 | ||
1889 | regs = pl330->base; | ||
1890 | |||
1891 | /* Check if we can handle this DMAC */ | 1893 | /* Check if we can handle this DMAC */ |
1892 | if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) { | 1894 | if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) { |
1893 | dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n", | 1895 | dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n", |
@@ -2263,6 +2265,11 @@ static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch, | |||
2263 | } | 2265 | } |
2264 | pm_runtime_mark_last_busy(pch->dmac->ddma.dev); | 2266 | pm_runtime_mark_last_busy(pch->dmac->ddma.dev); |
2265 | pm_runtime_put_autosuspend(pl330->ddma.dev); | 2267 | pm_runtime_put_autosuspend(pl330->ddma.dev); |
2268 | |||
2269 | /* If DMAMOV hasn't finished yet, SAR/DAR can be zero */ | ||
2270 | if (!val) | ||
2271 | return 0; | ||
2272 | |||
2266 | return val - addr; | 2273 | return val - addr; |
2267 | } | 2274 | } |
2268 | 2275 | ||
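The pl330 hunks above replace casts such as *((__le32 *)&buf[2]) = cpu_to_le32(val) with byte-wise stores: the microcode buffer is only byte-aligned, so a 32-bit store through that cast is an unaligned access that some CPUs trap on. The byte-wise form produces the same little-endian byte order on any CPU; the kernel's put_unaligned_le32() helper would also work. A minimal helper expressing the pattern (not part of the driver):

/* Store a 32-bit value little-endian at an arbitrarily aligned address. */
static void put_le32(u8 *buf, u32 val)
{
	buf[0] = val & 0xff;
	buf[1] = (val >> 8) & 0xff;
	buf[2] = (val >> 16) & 0xff;
	buf[3] = (val >> 24) & 0xff;
}

/* _emit_MOV() and _emit_GO() above effectively do put_le32(&buf[2], val);
 * _emit_ADDH() does the 16-bit equivalent for its immediate. */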
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 3f56f9ca4482..b53fb618bbf6 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c | |||
@@ -413,15 +413,6 @@ static inline void pxad_init_debugfs(struct pxad_device *pdev) {} | |||
413 | static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {} | 413 | static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {} |
414 | #endif | 414 | #endif |
415 | 415 | ||
416 | /* | ||
417 | * In the transition phase where legacy pxa handling is done at the same time as | ||
418 | * mmp_dma, the DMA physical channel split between the 2 DMA providers is done | ||
419 | * through legacy_reserved. Legacy code reserves DMA channels by settings | ||
420 | * corresponding bits in legacy_reserved. | ||
421 | */ | ||
422 | static u32 legacy_reserved; | ||
423 | static u32 legacy_unavailable; | ||
424 | |||
425 | static struct pxad_phy *lookup_phy(struct pxad_chan *pchan) | 416 | static struct pxad_phy *lookup_phy(struct pxad_chan *pchan) |
426 | { | 417 | { |
427 | int prio, i; | 418 | int prio, i; |
@@ -442,14 +433,10 @@ static struct pxad_phy *lookup_phy(struct pxad_chan *pchan) | |||
442 | for (i = 0; i < pdev->nr_chans; i++) { | 433 | for (i = 0; i < pdev->nr_chans; i++) { |
443 | if (prio != (i & 0xf) >> 2) | 434 | if (prio != (i & 0xf) >> 2) |
444 | continue; | 435 | continue; |
445 | if ((i < 32) && (legacy_reserved & BIT(i))) | ||
446 | continue; | ||
447 | phy = &pdev->phys[i]; | 436 | phy = &pdev->phys[i]; |
448 | if (!phy->vchan) { | 437 | if (!phy->vchan) { |
449 | phy->vchan = pchan; | 438 | phy->vchan = pchan; |
450 | found = phy; | 439 | found = phy; |
451 | if (i < 32) | ||
452 | legacy_unavailable |= BIT(i); | ||
453 | goto out_unlock; | 440 | goto out_unlock; |
454 | } | 441 | } |
455 | } | 442 | } |
@@ -469,7 +456,6 @@ static void pxad_free_phy(struct pxad_chan *chan) | |||
469 | struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device); | 456 | struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device); |
470 | unsigned long flags; | 457 | unsigned long flags; |
471 | u32 reg; | 458 | u32 reg; |
472 | int i; | ||
473 | 459 | ||
474 | dev_dbg(&chan->vc.chan.dev->device, | 460 | dev_dbg(&chan->vc.chan.dev->device, |
475 | "%s(): freeing\n", __func__); | 461 | "%s(): freeing\n", __func__); |
@@ -483,9 +469,6 @@ static void pxad_free_phy(struct pxad_chan *chan) | |||
483 | } | 469 | } |
484 | 470 | ||
485 | spin_lock_irqsave(&pdev->phy_lock, flags); | 471 | spin_lock_irqsave(&pdev->phy_lock, flags); |
486 | for (i = 0; i < 32; i++) | ||
487 | if (chan->phy == &pdev->phys[i]) | ||
488 | legacy_unavailable &= ~BIT(i); | ||
489 | chan->phy->vchan = NULL; | 472 | chan->phy->vchan = NULL; |
490 | chan->phy = NULL; | 473 | chan->phy = NULL; |
491 | spin_unlock_irqrestore(&pdev->phy_lock, flags); | 474 | spin_unlock_irqrestore(&pdev->phy_lock, flags); |
@@ -739,8 +722,6 @@ static irqreturn_t pxad_int_handler(int irq, void *dev_id) | |||
739 | i = __ffs(dint); | 722 | i = __ffs(dint); |
740 | dint &= (dint - 1); | 723 | dint &= (dint - 1); |
741 | phy = &pdev->phys[i]; | 724 | phy = &pdev->phys[i]; |
742 | if ((i < 32) && (legacy_reserved & BIT(i))) | ||
743 | continue; | ||
744 | if (pxad_chan_handler(irq, phy) == IRQ_HANDLED) | 725 | if (pxad_chan_handler(irq, phy) == IRQ_HANDLED) |
745 | ret = IRQ_HANDLED; | 726 | ret = IRQ_HANDLED; |
746 | } | 727 | } |
@@ -1522,15 +1503,6 @@ bool pxad_filter_fn(struct dma_chan *chan, void *param) | |||
1522 | } | 1503 | } |
1523 | EXPORT_SYMBOL_GPL(pxad_filter_fn); | 1504 | EXPORT_SYMBOL_GPL(pxad_filter_fn); |
1524 | 1505 | ||
1525 | int pxad_toggle_reserved_channel(int legacy_channel) | ||
1526 | { | ||
1527 | if (legacy_unavailable & (BIT(legacy_channel))) | ||
1528 | return -EBUSY; | ||
1529 | legacy_reserved ^= BIT(legacy_channel); | ||
1530 | return 0; | ||
1531 | } | ||
1532 | EXPORT_SYMBOL_GPL(pxad_toggle_reserved_channel); | ||
1533 | |||
1534 | module_platform_driver(pxad_driver); | 1506 | module_platform_driver(pxad_driver); |
1535 | 1507 | ||
1536 | MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver"); | 1508 | MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver"); |
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index e244e10a94b5..3c982c96b4b7 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #include <linux/irq.h> | 56 | #include <linux/irq.h> |
57 | #include <linux/atomic.h> | 57 | #include <linux/atomic.h> |
58 | #include <linux/pm_runtime.h> | 58 | #include <linux/pm_runtime.h> |
59 | #include <linux/msi.h> | ||
59 | 60 | ||
60 | #include "../dmaengine.h" | 61 | #include "../dmaengine.h" |
61 | #include "hidma.h" | 62 | #include "hidma.h" |
@@ -70,6 +71,7 @@ | |||
70 | #define HIDMA_ERR_INFO_SW 0xFF | 71 | #define HIDMA_ERR_INFO_SW 0xFF |
71 | #define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0 | 72 | #define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0 |
72 | #define HIDMA_NR_DEFAULT_DESC 10 | 73 | #define HIDMA_NR_DEFAULT_DESC 10 |
74 | #define HIDMA_MSI_INTS 11 | ||
73 | 75 | ||
74 | static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev) | 76 | static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev) |
75 | { | 77 | { |
@@ -553,6 +555,17 @@ static irqreturn_t hidma_chirq_handler(int chirq, void *arg) | |||
553 | return hidma_ll_inthandler(chirq, lldev); | 555 | return hidma_ll_inthandler(chirq, lldev); |
554 | } | 556 | } |
555 | 557 | ||
558 | #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN | ||
559 | static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg) | ||
560 | { | ||
561 | struct hidma_lldev **lldevp = arg; | ||
562 | struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp); | ||
563 | |||
564 | return hidma_ll_inthandler_msi(chirq, *lldevp, | ||
565 | 1 << (chirq - dmadev->msi_virqbase)); | ||
566 | } | ||
567 | #endif | ||
568 | |||
556 | static ssize_t hidma_show_values(struct device *dev, | 569 | static ssize_t hidma_show_values(struct device *dev, |
557 | struct device_attribute *attr, char *buf) | 570 | struct device_attribute *attr, char *buf) |
558 | { | 571 | { |
@@ -567,8 +580,13 @@ static ssize_t hidma_show_values(struct device *dev, | |||
567 | return strlen(buf); | 580 | return strlen(buf); |
568 | } | 581 | } |
569 | 582 | ||
570 | static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, | 583 | static inline void hidma_sysfs_uninit(struct hidma_dev *dev) |
571 | int mode) | 584 | { |
585 | device_remove_file(dev->ddev.dev, dev->chid_attrs); | ||
586 | } | ||
587 | |||
588 | static struct device_attribute* | ||
589 | hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode) | ||
572 | { | 590 | { |
573 | struct device_attribute *attrs; | 591 | struct device_attribute *attrs; |
574 | char *name_copy; | 592 | char *name_copy; |
@@ -576,18 +594,125 @@ static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, | |||
576 | attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute), | 594 | attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute), |
577 | GFP_KERNEL); | 595 | GFP_KERNEL); |
578 | if (!attrs) | 596 | if (!attrs) |
579 | return -ENOMEM; | 597 | return NULL; |
580 | 598 | ||
581 | name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL); | 599 | name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL); |
582 | if (!name_copy) | 600 | if (!name_copy) |
583 | return -ENOMEM; | 601 | return NULL; |
584 | 602 | ||
585 | attrs->attr.name = name_copy; | 603 | attrs->attr.name = name_copy; |
586 | attrs->attr.mode = mode; | 604 | attrs->attr.mode = mode; |
587 | attrs->show = hidma_show_values; | 605 | attrs->show = hidma_show_values; |
588 | sysfs_attr_init(&attrs->attr); | 606 | sysfs_attr_init(&attrs->attr); |
589 | 607 | ||
590 | return device_create_file(dev->ddev.dev, attrs); | 608 | return attrs; |
609 | } | ||
610 | |||
611 | static int hidma_sysfs_init(struct hidma_dev *dev) | ||
612 | { | ||
613 | dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO); | ||
614 | if (!dev->chid_attrs) | ||
615 | return -ENOMEM; | ||
616 | |||
617 | return device_create_file(dev->ddev.dev, dev->chid_attrs); | ||
618 | } | ||
619 | |||
620 | #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN | ||
621 | static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) | ||
622 | { | ||
623 | struct device *dev = msi_desc_to_dev(desc); | ||
624 | struct hidma_dev *dmadev = dev_get_drvdata(dev); | ||
625 | |||
626 | if (!desc->platform.msi_index) { | ||
627 | writel(msg->address_lo, dmadev->dev_evca + 0x118); | ||
628 | writel(msg->address_hi, dmadev->dev_evca + 0x11C); | ||
629 | writel(msg->data, dmadev->dev_evca + 0x120); | ||
630 | } | ||
631 | } | ||
632 | #endif | ||
633 | |||
634 | static void hidma_free_msis(struct hidma_dev *dmadev) | ||
635 | { | ||
636 | #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN | ||
637 | struct device *dev = dmadev->ddev.dev; | ||
638 | struct msi_desc *desc; | ||
639 | |||
640 | /* free allocated MSI interrupts above */ | ||
641 | for_each_msi_entry(desc, dev) | ||
642 | devm_free_irq(dev, desc->irq, &dmadev->lldev); | ||
643 | |||
644 | platform_msi_domain_free_irqs(dev); | ||
645 | #endif | ||
646 | } | ||
647 | |||
648 | static int hidma_request_msi(struct hidma_dev *dmadev, | ||
649 | struct platform_device *pdev) | ||
650 | { | ||
651 | #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN | ||
652 | int rc; | ||
653 | struct msi_desc *desc; | ||
654 | struct msi_desc *failed_desc = NULL; | ||
655 | |||
656 | rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS, | ||
657 | hidma_write_msi_msg); | ||
658 | if (rc) | ||
659 | return rc; | ||
660 | |||
661 | for_each_msi_entry(desc, &pdev->dev) { | ||
662 | if (!desc->platform.msi_index) | ||
663 | dmadev->msi_virqbase = desc->irq; | ||
664 | |||
665 | rc = devm_request_irq(&pdev->dev, desc->irq, | ||
666 | hidma_chirq_handler_msi, | ||
667 | 0, "qcom-hidma-msi", | ||
668 | &dmadev->lldev); | ||
669 | if (rc) { | ||
670 | failed_desc = desc; | ||
671 | break; | ||
672 | } | ||
673 | } | ||
674 | |||
675 | if (rc) { | ||
676 | /* free allocated MSI interrupts above */ | ||
677 | for_each_msi_entry(desc, &pdev->dev) { | ||
678 | if (desc == failed_desc) | ||
679 | break; | ||
680 | devm_free_irq(&pdev->dev, desc->irq, | ||
681 | &dmadev->lldev); | ||
682 | } | ||
683 | } else { | ||
684 | /* MSI setup succeeded: switch the low-level driver to MSI mode */ | ||
685 | hidma_ll_setup_irq(dmadev->lldev, true); | ||
686 | |||
687 | } | ||
688 | if (rc) | ||
689 | dev_warn(&pdev->dev, | ||
690 | "failed to request MSI irq, falling back to wired IRQ\n"); | ||
691 | return rc; | ||
692 | #else | ||
693 | return -EINVAL; | ||
694 | #endif | ||
695 | } | ||
696 | |||
697 | static bool hidma_msi_capable(struct device *dev) | ||
698 | { | ||
699 | struct acpi_device *adev = ACPI_COMPANION(dev); | ||
700 | const char *of_compat; | ||
701 | int ret = -EINVAL; | ||
702 | |||
703 | if (!adev || acpi_disabled) { | ||
704 | ret = device_property_read_string(dev, "compatible", | ||
705 | &of_compat); | ||
706 | if (ret) | ||
707 | return false; | ||
708 | |||
709 | ret = strcmp(of_compat, "qcom,hidma-1.1"); | ||
710 | } else { | ||
711 | #ifdef CONFIG_ACPI | ||
712 | ret = strcmp(acpi_device_hid(adev), "QCOM8062"); | ||
713 | #endif | ||
714 | } | ||
715 | return ret == 0; | ||
591 | } | 716 | } |
592 | 717 | ||
593 | static int hidma_probe(struct platform_device *pdev) | 718 | static int hidma_probe(struct platform_device *pdev) |
@@ -599,6 +724,7 @@ static int hidma_probe(struct platform_device *pdev) | |||
599 | void __iomem *evca; | 724 | void __iomem *evca; |
600 | void __iomem *trca; | 725 | void __iomem *trca; |
601 | int rc; | 726 | int rc; |
727 | bool msi; | ||
602 | 728 | ||
603 | pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT); | 729 | pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT); |
604 | pm_runtime_use_autosuspend(&pdev->dev); | 730 | pm_runtime_use_autosuspend(&pdev->dev); |
@@ -660,6 +786,12 @@ static int hidma_probe(struct platform_device *pdev) | |||
660 | dmadev->ddev.device_terminate_all = hidma_terminate_all; | 786 | dmadev->ddev.device_terminate_all = hidma_terminate_all; |
661 | dmadev->ddev.copy_align = 8; | 787 | dmadev->ddev.copy_align = 8; |
662 | 788 | ||
789 | /* | ||
790 | * Determine the MSI capability of the platform. Old HW doesn't | ||
791 | * support MSI. | ||
792 | */ | ||
793 | msi = hidma_msi_capable(&pdev->dev); | ||
794 | |||
663 | device_property_read_u32(&pdev->dev, "desc-count", | 795 | device_property_read_u32(&pdev->dev, "desc-count", |
664 | &dmadev->nr_descriptors); | 796 | &dmadev->nr_descriptors); |
665 | 797 | ||
@@ -688,10 +820,17 @@ static int hidma_probe(struct platform_device *pdev) | |||
688 | goto dmafree; | 820 | goto dmafree; |
689 | } | 821 | } |
690 | 822 | ||
691 | rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0, | 823 | platform_set_drvdata(pdev, dmadev); |
692 | "qcom-hidma", dmadev->lldev); | 824 | if (msi) |
693 | if (rc) | 825 | rc = hidma_request_msi(dmadev, pdev); |
694 | goto uninit; | 826 | |
827 | if (!msi || rc) { | ||
828 | hidma_ll_setup_irq(dmadev->lldev, false); | ||
829 | rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, | ||
830 | 0, "qcom-hidma", dmadev->lldev); | ||
831 | if (rc) | ||
832 | goto uninit; | ||
833 | } | ||
695 | 834 | ||
696 | INIT_LIST_HEAD(&dmadev->ddev.channels); | 835 | INIT_LIST_HEAD(&dmadev->ddev.channels); |
697 | rc = hidma_chan_init(dmadev, 0); | 836 | rc = hidma_chan_init(dmadev, 0); |
@@ -705,14 +844,16 @@ static int hidma_probe(struct platform_device *pdev) | |||
705 | dmadev->irq = chirq; | 844 | dmadev->irq = chirq; |
706 | tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev); | 845 | tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev); |
707 | hidma_debug_init(dmadev); | 846 | hidma_debug_init(dmadev); |
708 | hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO); | 847 | hidma_sysfs_init(dmadev); |
709 | dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n"); | 848 | dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n"); |
710 | platform_set_drvdata(pdev, dmadev); | ||
711 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | 849 | pm_runtime_mark_last_busy(dmadev->ddev.dev); |
712 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | 850 | pm_runtime_put_autosuspend(dmadev->ddev.dev); |
713 | return 0; | 851 | return 0; |
714 | 852 | ||
715 | uninit: | 853 | uninit: |
854 | if (msi) | ||
855 | hidma_free_msis(dmadev); | ||
856 | |||
716 | hidma_debug_uninit(dmadev); | 857 | hidma_debug_uninit(dmadev); |
717 | hidma_ll_uninit(dmadev->lldev); | 858 | hidma_ll_uninit(dmadev->lldev); |
718 | dmafree: | 859 | dmafree: |
@@ -730,8 +871,13 @@ static int hidma_remove(struct platform_device *pdev) | |||
730 | 871 | ||
731 | pm_runtime_get_sync(dmadev->ddev.dev); | 872 | pm_runtime_get_sync(dmadev->ddev.dev); |
732 | dma_async_device_unregister(&dmadev->ddev); | 873 | dma_async_device_unregister(&dmadev->ddev); |
733 | devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev); | 874 | if (!dmadev->lldev->msi_support) |
875 | devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev); | ||
876 | else | ||
877 | hidma_free_msis(dmadev); | ||
878 | |||
734 | tasklet_kill(&dmadev->task); | 879 | tasklet_kill(&dmadev->task); |
880 | hidma_sysfs_uninit(dmadev); | ||
735 | hidma_debug_uninit(dmadev); | 881 | hidma_debug_uninit(dmadev); |
736 | hidma_ll_uninit(dmadev->lldev); | 882 | hidma_ll_uninit(dmadev->lldev); |
737 | hidma_free(dmadev); | 883 | hidma_free(dmadev); |
@@ -746,12 +892,15 @@ static int hidma_remove(struct platform_device *pdev) | |||
746 | #if IS_ENABLED(CONFIG_ACPI) | 892 | #if IS_ENABLED(CONFIG_ACPI) |
747 | static const struct acpi_device_id hidma_acpi_ids[] = { | 893 | static const struct acpi_device_id hidma_acpi_ids[] = { |
748 | {"QCOM8061"}, | 894 | {"QCOM8061"}, |
895 | {"QCOM8062"}, | ||
749 | {}, | 896 | {}, |
750 | }; | 897 | }; |
898 | MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids); | ||
751 | #endif | 899 | #endif |
752 | 900 | ||
753 | static const struct of_device_id hidma_match[] = { | 901 | static const struct of_device_id hidma_match[] = { |
754 | {.compatible = "qcom,hidma-1.0",}, | 902 | {.compatible = "qcom,hidma-1.0",}, |
903 | {.compatible = "qcom,hidma-1.1",}, | ||
755 | {}, | 904 | {}, |
756 | }; | 905 | }; |
757 | MODULE_DEVICE_TABLE(of, hidma_match); | 906 | MODULE_DEVICE_TABLE(of, hidma_match); |
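The probe-time interrupt changes in the hidma.c hunks above boil down to: prefer MSI when the firmware node advertises it (qcom,hidma-1.1 / QCOM8062), otherwise fall back to the wired channel IRQ. A minimal sketch follows; hidma_setup_interrupts() is a hypothetical wrapper used only for illustration, while hidma_msi_capable(), hidma_request_msi(), hidma_ll_setup_irq() and hidma_chirq_handler() are the driver's own symbols as used in this series.

static int hidma_setup_interrupts(struct hidma_dev *dmadev,
				  struct platform_device *pdev, int chirq)
{
	bool msi = hidma_msi_capable(&pdev->dev);
	int rc = -EINVAL;

	if (msi)
		rc = hidma_request_msi(dmadev, pdev);

	if (!msi || rc) {
		/* No MSI, or MSI setup failed: use the wired channel IRQ */
		hidma_ll_setup_irq(dmadev->lldev, false);
		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
				      0, "qcom-hidma", dmadev->lldev);
	}

	return rc;
}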
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h index e52e20716303..c7d014235c32 100644 --- a/drivers/dma/qcom/hidma.h +++ b/drivers/dma/qcom/hidma.h | |||
@@ -46,6 +46,7 @@ struct hidma_tre { | |||
46 | }; | 46 | }; |
47 | 47 | ||
48 | struct hidma_lldev { | 48 | struct hidma_lldev { |
49 | bool msi_support; /* flag indicating MSI support */ | ||
49 | bool initialized; /* initialized flag */ | 50 | bool initialized; /* initialized flag */ |
50 | u8 trch_state; /* trch_state of the device */ | 51 | u8 trch_state; /* trch_state of the device */ |
51 | u8 evch_state; /* evch_state of the device */ | 52 | u8 evch_state; /* evch_state of the device */ |
@@ -58,7 +59,7 @@ struct hidma_lldev { | |||
58 | void __iomem *evca; /* Event Channel address */ | 59 | void __iomem *evca; /* Event Channel address */ |
59 | struct hidma_tre | 60 | struct hidma_tre |
60 | **pending_tre_list; /* Pointers to pending TREs */ | 61 | **pending_tre_list; /* Pointers to pending TREs */ |
61 | s32 pending_tre_count; /* Number of TREs pending */ | 62 | atomic_t pending_tre_count; /* Number of TREs pending */ |
62 | 63 | ||
63 | void *tre_ring; /* TRE ring */ | 64 | void *tre_ring; /* TRE ring */ |
64 | dma_addr_t tre_dma; /* TRE ring to be shared with HW */ | 65 | dma_addr_t tre_dma; /* TRE ring to be shared with HW */ |
@@ -114,6 +115,7 @@ struct hidma_dev { | |||
114 | int irq; | 115 | int irq; |
115 | int chidx; | 116 | int chidx; |
116 | u32 nr_descriptors; | 117 | u32 nr_descriptors; |
118 | int msi_virqbase; | ||
117 | 119 | ||
118 | struct hidma_lldev *lldev; | 120 | struct hidma_lldev *lldev; |
119 | void __iomem *dev_trca; | 121 | void __iomem *dev_trca; |
@@ -128,6 +130,9 @@ struct hidma_dev { | |||
128 | struct dentry *debugfs; | 130 | struct dentry *debugfs; |
129 | struct dentry *stats; | 131 | struct dentry *stats; |
130 | 132 | ||
133 | /* sysfs entry for the channel id */ | ||
134 | struct device_attribute *chid_attrs; | ||
135 | |||
131 | /* Task delivering issue_pending */ | 136 | /* Task delivering issue_pending */ |
132 | struct tasklet_struct task; | 137 | struct tasklet_struct task; |
133 | }; | 138 | }; |
@@ -145,12 +150,14 @@ int hidma_ll_disable(struct hidma_lldev *lldev); | |||
145 | int hidma_ll_enable(struct hidma_lldev *llhndl); | 150 | int hidma_ll_enable(struct hidma_lldev *llhndl); |
146 | void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch, | 151 | void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch, |
147 | dma_addr_t src, dma_addr_t dest, u32 len, u32 flags); | 152 | dma_addr_t src, dma_addr_t dest, u32 len, u32 flags); |
153 | void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi); | ||
148 | int hidma_ll_setup(struct hidma_lldev *lldev); | 154 | int hidma_ll_setup(struct hidma_lldev *lldev); |
149 | struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels, | 155 | struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels, |
150 | void __iomem *trca, void __iomem *evca, | 156 | void __iomem *trca, void __iomem *evca, |
151 | u8 chidx); | 157 | u8 chidx); |
152 | int hidma_ll_uninit(struct hidma_lldev *llhndl); | 158 | int hidma_ll_uninit(struct hidma_lldev *llhndl); |
153 | irqreturn_t hidma_ll_inthandler(int irq, void *arg); | 159 | irqreturn_t hidma_ll_inthandler(int irq, void *arg); |
160 | irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause); | ||
154 | void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info, | 161 | void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info, |
155 | u8 err_code); | 162 | u8 err_code); |
156 | int hidma_debug_init(struct hidma_dev *dmadev); | 163 | int hidma_debug_init(struct hidma_dev *dmadev); |
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c index fa827e5ffd68..3bdcb8056a36 100644 --- a/drivers/dma/qcom/hidma_dbg.c +++ b/drivers/dma/qcom/hidma_dbg.c | |||
@@ -74,7 +74,8 @@ static void hidma_ll_devstats(struct seq_file *s, void *llhndl) | |||
74 | seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma); | 74 | seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma); |
75 | seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size); | 75 | seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size); |
76 | seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off); | 76 | seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off); |
77 | seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count); | 77 | seq_printf(s, "pending_tre_count=%d\n", |
78 | atomic_read(&lldev->pending_tre_count)); | ||
78 | seq_printf(s, "evca=%p\n", lldev->evca); | 79 | seq_printf(s, "evca=%p\n", lldev->evca); |
79 | seq_printf(s, "evre_ring=%p\n", lldev->evre_ring); | 80 | seq_printf(s, "evre_ring=%p\n", lldev->evre_ring); |
80 | seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma); | 81 | seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma); |
@@ -164,7 +165,6 @@ static const struct file_operations hidma_dma_fops = { | |||
164 | void hidma_debug_uninit(struct hidma_dev *dmadev) | 165 | void hidma_debug_uninit(struct hidma_dev *dmadev) |
165 | { | 166 | { |
166 | debugfs_remove_recursive(dmadev->debugfs); | 167 | debugfs_remove_recursive(dmadev->debugfs); |
167 | debugfs_remove_recursive(dmadev->stats); | ||
168 | } | 168 | } |
169 | 169 | ||
170 | int hidma_debug_init(struct hidma_dev *dmadev) | 170 | int hidma_debug_init(struct hidma_dev *dmadev) |
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c index 3224f24c577b..6645bdf0d151 100644 --- a/drivers/dma/qcom/hidma_ll.c +++ b/drivers/dma/qcom/hidma_ll.c | |||
@@ -198,13 +198,16 @@ static void hidma_ll_tre_complete(unsigned long arg) | |||
198 | } | 198 | } |
199 | } | 199 | } |
200 | 200 | ||
201 | static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator, | 201 | static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info, |
202 | u8 err_info, u8 err_code) | 202 | u8 err_code) |
203 | { | 203 | { |
204 | struct hidma_tre *tre; | 204 | struct hidma_tre *tre; |
205 | unsigned long flags; | 205 | unsigned long flags; |
206 | u32 tre_iterator; | ||
206 | 207 | ||
207 | spin_lock_irqsave(&lldev->lock, flags); | 208 | spin_lock_irqsave(&lldev->lock, flags); |
209 | |||
210 | tre_iterator = lldev->tre_processed_off; | ||
208 | tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE]; | 211 | tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE]; |
209 | if (!tre) { | 212 | if (!tre) { |
210 | spin_unlock_irqrestore(&lldev->lock, flags); | 213 | spin_unlock_irqrestore(&lldev->lock, flags); |
@@ -218,12 +221,14 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator, | |||
218 | * Keep track of pending TREs that SW is expecting to receive | 221 | * Keep track of pending TREs that SW is expecting to receive |
219 | * from HW. We got one now. Decrement our counter. | 222 | * from HW. We got one now. Decrement our counter. |
220 | */ | 223 | */ |
221 | lldev->pending_tre_count--; | 224 | if (atomic_dec_return(&lldev->pending_tre_count) < 0) { |
222 | if (lldev->pending_tre_count < 0) { | ||
223 | dev_warn(lldev->dev, "tre count mismatch on completion"); | 225 | dev_warn(lldev->dev, "tre count mismatch on completion"); |
224 | lldev->pending_tre_count = 0; | 226 | atomic_set(&lldev->pending_tre_count, 0); |
225 | } | 227 | } |
226 | 228 | ||
229 | HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE, | ||
230 | lldev->tre_ring_size); | ||
231 | lldev->tre_processed_off = tre_iterator; | ||
227 | spin_unlock_irqrestore(&lldev->lock, flags); | 232 | spin_unlock_irqrestore(&lldev->lock, flags); |
228 | 233 | ||
229 | tre->err_info = err_info; | 234 | tre->err_info = err_info; |
@@ -245,13 +250,11 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator, | |||
245 | static int hidma_handle_tre_completion(struct hidma_lldev *lldev) | 250 | static int hidma_handle_tre_completion(struct hidma_lldev *lldev) |
246 | { | 251 | { |
247 | u32 evre_ring_size = lldev->evre_ring_size; | 252 | u32 evre_ring_size = lldev->evre_ring_size; |
248 | u32 tre_ring_size = lldev->tre_ring_size; | ||
249 | u32 err_info, err_code, evre_write_off; | 253 | u32 err_info, err_code, evre_write_off; |
250 | u32 tre_iterator, evre_iterator; | 254 | u32 evre_iterator; |
251 | u32 num_completed = 0; | 255 | u32 num_completed = 0; |
252 | 256 | ||
253 | evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG); | 257 | evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG); |
254 | tre_iterator = lldev->tre_processed_off; | ||
255 | evre_iterator = lldev->evre_processed_off; | 258 | evre_iterator = lldev->evre_processed_off; |
256 | 259 | ||
257 | if ((evre_write_off > evre_ring_size) || | 260 | if ((evre_write_off > evre_ring_size) || |
@@ -274,12 +277,9 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev) | |||
274 | err_code = | 277 | err_code = |
275 | (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK; | 278 | (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK; |
276 | 279 | ||
277 | if (hidma_post_completed(lldev, tre_iterator, err_info, | 280 | if (hidma_post_completed(lldev, err_info, err_code)) |
278 | err_code)) | ||
279 | break; | 281 | break; |
280 | 282 | ||
281 | HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE, | ||
282 | tre_ring_size); | ||
283 | HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE, | 283 | HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE, |
284 | evre_ring_size); | 284 | evre_ring_size); |
285 | 285 | ||
@@ -291,21 +291,22 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev) | |||
291 | evre_write_off = | 291 | evre_write_off = |
292 | readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG); | 292 | readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG); |
293 | num_completed++; | 293 | num_completed++; |
294 | |||
295 | /* | ||
296 | * An error interrupt might have arrived while we are processing | ||
297 | * the completed interrupt. | ||
298 | */ | ||
299 | if (!hidma_ll_isenabled(lldev)) | ||
300 | break; | ||
294 | } | 301 | } |
295 | 302 | ||
296 | if (num_completed) { | 303 | if (num_completed) { |
297 | u32 evre_read_off = (lldev->evre_processed_off + | 304 | u32 evre_read_off = (lldev->evre_processed_off + |
298 | HIDMA_EVRE_SIZE * num_completed); | 305 | HIDMA_EVRE_SIZE * num_completed); |
299 | u32 tre_read_off = (lldev->tre_processed_off + | ||
300 | HIDMA_TRE_SIZE * num_completed); | ||
301 | |||
302 | evre_read_off = evre_read_off % evre_ring_size; | 306 | evre_read_off = evre_read_off % evre_ring_size; |
303 | tre_read_off = tre_read_off % tre_ring_size; | ||
304 | |||
305 | writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG); | 307 | writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG); |
306 | 308 | ||
307 | /* record the last processed tre offset */ | 309 | /* record the last processed tre offset */ |
308 | lldev->tre_processed_off = tre_read_off; | ||
309 | lldev->evre_processed_off = evre_read_off; | 310 | lldev->evre_processed_off = evre_read_off; |
310 | } | 311 | } |
311 | 312 | ||
@@ -315,27 +316,10 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev) | |||
315 | void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info, | 316 | void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info, |
316 | u8 err_code) | 317 | u8 err_code) |
317 | { | 318 | { |
318 | u32 tre_iterator; | 319 | while (atomic_read(&lldev->pending_tre_count)) { |
319 | u32 tre_ring_size = lldev->tre_ring_size; | 320 | if (hidma_post_completed(lldev, err_info, err_code)) |
320 | int num_completed = 0; | ||
321 | u32 tre_read_off; | ||
322 | |||
323 | tre_iterator = lldev->tre_processed_off; | ||
324 | while (lldev->pending_tre_count) { | ||
325 | if (hidma_post_completed(lldev, tre_iterator, err_info, | ||
326 | err_code)) | ||
327 | break; | 321 | break; |
328 | HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE, | ||
329 | tre_ring_size); | ||
330 | num_completed++; | ||
331 | } | 322 | } |
332 | tre_read_off = (lldev->tre_processed_off + | ||
333 | HIDMA_TRE_SIZE * num_completed); | ||
334 | |||
335 | tre_read_off = tre_read_off % tre_ring_size; | ||
336 | |||
337 | /* record the last processed tre offset */ | ||
338 | lldev->tre_processed_off = tre_read_off; | ||
339 | } | 323 | } |
340 | 324 | ||
341 | static int hidma_ll_reset(struct hidma_lldev *lldev) | 325 | static int hidma_ll_reset(struct hidma_lldev *lldev) |
@@ -412,12 +396,24 @@ static int hidma_ll_reset(struct hidma_lldev *lldev) | |||
412 | * requests traditionally to the destination, this concept does not apply | 396 | * requests traditionally to the destination, this concept does not apply |
413 | * here for this HW. | 397 | * here for this HW. |
414 | */ | 398 | */ |
415 | irqreturn_t hidma_ll_inthandler(int chirq, void *arg) | 399 | static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause) |
416 | { | 400 | { |
417 | struct hidma_lldev *lldev = arg; | 401 | if (cause & HIDMA_ERR_INT_MASK) { |
418 | u32 status; | 402 | dev_err(lldev->dev, "error 0x%x, disabling...\n", |
419 | u32 enable; | 403 | cause); |
420 | u32 cause; | 404 | |
405 | /* Clear out pending interrupts */ | ||
406 | writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
407 | |||
408 | /* No further submissions. */ | ||
409 | hidma_ll_disable(lldev); | ||
410 | |||
411 | /* Driver completes the txn and intimates the client.*/ | ||
412 | hidma_cleanup_pending_tre(lldev, 0xFF, | ||
413 | HIDMA_EVRE_STATUS_ERROR); | ||
414 | |||
415 | return; | ||
416 | } | ||
421 | 417 | ||
422 | /* | 418 | /* |
423 | * Fine tuned for this HW... | 419 | * Fine tuned for this HW... |
@@ -426,35 +422,28 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg) | |||
426 | * read and write accessors are used for performance reasons due to | 422 | * read and write accessors are used for performance reasons due to |
427 | * interrupt delivery guarantees. Do not copy this code blindly and | 423 | * interrupt delivery guarantees. Do not copy this code blindly and |
428 | * expect that to work. | 424 | * expect that to work. |
425 | * | ||
426 | * Try to consume as many EVREs as possible. | ||
429 | */ | 427 | */ |
428 | hidma_handle_tre_completion(lldev); | ||
429 | |||
430 | /* We consumed TREs or there are pending TREs or EVREs. */ | ||
431 | writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
432 | } | ||
433 | |||
434 | irqreturn_t hidma_ll_inthandler(int chirq, void *arg) | ||
435 | { | ||
436 | struct hidma_lldev *lldev = arg; | ||
437 | u32 status; | ||
438 | u32 enable; | ||
439 | u32 cause; | ||
440 | |||
430 | status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); | 441 | status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); |
431 | enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | 442 | enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG); |
432 | cause = status & enable; | 443 | cause = status & enable; |
433 | 444 | ||
434 | while (cause) { | 445 | while (cause) { |
435 | if (cause & HIDMA_ERR_INT_MASK) { | 446 | hidma_ll_int_handler_internal(lldev, cause); |
436 | dev_err(lldev->dev, "error 0x%x, disabling...\n", | ||
437 | cause); | ||
438 | |||
439 | /* Clear out pending interrupts */ | ||
440 | writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
441 | |||
442 | /* No further submissions. */ | ||
443 | hidma_ll_disable(lldev); | ||
444 | |||
445 | /* Driver completes the txn and intimates the client.*/ | ||
446 | hidma_cleanup_pending_tre(lldev, 0xFF, | ||
447 | HIDMA_EVRE_STATUS_ERROR); | ||
448 | goto out; | ||
449 | } | ||
450 | |||
451 | /* | ||
452 | * Try to consume as many EVREs as possible. | ||
453 | */ | ||
454 | hidma_handle_tre_completion(lldev); | ||
455 | |||
456 | /* We consumed TREs or there are pending TREs or EVREs. */ | ||
457 | writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
458 | 447 | ||
459 | /* | 448 | /* |
460 | * Another interrupt might have arrived while we are | 449 | * Another interrupt might have arrived while we are |
@@ -465,7 +454,14 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg) | |||
465 | cause = status & enable; | 454 | cause = status & enable; |
466 | } | 455 | } |
467 | 456 | ||
468 | out: | 457 | return IRQ_HANDLED; |
458 | } | ||
459 | |||
460 | irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause) | ||
461 | { | ||
462 | struct hidma_lldev *lldev = arg; | ||
463 | |||
464 | hidma_ll_int_handler_internal(lldev, cause); | ||
469 | return IRQ_HANDLED; | 465 | return IRQ_HANDLED; |
470 | } | 466 | } |
471 | 467 | ||
@@ -548,7 +544,7 @@ void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch) | |||
548 | tre->err_code = 0; | 544 | tre->err_code = 0; |
549 | tre->err_info = 0; | 545 | tre->err_info = 0; |
550 | tre->queued = 1; | 546 | tre->queued = 1; |
551 | lldev->pending_tre_count++; | 547 | atomic_inc(&lldev->pending_tre_count); |
552 | lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE) | 548 | lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE) |
553 | % lldev->tre_ring_size; | 549 | % lldev->tre_ring_size; |
554 | spin_unlock_irqrestore(&lldev->lock, flags); | 550 | spin_unlock_irqrestore(&lldev->lock, flags); |
@@ -564,19 +560,8 @@ int hidma_ll_disable(struct hidma_lldev *lldev) | |||
564 | u32 val; | 560 | u32 val; |
565 | int ret; | 561 | int ret; |
566 | 562 | ||
567 | val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | 563 | /* The channel needs to be in working state */ |
568 | lldev->evch_state = HIDMA_CH_STATE(val); | 564 | if (!hidma_ll_isenabled(lldev)) |
569 | val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
570 | lldev->trch_state = HIDMA_CH_STATE(val); | ||
571 | |||
572 | /* already suspended by this OS */ | ||
573 | if ((lldev->trch_state == HIDMA_CH_SUSPENDED) || | ||
574 | (lldev->evch_state == HIDMA_CH_SUSPENDED)) | ||
575 | return 0; | ||
576 | |||
577 | /* already stopped by the manager */ | ||
578 | if ((lldev->trch_state == HIDMA_CH_STOPPED) || | ||
579 | (lldev->evch_state == HIDMA_CH_STOPPED)) | ||
580 | return 0; | 565 | return 0; |
581 | 566 | ||
582 | val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | 567 | val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); |
@@ -654,7 +639,7 @@ int hidma_ll_setup(struct hidma_lldev *lldev) | |||
654 | u32 val; | 639 | u32 val; |
655 | u32 nr_tres = lldev->nr_tres; | 640 | u32 nr_tres = lldev->nr_tres; |
656 | 641 | ||
657 | lldev->pending_tre_count = 0; | 642 | atomic_set(&lldev->pending_tre_count, 0); |
658 | lldev->tre_processed_off = 0; | 643 | lldev->tre_processed_off = 0; |
659 | lldev->evre_processed_off = 0; | 644 | lldev->evre_processed_off = 0; |
660 | lldev->tre_write_offset = 0; | 645 | lldev->tre_write_offset = 0; |
@@ -691,17 +676,36 @@ int hidma_ll_setup(struct hidma_lldev *lldev) | |||
691 | writel(HIDMA_EVRE_SIZE * nr_tres, | 676 | writel(HIDMA_EVRE_SIZE * nr_tres, |
692 | lldev->evca + HIDMA_EVCA_RING_LEN_REG); | 677 | lldev->evca + HIDMA_EVCA_RING_LEN_REG); |
693 | 678 | ||
694 | /* support IRQ only for now */ | 679 | /* configure interrupts */ |
680 | hidma_ll_setup_irq(lldev, lldev->msi_support); | ||
681 | |||
682 | rc = hidma_ll_enable(lldev); | ||
683 | if (rc) | ||
684 | return rc; | ||
685 | |||
686 | return rc; | ||
687 | } | ||
688 | |||
689 | void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi) | ||
690 | { | ||
691 | u32 val; | ||
692 | |||
693 | lldev->msi_support = msi; | ||
694 | |||
695 | /* disable interrupts again after reset */ | ||
696 | writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
697 | writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
698 | |||
699 | /* support IRQ by default */ | ||
695 | val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG); | 700 | val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG); |
696 | val &= ~0xF; | 701 | val &= ~0xF; |
697 | val |= 0x1; | 702 | if (!lldev->msi_support) |
703 | val = val | 0x1; | ||
698 | writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG); | 704 | writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG); |
699 | 705 | ||
700 | /* clear all pending interrupts and enable them */ | 706 | /* clear all pending interrupts and enable them */ |
701 | writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | 707 | writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); |
702 | writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | 708 | writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); |
703 | |||
704 | return hidma_ll_enable(lldev); | ||
705 | } | 709 | } |
706 | 710 | ||
707 | struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres, | 711 | struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres, |
@@ -816,7 +820,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev) | |||
816 | tasklet_kill(&lldev->task); | 820 | tasklet_kill(&lldev->task); |
817 | memset(lldev->trepool, 0, required_bytes); | 821 | memset(lldev->trepool, 0, required_bytes); |
818 | lldev->trepool = NULL; | 822 | lldev->trepool = NULL; |
819 | lldev->pending_tre_count = 0; | 823 | atomic_set(&lldev->pending_tre_count, 0); |
820 | lldev->tre_write_offset = 0; | 824 | lldev->tre_write_offset = 0; |
821 | 825 | ||
822 | rc = hidma_ll_reset(lldev); | 826 | rc = hidma_ll_reset(lldev); |
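The hidma_ll.c hunks above convert pending_tre_count from a lock-protected s32 into an atomic_t and fold the TRE iterator bookkeeping into hidma_post_completed(). A minimal sketch of the resulting counter discipline, keeping the driver's field name and the same clamp-on-underflow behaviour; the helper names here are illustrative only.

static void hidma_tre_submitted(struct hidma_lldev *lldev)
{
	/* producer side: one more TRE handed to the hardware */
	atomic_inc(&lldev->pending_tre_count);
}

static void hidma_tre_retired(struct hidma_lldev *lldev)
{
	/* consumer side: a completion (or error cleanup) retires one TRE */
	if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
		/* more completions seen than submissions; warn and clamp */
		dev_warn(lldev->dev, "tre count mismatch on completion");
		atomic_set(&lldev->pending_tre_count, 0);
	}
}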
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c index 82f36e466083..f847d32cc4b5 100644 --- a/drivers/dma/qcom/hidma_mgmt.c +++ b/drivers/dma/qcom/hidma_mgmt.c | |||
@@ -282,6 +282,7 @@ static const struct acpi_device_id hidma_mgmt_acpi_ids[] = { | |||
282 | {"QCOM8060"}, | 282 | {"QCOM8060"}, |
283 | {}, | 283 | {}, |
284 | }; | 284 | }; |
285 | MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids); | ||
285 | #endif | 286 | #endif |
286 | 287 | ||
287 | static const struct of_device_id hidma_mgmt_match[] = { | 288 | static const struct of_device_id hidma_mgmt_match[] = { |
@@ -375,8 +376,15 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np) | |||
375 | ret = PTR_ERR(new_pdev); | 376 | ret = PTR_ERR(new_pdev); |
376 | goto out; | 377 | goto out; |
377 | } | 378 | } |
379 | of_node_get(child); | ||
380 | new_pdev->dev.of_node = child; | ||
378 | of_dma_configure(&new_pdev->dev, child); | 381 | of_dma_configure(&new_pdev->dev, child); |
379 | 382 | /* | |
383 | * It is assumed that calling of_msi_configure is safe on | ||
384 | * platforms with or without MSI support. | ||
385 | */ | ||
386 | of_msi_configure(&new_pdev->dev, child); | ||
387 | of_node_put(child); | ||
380 | kfree(res); | 388 | kfree(res); |
381 | res = NULL; | 389 | res = NULL; |
382 | } | 390 | } |
@@ -395,7 +403,6 @@ static int __init hidma_mgmt_init(void) | |||
395 | for_each_matching_node(child, hidma_mgmt_match) { | 403 | for_each_matching_node(child, hidma_mgmt_match) { |
396 | /* device tree based firmware here */ | 404 | /* device tree based firmware here */ |
397 | hidma_mgmt_of_populate_channels(child); | 405 | hidma_mgmt_of_populate_channels(child); |
398 | of_node_put(child); | ||
399 | } | 406 | } |
400 | #endif | 407 | #endif |
401 | platform_driver_register(&hidma_mgmt_driver); | 408 | platform_driver_register(&hidma_mgmt_driver); |
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 3c579abbabb7..f04c4702d98b 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c | |||
@@ -289,16 +289,11 @@ static | |||
289 | struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan) | 289 | struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan) |
290 | { | 290 | { |
291 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | 291 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; |
292 | const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; | ||
293 | struct s3c24xx_dma_channel *cdata; | ||
294 | struct s3c24xx_dma_phy *phy = NULL; | 292 | struct s3c24xx_dma_phy *phy = NULL; |
295 | unsigned long flags; | 293 | unsigned long flags; |
296 | int i; | 294 | int i; |
297 | int ret; | 295 | int ret; |
298 | 296 | ||
299 | if (s3cchan->slave) | ||
300 | cdata = &pdata->channels[s3cchan->id]; | ||
301 | |||
302 | for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) { | 297 | for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) { |
303 | phy = &s3cdma->phy_chans[i]; | 298 | phy = &s3cdma->phy_chans[i]; |
304 | 299 | ||
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index 06ecdc38cee0..72c649713ace 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c | |||
@@ -652,7 +652,6 @@ static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg) | |||
652 | static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec, | 652 | static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec, |
653 | struct of_dma *ofdma) | 653 | struct of_dma *ofdma) |
654 | { | 654 | { |
655 | struct usb_dmac_chan *uchan; | ||
656 | struct dma_chan *chan; | 655 | struct dma_chan *chan; |
657 | dma_cap_mask_t mask; | 656 | dma_cap_mask_t mask; |
658 | 657 | ||
@@ -667,8 +666,6 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec, | |||
667 | if (!chan) | 666 | if (!chan) |
668 | return NULL; | 667 | return NULL; |
669 | 668 | ||
670 | uchan = to_usb_dmac_chan(chan); | ||
671 | |||
672 | return chan; | 669 | return chan; |
673 | } | 670 | } |
674 | 671 | ||
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 8f62edad51be..a0733ac3edb1 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c | |||
@@ -1011,7 +1011,6 @@ static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev) | |||
1011 | { | 1011 | { |
1012 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | 1012 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); |
1013 | struct sirfsoc_dma_regs *save = &sdma->regs_save; | 1013 | struct sirfsoc_dma_regs *save = &sdma->regs_save; |
1014 | struct sirfsoc_dma_desc *sdesc; | ||
1015 | struct sirfsoc_dma_chan *schan; | 1014 | struct sirfsoc_dma_chan *schan; |
1016 | int ch; | 1015 | int ch; |
1017 | int ret; | 1016 | int ret; |
@@ -1044,9 +1043,6 @@ static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev) | |||
1044 | schan = &sdma->channels[ch]; | 1043 | schan = &sdma->channels[ch]; |
1045 | if (list_empty(&schan->active)) | 1044 | if (list_empty(&schan->active)) |
1046 | continue; | 1045 | continue; |
1047 | sdesc = list_first_entry(&schan->active, | ||
1048 | struct sirfsoc_dma_desc, | ||
1049 | node); | ||
1050 | save->ctrl[ch] = readl_relaxed(sdma->base + | 1046 | save->ctrl[ch] = readl_relaxed(sdma->base + |
1051 | ch * 0x10 + SIRFSOC_DMA_CH_CTRL); | 1047 | ch * 0x10 + SIRFSOC_DMA_CH_CTRL); |
1052 | } | 1048 | } |
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 307547f4848d..3688d0873a3e 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c | |||
@@ -527,13 +527,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid) | |||
527 | { | 527 | { |
528 | struct stm32_dma_chan *chan = devid; | 528 | struct stm32_dma_chan *chan = devid; |
529 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); | 529 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); |
530 | u32 status, scr, sfcr; | 530 | u32 status, scr; |
531 | 531 | ||
532 | spin_lock(&chan->vchan.lock); | 532 | spin_lock(&chan->vchan.lock); |
533 | 533 | ||
534 | status = stm32_dma_irq_status(chan); | 534 | status = stm32_dma_irq_status(chan); |
535 | scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)); | 535 | scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)); |
536 | sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id)); | ||
537 | 536 | ||
538 | if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) { | 537 | if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) { |
539 | stm32_dma_irq_clear(chan, STM32_DMA_TCI); | 538 | stm32_dma_irq_clear(chan, STM32_DMA_TCI); |
@@ -574,15 +573,12 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, | |||
574 | int src_bus_width, dst_bus_width; | 573 | int src_bus_width, dst_bus_width; |
575 | int src_burst_size, dst_burst_size; | 574 | int src_burst_size, dst_burst_size; |
576 | u32 src_maxburst, dst_maxburst; | 575 | u32 src_maxburst, dst_maxburst; |
577 | dma_addr_t src_addr, dst_addr; | ||
578 | u32 dma_scr = 0; | 576 | u32 dma_scr = 0; |
579 | 577 | ||
580 | src_addr_width = chan->dma_sconfig.src_addr_width; | 578 | src_addr_width = chan->dma_sconfig.src_addr_width; |
581 | dst_addr_width = chan->dma_sconfig.dst_addr_width; | 579 | dst_addr_width = chan->dma_sconfig.dst_addr_width; |
582 | src_maxburst = chan->dma_sconfig.src_maxburst; | 580 | src_maxburst = chan->dma_sconfig.src_maxburst; |
583 | dst_maxburst = chan->dma_sconfig.dst_maxburst; | 581 | dst_maxburst = chan->dma_sconfig.dst_maxburst; |
584 | src_addr = chan->dma_sconfig.src_addr; | ||
585 | dst_addr = chan->dma_sconfig.dst_addr; | ||
586 | 582 | ||
587 | switch (direction) { | 583 | switch (direction) { |
588 | case DMA_MEM_TO_DEV: | 584 | case DMA_MEM_TO_DEV: |
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c index 245d759d5ffc..380276d078b2 100644 --- a/drivers/dma/zx296702_dma.c +++ b/drivers/dma/zx296702_dma.c | |||
@@ -435,13 +435,12 @@ static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num, | |||
435 | if (!ds) | 435 | if (!ds) |
436 | return NULL; | 436 | return NULL; |
437 | 437 | ||
438 | ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli); | 438 | ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli); |
439 | if (!ds->desc_hw) { | 439 | if (!ds->desc_hw) { |
440 | dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc); | 440 | dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc); |
441 | kfree(ds); | 441 | kfree(ds); |
442 | return NULL; | 442 | return NULL; |
443 | } | 443 | } |
444 | memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num); | ||
445 | ds->desc_num = num; | 444 | ds->desc_num = num; |
446 | return ds; | 445 | return ds; |
447 | } | 446 | } |
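The zx296702 hunk above is one instance of a pattern repeated across this pull: a dma_pool_alloc() immediately followed by a memset() is collapsed into a single dma_pool_zalloc() call, which returns already-zeroed pool memory. A minimal sketch, with placeholder pool and handle names:

#include <linux/dmapool.h>

static void *alloc_zeroed_desc(struct dma_pool *pool, dma_addr_t *phys)
{
	/* one call replaces dma_pool_alloc(pool, GFP_NOWAIT, phys) + memset(..., 0, ...) */
	return dma_pool_zalloc(pool, GFP_NOWAIT, phys);
}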
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 393fea85eb4e..3fda9a32defb 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
@@ -697,3 +697,4 @@ void of_msi_configure(struct device *dev, struct device_node *np) | |||
697 | dev_set_msi_domain(dev, | 697 | dev_set_msi_domain(dev, |
698 | of_msi_get_domain(dev, np, DOMAIN_BUS_PLATFORM_MSI)); | 698 | of_msi_get_domain(dev, np, DOMAIN_BUS_PLATFORM_MSI)); |
699 | } | 699 | } |
700 | EXPORT_SYMBOL_GPL(of_msi_configure); | ||
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 3c09e94cf827..28dfdce4beae 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c | |||
@@ -341,27 +341,20 @@ static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable) | |||
341 | static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) | 341 | static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) |
342 | { | 342 | { |
343 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi); | 343 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi); |
344 | dma_filter_fn filter = sdd->cntrlr_info->filter; | ||
345 | struct device *dev = &sdd->pdev->dev; | 344 | struct device *dev = &sdd->pdev->dev; |
346 | dma_cap_mask_t mask; | ||
347 | 345 | ||
348 | if (is_polling(sdd)) | 346 | if (is_polling(sdd)) |
349 | return 0; | 347 | return 0; |
350 | 348 | ||
351 | dma_cap_zero(mask); | ||
352 | dma_cap_set(DMA_SLAVE, mask); | ||
353 | |||
354 | /* Acquire DMA channels */ | 349 | /* Acquire DMA channels */ |
355 | sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter, | 350 | sdd->rx_dma.ch = dma_request_slave_channel(dev, "rx"); |
356 | sdd->cntrlr_info->dma_rx, dev, "rx"); | ||
357 | if (!sdd->rx_dma.ch) { | 351 | if (!sdd->rx_dma.ch) { |
358 | dev_err(dev, "Failed to get RX DMA channel\n"); | 352 | dev_err(dev, "Failed to get RX DMA channel\n"); |
359 | return -EBUSY; | 353 | return -EBUSY; |
360 | } | 354 | } |
361 | spi->dma_rx = sdd->rx_dma.ch; | 355 | spi->dma_rx = sdd->rx_dma.ch; |
362 | 356 | ||
363 | sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter, | 357 | sdd->tx_dma.ch = dma_request_slave_channel(dev, "tx"); |
364 | sdd->cntrlr_info->dma_tx, dev, "tx"); | ||
365 | if (!sdd->tx_dma.ch) { | 358 | if (!sdd->tx_dma.ch) { |
366 | dev_err(dev, "Failed to get TX DMA channel\n"); | 359 | dev_err(dev, "Failed to get TX DMA channel\n"); |
367 | dma_release_channel(sdd->rx_dma.ch); | 360 | dma_release_channel(sdd->rx_dma.ch); |
@@ -1091,11 +1084,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) | |||
1091 | 1084 | ||
1092 | sdd->cur_bpw = 8; | 1085 | sdd->cur_bpw = 8; |
1093 | 1086 | ||
1094 | if (!sdd->pdev->dev.of_node && (!sci->dma_tx || !sci->dma_rx)) { | ||
1095 | dev_warn(&pdev->dev, "Unable to get SPI tx/rx DMA data. Switching to poll mode\n"); | ||
1096 | sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL; | ||
1097 | } | ||
1098 | |||
1099 | sdd->tx_dma.direction = DMA_MEM_TO_DEV; | 1087 | sdd->tx_dma.direction = DMA_MEM_TO_DEV; |
1100 | sdd->rx_dma.direction = DMA_DEV_TO_MEM; | 1088 | sdd->rx_dma.direction = DMA_DEV_TO_MEM; |
1101 | 1089 | ||
@@ -1205,9 +1193,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) | |||
1205 | 1193 | ||
1206 | dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n", | 1194 | dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n", |
1207 | sdd->port_id, master->num_chipselect); | 1195 | sdd->port_id, master->num_chipselect); |
1208 | dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\tDMA=[Rx-%p, Tx-%p]\n", | 1196 | dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n", |
1209 | mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1, | 1197 | mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1); |
1210 | sci->dma_rx, sci->dma_tx); | ||
1211 | 1198 | ||
1212 | pm_runtime_mark_last_busy(&pdev->dev); | 1199 | pm_runtime_mark_last_busy(&pdev->dev); |
1213 | pm_runtime_put_autosuspend(&pdev->dev); | 1200 | pm_runtime_put_autosuspend(&pdev->dev); |
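The spi-s3c64xx change above drops the legacy filter-function lookup and platform DMA data in favour of named slave channels. A minimal sketch of the new acquisition path, assuming the "rx"/"tx" channels are described by DT or ACPI; acquire_spi_dma() is an illustrative helper, not part of the driver.

#include <linux/dmaengine.h>

static int acquire_spi_dma(struct device *dev,
			   struct dma_chan **rx, struct dma_chan **tx)
{
	*rx = dma_request_slave_channel(dev, "rx");
	if (!*rx)
		return -EBUSY;

	*tx = dma_request_slave_channel(dev, "tx");
	if (!*tx) {
		dma_release_channel(*rx);
		return -EBUSY;
	}

	return 0;
}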
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c index f607946fd996..58cbb30a9401 100644 --- a/drivers/tty/serial/8250/8250_lpss.c +++ b/drivers/tty/serial/8250/8250_lpss.c | |||
@@ -157,12 +157,12 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port) | |||
157 | static const struct dw_dma_platform_data qrk_serial_dma_pdata = { | 157 | static const struct dw_dma_platform_data qrk_serial_dma_pdata = { |
158 | .nr_channels = 2, | 158 | .nr_channels = 2, |
159 | .is_private = true, | 159 | .is_private = true, |
160 | .is_nollp = true, | ||
161 | .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, | 160 | .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, |
162 | .chan_priority = CHAN_PRIORITY_ASCENDING, | 161 | .chan_priority = CHAN_PRIORITY_ASCENDING, |
163 | .block_size = 4095, | 162 | .block_size = 4095, |
164 | .nr_masters = 1, | 163 | .nr_masters = 1, |
165 | .data_width = {4}, | 164 | .data_width = {4}, |
165 | .multi_block = {0}, | ||
166 | }; | 166 | }; |
167 | 167 | ||
168 | static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port) | 168 | static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port) |
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 27e9ec8778eb..5308eae9ce35 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h | |||
@@ -84,6 +84,8 @@ struct pl08x_channel_data { | |||
84 | * running any DMA transfer and multiplexing can be recycled | 84 | * running any DMA transfer and multiplexing can be recycled |
85 | * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2 | 85 | * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2 |
86 | * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 | 86 | * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 |
87 | * @slave_map: DMA slave matching table | ||
88 | * @slave_map_len: number of elements in @slave_map | ||
87 | */ | 89 | */ |
88 | struct pl08x_platform_data { | 90 | struct pl08x_platform_data { |
89 | struct pl08x_channel_data *slave_channels; | 91 | struct pl08x_channel_data *slave_channels; |
@@ -93,6 +95,8 @@ struct pl08x_platform_data { | |||
93 | void (*put_xfer_signal)(const struct pl08x_channel_data *, int); | 95 | void (*put_xfer_signal)(const struct pl08x_channel_data *, int); |
94 | u8 lli_buses; | 96 | u8 lli_buses; |
95 | u8 mem_buses; | 97 | u8 mem_buses; |
98 | const struct dma_slave_map *slave_map; | ||
99 | int slave_map_len; | ||
96 | }; | 100 | }; |
97 | 101 | ||
98 | #ifdef CONFIG_AMBA_PL08X | 102 | #ifdef CONFIG_AMBA_PL08X |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index cc535a478bae..feee6ec6a13b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -336,6 +336,12 @@ enum dma_slave_buswidth { | |||
336 | * may or may not be applicable on memory sources. | 336 | * may or may not be applicable on memory sources. |
337 | * @dst_maxburst: same as src_maxburst but for destination target | 337 | * @dst_maxburst: same as src_maxburst but for destination target |
338 | * mutatis mutandis. | 338 | * mutatis mutandis. |
339 | * @src_port_window_size: The length, in words, of the register area on the | ||
340 | * device side through which the data must be accessed. It is only used for | ||
341 | * devices that use an area instead of a single register to receive the data; | ||
342 | * the DMA typically loops over this area to transfer the data. | ||
343 | * @dst_port_window_size: same as src_port_window_size but for the destination | ||
344 | * port. | ||
339 | * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill | 345 | * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill |
340 | * with 'true' if peripheral should be flow controller. Direction will be | 346 | * with 'true' if peripheral should be flow controller. Direction will be |
341 | * selected at Runtime. | 347 | * selected at Runtime. |
@@ -363,6 +369,8 @@ struct dma_slave_config { | |||
363 | enum dma_slave_buswidth dst_addr_width; | 369 | enum dma_slave_buswidth dst_addr_width; |
364 | u32 src_maxburst; | 370 | u32 src_maxburst; |
365 | u32 dst_maxburst; | 371 | u32 dst_maxburst; |
372 | u32 src_port_window_size; | ||
373 | u32 dst_port_window_size; | ||
366 | bool device_fc; | 374 | bool device_fc; |
367 | unsigned int slave_id; | 375 | unsigned int slave_id; |
368 | }; | 376 | }; |
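A sketch of how a client driver might fill the new port-window fields together with the existing dma_slave_config members; the peripheral, FIFO address and window size are illustrative only, and the fields matter only for controllers and devices that transfer through a register window rather than a single register.

#include <linux/dmaengine.h>

static int cfg_dev_to_mem(struct dma_chan *chan, dma_addr_t fifo_base)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = fifo_base,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 16,
		.src_port_window_size = 32,	/* device exposes a 32-word window */
	};

	return dmaengine_slave_config(chan, &cfg);
}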
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 5f0e11e7354c..e69e415d0d98 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
15 | 15 | ||
16 | #define DW_DMA_MAX_NR_MASTERS 4 | 16 | #define DW_DMA_MAX_NR_MASTERS 4 |
17 | #define DW_DMA_MAX_NR_CHANNELS 8 | ||
17 | 18 | ||
18 | /** | 19 | /** |
19 | * struct dw_dma_slave - Controller-specific information about a slave | 20 | * struct dw_dma_slave - Controller-specific information about a slave |
@@ -40,19 +41,18 @@ struct dw_dma_slave { | |||
40 | * @is_private: The device channels should be marked as private and not for | 41 | * @is_private: The device channels should be marked as private and not for |
41 | * by the general purpose DMA channel allocator. | 42 | * by the general purpose DMA channel allocator. |
42 | * @is_memcpy: The device channels do support memory-to-memory transfers. | 43 | * @is_memcpy: The device channels do support memory-to-memory transfers. |
43 | * @is_nollp: The device channels does not support multi block transfers. | ||
44 | * @chan_allocation_order: Allocate channels starting from 0 or 7 | 44 | * @chan_allocation_order: Allocate channels starting from 0 or 7 |
45 | * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. | 45 | * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. |
46 | * @block_size: Maximum block size supported by the controller | 46 | * @block_size: Maximum block size supported by the controller |
47 | * @nr_masters: Number of AHB masters supported by the controller | 47 | * @nr_masters: Number of AHB masters supported by the controller |
48 | * @data_width: Maximum data width supported by hardware per AHB master | 48 | * @data_width: Maximum data width supported by hardware per AHB master |
49 | * (in bytes, power of 2) | 49 | * (in bytes, power of 2) |
50 | * @multi_block: Multi block transfers supported by hardware per channel. | ||
50 | */ | 51 | */ |
51 | struct dw_dma_platform_data { | 52 | struct dw_dma_platform_data { |
52 | unsigned int nr_channels; | 53 | unsigned int nr_channels; |
53 | bool is_private; | 54 | bool is_private; |
54 | bool is_memcpy; | 55 | bool is_memcpy; |
55 | bool is_nollp; | ||
56 | #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ | 56 | #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ |
57 | #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ | 57 | #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ |
58 | unsigned char chan_allocation_order; | 58 | unsigned char chan_allocation_order; |
@@ -62,6 +62,7 @@ struct dw_dma_platform_data { | |||
62 | unsigned int block_size; | 62 | unsigned int block_size; |
63 | unsigned char nr_masters; | 63 | unsigned char nr_masters; |
64 | unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; | 64 | unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; |
65 | unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS]; | ||
65 | }; | 66 | }; |
66 | 67 | ||
67 | #endif /* _PLATFORM_DATA_DMA_DW_H */ | 68 | #endif /* _PLATFORM_DATA_DMA_DW_H */ |
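As the 8250_lpss hunk above shows, the global is_nollp flag is replaced by a per-channel multi_block capability (is_nollp = true maps to a multi_block entry of 0). A sketch of platform data using the new field; all values are illustrative.

#include <linux/platform_data/dma-dw.h>

static struct dw_dma_platform_data example_dw_pdata = {
	.nr_channels = 8,
	.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
	.chan_priority = CHAN_PRIORITY_ASCENDING,
	.block_size = 4095,
	.nr_masters = 1,
	.data_width = {4},
	/* 0 = single-block only (what is_nollp = true used to express) */
	.multi_block = {1, 1, 1, 1, 1, 1, 1, 1},
};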
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h index 5c1e21c87270..da79774078a7 100644 --- a/include/linux/platform_data/spi-s3c64xx.h +++ b/include/linux/platform_data/spi-s3c64xx.h | |||
@@ -40,9 +40,6 @@ struct s3c64xx_spi_info { | |||
40 | int num_cs; | 40 | int num_cs; |
41 | bool no_cs; | 41 | bool no_cs; |
42 | int (*cfg_gpio)(void); | 42 | int (*cfg_gpio)(void); |
43 | dma_filter_fn filter; | ||
44 | void *dma_tx; | ||
45 | void *dma_rx; | ||
46 | }; | 43 | }; |
47 | 44 | ||
48 | /** | 45 | /** |