author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-13 13:59:52 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-13 13:59:52 -0500 |
commit | d6a322774cb7096ca683fc46ddc9482e02ee6133 (patch) | |
tree | 0e5b95d9899eefa5c58f23af8ecadd7e93010586 | |
parent | 4c257ec37bc365614933c7f0a7fe9b0688dfd1e7 (diff) | |
parent | 8b648436eb45c1f561164b24aafd35fb2bee9cfc (diff) |
Merge tag 'dmaengine-4.5-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
"This round we have few new features, new driver and updates to few
drivers.
The new features to dmaengine core are:
- Synchronized transfer termination API to terminate dmaengine
transfers in a synchronized or asynchronous fashion, as required by
users. It now has users in the ALSA dmaengine library and in the
img, at_xdmac, and axi_dmac drivers.
- Universal API for channel request, and the start of consolidating
the request flows. Its user is the omap-dma driver.
- Introduce reuse of descriptors, with a first user in the pxa_dma driver
Add/Remove:
- New STM32 DMA driver
- Removal of unused R-Car HPB-DMAC driver
Updates:
- ti-dma-crossbar updates for supporting eDMA
- tegra-apb pm updates
- idma64 updates
- mv_xor updates
- ste_dma updates"
* tag 'dmaengine-4.5-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (54 commits)
dmaengine: mv_xor: add suspend/resume support
dmaengine: mv_xor: de-duplicate mv_chan_set_mode*()
dmaengine: mv_xor: remove mv_xor_chan->current_type field
dmaengine: omap-dma: Add support for DMA filter mapping to slave devices
dmaengine: edma: Add support for DMA filter mapping to slave devices
dmaengine: core: Introduce new, universal API to request a channel
dmaengine: core: Move and merge the code paths using private_candidate
dmaengine: core: Skip mask matching when it is not provided to private_candidate
dmaengine: mdc: Correct terminate_all handling
dmaengine: edma: Add probe callback to edma_tptc_driver
dmaengine: dw: fix potential memory leak in dw_dma_parse_dt()
dmaengine: stm32-dma: Fix unchecked dereference of chan->desc
dmaengine: sh: Remove unused R-Car HPB-DMAC driver
dmaengine: usb-dmac: Document SoC specific compatibility strings
ste_dma40: Delete an unnecessary variable initialisation in d40_probe()
ste_dma40: Delete another unnecessary check in d40_probe()
ste_dma40: Delete an unnecessary check before the function call "kmem_cache_destroy"
dmaengine: tegra-apb: Free interrupts before killing tasklets
dmaengine: tegra-apb: Update driver to use GFP_NOWAIT
dmaengine: tegra-apb: Only save channel state for those in use
...
44 files changed, 2088 insertions, 1181 deletions
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt index 040f365954cc..e7780a186a36 100644 --- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt +++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt | |||
@@ -1,7 +1,13 @@ | |||
1 | * Renesas USB DMA Controller Device Tree bindings | 1 | * Renesas USB DMA Controller Device Tree bindings |
2 | 2 | ||
3 | Required Properties: | 3 | Required Properties: |
4 | - compatible: must contain "renesas,usb-dmac" | 4 | - compatible: "renesas,<soctype>-usb-dmac", "renesas,usb-dmac" as fallback. |
5 | Examples with soctypes are: | ||
6 | - "renesas,r8a7790-usb-dmac" (R-Car H2) | ||
7 | - "renesas,r8a7791-usb-dmac" (R-Car M2-W) | ||
8 | - "renesas,r8a7793-usb-dmac" (R-Car M2-N) | ||
9 | - "renesas,r8a7794-usb-dmac" (R-Car E2) | ||
10 | - "renesas,r8a7795-usb-dmac" (R-Car H3) | ||
5 | - reg: base address and length of the registers block for the DMAC | 11 | - reg: base address and length of the registers block for the DMAC |
6 | - interrupts: interrupt specifiers for the DMAC, one for each entry in | 12 | - interrupts: interrupt specifiers for the DMAC, one for each entry in |
7 | interrupt-names. | 13 | interrupt-names. |
@@ -15,7 +21,7 @@ Required Properties: | |||
15 | Example: R8A7790 (R-Car H2) USB-DMACs | 21 | Example: R8A7790 (R-Car H2) USB-DMACs |
16 | 22 | ||
17 | usb_dmac0: dma-controller@e65a0000 { | 23 | usb_dmac0: dma-controller@e65a0000 { |
18 | compatible = "renesas,usb-dmac"; | 24 | compatible = "renesas,r8a7790-usb-dmac", "renesas,usb-dmac"; |
19 | reg = <0 0xe65a0000 0 0x100>; | 25 | reg = <0 0xe65a0000 0 0x100>; |
20 | interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH | 26 | interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH |
21 | 0 109 IRQ_TYPE_LEVEL_HIGH>; | 27 | 0 109 IRQ_TYPE_LEVEL_HIGH>; |
diff --git a/Documentation/devicetree/bindings/dma/stm32-dma.txt b/Documentation/devicetree/bindings/dma/stm32-dma.txt new file mode 100644 index 000000000000..70cd13f1588a --- /dev/null +++ b/Documentation/devicetree/bindings/dma/stm32-dma.txt | |||
@@ -0,0 +1,82 @@ | |||
1 | * STMicroelectronics STM32 DMA controller | ||
2 | |||
3 | The STM32 DMA is a general-purpose direct memory access controller capable of | ||
4 | supporting 8 independent DMA channels. Each channel can have up to 8 requests. | ||
5 | |||
6 | Required properties: | ||
7 | - compatible: Should be "st,stm32-dma" | ||
8 | - reg: Should contain DMA registers location and length. This should include | ||
9 | all of the per-channel registers. | ||
10 | - interrupts: Should contain all of the per-channel DMA interrupts in | ||
11 | ascending order with respect to the DMA channel index. | ||
12 | - clocks: Should contain the input clock of the DMA instance. | ||
13 | - #dma-cells : Must be <4>. See DMA client paragraph for more details. | ||
14 | |||
15 | Optional properties: | ||
16 | - resets: Reference to a reset controller asserting the DMA controller | ||
17 | - st,mem2mem: boolean; if defined, it indicates that the controller supports | ||
18 | memory-to-memory transfer | ||
19 | |||
20 | Example: | ||
21 | |||
22 | dma2: dma-controller@40026400 { | ||
23 | compatible = "st,stm32-dma"; | ||
24 | reg = <0x40026400 0x400>; | ||
25 | interrupts = <56>, | ||
26 | <57>, | ||
27 | <58>, | ||
28 | <59>, | ||
29 | <60>, | ||
30 | <68>, | ||
31 | <69>, | ||
32 | <70>; | ||
33 | clocks = <&clk_hclk>; | ||
34 | #dma-cells = <4>; | ||
35 | st,mem2mem; | ||
36 | resets = <&rcc 150>; | ||
37 | }; | ||
38 | |||
39 | * DMA client | ||
40 | |||
41 | DMA clients connected to the STM32 DMA controller must use the format | ||
42 | described in the dma.txt file, using a five-cell specifier for each | ||
43 | channel: a phandle plus four integer cells. | ||
44 | The four cells in order are: | ||
45 | |||
46 | 1. The channel id | ||
47 | 2. The request line number | ||
48 | 3. A 32-bit mask specifying the DMA channel configuration, which is | ||
49 | device dependent: | ||
50 | -bit 9: Peripheral Increment Address | ||
51 | 0x0: no address increment between transfers | ||
52 | 0x1: increment address between transfers | ||
53 | -bit 10: Memory Increment Address | ||
54 | 0x0: no address increment between transfers | ||
55 | 0x1: increment address between transfers | ||
56 | -bit 15: Peripheral Increment Offset Size | ||
57 | 0x0: offset size is linked to the peripheral bus width | ||
58 | 0x1: offset size is fixed to 4 (32-bit alignment) | ||
59 | -bit 16-17: Priority level | ||
60 | 0x0: low | ||
61 | 0x1: medium | ||
62 | 0x2: high | ||
63 | 0x3: very high | ||
64 | 4. A 32-bit mask specifying the DMA FIFO threshold configuration, which | ||
65 | is device dependent: | ||
66 | -bit 0-1: FIFO threshold | ||
67 | 0x0: 1/4 full FIFO | ||
68 | 0x1: 1/2 full FIFO | ||
69 | 0x2: 3/4 full FIFO | ||
70 | 0x3: full FIFO | ||
71 | |||
72 | Example: | ||
73 | |||
74 | usart1: serial@40011000 { | ||
75 | compatible = "st,stm32-usart", "st,stm32-uart"; | ||
76 | reg = <0x40011000 0x400>; | ||
77 | interrupts = <37>; | ||
78 | clocks = <&clk_pclk2>; | ||
79 | dmas = <&dma2 2 4 0x10400 0x3>, | ||
80 | <&dma2 7 5 0x10200 0x3>; | ||
81 | dma-names = "rx", "tx"; | ||
82 | }; | ||
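Decoding the first entry of this example against the bit definitions above: channel 2, request line 4, a channel mask of 0x10400 (bit 10 set: Memory Increment Address; bits 16-17 = 0x1: medium priority), and a FIFO mask of 0x3 (full FIFO threshold).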
diff --git a/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt b/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt index b152a75dceae..aead5869a28d 100644 --- a/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt +++ b/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt | |||
@@ -14,6 +14,10 @@ The DMA controller node needs to have the following properties: | |||
14 | 14 | ||
15 | Optional properties: | 15 | Optional properties: |
16 | - ti,dma-safe-map: Safe routing value for unused request lines | 16 | - ti,dma-safe-map: Safe routing value for unused request lines |
17 | - ti,reserved-dma-request-ranges: DMA request ranges which should not be | ||
18 | used when mapping xbar inputs to DMA requests; they are either | ||
19 | allocated for use by, for example, the DSP, or used as | ||
20 | memcpy channels in eDMA. | ||
17 | 21 | ||
18 | Notes: | 22 | Notes: |
19 | When requesting a channel via ti,dra7-dma-crossbar, the DMA client must request | 23 | When requesting a channel via ti,dra7-dma-crossbar, the DMA client must request |
@@ -46,6 +50,8 @@ sdma_xbar: dma-router@4a002b78 { | |||
46 | #dma-cells = <1>; | 50 | #dma-cells = <1>; |
47 | dma-requests = <205>; | 51 | dma-requests = <205>; |
48 | ti,dma-safe-map = <0>; | 52 | ti,dma-safe-map = <0>; |
53 | /* Protect the sDMA request ranges: 10-14 and 100-126 */ | ||
54 | ti,reserved-dma-request-ranges = <10 5>, <100 27>; | ||
49 | dma-masters = <&sdma>; | 55 | dma-masters = <&sdma>; |
50 | }; | 56 | }; |
51 | 57 | ||
diff --git a/Documentation/dmaengine/client.txt b/Documentation/dmaengine/client.txt index 11fb87ff6cd0..9e33189745f0 100644 --- a/Documentation/dmaengine/client.txt +++ b/Documentation/dmaengine/client.txt | |||
@@ -22,25 +22,14 @@ The slave DMA usage consists of following steps: | |||
22 | Channel allocation is slightly different in the slave DMA context, | 22 | Channel allocation is slightly different in the slave DMA context, |
23 | client drivers typically need a channel from a particular DMA | 23 | client drivers typically need a channel from a particular DMA |
24 | controller only and even in some cases a specific channel is desired. | 24 | controller only and even in some cases a specific channel is desired. |
25 | To request a channel dma_request_channel() API is used. | 25 | To request a channel dma_request_chan() API is used. |
26 | 26 | ||
27 | Interface: | 27 | Interface: |
28 | struct dma_chan *dma_request_channel(dma_cap_mask_t mask, | 28 | struct dma_chan *dma_request_chan(struct device *dev, const char *name); |
29 | dma_filter_fn filter_fn, | ||
30 | void *filter_param); | ||
31 | where dma_filter_fn is defined as: | ||
32 | typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); | ||
33 | 29 | ||
34 | The 'filter_fn' parameter is optional, but highly recommended for | 30 | This will find and return the 'name' DMA channel associated with the 'dev' |
35 | slave and cyclic channels as they typically need to obtain a specific | 31 | device. The association is done via DT, ACPI or a board-file-based |
36 | DMA channel. | 32 | dma_slave_map matching table. |
37 | |||
38 | When the optional 'filter_fn' parameter is NULL, dma_request_channel() | ||
39 | simply returns the first channel that satisfies the capability mask. | ||
40 | |||
41 | Otherwise, the 'filter_fn' routine will be called once for each free | ||
42 | channel which has a capability in 'mask'. 'filter_fn' is expected to | ||
43 | return 'true' when the desired DMA channel is found. | ||
44 | 33 | ||
45 | A channel allocated via this interface is exclusive to the caller, | 34 | A channel allocated via this interface is exclusive to the caller, |
46 | until dma_release_channel() is called. | 35 | until dma_release_channel() is called. |
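As a rough illustration of this request flow — a minimal sketch, not taken from this patch set; the "foo" names, private struct, and "rx" channel name are hypothetical, while dma_request_chan() and dma_release_channel() are the APIs documented above:

	#include <linux/dmaengine.h>
	#include <linux/err.h>

	struct foo_priv {
		struct dma_chan *rxchan;	/* hypothetical driver state */
	};

	static int foo_request_dma(struct device *dev, struct foo_priv *priv)
	{
		/* Looked up by name via DT, ACPI or a dma_slave_map table */
		priv->rxchan = dma_request_chan(dev, "rx");
		if (IS_ERR(priv->rxchan))
			return PTR_ERR(priv->rxchan); /* may be -EPROBE_DEFER */
		return 0;
	}

	static void foo_release_dma(struct foo_priv *priv)
	{
		/* The channel is exclusive to this driver until released */
		dma_release_channel(priv->rxchan);
	}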
@@ -128,7 +117,7 @@ The slave DMA usage consists of following steps: | |||
128 | transaction. | 117 | transaction. |
129 | 118 | ||
130 | For cyclic DMA, a callback function may wish to terminate the | 119 | For cyclic DMA, a callback function may wish to terminate the |
131 | DMA via dmaengine_terminate_all(). | 120 | DMA via dmaengine_terminate_async(). |
132 | 121 | ||
133 | Therefore, it is important that DMA engine drivers drop any | 122 | Therefore, it is important that DMA engine drivers drop any |
134 | locks before calling the callback function which may cause a | 123 | locks before calling the callback function which may cause a |
@@ -166,12 +155,29 @@ The slave DMA usage consists of following steps: | |||
166 | 155 | ||
167 | Further APIs: | 156 | Further APIs: |
168 | 157 | ||
169 | 1. int dmaengine_terminate_all(struct dma_chan *chan) | 158 | 1. int dmaengine_terminate_sync(struct dma_chan *chan) |
159 | int dmaengine_terminate_async(struct dma_chan *chan) | ||
160 | int dmaengine_terminate_all(struct dma_chan *chan) /* DEPRECATED */ | ||
170 | 161 | ||
171 | This causes all activity for the DMA channel to be stopped, and may | 162 | This causes all activity for the DMA channel to be stopped, and may |
172 | discard data in the DMA FIFO which hasn't been fully transferred. | 163 | discard data in the DMA FIFO which hasn't been fully transferred. |
173 | No callback functions will be called for any incomplete transfers. | 164 | No callback functions will be called for any incomplete transfers. |
174 | 165 | ||
166 | Two variants of this function are available. | ||
167 | |||
168 | dmaengine_terminate_async() might not wait until the DMA has been fully | ||
169 | stopped or until any running complete callbacks have finished. But it is | ||
170 | possible to call dmaengine_terminate_async() from atomic context or from | ||
171 | within a complete callback. dmaengine_synchronize() must be called before it | ||
172 | is safe to free the memory accessed by the DMA transfer or free resources | ||
173 | accessed from within the complete callback. | ||
174 | |||
175 | dmaengine_terminate_sync() will wait for the transfer and any running | ||
176 | complete callbacks to finish before it returns. But the function must not be | ||
177 | called from atomic context or from within a complete callback. | ||
178 | |||
179 | dmaengine_terminate_all() is deprecated and should not be used in new code. | ||
180 | |||
175 | 2. int dmaengine_pause(struct dma_chan *chan) | 181 | 2. int dmaengine_pause(struct dma_chan *chan) |
176 | 182 | ||
177 | This pauses activity on the DMA channel without data loss. | 183 | This pauses activity on the DMA channel without data loss. |
@@ -197,3 +203,20 @@ Further APIs: | |||
197 | a running DMA channel. It is recommended that DMA engine users | 203 | a running DMA channel. It is recommended that DMA engine users |
198 | pause or stop (via dmaengine_terminate_all()) the channel before | 204 | pause or stop (via dmaengine_terminate_all()) the channel before |
199 | using this API. | 205 | using this API. |
206 | |||
207 | 5. void dmaengine_synchronize(struct dma_chan *chan) | ||
208 | |||
209 | Synchronize the termination of the DMA channel to the current context. | ||
210 | |||
211 | This function should be used after dmaengine_terminate_async() to synchronize | ||
212 | the termination of the DMA channel to the current context. The function will | ||
213 | wait for the transfer and any running complete callbacks to finish before it | ||
214 | returns. | ||
215 | |||
216 | If dmaengine_terminate_async() is used to stop the DMA channel this function | ||
217 | must be called before it is safe to free memory accessed by previously | ||
218 | submitted descriptors or to free any resources accessed within the complete | ||
219 | callback of previously submitted descriptors. | ||
220 | |||
221 | The behavior of this function is undefined if dma_async_issue_pending() has | ||
222 | been called between dmaengine_terminate_async() and this function. | ||
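A compact sketch of the termination pattern documented above; the chan, dev, and buffer variables are assumed driver state, while the dmaengine calls are the documented ones:

	/* Atomic context, e.g. inside a complete callback */
	dmaengine_terminate_async(chan);

	/* Later, from process context, before the DMA'd memory is freed */
	dmaengine_synchronize(chan);
	dma_free_coherent(dev, buf_size, buf, buf_dma);

When the caller is already in process context, dmaengine_terminate_sync(chan) performs both steps in a single call.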
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt index 67d4ce4df109..122b7f4876bb 100644 --- a/Documentation/dmaengine/provider.txt +++ b/Documentation/dmaengine/provider.txt | |||
@@ -327,8 +327,24 @@ supported. | |||
327 | 327 | ||
328 | * device_terminate_all | 328 | * device_terminate_all |
329 | - Aborts all the pending and ongoing transfers on the channel | 329 | - Aborts all the pending and ongoing transfers on the channel |
330 | - This command should operate synchronously on the channel, | 330 | - For aborted transfers the complete callback should not be called |
331 | terminating right away all the channels | 331 | - Can be called from atomic context or from within a complete |
332 | callback of a descriptor. Must not sleep. Drivers must be able | ||
333 | to handle this correctly. | ||
334 | - Termination may be asynchronous. The driver does not have to | ||
335 | wait until the currently active transfer has completely stopped. | ||
336 | See device_synchronize. | ||
337 | |||
338 | * device_synchronize | ||
339 | - Must synchronize the termination of a channel to the current | ||
340 | context. | ||
341 | - Must make sure that memory for previously submitted | ||
342 | descriptors is no longer accessed by the DMA controller. | ||
343 | - Must make sure that all complete callbacks for previously | ||
344 | submitted descriptors have finished running and none are | ||
345 | scheduled to run. | ||
346 | - May sleep. | ||
347 | |||
332 | 348 | ||
333 | Misc notes (stuff that should be documented, but don't really know | 349 | Misc notes (stuff that should be documented, but don't really know |
334 | where to put them) | 350 | where to put them) |
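For providers built on the virt-dma helpers, device_synchronize can often be a thin wrapper around vchan_synchronize(), as the dma-axi-dmac change later in this series does; a generic sketch with hypothetical "foo" names:

	static void foo_dma_synchronize(struct dma_chan *c)
	{
		struct foo_dma_chan *chan = to_foo_dma_chan(c);

		/* Kills the vchan tasklet, so no complete callback can still run */
		vchan_synchronize(&chan->vchan);
	}

	/* registered at probe time */
	dma_dev->device_synchronize = foo_dma_synchronize;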
diff --git a/arch/arm/configs/stm32_defconfig b/arch/arm/configs/stm32_defconfig index 4725fab562cb..ec5250547d14 100644 --- a/arch/arm/configs/stm32_defconfig +++ b/arch/arm/configs/stm32_defconfig | |||
@@ -54,6 +54,8 @@ CONFIG_NEW_LEDS=y | |||
54 | CONFIG_LEDS_CLASS=y | 54 | CONFIG_LEDS_CLASS=y |
55 | CONFIG_LEDS_TRIGGERS=y | 55 | CONFIG_LEDS_TRIGGERS=y |
56 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y | 56 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y |
57 | CONFIG_DMADEVICES=y | ||
58 | CONFIG_STM32_DMA=y | ||
57 | # CONFIG_FILE_LOCKING is not set | 59 | # CONFIG_FILE_LOCKING is not set |
58 | # CONFIG_DNOTIFY is not set | 60 | # CONFIG_DNOTIFY is not set |
59 | # CONFIG_INOTIFY_USER is not set | 61 | # CONFIG_INOTIFY_USER is not set |
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c index 819dfda88236..7afbb28d6a0f 100644 --- a/drivers/dca/dca-core.c +++ b/drivers/dca/dca-core.c | |||
@@ -321,7 +321,8 @@ EXPORT_SYMBOL_GPL(dca_get_tag); | |||
321 | * @ops - pointer to struct of dca operation function pointers | 321 | * @ops - pointer to struct of dca operation function pointers |
322 | * @priv_size - size of extra mem to be added for provider's needs | 322 | * @priv_size - size of extra mem to be added for provider's needs |
323 | */ | 323 | */ |
324 | struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size) | 324 | struct dca_provider *alloc_dca_provider(const struct dca_ops *ops, |
325 | int priv_size) | ||
325 | { | 326 | { |
326 | struct dca_provider *dca; | 327 | struct dca_provider *dca; |
327 | int alloc_size; | 328 | int alloc_size; |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index e6cd1a32025a..3a8ce67910c2 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -431,6 +431,18 @@ config STE_DMA40 | |||
431 | help | 431 | help |
432 | Support for ST-Ericsson DMA40 controller | 432 | Support for ST-Ericsson DMA40 controller |
433 | 433 | ||
434 | config STM32_DMA | ||
435 | bool "STMicroelectronics STM32 DMA support" | ||
436 | depends on ARCH_STM32 | ||
437 | select DMA_ENGINE | ||
438 | select DMA_OF | ||
439 | select DMA_VIRTUAL_CHANNELS | ||
440 | help | ||
441 | Enable support for the on-chip DMA controller on STMicroelectronics | ||
442 | STM32 MCUs. | ||
443 | If you have a board based on such an MCU and wish to use DMA, say Y | ||
444 | here. | ||
445 | |||
434 | config S3C24XX_DMAC | 446 | config S3C24XX_DMAC |
435 | tristate "Samsung S3C24XX DMA support" | 447 | tristate "Samsung S3C24XX DMA support" |
436 | depends on ARCH_S3C24XX | 448 | depends on ARCH_S3C24XX |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index ef9c099bd2b6..2dd0a067a0ca 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
@@ -56,6 +56,7 @@ obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o | |||
56 | obj-$(CONFIG_RENESAS_DMA) += sh/ | 56 | obj-$(CONFIG_RENESAS_DMA) += sh/ |
57 | obj-$(CONFIG_SIRF_DMA) += sirf-dma.o | 57 | obj-$(CONFIG_SIRF_DMA) += sirf-dma.o |
58 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o | 58 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o |
59 | obj-$(CONFIG_STM32_DMA) += stm32-dma.o | ||
59 | obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o | 60 | obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o |
60 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o | 61 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o |
61 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o | 62 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o |
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index 16d0daa058a5..eed6bda01790 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/device.h> | 15 | #include <linux/device.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/kernel.h> | ||
18 | #include <linux/list.h> | 19 | #include <linux/list.h> |
19 | #include <linux/mutex.h> | 20 | #include <linux/mutex.h> |
20 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
@@ -72,7 +73,9 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, | |||
72 | si = (const struct acpi_csrt_shared_info *)&grp[1]; | 73 | si = (const struct acpi_csrt_shared_info *)&grp[1]; |
73 | 74 | ||
74 | /* Match device by MMIO and IRQ */ | 75 | /* Match device by MMIO and IRQ */ |
75 | if (si->mmio_base_low != mem || si->gsi_interrupt != irq) | 76 | if (si->mmio_base_low != lower_32_bits(mem) || |
77 | si->mmio_base_high != upper_32_bits(mem) || | ||
78 | si->gsi_interrupt != irq) | ||
76 | return 0; | 79 | return 0; |
77 | 80 | ||
78 | dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n", | 81 | dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n", |
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 370c661c7d7b..39f59666f93f 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c | |||
@@ -863,8 +863,12 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan, | |||
863 | * access. Hopefully we can access DDR through both ports (at least on | 863 | * access. Hopefully we can access DDR through both ports (at least on |
864 | * SAMA5D4x), so we can use the same interface for source and dest, | 864 | * SAMA5D4x), so we can use the same interface for source and dest, |
865 | * that solves the fact we don't know the direction. | 865 | * that solves the fact we don't know the direction. |
866 | * ERRATA: Even if useless for memory transfers, the PERID has to not | ||
867 | * match the one of another channel. If not, it could lead to spurious | ||
868 | * flag status. | ||
866 | */ | 869 | */ |
867 | u32 chan_cc = AT_XDMAC_CC_DIF(0) | 870 | u32 chan_cc = AT_XDMAC_CC_PERID(0x3f) |
871 | | AT_XDMAC_CC_DIF(0) | ||
868 | | AT_XDMAC_CC_SIF(0) | 872 | | AT_XDMAC_CC_SIF(0) |
869 | | AT_XDMAC_CC_MBSIZE_SIXTEEN | 873 | | AT_XDMAC_CC_MBSIZE_SIXTEEN |
870 | | AT_XDMAC_CC_TYPE_MEM_TRAN; | 874 | | AT_XDMAC_CC_TYPE_MEM_TRAN; |
@@ -1041,8 +1045,12 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
1041 | * access DDR through both ports (at least on SAMA5D4x), so we can use | 1045 | * access DDR through both ports (at least on SAMA5D4x), so we can use |
1042 | * the same interface for source and dest, that solves the fact we | 1046 | * the same interface for source and dest, that solves the fact we |
1043 | * don't know the direction. | 1047 | * don't know the direction. |
1048 | * ERRATA: Even if useless for memory transfers, the PERID has to not | ||
1049 | * match the one of another channel. If not, it could lead to spurious | ||
1050 | * flag status. | ||
1044 | */ | 1051 | */ |
1045 | u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM | 1052 | u32 chan_cc = AT_XDMAC_CC_PERID(0x3f) |
1053 | | AT_XDMAC_CC_DAM_INCREMENTED_AM | ||
1046 | | AT_XDMAC_CC_SAM_INCREMENTED_AM | 1054 | | AT_XDMAC_CC_SAM_INCREMENTED_AM |
1047 | | AT_XDMAC_CC_DIF(0) | 1055 | | AT_XDMAC_CC_DIF(0) |
1048 | | AT_XDMAC_CC_SIF(0) | 1056 | | AT_XDMAC_CC_SIF(0) |
@@ -1143,8 +1151,12 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, | |||
1143 | * access. Hopefully we can access DDR through both ports (at least on | 1151 | * access. Hopefully we can access DDR through both ports (at least on |
1144 | * SAMA5D4x), so we can use the same interface for source and dest, | 1152 | * SAMA5D4x), so we can use the same interface for source and dest, |
1145 | * that solves the fact we don't know the direction. | 1153 | * that solves the fact we don't know the direction. |
1154 | * ERRATA: Even if useless for memory transfers, the PERID has to not | ||
1155 | * match the one of another channel. If not, it could lead to spurious | ||
1156 | * flag status. | ||
1146 | */ | 1157 | */ |
1147 | u32 chan_cc = AT_XDMAC_CC_DAM_UBS_AM | 1158 | u32 chan_cc = AT_XDMAC_CC_PERID(0x3f) |
1159 | | AT_XDMAC_CC_DAM_UBS_AM | ||
1148 | | AT_XDMAC_CC_SAM_INCREMENTED_AM | 1160 | | AT_XDMAC_CC_SAM_INCREMENTED_AM |
1149 | | AT_XDMAC_CC_DIF(0) | 1161 | | AT_XDMAC_CC_DIF(0) |
1150 | | AT_XDMAC_CC_SIF(0) | 1162 | | AT_XDMAC_CC_SIF(0) |
@@ -1998,8 +2010,6 @@ static int at_xdmac_remove(struct platform_device *pdev) | |||
1998 | dma_async_device_unregister(&atxdmac->dma); | 2010 | dma_async_device_unregister(&atxdmac->dma); |
1999 | clk_disable_unprepare(atxdmac->clk); | 2011 | clk_disable_unprepare(atxdmac->clk); |
2000 | 2012 | ||
2001 | synchronize_irq(atxdmac->irq); | ||
2002 | |||
2003 | free_irq(atxdmac->irq, atxdmac->dma.dev); | 2013 | free_irq(atxdmac->irq, atxdmac->dma.dev); |
2004 | 2014 | ||
2005 | for (i = 0; i < atxdmac->dma.chancnt; i++) { | 2015 | for (i = 0; i < atxdmac->dma.chancnt; i++) { |
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 5b2395e7e04d..c3468094393e 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c | |||
@@ -307,6 +307,13 @@ static int axi_dmac_terminate_all(struct dma_chan *c) | |||
307 | return 0; | 307 | return 0; |
308 | } | 308 | } |
309 | 309 | ||
310 | static void axi_dmac_synchronize(struct dma_chan *c) | ||
311 | { | ||
312 | struct axi_dmac_chan *chan = to_axi_dmac_chan(c); | ||
313 | |||
314 | vchan_synchronize(&chan->vchan); | ||
315 | } | ||
316 | |||
310 | static void axi_dmac_issue_pending(struct dma_chan *c) | 317 | static void axi_dmac_issue_pending(struct dma_chan *c) |
311 | { | 318 | { |
312 | struct axi_dmac_chan *chan = to_axi_dmac_chan(c); | 319 | struct axi_dmac_chan *chan = to_axi_dmac_chan(c); |
@@ -613,6 +620,7 @@ static int axi_dmac_probe(struct platform_device *pdev) | |||
613 | dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic; | 620 | dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic; |
614 | dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved; | 621 | dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved; |
615 | dma_dev->device_terminate_all = axi_dmac_terminate_all; | 622 | dma_dev->device_terminate_all = axi_dmac_terminate_all; |
623 | dma_dev->device_synchronize = axi_dmac_synchronize; | ||
616 | dma_dev->dev = &pdev->dev; | 624 | dma_dev->dev = &pdev->dev; |
617 | dma_dev->chancnt = 1; | 625 | dma_dev->chancnt = 1; |
618 | dma_dev->src_addr_widths = BIT(dmac->chan.src_width); | 626 | dma_dev->src_addr_widths = BIT(dmac->chan.src_width); |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 3ecec1445adf..c50a247be2e0 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -43,6 +43,7 @@ | |||
43 | 43 | ||
44 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 44 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
45 | 45 | ||
46 | #include <linux/platform_device.h> | ||
46 | #include <linux/dma-mapping.h> | 47 | #include <linux/dma-mapping.h> |
47 | #include <linux/init.h> | 48 | #include <linux/init.h> |
48 | #include <linux/module.h> | 49 | #include <linux/module.h> |
@@ -265,8 +266,11 @@ static void dma_chan_put(struct dma_chan *chan) | |||
265 | module_put(dma_chan_to_owner(chan)); | 266 | module_put(dma_chan_to_owner(chan)); |
266 | 267 | ||
267 | /* This channel is not in use anymore, free it */ | 268 | /* This channel is not in use anymore, free it */ |
268 | if (!chan->client_count && chan->device->device_free_chan_resources) | 269 | if (!chan->client_count && chan->device->device_free_chan_resources) { |
270 | /* Make sure all operations have completed */ | ||
271 | dmaengine_synchronize(chan); | ||
269 | chan->device->device_free_chan_resources(chan); | 272 | chan->device->device_free_chan_resources(chan); |
273 | } | ||
270 | 274 | ||
271 | /* If the channel is used via a DMA request router, free the mapping */ | 275 | /* If the channel is used via a DMA request router, free the mapping */ |
272 | if (chan->router && chan->router->route_free) { | 276 | if (chan->router && chan->router->route_free) { |
@@ -493,6 +497,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) | |||
493 | caps->dst_addr_widths = device->dst_addr_widths; | 497 | caps->dst_addr_widths = device->dst_addr_widths; |
494 | caps->directions = device->directions; | 498 | caps->directions = device->directions; |
495 | caps->residue_granularity = device->residue_granularity; | 499 | caps->residue_granularity = device->residue_granularity; |
500 | caps->descriptor_reuse = device->descriptor_reuse; | ||
496 | 501 | ||
497 | /* | 502 | /* |
498 | * Some devices implement only pause (e.g. to get residuum) but no | 503 | * Some devices implement only pause (e.g. to get residuum) but no |
@@ -511,7 +516,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, | |||
511 | { | 516 | { |
512 | struct dma_chan *chan; | 517 | struct dma_chan *chan; |
513 | 518 | ||
514 | if (!__dma_device_satisfies_mask(dev, mask)) { | 519 | if (mask && !__dma_device_satisfies_mask(dev, mask)) { |
515 | pr_debug("%s: wrong capabilities\n", __func__); | 520 | pr_debug("%s: wrong capabilities\n", __func__); |
516 | return NULL; | 521 | return NULL; |
517 | } | 522 | } |
@@ -542,6 +547,42 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, | |||
542 | return NULL; | 547 | return NULL; |
543 | } | 548 | } |
544 | 549 | ||
550 | static struct dma_chan *find_candidate(struct dma_device *device, | ||
551 | const dma_cap_mask_t *mask, | ||
552 | dma_filter_fn fn, void *fn_param) | ||
553 | { | ||
554 | struct dma_chan *chan = private_candidate(mask, device, fn, fn_param); | ||
555 | int err; | ||
556 | |||
557 | if (chan) { | ||
558 | /* Found a suitable channel, try to grab, prep, and return it. | ||
559 | * We first set DMA_PRIVATE to disable balance_ref_count as this | ||
560 | * channel will not be published in the general-purpose | ||
561 | * allocator | ||
562 | */ | ||
563 | dma_cap_set(DMA_PRIVATE, device->cap_mask); | ||
564 | device->privatecnt++; | ||
565 | err = dma_chan_get(chan); | ||
566 | |||
567 | if (err) { | ||
568 | if (err == -ENODEV) { | ||
569 | pr_debug("%s: %s module removed\n", __func__, | ||
570 | dma_chan_name(chan)); | ||
571 | list_del_rcu(&device->global_node); | ||
572 | } else | ||
573 | pr_debug("%s: failed to get %s: (%d)\n", | ||
574 | __func__, dma_chan_name(chan), err); | ||
575 | |||
576 | if (--device->privatecnt == 0) | ||
577 | dma_cap_clear(DMA_PRIVATE, device->cap_mask); | ||
578 | |||
579 | chan = ERR_PTR(err); | ||
580 | } | ||
581 | } | ||
582 | |||
583 | return chan ? chan : ERR_PTR(-EPROBE_DEFER); | ||
584 | } | ||
585 | |||
545 | /** | 586 | /** |
546 | * dma_get_slave_channel - try to get specific channel exclusively | 587 | * dma_get_slave_channel - try to get specific channel exclusively |
547 | * @chan: target channel | 588 | * @chan: target channel |
@@ -580,7 +621,6 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device) | |||
580 | { | 621 | { |
581 | dma_cap_mask_t mask; | 622 | dma_cap_mask_t mask; |
582 | struct dma_chan *chan; | 623 | struct dma_chan *chan; |
583 | int err; | ||
584 | 624 | ||
585 | dma_cap_zero(mask); | 625 | dma_cap_zero(mask); |
586 | dma_cap_set(DMA_SLAVE, mask); | 626 | dma_cap_set(DMA_SLAVE, mask); |
@@ -588,23 +628,11 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device) | |||
588 | /* lock against __dma_request_channel */ | 628 | /* lock against __dma_request_channel */ |
589 | mutex_lock(&dma_list_mutex); | 629 | mutex_lock(&dma_list_mutex); |
590 | 630 | ||
591 | chan = private_candidate(&mask, device, NULL, NULL); | 631 | chan = find_candidate(device, &mask, NULL, NULL); |
592 | if (chan) { | ||
593 | dma_cap_set(DMA_PRIVATE, device->cap_mask); | ||
594 | device->privatecnt++; | ||
595 | err = dma_chan_get(chan); | ||
596 | if (err) { | ||
597 | pr_debug("%s: failed to get %s: (%d)\n", | ||
598 | __func__, dma_chan_name(chan), err); | ||
599 | chan = NULL; | ||
600 | if (--device->privatecnt == 0) | ||
601 | dma_cap_clear(DMA_PRIVATE, device->cap_mask); | ||
602 | } | ||
603 | } | ||
604 | 632 | ||
605 | mutex_unlock(&dma_list_mutex); | 633 | mutex_unlock(&dma_list_mutex); |
606 | 634 | ||
607 | return chan; | 635 | return IS_ERR(chan) ? NULL : chan; |
608 | } | 636 | } |
609 | EXPORT_SYMBOL_GPL(dma_get_any_slave_channel); | 637 | EXPORT_SYMBOL_GPL(dma_get_any_slave_channel); |
610 | 638 | ||
@@ -621,35 +649,15 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, | |||
621 | { | 649 | { |
622 | struct dma_device *device, *_d; | 650 | struct dma_device *device, *_d; |
623 | struct dma_chan *chan = NULL; | 651 | struct dma_chan *chan = NULL; |
624 | int err; | ||
625 | 652 | ||
626 | /* Find a channel */ | 653 | /* Find a channel */ |
627 | mutex_lock(&dma_list_mutex); | 654 | mutex_lock(&dma_list_mutex); |
628 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { | 655 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { |
629 | chan = private_candidate(mask, device, fn, fn_param); | 656 | chan = find_candidate(device, mask, fn, fn_param); |
630 | if (chan) { | 657 | if (!IS_ERR(chan)) |
631 | /* Found a suitable channel, try to grab, prep, and | 658 | break; |
632 | * return it. We first set DMA_PRIVATE to disable | ||
633 | * balance_ref_count as this channel will not be | ||
634 | * published in the general-purpose allocator | ||
635 | */ | ||
636 | dma_cap_set(DMA_PRIVATE, device->cap_mask); | ||
637 | device->privatecnt++; | ||
638 | err = dma_chan_get(chan); | ||
639 | 659 | ||
640 | if (err == -ENODEV) { | 660 | chan = NULL; |
641 | pr_debug("%s: %s module removed\n", | ||
642 | __func__, dma_chan_name(chan)); | ||
643 | list_del_rcu(&device->global_node); | ||
644 | } else if (err) | ||
645 | pr_debug("%s: failed to get %s: (%d)\n", | ||
646 | __func__, dma_chan_name(chan), err); | ||
647 | else | ||
648 | break; | ||
649 | if (--device->privatecnt == 0) | ||
650 | dma_cap_clear(DMA_PRIVATE, device->cap_mask); | ||
651 | chan = NULL; | ||
652 | } | ||
653 | } | 661 | } |
654 | mutex_unlock(&dma_list_mutex); | 662 | mutex_unlock(&dma_list_mutex); |
655 | 663 | ||
@@ -662,27 +670,73 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, | |||
662 | } | 670 | } |
663 | EXPORT_SYMBOL_GPL(__dma_request_channel); | 671 | EXPORT_SYMBOL_GPL(__dma_request_channel); |
664 | 672 | ||
673 | static const struct dma_slave_map *dma_filter_match(struct dma_device *device, | ||
674 | const char *name, | ||
675 | struct device *dev) | ||
676 | { | ||
677 | int i; | ||
678 | |||
679 | if (!device->filter.mapcnt) | ||
680 | return NULL; | ||
681 | |||
682 | for (i = 0; i < device->filter.mapcnt; i++) { | ||
683 | const struct dma_slave_map *map = &device->filter.map[i]; | ||
684 | |||
685 | if (!strcmp(map->devname, dev_name(dev)) && | ||
686 | !strcmp(map->slave, name)) | ||
687 | return map; | ||
688 | } | ||
689 | |||
690 | return NULL; | ||
691 | } | ||
692 | |||
665 | /** | 693 | /** |
666 | * dma_request_slave_channel_reason - try to allocate an exclusive slave channel | 694 | * dma_request_chan - try to allocate an exclusive slave channel |
667 | * @dev: pointer to client device structure | 695 | * @dev: pointer to client device structure |
668 | * @name: slave channel name | 696 | * @name: slave channel name |
669 | * | 697 | * |
670 | * Returns pointer to appropriate DMA channel on success or an error pointer. | 698 | * Returns pointer to appropriate DMA channel on success or an error pointer. |
671 | */ | 699 | */ |
672 | struct dma_chan *dma_request_slave_channel_reason(struct device *dev, | 700 | struct dma_chan *dma_request_chan(struct device *dev, const char *name) |
673 | const char *name) | ||
674 | { | 701 | { |
702 | struct dma_device *d, *_d; | ||
703 | struct dma_chan *chan = NULL; | ||
704 | |||
675 | /* If device-tree is present get slave info from here */ | 705 | /* If device-tree is present get slave info from here */ |
676 | if (dev->of_node) | 706 | if (dev->of_node) |
677 | return of_dma_request_slave_channel(dev->of_node, name); | 707 | chan = of_dma_request_slave_channel(dev->of_node, name); |
678 | 708 | ||
679 | /* If device was enumerated by ACPI get slave info from here */ | 709 | /* If device was enumerated by ACPI get slave info from here */ |
680 | if (ACPI_HANDLE(dev)) | 710 | if (has_acpi_companion(dev) && !chan) |
681 | return acpi_dma_request_slave_chan_by_name(dev, name); | 711 | chan = acpi_dma_request_slave_chan_by_name(dev, name); |
712 | |||
713 | if (chan) { | ||
714 | /* Valid channel found or the requester needs to be deferred */ | ||
715 | if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER) | ||
716 | return chan; | ||
717 | } | ||
682 | 718 | ||
683 | return ERR_PTR(-ENODEV); | 719 | /* Try to find the channel via the DMA filter map(s) */ |
720 | mutex_lock(&dma_list_mutex); | ||
721 | list_for_each_entry_safe(d, _d, &dma_device_list, global_node) { | ||
722 | dma_cap_mask_t mask; | ||
723 | const struct dma_slave_map *map = dma_filter_match(d, name, dev); | ||
724 | |||
725 | if (!map) | ||
726 | continue; | ||
727 | |||
728 | dma_cap_zero(mask); | ||
729 | dma_cap_set(DMA_SLAVE, mask); | ||
730 | |||
731 | chan = find_candidate(d, &mask, d->filter.fn, map->param); | ||
732 | if (!IS_ERR(chan)) | ||
733 | break; | ||
734 | } | ||
735 | mutex_unlock(&dma_list_mutex); | ||
736 | |||
737 | return chan ? chan : ERR_PTR(-EPROBE_DEFER); | ||
684 | } | 738 | } |
685 | EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason); | 739 | EXPORT_SYMBOL_GPL(dma_request_chan); |
686 | 740 | ||
687 | /** | 741 | /** |
688 | * dma_request_slave_channel - try to allocate an exclusive slave channel | 742 | * dma_request_slave_channel - try to allocate an exclusive slave channel |
@@ -694,17 +748,35 @@ EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason); | |||
694 | struct dma_chan *dma_request_slave_channel(struct device *dev, | 748 | struct dma_chan *dma_request_slave_channel(struct device *dev, |
695 | const char *name) | 749 | const char *name) |
696 | { | 750 | { |
697 | struct dma_chan *ch = dma_request_slave_channel_reason(dev, name); | 751 | struct dma_chan *ch = dma_request_chan(dev, name); |
698 | if (IS_ERR(ch)) | 752 | if (IS_ERR(ch)) |
699 | return NULL; | 753 | return NULL; |
700 | 754 | ||
701 | dma_cap_set(DMA_PRIVATE, ch->device->cap_mask); | ||
702 | ch->device->privatecnt++; | ||
703 | |||
704 | return ch; | 755 | return ch; |
705 | } | 756 | } |
706 | EXPORT_SYMBOL_GPL(dma_request_slave_channel); | 757 | EXPORT_SYMBOL_GPL(dma_request_slave_channel); |
707 | 758 | ||
759 | /** | ||
760 | * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities | ||
761 | * @mask: capabilities that the channel must satisfy | ||
762 | * | ||
763 | * Returns pointer to appropriate DMA channel on success or an error pointer. | ||
764 | */ | ||
765 | struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask) | ||
766 | { | ||
767 | struct dma_chan *chan; | ||
768 | |||
769 | if (!mask) | ||
770 | return ERR_PTR(-ENODEV); | ||
771 | |||
772 | chan = __dma_request_channel(mask, NULL, NULL); | ||
773 | if (!chan) | ||
774 | chan = ERR_PTR(-ENODEV); | ||
775 | |||
776 | return chan; | ||
777 | } | ||
778 | EXPORT_SYMBOL_GPL(dma_request_chan_by_mask); | ||
779 | |||
708 | void dma_release_channel(struct dma_chan *chan) | 780 | void dma_release_channel(struct dma_chan *chan) |
709 | { | 781 | { |
710 | mutex_lock(&dma_list_mutex); | 782 | mutex_lock(&dma_list_mutex); |
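A short usage sketch for the new dma_request_chan_by_mask() export added above; the caller-side code is illustrative, the dmaengine calls are as exported:

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Returns a valid channel or an error pointer, never NULL */
	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);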
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index 127093a0c0e8..26edbe3a27ac 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c | |||
@@ -103,18 +103,21 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
103 | struct device_node *np = pdev->dev.of_node; | 103 | struct device_node *np = pdev->dev.of_node; |
104 | struct dw_dma_platform_data *pdata; | 104 | struct dw_dma_platform_data *pdata; |
105 | u32 tmp, arr[DW_DMA_MAX_NR_MASTERS]; | 105 | u32 tmp, arr[DW_DMA_MAX_NR_MASTERS]; |
106 | u32 nr_channels; | ||
106 | 107 | ||
107 | if (!np) { | 108 | if (!np) { |
108 | dev_err(&pdev->dev, "Missing DT data\n"); | 109 | dev_err(&pdev->dev, "Missing DT data\n"); |
109 | return NULL; | 110 | return NULL; |
110 | } | 111 | } |
111 | 112 | ||
113 | if (of_property_read_u32(np, "dma-channels", &nr_channels)) | ||
114 | return NULL; | ||
115 | |||
112 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | 116 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); |
113 | if (!pdata) | 117 | if (!pdata) |
114 | return NULL; | 118 | return NULL; |
115 | 119 | ||
116 | if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels)) | 120 | pdata->nr_channels = nr_channels; |
117 | return NULL; | ||
118 | 121 | ||
119 | if (of_property_read_bool(np, "is_private")) | 122 | if (of_property_read_bool(np, "is_private")) |
120 | pdata->is_private = true; | 123 | pdata->is_private = true; |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 16fe773fb846..50584015e046 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -2314,6 +2314,10 @@ static int edma_probe(struct platform_device *pdev) | |||
2314 | edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot); | 2314 | edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot); |
2315 | } | 2315 | } |
2316 | 2316 | ||
2317 | ecc->dma_slave.filter.map = info->slave_map; | ||
2318 | ecc->dma_slave.filter.mapcnt = info->slavecnt; | ||
2319 | ecc->dma_slave.filter.fn = edma_filter_fn; | ||
2320 | |||
2317 | ret = dma_async_device_register(&ecc->dma_slave); | 2321 | ret = dma_async_device_register(&ecc->dma_slave); |
2318 | if (ret) { | 2322 | if (ret) { |
2319 | dev_err(dev, "slave ddev registration failed (%d)\n", ret); | 2323 | dev_err(dev, "slave ddev registration failed (%d)\n", ret); |
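The filter.map/mapcnt/fn fields wired up in this hunk are what dma_request_chan() walks via dma_filter_match() in the core change above. The table itself comes from platform data; a hypothetical board-file sketch, with device names and parameter values purely illustrative:

	static const struct dma_slave_map board_edma_map[] = {
		/* { client devname, channel name, filter parameter } */
		{ "board-mmc.0", "rx", (void *)0x01 },
		{ "board-mmc.0", "tx", (void *)0x02 },
	};

The platform would then pass such a table as info->slave_map, with info->slavecnt set to ARRAY_SIZE(board_edma_map).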
@@ -2421,7 +2425,13 @@ static struct platform_driver edma_driver = { | |||
2421 | }, | 2425 | }, |
2422 | }; | 2426 | }; |
2423 | 2427 | ||
2428 | static int edma_tptc_probe(struct platform_device *pdev) | ||
2429 | { | ||
2430 | return 0; | ||
2431 | } | ||
2432 | |||
2424 | static struct platform_driver edma_tptc_driver = { | 2433 | static struct platform_driver edma_tptc_driver = { |
2434 | .probe = edma_tptc_probe, | ||
2425 | .driver = { | 2435 | .driver = { |
2426 | .name = "edma3-tptc", | 2436 | .name = "edma3-tptc", |
2427 | .of_match_table = edma_tptc_of_ids, | 2437 | .of_match_table = edma_tptc_of_ids, |
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index 915eec3cc279..be2e62b87948 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c | |||
@@ -116,6 +116,10 @@ | |||
116 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | 116 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ |
117 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ | 117 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ |
118 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | 118 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) |
119 | enum fsl_edma_pm_state { | ||
120 | RUNNING = 0, | ||
121 | SUSPENDED, | ||
122 | }; | ||
119 | 123 | ||
120 | struct fsl_edma_hw_tcd { | 124 | struct fsl_edma_hw_tcd { |
121 | __le32 saddr; | 125 | __le32 saddr; |
@@ -147,6 +151,9 @@ struct fsl_edma_slave_config { | |||
147 | struct fsl_edma_chan { | 151 | struct fsl_edma_chan { |
148 | struct virt_dma_chan vchan; | 152 | struct virt_dma_chan vchan; |
149 | enum dma_status status; | 153 | enum dma_status status; |
154 | enum fsl_edma_pm_state pm_state; | ||
155 | bool idle; | ||
156 | u32 slave_id; | ||
150 | struct fsl_edma_engine *edma; | 157 | struct fsl_edma_engine *edma; |
151 | struct fsl_edma_desc *edesc; | 158 | struct fsl_edma_desc *edesc; |
152 | struct fsl_edma_slave_config fsc; | 159 | struct fsl_edma_slave_config fsc; |
@@ -298,6 +305,7 @@ static int fsl_edma_terminate_all(struct dma_chan *chan) | |||
298 | spin_lock_irqsave(&fsl_chan->vchan.lock, flags); | 305 | spin_lock_irqsave(&fsl_chan->vchan.lock, flags); |
299 | fsl_edma_disable_request(fsl_chan); | 306 | fsl_edma_disable_request(fsl_chan); |
300 | fsl_chan->edesc = NULL; | 307 | fsl_chan->edesc = NULL; |
308 | fsl_chan->idle = true; | ||
301 | vchan_get_all_descriptors(&fsl_chan->vchan, &head); | 309 | vchan_get_all_descriptors(&fsl_chan->vchan, &head); |
302 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | 310 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); |
303 | vchan_dma_desc_free_list(&fsl_chan->vchan, &head); | 311 | vchan_dma_desc_free_list(&fsl_chan->vchan, &head); |
@@ -313,6 +321,7 @@ static int fsl_edma_pause(struct dma_chan *chan) | |||
313 | if (fsl_chan->edesc) { | 321 | if (fsl_chan->edesc) { |
314 | fsl_edma_disable_request(fsl_chan); | 322 | fsl_edma_disable_request(fsl_chan); |
315 | fsl_chan->status = DMA_PAUSED; | 323 | fsl_chan->status = DMA_PAUSED; |
324 | fsl_chan->idle = true; | ||
316 | } | 325 | } |
317 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | 326 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); |
318 | return 0; | 327 | return 0; |
@@ -327,6 +336,7 @@ static int fsl_edma_resume(struct dma_chan *chan) | |||
327 | if (fsl_chan->edesc) { | 336 | if (fsl_chan->edesc) { |
328 | fsl_edma_enable_request(fsl_chan); | 337 | fsl_edma_enable_request(fsl_chan); |
329 | fsl_chan->status = DMA_IN_PROGRESS; | 338 | fsl_chan->status = DMA_IN_PROGRESS; |
339 | fsl_chan->idle = false; | ||
330 | } | 340 | } |
331 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | 341 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); |
332 | return 0; | 342 | return 0; |
@@ -648,6 +658,7 @@ static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan) | |||
648 | fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd); | 658 | fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd); |
649 | fsl_edma_enable_request(fsl_chan); | 659 | fsl_edma_enable_request(fsl_chan); |
650 | fsl_chan->status = DMA_IN_PROGRESS; | 660 | fsl_chan->status = DMA_IN_PROGRESS; |
661 | fsl_chan->idle = false; | ||
651 | } | 662 | } |
652 | 663 | ||
653 | static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id) | 664 | static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id) |
@@ -676,6 +687,7 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id) | |||
676 | vchan_cookie_complete(&fsl_chan->edesc->vdesc); | 687 | vchan_cookie_complete(&fsl_chan->edesc->vdesc); |
677 | fsl_chan->edesc = NULL; | 688 | fsl_chan->edesc = NULL; |
678 | fsl_chan->status = DMA_COMPLETE; | 689 | fsl_chan->status = DMA_COMPLETE; |
690 | fsl_chan->idle = true; | ||
679 | } else { | 691 | } else { |
680 | vchan_cyclic_callback(&fsl_chan->edesc->vdesc); | 692 | vchan_cyclic_callback(&fsl_chan->edesc->vdesc); |
681 | } | 693 | } |
@@ -704,6 +716,7 @@ static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id) | |||
704 | edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), | 716 | edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), |
705 | fsl_edma->membase + EDMA_CERR); | 717 | fsl_edma->membase + EDMA_CERR); |
706 | fsl_edma->chans[ch].status = DMA_ERROR; | 718 | fsl_edma->chans[ch].status = DMA_ERROR; |
719 | fsl_edma->chans[ch].idle = true; | ||
707 | } | 720 | } |
708 | } | 721 | } |
709 | return IRQ_HANDLED; | 722 | return IRQ_HANDLED; |
@@ -724,6 +737,12 @@ static void fsl_edma_issue_pending(struct dma_chan *chan) | |||
724 | 737 | ||
725 | spin_lock_irqsave(&fsl_chan->vchan.lock, flags); | 738 | spin_lock_irqsave(&fsl_chan->vchan.lock, flags); |
726 | 739 | ||
740 | if (unlikely(fsl_chan->pm_state != RUNNING)) { | ||
741 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | ||
742 | /* cannot submit due to suspend */ | ||
743 | return; | ||
744 | } | ||
745 | |||
727 | if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc) | 746 | if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc) |
728 | fsl_edma_xfer_desc(fsl_chan); | 747 | fsl_edma_xfer_desc(fsl_chan); |
729 | 748 | ||
@@ -735,6 +754,7 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, | |||
735 | { | 754 | { |
736 | struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data; | 755 | struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data; |
737 | struct dma_chan *chan, *_chan; | 756 | struct dma_chan *chan, *_chan; |
757 | struct fsl_edma_chan *fsl_chan; | ||
738 | unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR; | 758 | unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR; |
739 | 759 | ||
740 | if (dma_spec->args_count != 2) | 760 | if (dma_spec->args_count != 2) |
@@ -748,8 +768,10 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, | |||
748 | chan = dma_get_slave_channel(chan); | 768 | chan = dma_get_slave_channel(chan); |
749 | if (chan) { | 769 | if (chan) { |
750 | chan->device->privatecnt++; | 770 | chan->device->privatecnt++; |
751 | fsl_edma_chan_mux(to_fsl_edma_chan(chan), | 771 | fsl_chan = to_fsl_edma_chan(chan); |
752 | dma_spec->args[1], true); | 772 | fsl_chan->slave_id = dma_spec->args[1]; |
773 | fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, | ||
774 | true); | ||
753 | mutex_unlock(&fsl_edma->fsl_edma_mutex); | 775 | mutex_unlock(&fsl_edma->fsl_edma_mutex); |
754 | return chan; | 776 | return chan; |
755 | } | 777 | } |
@@ -888,7 +910,9 @@ static int fsl_edma_probe(struct platform_device *pdev) | |||
888 | struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i]; | 910 | struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i]; |
889 | 911 | ||
890 | fsl_chan->edma = fsl_edma; | 912 | fsl_chan->edma = fsl_edma; |
891 | 913 | fsl_chan->pm_state = RUNNING; | |
914 | fsl_chan->slave_id = 0; | ||
915 | fsl_chan->idle = true; | ||
892 | fsl_chan->vchan.desc_free = fsl_edma_free_desc; | 916 | fsl_chan->vchan.desc_free = fsl_edma_free_desc; |
893 | vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); | 917 | vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); |
894 | 918 | ||
@@ -959,6 +983,60 @@ static int fsl_edma_remove(struct platform_device *pdev) | |||
959 | return 0; | 983 | return 0; |
960 | } | 984 | } |
961 | 985 | ||
986 | static int fsl_edma_suspend_late(struct device *dev) | ||
987 | { | ||
988 | struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev); | ||
989 | struct fsl_edma_chan *fsl_chan; | ||
990 | unsigned long flags; | ||
991 | int i; | ||
992 | |||
993 | for (i = 0; i < fsl_edma->n_chans; i++) { | ||
994 | fsl_chan = &fsl_edma->chans[i]; | ||
995 | spin_lock_irqsave(&fsl_chan->vchan.lock, flags); | ||
996 | /* Make sure the channel is idle, or force-disable it. */ | ||
997 | if (unlikely(!fsl_chan->idle)) { | ||
998 | dev_warn(dev, "WARN: There is a non-idle channel."); | ||
999 | fsl_edma_disable_request(fsl_chan); | ||
1000 | fsl_edma_chan_mux(fsl_chan, 0, false); | ||
1001 | } | ||
1002 | |||
1003 | fsl_chan->pm_state = SUSPENDED; | ||
1004 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | ||
1005 | } | ||
1006 | |||
1007 | return 0; | ||
1008 | } | ||
1009 | |||
1010 | static int fsl_edma_resume_early(struct device *dev) | ||
1011 | { | ||
1012 | struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev); | ||
1013 | struct fsl_edma_chan *fsl_chan; | ||
1014 | int i; | ||
1015 | |||
1016 | for (i = 0; i < fsl_edma->n_chans; i++) { | ||
1017 | fsl_chan = &fsl_edma->chans[i]; | ||
1018 | fsl_chan->pm_state = RUNNING; | ||
1019 | edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i)); | ||
1020 | if (fsl_chan->slave_id != 0) | ||
1021 | fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true); | ||
1022 | } | ||
1023 | |||
1024 | edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, | ||
1025 | fsl_edma->membase + EDMA_CR); | ||
1026 | |||
1027 | return 0; | ||
1028 | } | ||
1029 | |||
1030 | /* | ||
1031 | * eDMA provides a service to other devices, so it should suspend late | ||
1032 | * and resume early. When eDMA suspends, all of its clients should have | ||
1033 | * stopped their DMA transfers and left their channels idle. | ||
1034 | */ | ||
1035 | static const struct dev_pm_ops fsl_edma_pm_ops = { | ||
1036 | .suspend_late = fsl_edma_suspend_late, | ||
1037 | .resume_early = fsl_edma_resume_early, | ||
1038 | }; | ||
1039 | |||
962 | static const struct of_device_id fsl_edma_dt_ids[] = { | 1040 | static const struct of_device_id fsl_edma_dt_ids[] = { |
963 | { .compatible = "fsl,vf610-edma", }, | 1041 | { .compatible = "fsl,vf610-edma", }, |
964 | { /* sentinel */ } | 1042 | { /* sentinel */ } |
@@ -969,6 +1047,7 @@ static struct platform_driver fsl_edma_driver = { | |||
969 | .driver = { | 1047 | .driver = { |
970 | .name = "fsl-edma", | 1048 | .name = "fsl-edma", |
971 | .of_match_table = fsl_edma_dt_ids, | 1049 | .of_match_table = fsl_edma_dt_ids, |
1050 | .pm = &fsl_edma_pm_ops, | ||
972 | }, | 1051 | }, |
973 | .probe = fsl_edma_probe, | 1052 | .probe = fsl_edma_probe, |
974 | .remove = fsl_edma_remove, | 1053 | .remove = fsl_edma_remove, |
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c index 823ad728aecf..eef145edb936 100644 --- a/drivers/dma/hsu/hsu.c +++ b/drivers/dma/hsu/hsu.c | |||
@@ -228,6 +228,8 @@ static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg( | |||
228 | for_each_sg(sgl, sg, sg_len, i) { | 228 | for_each_sg(sgl, sg, sg_len, i) { |
229 | desc->sg[i].addr = sg_dma_address(sg); | 229 | desc->sg[i].addr = sg_dma_address(sg); |
230 | desc->sg[i].len = sg_dma_len(sg); | 230 | desc->sg[i].len = sg_dma_len(sg); |
231 | |||
232 | desc->length += sg_dma_len(sg); | ||
231 | } | 233 | } |
232 | 234 | ||
233 | desc->nents = sg_len; | 235 | desc->nents = sg_len; |
@@ -249,21 +251,10 @@ static void hsu_dma_issue_pending(struct dma_chan *chan) | |||
249 | spin_unlock_irqrestore(&hsuc->vchan.lock, flags); | 251 | spin_unlock_irqrestore(&hsuc->vchan.lock, flags); |
250 | } | 252 | } |
251 | 253 | ||
252 | static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc) | ||
253 | { | ||
254 | size_t bytes = 0; | ||
255 | unsigned int i; | ||
256 | |||
257 | for (i = desc->active; i < desc->nents; i++) | ||
258 | bytes += desc->sg[i].len; | ||
259 | |||
260 | return bytes; | ||
261 | } | ||
262 | |||
263 | static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc) | 254 | static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc) |
264 | { | 255 | { |
265 | struct hsu_dma_desc *desc = hsuc->desc; | 256 | struct hsu_dma_desc *desc = hsuc->desc; |
266 | size_t bytes = hsu_dma_desc_size(desc); | 257 | size_t bytes = desc->length; |
267 | int i; | 258 | int i; |
268 | 259 | ||
269 | i = desc->active % HSU_DMA_CHAN_NR_DESC; | 260 | i = desc->active % HSU_DMA_CHAN_NR_DESC; |
@@ -294,7 +285,7 @@ static enum dma_status hsu_dma_tx_status(struct dma_chan *chan, | |||
294 | dma_set_residue(state, bytes); | 285 | dma_set_residue(state, bytes); |
295 | status = hsuc->desc->status; | 286 | status = hsuc->desc->status; |
296 | } else if (vdesc) { | 287 | } else if (vdesc) { |
297 | bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc)); | 288 | bytes = to_hsu_dma_desc(vdesc)->length; |
298 | dma_set_residue(state, bytes); | 289 | dma_set_residue(state, bytes); |
299 | } | 290 | } |
300 | spin_unlock_irqrestore(&hsuc->vchan.lock, flags); | 291 | spin_unlock_irqrestore(&hsuc->vchan.lock, flags); |
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h index f06579c6d548..578a8ee8cd05 100644 --- a/drivers/dma/hsu/hsu.h +++ b/drivers/dma/hsu/hsu.h | |||
@@ -65,6 +65,7 @@ struct hsu_dma_desc { | |||
65 | enum dma_transfer_direction direction; | 65 | enum dma_transfer_direction direction; |
66 | struct hsu_dma_sg *sg; | 66 | struct hsu_dma_sg *sg; |
67 | unsigned int nents; | 67 | unsigned int nents; |
68 | size_t length; | ||
68 | unsigned int active; | 69 | unsigned int active; |
69 | enum dma_status status; | 70 | enum dma_status status; |
70 | }; | 71 | }; |
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 7d56b47e4fcf..3cb7b2c78197 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c | |||
@@ -178,20 +178,12 @@ static irqreturn_t idma64_irq(int irq, void *dev) | |||
178 | if (!status) | 178 | if (!status) |
179 | return IRQ_NONE; | 179 | return IRQ_NONE; |
180 | 180 | ||
181 | /* Disable interrupts */ | ||
182 | channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask); | ||
183 | channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask); | ||
184 | |||
185 | status_xfer = dma_readl(idma64, RAW(XFER)); | 181 | status_xfer = dma_readl(idma64, RAW(XFER)); |
186 | status_err = dma_readl(idma64, RAW(ERROR)); | 182 | status_err = dma_readl(idma64, RAW(ERROR)); |
187 | 183 | ||
188 | for (i = 0; i < idma64->dma.chancnt; i++) | 184 | for (i = 0; i < idma64->dma.chancnt; i++) |
189 | idma64_chan_irq(idma64, i, status_err, status_xfer); | 185 | idma64_chan_irq(idma64, i, status_err, status_xfer); |
190 | 186 | ||
191 | /* Re-enable interrupts */ | ||
192 | channel_set_bit(idma64, MASK(XFER), idma64->all_chan_mask); | ||
193 | channel_set_bit(idma64, MASK(ERROR), idma64->all_chan_mask); | ||
194 | |||
195 | return IRQ_HANDLED; | 187 | return IRQ_HANDLED; |
196 | } | 188 | } |
197 | 189 | ||
@@ -239,7 +231,7 @@ static void idma64_vdesc_free(struct virt_dma_desc *vdesc) | |||
239 | idma64_desc_free(idma64c, to_idma64_desc(vdesc)); | 231 | idma64_desc_free(idma64c, to_idma64_desc(vdesc)); |
240 | } | 232 | } |
241 | 233 | ||
242 | static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw, | 234 | static void idma64_hw_desc_fill(struct idma64_hw_desc *hw, |
243 | struct dma_slave_config *config, | 235 | struct dma_slave_config *config, |
244 | enum dma_transfer_direction direction, u64 llp) | 236 | enum dma_transfer_direction direction, u64 llp) |
245 | { | 237 | { |
@@ -276,26 +268,26 @@ static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw, | |||
276 | IDMA64C_CTLL_SRC_WIDTH(src_width); | 268 | IDMA64C_CTLL_SRC_WIDTH(src_width); |
277 | 269 | ||
278 | lli->llp = llp; | 270 | lli->llp = llp; |
279 | return hw->llp; | ||
280 | } | 271 | } |
281 | 272 | ||
282 | static void idma64_desc_fill(struct idma64_chan *idma64c, | 273 | static void idma64_desc_fill(struct idma64_chan *idma64c, |
283 | struct idma64_desc *desc) | 274 | struct idma64_desc *desc) |
284 | { | 275 | { |
285 | struct dma_slave_config *config = &idma64c->config; | 276 | struct dma_slave_config *config = &idma64c->config; |
286 | struct idma64_hw_desc *hw = &desc->hw[desc->ndesc - 1]; | 277 | unsigned int i = desc->ndesc; |
278 | struct idma64_hw_desc *hw = &desc->hw[i - 1]; | ||
287 | struct idma64_lli *lli = hw->lli; | 279 | struct idma64_lli *lli = hw->lli; |
288 | u64 llp = 0; | 280 | u64 llp = 0; |
289 | unsigned int i = desc->ndesc; | ||
290 | 281 | ||
291 | /* Fill the hardware descriptors and link them to a list */ | 282 | /* Fill the hardware descriptors and link them to a list */ |
292 | do { | 283 | do { |
293 | hw = &desc->hw[--i]; | 284 | hw = &desc->hw[--i]; |
294 | llp = idma64_hw_desc_fill(hw, config, desc->direction, llp); | 285 | idma64_hw_desc_fill(hw, config, desc->direction, llp); |
286 | llp = hw->llp; | ||
295 | desc->length += hw->len; | 287 | desc->length += hw->len; |
296 | } while (i); | 288 | } while (i); |
297 | 289 | ||
298 | /* Trigger interrupt after last block */ | 290 | /* Trigger an interrupt after the last block is transferred */ |
299 | lli->ctllo |= IDMA64C_CTLL_INT_EN; | 291 | lli->ctllo |= IDMA64C_CTLL_INT_EN; |
300 | } | 292 | } |
301 | 293 | ||
@@ -596,6 +588,8 @@ static int idma64_probe(struct idma64_chip *chip) | |||
596 | 588 | ||
597 | idma64->dma.dev = chip->dev; | 589 | idma64->dma.dev = chip->dev; |
598 | 590 | ||
591 | dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); | ||
592 | |||
599 | ret = dma_async_device_register(&idma64->dma); | 593 | ret = dma_async_device_register(&idma64->dma); |
600 | if (ret) | 594 | if (ret) |
601 | return ret; | 595 | return ret; |
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h index f6aeff0af8a5..8423f13ed0da 100644 --- a/drivers/dma/idma64.h +++ b/drivers/dma/idma64.h | |||
@@ -54,7 +54,8 @@ | |||
54 | #define IDMA64C_CTLL_LLP_S_EN (1 << 28) /* src block chain */ | 54 | #define IDMA64C_CTLL_LLP_S_EN (1 << 28) /* src block chain */ |
55 | 55 | ||
56 | /* Bitfields in CTL_HI */ | 56 | /* Bitfields in CTL_HI */ |
57 | #define IDMA64C_CTLH_BLOCK_TS(x) ((x) & ((1 << 17) - 1)) | 57 | #define IDMA64C_CTLH_BLOCK_TS_MASK ((1 << 17) - 1) |
58 | #define IDMA64C_CTLH_BLOCK_TS(x) ((x) & IDMA64C_CTLH_BLOCK_TS_MASK) | ||
58 | #define IDMA64C_CTLH_DONE (1 << 17) | 59 | #define IDMA64C_CTLH_DONE (1 << 17) |
59 | 60 | ||
60 | /* Bitfields in CFG_LO */ | 61 | /* Bitfields in CFG_LO */ |
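Editor's note: splitting the mask out of IDMA64C_CTLH_BLOCK_TS() lets one constant describe both the register field and the largest segment the DMA core should hand the driver, which is what the new dma_set_max_seg_size() call in probe consumes. A hedged sketch of the pattern, with demo names:

	#include <linux/dma-mapping.h>

	/* Sketch: one constant serves the register accessor and the core limit. */
	#define DEMO_CTLH_BLOCK_TS_MASK	((1 << 17) - 1)
	#define DEMO_CTLH_BLOCK_TS(x)	((x) & DEMO_CTLH_BLOCK_TS_MASK)

	static int demo_advertise_limit(struct device *dev)
	{
		/* Longer client requests get split by the DMA core. */
		return dma_set_max_seg_size(dev, DEMO_CTLH_BLOCK_TS_MASK);
	}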
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c index 9ca56830cc63..a4c53be482cf 100644 --- a/drivers/dma/img-mdc-dma.c +++ b/drivers/dma/img-mdc-dma.c | |||
@@ -651,6 +651,48 @@ static enum dma_status mdc_tx_status(struct dma_chan *chan, | |||
651 | return ret; | 651 | return ret; |
652 | } | 652 | } |
653 | 653 | ||
654 | static unsigned int mdc_get_new_events(struct mdc_chan *mchan) | ||
655 | { | ||
656 | u32 val, processed, done1, done2; | ||
657 | unsigned int ret; | ||
658 | |||
659 | val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED); | ||
660 | processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) & | ||
661 | MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK; | ||
662 | /* | ||
663 | * CMDS_DONE may have incremented between reading CMDS_PROCESSED | ||
664 | * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we | ||
665 | * didn't miss a command completion. | ||
666 | */ | ||
667 | do { | ||
668 | val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED); | ||
669 | |||
670 | done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) & | ||
671 | MDC_CMDS_PROCESSED_CMDS_DONE_MASK; | ||
672 | |||
673 | val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK << | ||
674 | MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) | | ||
675 | MDC_CMDS_PROCESSED_INT_ACTIVE); | ||
676 | |||
677 | val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT; | ||
678 | |||
679 | mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED); | ||
680 | |||
681 | val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED); | ||
682 | |||
683 | done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) & | ||
684 | MDC_CMDS_PROCESSED_CMDS_DONE_MASK; | ||
685 | } while (done1 != done2); | ||
686 | |||
687 | if (done1 >= processed) | ||
688 | ret = done1 - processed; | ||
689 | else | ||
690 | ret = ((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1) - | ||
691 | processed) + done1; | ||
692 | |||
693 | return ret; | ||
694 | } | ||
695 | |||
654 | static int mdc_terminate_all(struct dma_chan *chan) | 696 | static int mdc_terminate_all(struct dma_chan *chan) |
655 | { | 697 | { |
656 | struct mdc_chan *mchan = to_mdc_chan(chan); | 698 | struct mdc_chan *mchan = to_mdc_chan(chan); |
@@ -667,6 +709,8 @@ static int mdc_terminate_all(struct dma_chan *chan) | |||
667 | mchan->desc = NULL; | 709 | mchan->desc = NULL; |
668 | vchan_get_all_descriptors(&mchan->vc, &head); | 710 | vchan_get_all_descriptors(&mchan->vc, &head); |
669 | 711 | ||
712 | mdc_get_new_events(mchan); | ||
713 | |||
670 | spin_unlock_irqrestore(&mchan->vc.lock, flags); | 714 | spin_unlock_irqrestore(&mchan->vc.lock, flags); |
671 | 715 | ||
672 | if (mdesc) | 716 | if (mdesc) |
@@ -703,35 +747,17 @@ static irqreturn_t mdc_chan_irq(int irq, void *dev_id) | |||
703 | { | 747 | { |
704 | struct mdc_chan *mchan = (struct mdc_chan *)dev_id; | 748 | struct mdc_chan *mchan = (struct mdc_chan *)dev_id; |
705 | struct mdc_tx_desc *mdesc; | 749 | struct mdc_tx_desc *mdesc; |
706 | u32 val, processed, done1, done2; | 750 | unsigned int i, new_events; |
707 | unsigned int i; | ||
708 | 751 | ||
709 | spin_lock(&mchan->vc.lock); | 752 | spin_lock(&mchan->vc.lock); |
710 | 753 | ||
711 | val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED); | ||
712 | processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) & | ||
713 | MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK; | ||
714 | /* | ||
715 | * CMDS_DONE may have incremented between reading CMDS_PROCESSED | ||
716 | * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we | ||
717 | * didn't miss a command completion. | ||
718 | */ | ||
719 | do { | ||
720 | val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED); | ||
721 | done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) & | ||
722 | MDC_CMDS_PROCESSED_CMDS_DONE_MASK; | ||
723 | val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK << | ||
724 | MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) | | ||
725 | MDC_CMDS_PROCESSED_INT_ACTIVE); | ||
726 | val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT; | ||
727 | mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED); | ||
728 | val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED); | ||
729 | done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) & | ||
730 | MDC_CMDS_PROCESSED_CMDS_DONE_MASK; | ||
731 | } while (done1 != done2); | ||
732 | |||
733 | dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr); | 754 | dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr); |
734 | 755 | ||
756 | new_events = mdc_get_new_events(mchan); | ||
757 | |||
758 | if (!new_events) | ||
759 | goto out; | ||
760 | |||
735 | mdesc = mchan->desc; | 761 | mdesc = mchan->desc; |
736 | if (!mdesc) { | 762 | if (!mdesc) { |
737 | dev_warn(mdma2dev(mchan->mdma), | 763 | dev_warn(mdma2dev(mchan->mdma), |
@@ -740,8 +766,7 @@ static irqreturn_t mdc_chan_irq(int irq, void *dev_id) | |||
740 | goto out; | 766 | goto out; |
741 | } | 767 | } |
742 | 768 | ||
743 | for (i = processed; i != done1; | 769 | for (i = 0; i < new_events; i++) { |
744 | i = (i + 1) % (MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1)) { | ||
745 | /* | 770 | /* |
746 | * The first interrupt in a transfer indicates that the | 771 | * The first interrupt in a transfer indicates that the |
747 | * command list has been loaded, not that a command has | 772 | * command list has been loaded, not that a command has |
@@ -979,7 +1004,6 @@ static int mdc_dma_remove(struct platform_device *pdev) | |||
979 | vc.chan.device_node) { | 1004 | vc.chan.device_node) { |
980 | list_del(&mchan->vc.chan.device_node); | 1005 | list_del(&mchan->vc.chan.device_node); |
981 | 1006 | ||
982 | synchronize_irq(mchan->irq); | ||
983 | devm_free_irq(&pdev->dev, mchan->irq, mchan); | 1007 | devm_free_irq(&pdev->dev, mchan->irq, mchan); |
984 | 1008 | ||
985 | tasklet_kill(&mchan->vc.task); | 1009 | tasklet_kill(&mchan->vc.task); |
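Editor's note: the subtle part of the factored-out helper is the wrap handling. CMDS_DONE and CMDS_PROCESSED are narrow hardware counters, so "done" can be numerically below "processed" after a wrap. The arithmetic, isolated into a self-contained sketch:

	/* Sketch of the wraparound arithmetic: both counters live in an
	 * N-bit field, so "done" may have wrapped below "processed". */
	static unsigned int demo_new_events(unsigned int processed,
					    unsigned int done,
					    unsigned int counter_mask)
	{
		if (done >= processed)
			return done - processed;

		/* done wrapped past the top of the field */
		return (counter_mask + 1 - processed) + done;
	}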
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c index 2cb7c308d5c7..0b9b6b07db9e 100644 --- a/drivers/dma/ioat/dca.c +++ b/drivers/dma/ioat/dca.c | |||
@@ -224,7 +224,7 @@ static u8 ioat_dca_get_tag(struct dca_provider *dca, | |||
224 | return tag; | 224 | return tag; |
225 | } | 225 | } |
226 | 226 | ||
227 | static struct dca_ops ioat_dca_ops = { | 227 | static const struct dca_ops ioat_dca_ops = { |
228 | .add_requester = ioat_dca_add_requester, | 228 | .add_requester = ioat_dca_add_requester, |
229 | .remove_requester = ioat_dca_remove_requester, | 229 | .remove_requester = ioat_dca_remove_requester, |
230 | .get_tag = ioat_dca_get_tag, | 230 | .get_tag = ioat_dca_get_tag, |
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 8f4e607d5817..b8f48074789f 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h | |||
@@ -235,43 +235,11 @@ ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index) | |||
235 | return ioat_dma->idx[index]; | 235 | return ioat_dma->idx[index]; |
236 | } | 236 | } |
237 | 237 | ||
238 | static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan) | ||
239 | { | ||
240 | u8 ver = ioat_chan->ioat_dma->version; | ||
241 | u64 status; | ||
242 | u32 status_lo; | ||
243 | |||
244 | /* We need to read the low address first as this causes the | ||
245 | * chipset to latch the upper bits for the subsequent read | ||
246 | */ | ||
247 | status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver)); | ||
248 | status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver)); | ||
249 | status <<= 32; | ||
250 | status |= status_lo; | ||
251 | |||
252 | return status; | ||
253 | } | ||
254 | |||
255 | #if BITS_PER_LONG == 64 | ||
256 | |||
257 | static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan) | 238 | static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan) |
258 | { | 239 | { |
259 | u8 ver = ioat_chan->ioat_dma->version; | 240 | return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET); |
260 | u64 status; | ||
261 | |||
262 | /* With IOAT v3.3 the status register is 64bit. */ | ||
263 | if (ver >= IOAT_VER_3_3) | ||
264 | status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver)); | ||
265 | else | ||
266 | status = ioat_chansts_32(ioat_chan); | ||
267 | |||
268 | return status; | ||
269 | } | 241 | } |
270 | 242 | ||
271 | #else | ||
272 | #define ioat_chansts ioat_chansts_32 | ||
273 | #endif | ||
274 | |||
275 | static inline u64 ioat_chansts_to_addr(u64 status) | 243 | static inline u64 ioat_chansts_to_addr(u64 status) |
276 | { | 244 | { |
277 | return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | 245 | return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; |
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 909352f74c89..4994a3623aee 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h | |||
@@ -99,19 +99,9 @@ | |||
99 | #define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */ | 99 | #define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */ |
100 | #define IOAT_DMA_COMP_V2 0x0002 /* Compatibility with DMA version 2 */ | 100 | #define IOAT_DMA_COMP_V2 0x0002 /* Compatibility with DMA version 2 */ |
101 | 101 | ||
102 | 102 | /* IOAT1 define left so the i7300_idle driver does not fail to compile */ |
103 | #define IOAT1_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */ | 103 | #define IOAT1_CHANSTS_OFFSET 0x04 |
104 | #define IOAT2_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */ | 104 | #define IOAT_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */ |
105 | #define IOAT_CHANSTS_OFFSET(ver) ((ver) < IOAT_VER_2_0 \ | ||
106 | ? IOAT1_CHANSTS_OFFSET : IOAT2_CHANSTS_OFFSET) | ||
107 | #define IOAT1_CHANSTS_OFFSET_LOW 0x04 | ||
108 | #define IOAT2_CHANSTS_OFFSET_LOW 0x08 | ||
109 | #define IOAT_CHANSTS_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \ | ||
110 | ? IOAT1_CHANSTS_OFFSET_LOW : IOAT2_CHANSTS_OFFSET_LOW) | ||
111 | #define IOAT1_CHANSTS_OFFSET_HIGH 0x08 | ||
112 | #define IOAT2_CHANSTS_OFFSET_HIGH 0x0C | ||
113 | #define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \ | ||
114 | ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH) | ||
115 | #define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL) | 105 | #define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL) |
116 | #define IOAT_CHANSTS_SOFT_ERR 0x10ULL | 106 | #define IOAT_CHANSTS_SOFT_ERR 0x10ULL |
117 | #define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL | 107 | #define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL |
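Editor's note: with only v3.x hardware left to support, the status register is always 64-bit at a fixed offset, so the split lo/hi read (which relied on the low read latching the upper bits) can go. On 32-bit builds where readq is not native, the kernel's io-64-nonatomic-lo-hi.h header provides a fallback equivalent to roughly:

	#include <linux/io.h>

	/* Sketch of a lo-then-hi composed 64-bit MMIO read; the low word is
	 * read first, matching the latching behaviour the old helper used. */
	static inline u64 demo_lo_hi_readq(const volatile void __iomem *addr)
	{
		u32 lo = readl(addr);
		u32 hi = readl(addr + 4);

		return ((u64)hi << 32) | lo;
	}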
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 1c2de9a834a9..14091f878f80 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -139,46 +139,10 @@ static void mv_chan_clear_err_status(struct mv_xor_chan *chan) | |||
139 | } | 139 | } |
140 | 140 | ||
141 | static void mv_chan_set_mode(struct mv_xor_chan *chan, | 141 | static void mv_chan_set_mode(struct mv_xor_chan *chan, |
142 | enum dma_transaction_type type) | 142 | u32 op_mode) |
143 | { | 143 | { |
144 | u32 op_mode; | ||
145 | u32 config = readl_relaxed(XOR_CONFIG(chan)); | 144 | u32 config = readl_relaxed(XOR_CONFIG(chan)); |
146 | 145 | ||
147 | switch (type) { | ||
148 | case DMA_XOR: | ||
149 | op_mode = XOR_OPERATION_MODE_XOR; | ||
150 | break; | ||
151 | case DMA_MEMCPY: | ||
152 | op_mode = XOR_OPERATION_MODE_MEMCPY; | ||
153 | break; | ||
154 | default: | ||
155 | dev_err(mv_chan_to_devp(chan), | ||
156 | "error: unsupported operation %d\n", | ||
157 | type); | ||
158 | BUG(); | ||
159 | return; | ||
160 | } | ||
161 | |||
162 | config &= ~0x7; | ||
163 | config |= op_mode; | ||
164 | |||
165 | #if defined(__BIG_ENDIAN) | ||
166 | config |= XOR_DESCRIPTOR_SWAP; | ||
167 | #else | ||
168 | config &= ~XOR_DESCRIPTOR_SWAP; | ||
169 | #endif | ||
170 | |||
171 | writel_relaxed(config, XOR_CONFIG(chan)); | ||
172 | chan->current_type = type; | ||
173 | } | ||
174 | |||
175 | static void mv_chan_set_mode_to_desc(struct mv_xor_chan *chan) | ||
176 | { | ||
177 | u32 op_mode; | ||
178 | u32 config = readl_relaxed(XOR_CONFIG(chan)); | ||
179 | |||
180 | op_mode = XOR_OPERATION_MODE_IN_DESC; | ||
181 | |||
182 | config &= ~0x7; | 146 | config &= ~0x7; |
183 | config |= op_mode; | 147 | config |= op_mode; |
184 | 148 | ||
@@ -1043,9 +1007,9 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1043 | mv_chan_unmask_interrupts(mv_chan); | 1007 | mv_chan_unmask_interrupts(mv_chan); |
1044 | 1008 | ||
1045 | if (mv_chan->op_in_desc == XOR_MODE_IN_DESC) | 1009 | if (mv_chan->op_in_desc == XOR_MODE_IN_DESC) |
1046 | mv_chan_set_mode_to_desc(mv_chan); | 1010 | mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC); |
1047 | else | 1011 | else |
1048 | mv_chan_set_mode(mv_chan, DMA_XOR); | 1012 | mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR); |
1049 | 1013 | ||
1050 | spin_lock_init(&mv_chan->lock); | 1014 | spin_lock_init(&mv_chan->lock); |
1051 | INIT_LIST_HEAD(&mv_chan->chain); | 1015 | INIT_LIST_HEAD(&mv_chan->chain); |
@@ -1121,6 +1085,57 @@ mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, | |||
1121 | writel(0, base + WINDOW_OVERRIDE_CTRL(1)); | 1085 | writel(0, base + WINDOW_OVERRIDE_CTRL(1)); |
1122 | } | 1086 | } |
1123 | 1087 | ||
1088 | /* | ||
1089 | * Since this XOR driver is basically used only for RAID5, we don't | ||
1090 | * need to care about synchronizing ->suspend with DMA activity, | ||
1091 | * because the DMA engine will naturally be quiet due to the block | ||
1092 | * devices being suspended. | ||
1093 | */ | ||
1094 | static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state) | ||
1095 | { | ||
1096 | struct mv_xor_device *xordev = platform_get_drvdata(pdev); | ||
1097 | int i; | ||
1098 | |||
1099 | for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { | ||
1100 | struct mv_xor_chan *mv_chan = xordev->channels[i]; | ||
1101 | |||
1102 | if (!mv_chan) | ||
1103 | continue; | ||
1104 | |||
1105 | mv_chan->saved_config_reg = | ||
1106 | readl_relaxed(XOR_CONFIG(mv_chan)); | ||
1107 | mv_chan->saved_int_mask_reg = | ||
1108 | readl_relaxed(XOR_INTR_MASK(mv_chan)); | ||
1109 | } | ||
1110 | |||
1111 | return 0; | ||
1112 | } | ||
1113 | |||
1114 | static int mv_xor_resume(struct platform_device *dev) | ||
1115 | { | ||
1116 | struct mv_xor_device *xordev = platform_get_drvdata(dev); | ||
1117 | const struct mbus_dram_target_info *dram; | ||
1118 | int i; | ||
1119 | |||
1120 | for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { | ||
1121 | struct mv_xor_chan *mv_chan = xordev->channels[i]; | ||
1122 | |||
1123 | if (!mv_chan) | ||
1124 | continue; | ||
1125 | |||
1126 | writel_relaxed(mv_chan->saved_config_reg, | ||
1127 | XOR_CONFIG(mv_chan)); | ||
1128 | writel_relaxed(mv_chan->saved_int_mask_reg, | ||
1129 | XOR_INTR_MASK(mv_chan)); | ||
1130 | } | ||
1131 | |||
1132 | dram = mv_mbus_dram_info(); | ||
1133 | if (dram) | ||
1134 | mv_xor_conf_mbus_windows(xordev, dram); | ||
1135 | |||
1136 | return 0; | ||
1137 | } | ||
1138 | |||
1124 | static const struct of_device_id mv_xor_dt_ids[] = { | 1139 | static const struct of_device_id mv_xor_dt_ids[] = { |
1125 | { .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG }, | 1140 | { .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG }, |
1126 | { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC }, | 1141 | { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC }, |
@@ -1282,6 +1297,8 @@ err_channel_add: | |||
1282 | 1297 | ||
1283 | static struct platform_driver mv_xor_driver = { | 1298 | static struct platform_driver mv_xor_driver = { |
1284 | .probe = mv_xor_probe, | 1299 | .probe = mv_xor_probe, |
1300 | .suspend = mv_xor_suspend, | ||
1301 | .resume = mv_xor_resume, | ||
1285 | .driver = { | 1302 | .driver = { |
1286 | .name = MV_XOR_NAME, | 1303 | .name = MV_XOR_NAME, |
1287 | .of_match_table = of_match_ptr(mv_xor_dt_ids), | 1304 | .of_match_table = of_match_ptr(mv_xor_dt_ids), |
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index b7455b42137b..c19fe30e5ae9 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h | |||
@@ -110,7 +110,6 @@ struct mv_xor_chan { | |||
110 | void __iomem *mmr_high_base; | 110 | void __iomem *mmr_high_base; |
111 | unsigned int idx; | 111 | unsigned int idx; |
112 | int irq; | 112 | int irq; |
113 | enum dma_transaction_type current_type; | ||
114 | struct list_head chain; | 113 | struct list_head chain; |
115 | struct list_head free_slots; | 114 | struct list_head free_slots; |
116 | struct list_head allocated_slots; | 115 | struct list_head allocated_slots; |
@@ -126,6 +125,7 @@ struct mv_xor_chan { | |||
126 | char dummy_src[MV_XOR_MIN_BYTE_COUNT]; | 125 | char dummy_src[MV_XOR_MIN_BYTE_COUNT]; |
127 | char dummy_dst[MV_XOR_MIN_BYTE_COUNT]; | 126 | char dummy_dst[MV_XOR_MIN_BYTE_COUNT]; |
128 | dma_addr_t dummy_src_addr, dummy_dst_addr; | 127 | dma_addr_t dummy_src_addr, dummy_dst_addr; |
128 | u32 saved_config_reg, saved_int_mask_reg; | ||
129 | }; | 129 | }; |
130 | 130 | ||
131 | /** | 131 | /** |
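Editor's note: the patch wires the new handlers through the legacy platform_driver .suspend/.resume hooks. Expressed with the newer dev_pm_ops convention instead, the same callbacks would be wrapped roughly like this (hypothetical glue, not part of the patch):

	#include <linux/platform_device.h>
	#include <linux/pm.h>

	/* Hypothetical dev_pm_ops glue around the callbacks added above. */
	static int demo_mv_xor_suspend(struct device *dev)
	{
		return mv_xor_suspend(to_platform_device(dev), PMSG_SUSPEND);
	}

	static int demo_mv_xor_resume(struct device *dev)
	{
		return mv_xor_resume(to_platform_device(dev));
	}

	static SIMPLE_DEV_PM_OPS(demo_mv_xor_pm_ops,
				 demo_mv_xor_suspend, demo_mv_xor_resume);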
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 1dfc71c90123..9794b073d7d7 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
@@ -28,8 +28,6 @@ | |||
28 | struct omap_dmadev { | 28 | struct omap_dmadev { |
29 | struct dma_device ddev; | 29 | struct dma_device ddev; |
30 | spinlock_t lock; | 30 | spinlock_t lock; |
31 | struct tasklet_struct task; | ||
32 | struct list_head pending; | ||
33 | void __iomem *base; | 31 | void __iomem *base; |
34 | const struct omap_dma_reg *reg_map; | 32 | const struct omap_dma_reg *reg_map; |
35 | struct omap_system_dma_plat_info *plat; | 33 | struct omap_system_dma_plat_info *plat; |
@@ -42,7 +40,6 @@ struct omap_dmadev { | |||
42 | 40 | ||
43 | struct omap_chan { | 41 | struct omap_chan { |
44 | struct virt_dma_chan vc; | 42 | struct virt_dma_chan vc; |
45 | struct list_head node; | ||
46 | void __iomem *channel_base; | 43 | void __iomem *channel_base; |
47 | const struct omap_dma_reg *reg_map; | 44 | const struct omap_dma_reg *reg_map; |
48 | uint32_t ccr; | 45 | uint32_t ccr; |
@@ -454,33 +451,6 @@ static void omap_dma_callback(int ch, u16 status, void *data) | |||
454 | spin_unlock_irqrestore(&c->vc.lock, flags); | 451 | spin_unlock_irqrestore(&c->vc.lock, flags); |
455 | } | 452 | } |
456 | 453 | ||
457 | /* | ||
458 | * This callback schedules all pending channels. We could be more | ||
459 | * clever here by postponing allocation of the real DMA channels to | ||
460 | * this point, and freeing them when our virtual channel becomes idle. | ||
461 | * | ||
462 | * We would then need to deal with 'all channels in-use' | ||
463 | */ | ||
464 | static void omap_dma_sched(unsigned long data) | ||
465 | { | ||
466 | struct omap_dmadev *d = (struct omap_dmadev *)data; | ||
467 | LIST_HEAD(head); | ||
468 | |||
469 | spin_lock_irq(&d->lock); | ||
470 | list_splice_tail_init(&d->pending, &head); | ||
471 | spin_unlock_irq(&d->lock); | ||
472 | |||
473 | while (!list_empty(&head)) { | ||
474 | struct omap_chan *c = list_first_entry(&head, | ||
475 | struct omap_chan, node); | ||
476 | |||
477 | spin_lock_irq(&c->vc.lock); | ||
478 | list_del_init(&c->node); | ||
479 | omap_dma_start_desc(c); | ||
480 | spin_unlock_irq(&c->vc.lock); | ||
481 | } | ||
482 | } | ||
483 | |||
484 | static irqreturn_t omap_dma_irq(int irq, void *devid) | 454 | static irqreturn_t omap_dma_irq(int irq, void *devid) |
485 | { | 455 | { |
486 | struct omap_dmadev *od = devid; | 456 | struct omap_dmadev *od = devid; |
@@ -703,8 +673,14 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan, | |||
703 | struct omap_chan *c = to_omap_dma_chan(chan); | 673 | struct omap_chan *c = to_omap_dma_chan(chan); |
704 | struct virt_dma_desc *vd; | 674 | struct virt_dma_desc *vd; |
705 | enum dma_status ret; | 675 | enum dma_status ret; |
676 | uint32_t ccr; | ||
706 | unsigned long flags; | 677 | unsigned long flags; |
707 | 678 | ||
679 | ccr = omap_dma_chan_read(c, CCR); | ||
680 | /* The channel is no longer active, handle the completion right away */ | ||
681 | if (!(ccr & CCR_ENABLE)) | ||
682 | omap_dma_callback(c->dma_ch, 0, c); | ||
683 | |||
708 | ret = dma_cookie_status(chan, cookie, txstate); | 684 | ret = dma_cookie_status(chan, cookie, txstate); |
709 | if (ret == DMA_COMPLETE || !txstate) | 685 | if (ret == DMA_COMPLETE || !txstate) |
710 | return ret; | 686 | return ret; |
@@ -719,7 +695,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan, | |||
719 | 695 | ||
720 | if (d->dir == DMA_MEM_TO_DEV) | 696 | if (d->dir == DMA_MEM_TO_DEV) |
721 | pos = omap_dma_get_src_pos(c); | 697 | pos = omap_dma_get_src_pos(c); |
722 | else if (d->dir == DMA_DEV_TO_MEM) | 698 | else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) |
723 | pos = omap_dma_get_dst_pos(c); | 699 | pos = omap_dma_get_dst_pos(c); |
724 | else | 700 | else |
725 | pos = 0; | 701 | pos = 0; |
@@ -739,22 +715,8 @@ static void omap_dma_issue_pending(struct dma_chan *chan) | |||
739 | unsigned long flags; | 715 | unsigned long flags; |
740 | 716 | ||
741 | spin_lock_irqsave(&c->vc.lock, flags); | 717 | spin_lock_irqsave(&c->vc.lock, flags); |
742 | if (vchan_issue_pending(&c->vc) && !c->desc) { | 718 | if (vchan_issue_pending(&c->vc) && !c->desc) |
743 | /* | 719 | omap_dma_start_desc(c); |
744 | * c->cyclic is used only by audio and in this case the DMA need | ||
745 | * to be started without delay. | ||
746 | */ | ||
747 | if (!c->cyclic) { | ||
748 | struct omap_dmadev *d = to_omap_dma_dev(chan->device); | ||
749 | spin_lock(&d->lock); | ||
750 | if (list_empty(&c->node)) | ||
751 | list_add_tail(&c->node, &d->pending); | ||
752 | spin_unlock(&d->lock); | ||
753 | tasklet_schedule(&d->task); | ||
754 | } else { | ||
755 | omap_dma_start_desc(c); | ||
756 | } | ||
757 | } | ||
758 | spin_unlock_irqrestore(&c->vc.lock, flags); | 720 | spin_unlock_irqrestore(&c->vc.lock, flags); |
759 | } | 721 | } |
760 | 722 | ||
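Editor's note: with the per-device tasklet gone, every channel now starts its first descriptor directly in issue_pending under the channel's vc.lock, and the old "cyclic audio starts immediately, everything else via tasklet" split disappears. The resulting shape, with generic names and a hypothetical start helper:

	#include <linux/spinlock.h>
	#include "virt-dma.h"

	struct demo_desc;

	struct demo_chan {
		struct virt_dma_chan vc;
		struct demo_desc *desc;		/* currently running descriptor */
	};

	static void demo_start_desc(struct demo_chan *c);	/* hypothetical */

	/* Mirrors the simplified issue_pending: start inline, no deferral. */
	static void demo_issue_pending(struct demo_chan *c)
	{
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		if (vchan_issue_pending(&c->vc) && !c->desc)
			demo_start_desc(c);
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}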
@@ -768,7 +730,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( | |||
768 | struct scatterlist *sgent; | 730 | struct scatterlist *sgent; |
769 | struct omap_desc *d; | 731 | struct omap_desc *d; |
770 | dma_addr_t dev_addr; | 732 | dma_addr_t dev_addr; |
771 | unsigned i, j = 0, es, en, frame_bytes; | 733 | unsigned i, es, en, frame_bytes; |
772 | u32 burst; | 734 | u32 burst; |
773 | 735 | ||
774 | if (dir == DMA_DEV_TO_MEM) { | 736 | if (dir == DMA_DEV_TO_MEM) { |
@@ -845,13 +807,12 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( | |||
845 | en = burst; | 807 | en = burst; |
846 | frame_bytes = es_bytes[es] * en; | 808 | frame_bytes = es_bytes[es] * en; |
847 | for_each_sg(sgl, sgent, sglen, i) { | 809 | for_each_sg(sgl, sgent, sglen, i) { |
848 | d->sg[j].addr = sg_dma_address(sgent); | 810 | d->sg[i].addr = sg_dma_address(sgent); |
849 | d->sg[j].en = en; | 811 | d->sg[i].en = en; |
850 | d->sg[j].fn = sg_dma_len(sgent) / frame_bytes; | 812 | d->sg[i].fn = sg_dma_len(sgent) / frame_bytes; |
851 | j++; | ||
852 | } | 813 | } |
853 | 814 | ||
854 | d->sglen = j; | 815 | d->sglen = sglen; |
855 | 816 | ||
856 | return vchan_tx_prep(&c->vc, &d->vd, tx_flags); | 817 | return vchan_tx_prep(&c->vc, &d->vd, tx_flags); |
857 | } | 818 | } |
@@ -1018,17 +979,11 @@ static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config | |||
1018 | static int omap_dma_terminate_all(struct dma_chan *chan) | 979 | static int omap_dma_terminate_all(struct dma_chan *chan) |
1019 | { | 980 | { |
1020 | struct omap_chan *c = to_omap_dma_chan(chan); | 981 | struct omap_chan *c = to_omap_dma_chan(chan); |
1021 | struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); | ||
1022 | unsigned long flags; | 982 | unsigned long flags; |
1023 | LIST_HEAD(head); | 983 | LIST_HEAD(head); |
1024 | 984 | ||
1025 | spin_lock_irqsave(&c->vc.lock, flags); | 985 | spin_lock_irqsave(&c->vc.lock, flags); |
1026 | 986 | ||
1027 | /* Prevent this channel being scheduled */ | ||
1028 | spin_lock(&d->lock); | ||
1029 | list_del_init(&c->node); | ||
1030 | spin_unlock(&d->lock); | ||
1031 | |||
1032 | /* | 987 | /* |
1033 | * Stop DMA activity: we assume the callback will not be called | 988 | * Stop DMA activity: we assume the callback will not be called |
1034 | * after omap_dma_stop() returns (even if it does, it will see | 989 | * after omap_dma_stop() returns (even if it does, it will see |
@@ -1102,14 +1057,12 @@ static int omap_dma_chan_init(struct omap_dmadev *od) | |||
1102 | c->reg_map = od->reg_map; | 1057 | c->reg_map = od->reg_map; |
1103 | c->vc.desc_free = omap_dma_desc_free; | 1058 | c->vc.desc_free = omap_dma_desc_free; |
1104 | vchan_init(&c->vc, &od->ddev); | 1059 | vchan_init(&c->vc, &od->ddev); |
1105 | INIT_LIST_HEAD(&c->node); | ||
1106 | 1060 | ||
1107 | return 0; | 1061 | return 0; |
1108 | } | 1062 | } |
1109 | 1063 | ||
1110 | static void omap_dma_free(struct omap_dmadev *od) | 1064 | static void omap_dma_free(struct omap_dmadev *od) |
1111 | { | 1065 | { |
1112 | tasklet_kill(&od->task); | ||
1113 | while (!list_empty(&od->ddev.channels)) { | 1066 | while (!list_empty(&od->ddev.channels)) { |
1114 | struct omap_chan *c = list_first_entry(&od->ddev.channels, | 1067 | struct omap_chan *c = list_first_entry(&od->ddev.channels, |
1115 | struct omap_chan, vc.chan.device_node); | 1068 | struct omap_chan, vc.chan.device_node); |
@@ -1165,12 +1118,9 @@ static int omap_dma_probe(struct platform_device *pdev) | |||
1165 | od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | 1118 | od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
1166 | od->ddev.dev = &pdev->dev; | 1119 | od->ddev.dev = &pdev->dev; |
1167 | INIT_LIST_HEAD(&od->ddev.channels); | 1120 | INIT_LIST_HEAD(&od->ddev.channels); |
1168 | INIT_LIST_HEAD(&od->pending); | ||
1169 | spin_lock_init(&od->lock); | 1121 | spin_lock_init(&od->lock); |
1170 | spin_lock_init(&od->irq_lock); | 1122 | spin_lock_init(&od->irq_lock); |
1171 | 1123 | ||
1172 | tasklet_init(&od->task, omap_dma_sched, (unsigned long)od); | ||
1173 | |||
1174 | od->dma_requests = OMAP_SDMA_REQUESTS; | 1124 | od->dma_requests = OMAP_SDMA_REQUESTS; |
1175 | if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, | 1125 | if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, |
1176 | "dma-requests", | 1126 | "dma-requests", |
@@ -1203,6 +1153,10 @@ static int omap_dma_probe(struct platform_device *pdev) | |||
1203 | return rc; | 1153 | return rc; |
1204 | } | 1154 | } |
1205 | 1155 | ||
1156 | od->ddev.filter.map = od->plat->slave_map; | ||
1157 | od->ddev.filter.mapcnt = od->plat->slavecnt; | ||
1158 | od->ddev.filter.fn = omap_dma_filter_fn; | ||
1159 | |||
1206 | rc = dma_async_device_register(&od->ddev); | 1160 | rc = dma_async_device_register(&od->ddev); |
1207 | if (rc) { | 1161 | if (rc) { |
1208 | pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", | 1162 | pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", |
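Editor's note: the three filter fields let dma_request_chan() resolve a slave channel from a static table instead of a board-specific filter callback; the table itself comes from platform data (od->plat->slave_map). A sketch of what such a table holds, with hypothetical entries:

	#include <linux/dmaengine.h>

	/* Hypothetical entries; the real table is od->plat->slave_map. */
	static const struct dma_slave_map demo_slave_map[] = {
		{ "omap_uart.0", "tx", (void *)49 },	/* devname, channel, filter param */
		{ "omap_uart.0", "rx", (void *)50 },
	};

	/* A client then asks by name; the core matches devname + channel
	 * and invokes filter.fn with the stored param, e.g.:
	 *	chan = dma_request_chan(&client_pdev->dev, "tx");
	 */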
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index fc4156afa070..f2a0310ae771 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c | |||
@@ -1414,6 +1414,7 @@ static int pxad_probe(struct platform_device *op) | |||
1414 | pdev->slave.dst_addr_widths = widths; | 1414 | pdev->slave.dst_addr_widths = widths; |
1415 | pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); | 1415 | pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); |
1416 | pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | 1416 | pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; |
1417 | pdev->slave.descriptor_reuse = true; | ||
1417 | 1418 | ||
1418 | pdev->slave.dev = &op->dev; | 1419 | pdev->slave.dev = &op->dev; |
1419 | ret = pxad_init_dmadev(op, pdev, dma_channels); | 1420 | ret = pxad_init_dmadev(op, pdev, dma_channels); |
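Editor's note: setting descriptor_reuse advertises that clients may recycle a completed descriptor instead of re-preparing it; pxa_dma becomes a user of the reuse feature introduced in this series. A hedged client-side sketch, assuming chan/buf/len are set up elsewhere:

	#include <linux/dmaengine.h>

	static int demo_submit_reusable(struct dma_chan *chan, dma_addr_t buf,
					size_t len)
	{
		struct dma_async_tx_descriptor *txd;

		txd = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
						  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!txd || dmaengine_desc_set_reuse(txd))
			return -EIO;

		dmaengine_submit(txd);
		dma_async_issue_pending(chan);
		/* After completion the same txd may be submitted again. */
		return 0;
	}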
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index 9fda65af841e..f32c430eb16c 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig | |||
@@ -47,12 +47,6 @@ config RCAR_DMAC | |||
47 | This driver supports the general purpose DMA controller found in the | 47 | This driver supports the general purpose DMA controller found in the |
48 | Renesas R-Car second generation SoCs. | 48 | Renesas R-Car second generation SoCs. |
49 | 49 | ||
50 | config RCAR_HPB_DMAE | ||
51 | tristate "Renesas R-Car HPB DMAC support" | ||
52 | depends on SH_DMAE_BASE | ||
53 | help | ||
54 | Enable support for the Renesas R-Car series DMA controllers. | ||
55 | |||
56 | config RENESAS_USB_DMAC | 50 | config RENESAS_USB_DMAC |
57 | tristate "Renesas USB-DMA Controller" | 51 | tristate "Renesas USB-DMA Controller" |
58 | depends on ARCH_SHMOBILE || COMPILE_TEST | 52 | depends on ARCH_SHMOBILE || COMPILE_TEST |
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index 0133e4658196..f1e2fd64f279 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile | |||
@@ -14,6 +14,5 @@ shdma-objs := $(shdma-y) | |||
14 | obj-$(CONFIG_SH_DMAE) += shdma.o | 14 | obj-$(CONFIG_SH_DMAE) += shdma.o |
15 | 15 | ||
16 | obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o | 16 | obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o |
17 | obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o | ||
18 | obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o | 17 | obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o |
19 | obj-$(CONFIG_SUDMAC) += sudmac.o | 18 | obj-$(CONFIG_SUDMAC) += sudmac.o |
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c deleted file mode 100644 index 749f26ecd3b3..000000000000 --- a/drivers/dma/sh/rcar-hpbdma.c +++ /dev/null | |||
@@ -1,669 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011-2013 Renesas Electronics Corporation | ||
3 | * Copyright (C) 2013 Cogent Embedded, Inc. | ||
4 | * | ||
5 | * This file is based on the drivers/dma/sh/shdma.c | ||
6 | * | ||
7 | * Renesas SuperH DMA Engine support | ||
8 | * | ||
9 | * This is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * - DMA of SuperH does not have Hardware DMA chain mode. | ||
15 | * - max DMA size is 16MB. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #include <linux/dmaengine.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/err.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/platform_data/dma-rcar-hpbdma.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/pm_runtime.h> | ||
28 | #include <linux/shdma-base.h> | ||
29 | #include <linux/slab.h> | ||
30 | |||
31 | /* DMA channel registers */ | ||
32 | #define HPB_DMAE_DSAR0 0x00 | ||
33 | #define HPB_DMAE_DDAR0 0x04 | ||
34 | #define HPB_DMAE_DTCR0 0x08 | ||
35 | #define HPB_DMAE_DSAR1 0x0C | ||
36 | #define HPB_DMAE_DDAR1 0x10 | ||
37 | #define HPB_DMAE_DTCR1 0x14 | ||
38 | #define HPB_DMAE_DSASR 0x18 | ||
39 | #define HPB_DMAE_DDASR 0x1C | ||
40 | #define HPB_DMAE_DTCSR 0x20 | ||
41 | #define HPB_DMAE_DPTR 0x24 | ||
42 | #define HPB_DMAE_DCR 0x28 | ||
43 | #define HPB_DMAE_DCMDR 0x2C | ||
44 | #define HPB_DMAE_DSTPR 0x30 | ||
45 | #define HPB_DMAE_DSTSR 0x34 | ||
46 | #define HPB_DMAE_DDBGR 0x38 | ||
47 | #define HPB_DMAE_DDBGR2 0x3C | ||
48 | #define HPB_DMAE_CHAN(n) (0x40 * (n)) | ||
49 | |||
50 | /* DMA command register (DCMDR) bits */ | ||
51 | #define HPB_DMAE_DCMDR_BDOUT BIT(7) | ||
52 | #define HPB_DMAE_DCMDR_DQSPD BIT(6) | ||
53 | #define HPB_DMAE_DCMDR_DQSPC BIT(5) | ||
54 | #define HPB_DMAE_DCMDR_DMSPD BIT(4) | ||
55 | #define HPB_DMAE_DCMDR_DMSPC BIT(3) | ||
56 | #define HPB_DMAE_DCMDR_DQEND BIT(2) | ||
57 | #define HPB_DMAE_DCMDR_DNXT BIT(1) | ||
58 | #define HPB_DMAE_DCMDR_DMEN BIT(0) | ||
59 | |||
60 | /* DMA forced stop register (DSTPR) bits */ | ||
61 | #define HPB_DMAE_DSTPR_DMSTP BIT(0) | ||
62 | |||
63 | /* DMA status register (DSTSR) bits */ | ||
64 | #define HPB_DMAE_DSTSR_DQSTS BIT(2) | ||
65 | #define HPB_DMAE_DSTSR_DMSTS BIT(0) | ||
66 | |||
67 | /* DMA common registers */ | ||
68 | #define HPB_DMAE_DTIMR 0x00 | ||
69 | #define HPB_DMAE_DINTSR0 0x0C | ||
70 | #define HPB_DMAE_DINTSR1 0x10 | ||
71 | #define HPB_DMAE_DINTCR0 0x14 | ||
72 | #define HPB_DMAE_DINTCR1 0x18 | ||
73 | #define HPB_DMAE_DINTMR0 0x1C | ||
74 | #define HPB_DMAE_DINTMR1 0x20 | ||
75 | #define HPB_DMAE_DACTSR0 0x24 | ||
76 | #define HPB_DMAE_DACTSR1 0x28 | ||
77 | #define HPB_DMAE_HSRSTR(n) (0x40 + (n) * 4) | ||
78 | #define HPB_DMAE_HPB_DMASPR(n) (0x140 + (n) * 4) | ||
79 | #define HPB_DMAE_HPB_DMLVLR0 0x160 | ||
80 | #define HPB_DMAE_HPB_DMLVLR1 0x164 | ||
81 | #define HPB_DMAE_HPB_DMSHPT0 0x168 | ||
82 | #define HPB_DMAE_HPB_DMSHPT1 0x16C | ||
83 | |||
84 | #define HPB_DMA_SLAVE_NUMBER 256 | ||
85 | #define HPB_DMA_TCR_MAX 0x01000000 /* 16 MiB */ | ||
86 | |||
87 | struct hpb_dmae_chan { | ||
88 | struct shdma_chan shdma_chan; | ||
89 | int xfer_mode; /* DMA transfer mode */ | ||
90 | #define XFER_SINGLE 1 | ||
91 | #define XFER_DOUBLE 2 | ||
92 | unsigned plane_idx; /* current DMA information set */ | ||
93 | bool first_desc; /* first/next transfer */ | ||
94 | int xmit_shift; /* log_2(bytes_per_xfer) */ | ||
95 | void __iomem *base; | ||
96 | const struct hpb_dmae_slave_config *cfg; | ||
97 | char dev_id[16]; /* unique name per DMAC of channel */ | ||
98 | dma_addr_t slave_addr; | ||
99 | }; | ||
100 | |||
101 | struct hpb_dmae_device { | ||
102 | struct shdma_dev shdma_dev; | ||
103 | spinlock_t reg_lock; /* comm_reg operation lock */ | ||
104 | struct hpb_dmae_pdata *pdata; | ||
105 | void __iomem *chan_reg; | ||
106 | void __iomem *comm_reg; | ||
107 | void __iomem *reset_reg; | ||
108 | void __iomem *mode_reg; | ||
109 | }; | ||
110 | |||
111 | struct hpb_dmae_regs { | ||
112 | u32 sar; /* SAR / source address */ | ||
113 | u32 dar; /* DAR / destination address */ | ||
114 | u32 tcr; /* TCR / transfer count */ | ||
115 | }; | ||
116 | |||
117 | struct hpb_desc { | ||
118 | struct shdma_desc shdma_desc; | ||
119 | struct hpb_dmae_regs hw; | ||
120 | unsigned plane_idx; | ||
121 | }; | ||
122 | |||
123 | #define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan) | ||
124 | #define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc) | ||
125 | #define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \ | ||
126 | struct hpb_dmae_device, shdma_dev.dma_dev) | ||
127 | |||
128 | static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg) | ||
129 | { | ||
130 | iowrite32(data, hpb_dc->base + reg); | ||
131 | } | ||
132 | |||
133 | static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg) | ||
134 | { | ||
135 | return ioread32(hpb_dc->base + reg); | ||
136 | } | ||
137 | |||
138 | static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data) | ||
139 | { | ||
140 | iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR); | ||
141 | } | ||
142 | |||
143 | static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch) | ||
144 | { | ||
145 | iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch)); | ||
146 | } | ||
147 | |||
148 | static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch) | ||
149 | { | ||
150 | u32 v; | ||
151 | |||
152 | if (ch < 32) | ||
153 | v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch; | ||
154 | else | ||
155 | v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32); | ||
156 | return v & 0x1; | ||
157 | } | ||
158 | |||
159 | static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch) | ||
160 | { | ||
161 | if (ch < 32) | ||
162 | iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0); | ||
163 | else | ||
164 | iowrite32((0x1 << (ch - 32)), | ||
165 | hpbdev->comm_reg + HPB_DMAE_DINTCR1); | ||
166 | } | ||
167 | |||
168 | static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data) | ||
169 | { | ||
170 | iowrite32(data, hpbdev->mode_reg); | ||
171 | } | ||
172 | |||
173 | static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev) | ||
174 | { | ||
175 | return ioread32(hpbdev->mode_reg); | ||
176 | } | ||
177 | |||
178 | static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch) | ||
179 | { | ||
180 | u32 intreg; | ||
181 | |||
182 | spin_lock_irq(&hpbdev->reg_lock); | ||
183 | if (ch < 32) { | ||
184 | intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0); | ||
185 | iowrite32(BIT(ch) | intreg, | ||
186 | hpbdev->comm_reg + HPB_DMAE_DINTMR0); | ||
187 | } else { | ||
188 | intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1); | ||
189 | iowrite32(BIT(ch - 32) | intreg, | ||
190 | hpbdev->comm_reg + HPB_DMAE_DINTMR1); | ||
191 | } | ||
192 | spin_unlock_irq(&hpbdev->reg_lock); | ||
193 | } | ||
194 | |||
195 | static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data) | ||
196 | { | ||
197 | u32 rstr; | ||
198 | int timeout = 10000; /* 100 ms */ | ||
199 | |||
200 | spin_lock(&hpbdev->reg_lock); | ||
201 | rstr = ioread32(hpbdev->reset_reg); | ||
202 | rstr |= data; | ||
203 | iowrite32(rstr, hpbdev->reset_reg); | ||
204 | do { | ||
205 | rstr = ioread32(hpbdev->reset_reg); | ||
206 | if ((rstr & data) == data) | ||
207 | break; | ||
208 | udelay(10); | ||
209 | } while (timeout--); | ||
210 | |||
211 | if (timeout < 0) | ||
212 | dev_err(hpbdev->shdma_dev.dma_dev.dev, | ||
213 | "%s timeout\n", __func__); | ||
214 | |||
215 | rstr &= ~data; | ||
216 | iowrite32(rstr, hpbdev->reset_reg); | ||
217 | spin_unlock(&hpbdev->reg_lock); | ||
218 | } | ||
219 | |||
220 | static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev, | ||
221 | u32 mask, u32 data) | ||
222 | { | ||
223 | u32 mode; | ||
224 | |||
225 | spin_lock_irq(&hpbdev->reg_lock); | ||
226 | mode = asyncmdr_read(hpbdev); | ||
227 | mode &= ~mask; | ||
228 | mode |= data; | ||
229 | asyncmdr_write(hpbdev, mode); | ||
230 | spin_unlock_irq(&hpbdev->reg_lock); | ||
231 | } | ||
232 | |||
233 | static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev) | ||
234 | { | ||
235 | dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD); | ||
236 | } | ||
237 | |||
238 | static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev) | ||
239 | { | ||
240 | u32 ch; | ||
241 | |||
242 | for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++) | ||
243 | hsrstr_write(hpbdev, ch); | ||
244 | } | ||
245 | |||
246 | static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan) | ||
247 | { | ||
248 | struct hpb_dmae_device *hpbdev = to_dev(hpb_chan); | ||
249 | struct hpb_dmae_pdata *pdata = hpbdev->pdata; | ||
250 | int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR); | ||
251 | int i; | ||
252 | |||
253 | switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) { | ||
254 | case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT: | ||
255 | default: | ||
256 | i = XMIT_SZ_8BIT; | ||
257 | break; | ||
258 | case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT: | ||
259 | i = XMIT_SZ_16BIT; | ||
260 | break; | ||
261 | case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT: | ||
262 | i = XMIT_SZ_32BIT; | ||
263 | break; | ||
264 | } | ||
265 | return pdata->ts_shift[i]; | ||
266 | } | ||
267 | |||
268 | static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan, | ||
269 | struct hpb_dmae_regs *hw, unsigned plane) | ||
270 | { | ||
271 | ch_reg_write(hpb_chan, hw->sar, | ||
272 | plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0); | ||
273 | ch_reg_write(hpb_chan, hw->dar, | ||
274 | plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0); | ||
275 | ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift, | ||
276 | plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0); | ||
277 | } | ||
278 | |||
279 | static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next) | ||
280 | { | ||
281 | ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) | | ||
282 | HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR); | ||
283 | } | ||
284 | |||
285 | static void hpb_dmae_halt(struct shdma_chan *schan) | ||
286 | { | ||
287 | struct hpb_dmae_chan *chan = to_chan(schan); | ||
288 | |||
289 | ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR); | ||
290 | ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR); | ||
291 | |||
292 | chan->plane_idx = 0; | ||
293 | chan->first_desc = true; | ||
294 | } | ||
295 | |||
296 | static const struct hpb_dmae_slave_config * | ||
297 | hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id) | ||
298 | { | ||
299 | struct hpb_dmae_device *hpbdev = to_dev(hpb_chan); | ||
300 | struct hpb_dmae_pdata *pdata = hpbdev->pdata; | ||
301 | int i; | ||
302 | |||
303 | if (slave_id >= HPB_DMA_SLAVE_NUMBER) | ||
304 | return NULL; | ||
305 | |||
306 | for (i = 0; i < pdata->num_slaves; i++) | ||
307 | if (pdata->slaves[i].id == slave_id) | ||
308 | return pdata->slaves + i; | ||
309 | |||
310 | return NULL; | ||
311 | } | ||
312 | |||
313 | static void hpb_dmae_start_xfer(struct shdma_chan *schan, | ||
314 | struct shdma_desc *sdesc) | ||
315 | { | ||
316 | struct hpb_dmae_chan *chan = to_chan(schan); | ||
317 | struct hpb_dmae_device *hpbdev = to_dev(chan); | ||
318 | struct hpb_desc *desc = to_desc(sdesc); | ||
319 | |||
320 | if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET) | ||
321 | hpb_dmae_async_reset(hpbdev, chan->cfg->rstr); | ||
322 | |||
323 | desc->plane_idx = chan->plane_idx; | ||
324 | hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx); | ||
325 | hpb_dmae_start(chan, !chan->first_desc); | ||
326 | |||
327 | if (chan->xfer_mode == XFER_DOUBLE) { | ||
328 | chan->plane_idx ^= 1; | ||
329 | chan->first_desc = false; | ||
330 | } | ||
331 | } | ||
332 | |||
333 | static bool hpb_dmae_desc_completed(struct shdma_chan *schan, | ||
334 | struct shdma_desc *sdesc) | ||
335 | { | ||
336 | /* | ||
337 | * This is correct since we always have at most single | ||
338 | * outstanding DMA transfer per channel, and by the time | ||
339 | * we get completion interrupt the transfer is completed. | ||
340 | * This will change if we ever use alternating DMA | ||
341 | * information sets and submit two descriptors at once. | ||
342 | */ | ||
343 | return true; | ||
344 | } | ||
345 | |||
346 | static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq) | ||
347 | { | ||
348 | struct hpb_dmae_chan *chan = to_chan(schan); | ||
349 | struct hpb_dmae_device *hpbdev = to_dev(chan); | ||
350 | int ch = chan->cfg->dma_ch; | ||
351 | |||
352 | /* Check Complete DMA Transfer */ | ||
353 | if (dintsr_read(hpbdev, ch)) { | ||
354 | /* Clear Interrupt status */ | ||
355 | dintcr_write(hpbdev, ch); | ||
356 | return true; | ||
357 | } | ||
358 | return false; | ||
359 | } | ||
360 | |||
361 | static int hpb_dmae_desc_setup(struct shdma_chan *schan, | ||
362 | struct shdma_desc *sdesc, | ||
363 | dma_addr_t src, dma_addr_t dst, size_t *len) | ||
364 | { | ||
365 | struct hpb_desc *desc = to_desc(sdesc); | ||
366 | |||
367 | if (*len > (size_t)HPB_DMA_TCR_MAX) | ||
368 | *len = (size_t)HPB_DMA_TCR_MAX; | ||
369 | |||
370 | desc->hw.sar = src; | ||
371 | desc->hw.dar = dst; | ||
372 | desc->hw.tcr = *len; | ||
373 | |||
374 | return 0; | ||
375 | } | ||
376 | |||
377 | static size_t hpb_dmae_get_partial(struct shdma_chan *schan, | ||
378 | struct shdma_desc *sdesc) | ||
379 | { | ||
380 | struct hpb_desc *desc = to_desc(sdesc); | ||
381 | struct hpb_dmae_chan *chan = to_chan(schan); | ||
382 | u32 tcr = ch_reg_read(chan, desc->plane_idx ? | ||
383 | HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0); | ||
384 | |||
385 | return (desc->hw.tcr - tcr) << chan->xmit_shift; | ||
386 | } | ||
387 | |||
388 | static bool hpb_dmae_channel_busy(struct shdma_chan *schan) | ||
389 | { | ||
390 | struct hpb_dmae_chan *chan = to_chan(schan); | ||
391 | u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR); | ||
392 | |||
393 | if (chan->xfer_mode == XFER_DOUBLE) | ||
394 | return dstsr & HPB_DMAE_DSTSR_DQSTS; | ||
395 | else | ||
396 | return dstsr & HPB_DMAE_DSTSR_DMSTS; | ||
397 | } | ||
398 | |||
399 | static int | ||
400 | hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan, | ||
401 | const struct hpb_dmae_slave_config *cfg) | ||
402 | { | ||
403 | struct hpb_dmae_device *hpbdev = to_dev(hpb_chan); | ||
404 | struct hpb_dmae_pdata *pdata = hpbdev->pdata; | ||
405 | const struct hpb_dmae_channel *channel = pdata->channels; | ||
406 | int slave_id = cfg->id; | ||
407 | int i, err; | ||
408 | |||
409 | for (i = 0; i < pdata->num_channels; i++, channel++) { | ||
410 | if (channel->s_id == slave_id) { | ||
411 | struct device *dev = hpb_chan->shdma_chan.dev; | ||
412 | |||
413 | hpb_chan->base = hpbdev->chan_reg + | ||
414 | HPB_DMAE_CHAN(cfg->dma_ch); | ||
415 | |||
416 | dev_dbg(dev, "Detected Slave device\n"); | ||
417 | dev_dbg(dev, " -- slave_id : 0x%x\n", slave_id); | ||
418 | dev_dbg(dev, " -- cfg->dma_ch : %d\n", cfg->dma_ch); | ||
419 | dev_dbg(dev, " -- channel->ch_irq: %d\n", | ||
420 | channel->ch_irq); | ||
421 | break; | ||
422 | } | ||
423 | } | ||
424 | |||
425 | err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq, | ||
426 | IRQF_SHARED, hpb_chan->dev_id); | ||
427 | if (err) { | ||
428 | dev_err(hpb_chan->shdma_chan.dev, | ||
429 | "DMA channel request_irq %d failed with error %d\n", | ||
430 | channel->ch_irq, err); | ||
431 | return err; | ||
432 | } | ||
433 | |||
434 | hpb_chan->plane_idx = 0; | ||
435 | hpb_chan->first_desc = true; | ||
436 | |||
437 | if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) { | ||
438 | hpb_chan->xfer_mode = XFER_SINGLE; | ||
439 | } else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == | ||
440 | (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) { | ||
441 | hpb_chan->xfer_mode = XFER_DOUBLE; | ||
442 | } else { | ||
443 | dev_err(hpb_chan->shdma_chan.dev, "DCR setting error"); | ||
444 | return -EINVAL; | ||
445 | } | ||
446 | |||
447 | if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE) | ||
448 | hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr); | ||
449 | ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR); | ||
450 | ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR); | ||
451 | hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan); | ||
452 | hpb_dmae_enable_int(hpbdev, cfg->dma_ch); | ||
453 | |||
454 | return 0; | ||
455 | } | ||
456 | |||
457 | static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, | ||
458 | dma_addr_t slave_addr, bool try) | ||
459 | { | ||
460 | struct hpb_dmae_chan *chan = to_chan(schan); | ||
461 | const struct hpb_dmae_slave_config *sc = | ||
462 | hpb_dmae_find_slave(chan, slave_id); | ||
463 | |||
464 | if (!sc) | ||
465 | return -ENODEV; | ||
466 | if (try) | ||
467 | return 0; | ||
468 | chan->cfg = sc; | ||
469 | chan->slave_addr = slave_addr ? : sc->addr; | ||
470 | return hpb_dmae_alloc_chan_resources(chan, sc); | ||
471 | } | ||
472 | |||
473 | static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id) | ||
474 | { | ||
475 | } | ||
476 | |||
477 | static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan) | ||
478 | { | ||
479 | struct hpb_dmae_chan *chan = to_chan(schan); | ||
480 | |||
481 | return chan->slave_addr; | ||
482 | } | ||
483 | |||
484 | static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i) | ||
485 | { | ||
486 | return &((struct hpb_desc *)buf)[i].shdma_desc; | ||
487 | } | ||
488 | |||
489 | static const struct shdma_ops hpb_dmae_ops = { | ||
490 | .desc_completed = hpb_dmae_desc_completed, | ||
491 | .halt_channel = hpb_dmae_halt, | ||
492 | .channel_busy = hpb_dmae_channel_busy, | ||
493 | .slave_addr = hpb_dmae_slave_addr, | ||
494 | .desc_setup = hpb_dmae_desc_setup, | ||
495 | .set_slave = hpb_dmae_set_slave, | ||
496 | .setup_xfer = hpb_dmae_setup_xfer, | ||
497 | .start_xfer = hpb_dmae_start_xfer, | ||
498 | .embedded_desc = hpb_dmae_embedded_desc, | ||
499 | .chan_irq = hpb_dmae_chan_irq, | ||
500 | .get_partial = hpb_dmae_get_partial, | ||
501 | }; | ||
502 | |||
503 | static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id) | ||
504 | { | ||
505 | struct shdma_dev *sdev = &hpbdev->shdma_dev; | ||
506 | struct platform_device *pdev = | ||
507 | to_platform_device(hpbdev->shdma_dev.dma_dev.dev); | ||
508 | struct hpb_dmae_chan *new_hpb_chan; | ||
509 | struct shdma_chan *schan; | ||
510 | |||
511 | /* Alloc channel */ | ||
512 | new_hpb_chan = devm_kzalloc(&pdev->dev, | ||
513 | sizeof(struct hpb_dmae_chan), GFP_KERNEL); | ||
514 | if (!new_hpb_chan) { | ||
515 | dev_err(hpbdev->shdma_dev.dma_dev.dev, | ||
516 | "No free memory for allocating DMA channels!\n"); | ||
517 | return -ENOMEM; | ||
518 | } | ||
519 | |||
520 | schan = &new_hpb_chan->shdma_chan; | ||
521 | schan->max_xfer_len = HPB_DMA_TCR_MAX; | ||
522 | |||
523 | shdma_chan_probe(sdev, schan, id); | ||
524 | |||
525 | if (pdev->id >= 0) | ||
526 | snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id), | ||
527 | "hpb-dmae%d.%d", pdev->id, id); | ||
528 | else | ||
529 | snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id), | ||
530 | "hpb-dma.%d", id); | ||
531 | |||
532 | return 0; | ||
533 | } | ||
534 | |||
535 | static int hpb_dmae_probe(struct platform_device *pdev) | ||
536 | { | ||
537 | const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE | | ||
538 | DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
539 | struct hpb_dmae_pdata *pdata = pdev->dev.platform_data; | ||
540 | struct hpb_dmae_device *hpbdev; | ||
541 | struct dma_device *dma_dev; | ||
542 | struct resource *chan, *comm, *rest, *mode, *irq_res; | ||
543 | int err, i; | ||
544 | |||
545 | /* Get platform data */ | ||
546 | if (!pdata || !pdata->num_channels) | ||
547 | return -ENODEV; | ||
548 | |||
549 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
550 | comm = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
551 | rest = platform_get_resource(pdev, IORESOURCE_MEM, 2); | ||
552 | mode = platform_get_resource(pdev, IORESOURCE_MEM, 3); | ||
553 | |||
554 | irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
555 | if (!irq_res) | ||
556 | return -ENODEV; | ||
557 | |||
558 | hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device), | ||
559 | GFP_KERNEL); | ||
560 | if (!hpbdev) { | ||
561 | dev_err(&pdev->dev, "Not enough memory\n"); | ||
562 | return -ENOMEM; | ||
563 | } | ||
564 | |||
565 | hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan); | ||
566 | if (IS_ERR(hpbdev->chan_reg)) | ||
567 | return PTR_ERR(hpbdev->chan_reg); | ||
568 | |||
569 | hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm); | ||
570 | if (IS_ERR(hpbdev->comm_reg)) | ||
571 | return PTR_ERR(hpbdev->comm_reg); | ||
572 | |||
573 | hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest); | ||
574 | if (IS_ERR(hpbdev->reset_reg)) | ||
575 | return PTR_ERR(hpbdev->reset_reg); | ||
576 | |||
577 | hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode); | ||
578 | if (IS_ERR(hpbdev->mode_reg)) | ||
579 | return PTR_ERR(hpbdev->mode_reg); | ||
580 | |||
581 | dma_dev = &hpbdev->shdma_dev.dma_dev; | ||
582 | |||
583 | spin_lock_init(&hpbdev->reg_lock); | ||
584 | |||
585 | /* Platform data */ | ||
586 | hpbdev->pdata = pdata; | ||
587 | |||
588 | pm_runtime_enable(&pdev->dev); | ||
589 | err = pm_runtime_get_sync(&pdev->dev); | ||
590 | if (err < 0) | ||
591 | dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err); | ||
592 | |||
593 | /* Reset DMA controller */ | ||
594 | hpb_dmae_reset(hpbdev); | ||
595 | |||
596 | pm_runtime_put(&pdev->dev); | ||
597 | |||
598 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | ||
599 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); | ||
600 | dma_dev->src_addr_widths = widths; | ||
601 | dma_dev->dst_addr_widths = widths; | ||
602 | dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); | ||
603 | dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | ||
604 | |||
605 | hpbdev->shdma_dev.ops = &hpb_dmae_ops; | ||
606 | hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc); | ||
607 | err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels); | ||
608 | if (err < 0) | ||
609 | goto error; | ||
610 | |||
611 | /* Create DMA channels */ | ||
612 | for (i = 0; i < pdata->num_channels; i++) | ||
613 | hpb_dmae_chan_probe(hpbdev, i); | ||
614 | |||
615 | platform_set_drvdata(pdev, hpbdev); | ||
616 | err = dma_async_device_register(dma_dev); | ||
617 | if (!err) | ||
618 | return 0; | ||
619 | |||
620 | shdma_cleanup(&hpbdev->shdma_dev); | ||
621 | error: | ||
622 | pm_runtime_disable(&pdev->dev); | ||
623 | return err; | ||
624 | } | ||
625 | |||
626 | static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev) | ||
627 | { | ||
628 | struct shdma_chan *schan; | ||
629 | int i; | ||
630 | |||
631 | shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) { | ||
632 | BUG_ON(!schan); | ||
633 | |||
634 | shdma_chan_remove(schan); | ||
635 | } | ||
636 | } | ||
637 | |||
638 | static int hpb_dmae_remove(struct platform_device *pdev) | ||
639 | { | ||
640 | struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev); | ||
641 | |||
642 | dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev); | ||
643 | |||
644 | pm_runtime_disable(&pdev->dev); | ||
645 | |||
646 | hpb_dmae_chan_remove(hpbdev); | ||
647 | |||
648 | return 0; | ||
649 | } | ||
650 | |||
651 | static void hpb_dmae_shutdown(struct platform_device *pdev) | ||
652 | { | ||
653 | struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev); | ||
654 | hpb_dmae_ctl_stop(hpbdev); | ||
655 | } | ||
656 | |||
657 | static struct platform_driver hpb_dmae_driver = { | ||
658 | .probe = hpb_dmae_probe, | ||
659 | .remove = hpb_dmae_remove, | ||
660 | .shutdown = hpb_dmae_shutdown, | ||
661 | .driver = { | ||
662 | .name = "hpb-dma-engine", | ||
663 | }, | ||
664 | }; | ||
665 | module_platform_driver(hpb_dmae_driver); | ||
666 | |||
667 | MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>"); | ||
668 | MODULE_DESCRIPTION("Renesas HPB DMA Engine driver"); | ||
669 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index f1bcc2a163b3..749f1bd5d65d 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c | |||
@@ -448,7 +448,7 @@ usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
448 | static int usb_dmac_chan_terminate_all(struct dma_chan *chan) | 448 | static int usb_dmac_chan_terminate_all(struct dma_chan *chan) |
449 | { | 449 | { |
450 | struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); | 450 | struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); |
451 | struct usb_dmac_desc *desc; | 451 | struct usb_dmac_desc *desc, *_desc; |
452 | unsigned long flags; | 452 | unsigned long flags; |
453 | LIST_HEAD(head); | 453 | LIST_HEAD(head); |
454 | LIST_HEAD(list); | 454 | LIST_HEAD(list); |
@@ -459,7 +459,7 @@ static int usb_dmac_chan_terminate_all(struct dma_chan *chan) | |||
459 | if (uchan->desc) | 459 | if (uchan->desc) |
460 | uchan->desc = NULL; | 460 | uchan->desc = NULL; |
461 | list_splice_init(&uchan->desc_got, &list); | 461 | list_splice_init(&uchan->desc_got, &list); |
462 | list_for_each_entry(desc, &list, node) | 462 | list_for_each_entry_safe(desc, _desc, &list, node) |
463 | list_move_tail(&desc->node, &uchan->desc_freed); | 463 | list_move_tail(&desc->node, &uchan->desc_freed); |
464 | spin_unlock_irqrestore(&uchan->vc.lock, flags); | 464 | spin_unlock_irqrestore(&uchan->vc.lock, flags); |
465 | vchan_dma_desc_free_list(&uchan->vc, &head); | 465 | vchan_dma_desc_free_list(&uchan->vc, &head); |
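[Editor's note] The change above is the classic iterator-invalidation repair: list_move_tail() re-links the node the cursor is standing on, so a plain list_for_each_entry() would follow the moved node's next pointer into the destination list. A minimal kernel-style sketch of the safe pattern (names hypothetical, not a standalone program):

	LIST_HEAD(src);
	LIST_HEAD(dst);
	struct usb_dmac_desc *desc, *_desc;

	/* _desc caches the next node before desc->node is re-linked */
	list_for_each_entry_safe(desc, _desc, &src, node)
		list_move_tail(&desc->node, &dst);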
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index dd3e7ba273ad..6fb8307468ab 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -3543,8 +3543,8 @@ static int __init d40_probe(struct platform_device *pdev) | |||
3543 | struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); | 3543 | struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); |
3544 | struct device_node *np = pdev->dev.of_node; | 3544 | struct device_node *np = pdev->dev.of_node; |
3545 | int ret = -ENOENT; | 3545 | int ret = -ENOENT; |
3546 | struct d40_base *base = NULL; | 3546 | struct d40_base *base; |
3547 | struct resource *res = NULL; | 3547 | struct resource *res; |
3548 | int num_reserved_chans; | 3548 | int num_reserved_chans; |
3549 | u32 val; | 3549 | u32 val; |
3550 | 3550 | ||
@@ -3552,17 +3552,17 @@ static int __init d40_probe(struct platform_device *pdev) | |||
3552 | if (np) { | 3552 | if (np) { |
3553 | if (d40_of_probe(pdev, np)) { | 3553 | if (d40_of_probe(pdev, np)) { |
3554 | ret = -ENOMEM; | 3554 | ret = -ENOMEM; |
3555 | goto failure; | 3555 | goto report_failure; |
3556 | } | 3556 | } |
3557 | } else { | 3557 | } else { |
3558 | d40_err(&pdev->dev, "No pdata or Device Tree provided\n"); | 3558 | d40_err(&pdev->dev, "No pdata or Device Tree provided\n"); |
3559 | goto failure; | 3559 | goto report_failure; |
3560 | } | 3560 | } |
3561 | } | 3561 | } |
3562 | 3562 | ||
3563 | base = d40_hw_detect_init(pdev); | 3563 | base = d40_hw_detect_init(pdev); |
3564 | if (!base) | 3564 | if (!base) |
3565 | goto failure; | 3565 | goto report_failure; |
3566 | 3566 | ||
3567 | num_reserved_chans = d40_phy_res_init(base); | 3567 | num_reserved_chans = d40_phy_res_init(base); |
3568 | 3568 | ||
@@ -3693,51 +3693,48 @@ static int __init d40_probe(struct platform_device *pdev) | |||
3693 | return 0; | 3693 | return 0; |
3694 | 3694 | ||
3695 | failure: | 3695 | failure: |
3696 | if (base) { | 3696 | kmem_cache_destroy(base->desc_slab); |
3697 | if (base->desc_slab) | 3697 | if (base->virtbase) |
3698 | kmem_cache_destroy(base->desc_slab); | 3698 | iounmap(base->virtbase); |
3699 | if (base->virtbase) | ||
3700 | iounmap(base->virtbase); | ||
3701 | |||
3702 | if (base->lcla_pool.base && base->plat_data->use_esram_lcla) { | ||
3703 | iounmap(base->lcla_pool.base); | ||
3704 | base->lcla_pool.base = NULL; | ||
3705 | } | ||
3706 | 3699 | ||
3707 | if (base->lcla_pool.dma_addr) | 3700 | if (base->lcla_pool.base && base->plat_data->use_esram_lcla) { |
3708 | dma_unmap_single(base->dev, base->lcla_pool.dma_addr, | 3701 | iounmap(base->lcla_pool.base); |
3709 | SZ_1K * base->num_phy_chans, | 3702 | base->lcla_pool.base = NULL; |
3710 | DMA_TO_DEVICE); | 3703 | } |
3711 | |||
3712 | if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) | ||
3713 | free_pages((unsigned long)base->lcla_pool.base, | ||
3714 | base->lcla_pool.pages); | ||
3715 | |||
3716 | kfree(base->lcla_pool.base_unaligned); | ||
3717 | |||
3718 | if (base->phy_lcpa) | ||
3719 | release_mem_region(base->phy_lcpa, | ||
3720 | base->lcpa_size); | ||
3721 | if (base->phy_start) | ||
3722 | release_mem_region(base->phy_start, | ||
3723 | base->phy_size); | ||
3724 | if (base->clk) { | ||
3725 | clk_disable_unprepare(base->clk); | ||
3726 | clk_put(base->clk); | ||
3727 | } | ||
3728 | 3704 | ||
3729 | if (base->lcpa_regulator) { | 3705 | if (base->lcla_pool.dma_addr) |
3730 | regulator_disable(base->lcpa_regulator); | 3706 | dma_unmap_single(base->dev, base->lcla_pool.dma_addr, |
3731 | regulator_put(base->lcpa_regulator); | 3707 | SZ_1K * base->num_phy_chans, |
3732 | } | 3708 | DMA_TO_DEVICE); |
3733 | 3709 | ||
3734 | kfree(base->lcla_pool.alloc_map); | 3710 | if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) |
3735 | kfree(base->lookup_log_chans); | 3711 | free_pages((unsigned long)base->lcla_pool.base, |
3736 | kfree(base->lookup_phy_chans); | 3712 | base->lcla_pool.pages); |
3737 | kfree(base->phy_res); | 3713 | |
3738 | kfree(base); | 3714 | kfree(base->lcla_pool.base_unaligned); |
3715 | |||
3716 | if (base->phy_lcpa) | ||
3717 | release_mem_region(base->phy_lcpa, | ||
3718 | base->lcpa_size); | ||
3719 | if (base->phy_start) | ||
3720 | release_mem_region(base->phy_start, | ||
3721 | base->phy_size); | ||
3722 | if (base->clk) { | ||
3723 | clk_disable_unprepare(base->clk); | ||
3724 | clk_put(base->clk); | ||
3725 | } | ||
3726 | |||
3727 | if (base->lcpa_regulator) { | ||
3728 | regulator_disable(base->lcpa_regulator); | ||
3729 | regulator_put(base->lcpa_regulator); | ||
3739 | } | 3730 | } |
3740 | 3731 | ||
3732 | kfree(base->lcla_pool.alloc_map); | ||
3733 | kfree(base->lookup_log_chans); | ||
3734 | kfree(base->lookup_phy_chans); | ||
3735 | kfree(base->phy_res); | ||
3736 | kfree(base); | ||
3737 | report_failure: | ||
3741 | d40_err(&pdev->dev, "probe failed\n"); | 3738 | d40_err(&pdev->dev, "probe failed\n"); |
3742 | return ret; | 3739 | return ret; |
3743 | } | 3740 | } |
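[Editor's note] Two things make the simplification above safe: kmem_cache_destroy() is a no-op for a NULL pointer (like kfree()), so the inner NULL test could go, and the new report_failure label lets early failures skip the teardown entirely because nothing has been allocated yet. A schematic of the two-label error path, with hypothetical foo names:

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_base *base;
		int ret = -ENOENT;

		base = foo_hw_detect_init(pdev);
		if (!base)
			goto report_failure;	/* nothing to undo yet */

		ret = foo_setup(base);
		if (ret)
			goto failure;		/* undo partial state */

		return 0;

	failure:
		kmem_cache_destroy(base->desc_slab);	/* NULL-safe */
		kfree(base);
	report_failure:
		dev_err(&pdev->dev, "probe failed\n");
		return ret;
	}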
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c new file mode 100644 index 000000000000..047476a1383d --- /dev/null +++ b/drivers/dma/stm32-dma.c | |||
@@ -0,0 +1,1141 @@ | |||
1 | /* | ||
2 | * Driver for STM32 DMA controller | ||
3 | * | ||
4 | * Inspired by dma-jz4740.c and tegra20-apb-dma.c | ||
5 | * | ||
6 | * Copyright (C) M'boumba Cedric Madianga 2015 | ||
7 | * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> | ||
8 | * | ||
9 | * License terms: GNU General Public License (GPL), version 2 | ||
10 | */ | ||
11 | |||
12 | #include <linux/clk.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <linux/dmaengine.h> | ||
15 | #include <linux/dma-mapping.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/jiffies.h> | ||
19 | #include <linux/list.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/of.h> | ||
22 | #include <linux/of_device.h> | ||
23 | #include <linux/of_dma.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/reset.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/slab.h> | ||
28 | |||
29 | #include "virt-dma.h" | ||
30 | |||
31 | #define STM32_DMA_LISR 0x0000 /* DMA Low Int Status Reg */ | ||
32 | #define STM32_DMA_HISR 0x0004 /* DMA High Int Status Reg */ | ||
33 | #define STM32_DMA_LIFCR 0x0008 /* DMA Low Int Flag Clear Reg */ | ||
34 | #define STM32_DMA_HIFCR 0x000c /* DMA High Int Flag Clear Reg */ | ||
35 | #define STM32_DMA_TCI BIT(5) /* Transfer Complete Interrupt */ | ||
36 | #define STM32_DMA_TEI BIT(3) /* Transfer Error Interrupt */ | ||
37 | #define STM32_DMA_DMEI BIT(2) /* Direct Mode Error Interrupt */ | ||
38 | #define STM32_DMA_FEI BIT(0) /* FIFO Error Interrupt */ | ||
39 | |||
40 | /* DMA Stream x Configuration Register */ | ||
41 | #define STM32_DMA_SCR(x) (0x0010 + 0x18 * (x)) /* x = 0..7 */ | ||
42 | #define STM32_DMA_SCR_REQ(n) ((n & 0x7) << 25) | ||
43 | #define STM32_DMA_SCR_MBURST_MASK GENMASK(24, 23) | ||
44 | #define STM32_DMA_SCR_MBURST(n) ((n & 0x3) << 23) | ||
45 | #define STM32_DMA_SCR_PBURST_MASK GENMASK(22, 21) | ||
46 | #define STM32_DMA_SCR_PBURST(n) ((n & 0x3) << 21) | ||
47 | #define STM32_DMA_SCR_PL_MASK GENMASK(17, 16) | ||
48 | #define STM32_DMA_SCR_PL(n) ((n & 0x3) << 16) | ||
49 | #define STM32_DMA_SCR_MSIZE_MASK GENMASK(14, 13) | ||
50 | #define STM32_DMA_SCR_MSIZE(n) ((n & 0x3) << 13) | ||
51 | #define STM32_DMA_SCR_PSIZE_MASK GENMASK(12, 11) | ||
52 | #define STM32_DMA_SCR_PSIZE(n) ((n & 0x3) << 11) | ||
53 | #define STM32_DMA_SCR_PSIZE_GET(n) ((n & STM32_DMA_SCR_PSIZE_MASK) >> 11) | ||
54 | #define STM32_DMA_SCR_DIR_MASK GENMASK(7, 6) | ||
55 | #define STM32_DMA_SCR_DIR(n) ((n & 0x3) << 6) | ||
56 | #define STM32_DMA_SCR_CT BIT(19) /* Target in double buffer */ | ||
57 | #define STM32_DMA_SCR_DBM BIT(18) /* Double Buffer Mode */ | ||
58 | #define STM32_DMA_SCR_PINCOS BIT(15) /* Peripheral inc offset size */ | ||
59 | #define STM32_DMA_SCR_MINC BIT(10) /* Memory increment mode */ | ||
60 | #define STM32_DMA_SCR_PINC BIT(9) /* Peripheral increment mode */ | ||
61 | #define STM32_DMA_SCR_CIRC BIT(8) /* Circular mode */ | ||
62 | #define STM32_DMA_SCR_PFCTRL BIT(5) /* Peripheral Flow Controller */ | ||
63 | #define STM32_DMA_SCR_TCIE BIT(4) /* Transfer Complete Int Enable */ | ||
64 | #define STM32_DMA_SCR_TEIE BIT(2) /* Transfer Error Int Enable */ | ||
65 | #define STM32_DMA_SCR_DMEIE BIT(1) /* Direct Mode Err Int Enable */ | ||
66 | #define STM32_DMA_SCR_EN BIT(0) /* Stream Enable */ | ||
67 | #define STM32_DMA_SCR_CFG_MASK (STM32_DMA_SCR_PINC \ | ||
68 | | STM32_DMA_SCR_MINC \ | ||
69 | | STM32_DMA_SCR_PINCOS \ | ||
70 | | STM32_DMA_SCR_PL_MASK) | ||
71 | #define STM32_DMA_SCR_IRQ_MASK (STM32_DMA_SCR_TCIE \ | ||
72 | | STM32_DMA_SCR_TEIE \ | ||
73 | | STM32_DMA_SCR_DMEIE) | ||
74 | |||
75 | /* DMA Stream x number of data register */ | ||
76 | #define STM32_DMA_SNDTR(x) (0x0014 + 0x18 * (x)) | ||
77 | |||
78 | /* DMA stream x peripheral address register */ | ||
79 | #define STM32_DMA_SPAR(x) (0x0018 + 0x18 * (x)) | ||
80 | |||
81 | /* DMA stream x memory 0 address register */ | ||
82 | #define STM32_DMA_SM0AR(x) (0x001c + 0x18 * (x)) | ||
83 | |||
84 | /* DMA stream x memory 1 address register */ | ||
85 | #define STM32_DMA_SM1AR(x) (0x0020 + 0x18 * (x)) | ||
86 | |||
87 | /* DMA stream x FIFO control register */ | ||
88 | #define STM32_DMA_SFCR(x) (0x0024 + 0x18 * (x)) | ||
89 | #define STM32_DMA_SFCR_FTH_MASK GENMASK(1, 0) | ||
90 | #define STM32_DMA_SFCR_FTH(n) (n & STM32_DMA_SFCR_FTH_MASK) | ||
91 | #define STM32_DMA_SFCR_FEIE BIT(7) /* FIFO error interrupt enable */ | ||
92 | #define STM32_DMA_SFCR_DMDIS BIT(2) /* Direct mode disable */ | ||
93 | #define STM32_DMA_SFCR_MASK (STM32_DMA_SFCR_FEIE \ | ||
94 | | STM32_DMA_SFCR_DMDIS) | ||
95 | |||
96 | /* DMA direction */ | ||
97 | #define STM32_DMA_DEV_TO_MEM 0x00 | ||
98 | #define STM32_DMA_MEM_TO_DEV 0x01 | ||
99 | #define STM32_DMA_MEM_TO_MEM 0x02 | ||
100 | |||
101 | /* DMA priority level */ | ||
102 | #define STM32_DMA_PRIORITY_LOW 0x00 | ||
103 | #define STM32_DMA_PRIORITY_MEDIUM 0x01 | ||
104 | #define STM32_DMA_PRIORITY_HIGH 0x02 | ||
105 | #define STM32_DMA_PRIORITY_VERY_HIGH 0x03 | ||
106 | |||
107 | /* DMA FIFO threshold selection */ | ||
108 | #define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL 0x00 | ||
109 | #define STM32_DMA_FIFO_THRESHOLD_HALFFULL 0x01 | ||
110 | #define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL 0x02 | ||
111 | #define STM32_DMA_FIFO_THRESHOLD_FULL 0x03 | ||
112 | |||
113 | #define STM32_DMA_MAX_DATA_ITEMS 0xffff | ||
114 | #define STM32_DMA_MAX_CHANNELS 0x08 | ||
115 | #define STM32_DMA_MAX_REQUEST_ID 0x08 | ||
116 | #define STM32_DMA_MAX_DATA_PARAM 0x03 | ||
117 | |||
118 | enum stm32_dma_width { | ||
119 | STM32_DMA_BYTE, | ||
120 | STM32_DMA_HALF_WORD, | ||
121 | STM32_DMA_WORD, | ||
122 | }; | ||
123 | |||
124 | enum stm32_dma_burst_size { | ||
125 | STM32_DMA_BURST_SINGLE, | ||
126 | STM32_DMA_BURST_INCR4, | ||
127 | STM32_DMA_BURST_INCR8, | ||
128 | STM32_DMA_BURST_INCR16, | ||
129 | }; | ||
130 | |||
131 | struct stm32_dma_cfg { | ||
132 | u32 channel_id; | ||
133 | u32 request_line; | ||
134 | u32 stream_config; | ||
135 | u32 threshold; | ||
136 | }; | ||
137 | |||
138 | struct stm32_dma_chan_reg { | ||
139 | u32 dma_lisr; | ||
140 | u32 dma_hisr; | ||
141 | u32 dma_lifcr; | ||
142 | u32 dma_hifcr; | ||
143 | u32 dma_scr; | ||
144 | u32 dma_sndtr; | ||
145 | u32 dma_spar; | ||
146 | u32 dma_sm0ar; | ||
147 | u32 dma_sm1ar; | ||
148 | u32 dma_sfcr; | ||
149 | }; | ||
150 | |||
151 | struct stm32_dma_sg_req { | ||
152 | u32 len; | ||
153 | struct stm32_dma_chan_reg chan_reg; | ||
154 | }; | ||
155 | |||
156 | struct stm32_dma_desc { | ||
157 | struct virt_dma_desc vdesc; | ||
158 | bool cyclic; | ||
159 | u32 num_sgs; | ||
160 | struct stm32_dma_sg_req sg_req[]; | ||
161 | }; | ||
162 | |||
163 | struct stm32_dma_chan { | ||
164 | struct virt_dma_chan vchan; | ||
165 | bool config_init; | ||
166 | bool busy; | ||
167 | u32 id; | ||
168 | u32 irq; | ||
169 | struct stm32_dma_desc *desc; | ||
170 | u32 next_sg; | ||
171 | struct dma_slave_config dma_sconfig; | ||
172 | struct stm32_dma_chan_reg chan_reg; | ||
173 | }; | ||
174 | |||
175 | struct stm32_dma_device { | ||
176 | struct dma_device ddev; | ||
177 | void __iomem *base; | ||
178 | struct clk *clk; | ||
179 | struct reset_control *rst; | ||
180 | bool mem2mem; | ||
181 | struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS]; | ||
182 | }; | ||
183 | |||
184 | static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan) | ||
185 | { | ||
186 | return container_of(chan->vchan.chan.device, struct stm32_dma_device, | ||
187 | ddev); | ||
188 | } | ||
189 | |||
190 | static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c) | ||
191 | { | ||
192 | return container_of(c, struct stm32_dma_chan, vchan.chan); | ||
193 | } | ||
194 | |||
195 | static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc) | ||
196 | { | ||
197 | return container_of(vdesc, struct stm32_dma_desc, vdesc); | ||
198 | } | ||
199 | |||
200 | static struct device *chan2dev(struct stm32_dma_chan *chan) | ||
201 | { | ||
202 | return &chan->vchan.chan.dev->device; | ||
203 | } | ||
204 | |||
205 | static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg) | ||
206 | { | ||
207 | return readl_relaxed(dmadev->base + reg); | ||
208 | } | ||
209 | |||
210 | static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val) | ||
211 | { | ||
212 | writel_relaxed(val, dmadev->base + reg); | ||
213 | } | ||
214 | |||
215 | static struct stm32_dma_desc *stm32_dma_alloc_desc(u32 num_sgs) | ||
216 | { | ||
217 | return kzalloc(sizeof(struct stm32_dma_desc) + | ||
218 | sizeof(struct stm32_dma_sg_req) * num_sgs, GFP_NOWAIT); | ||
219 | } | ||
220 | |||
221 | static int stm32_dma_get_width(struct stm32_dma_chan *chan, | ||
222 | enum dma_slave_buswidth width) | ||
223 | { | ||
224 | switch (width) { | ||
225 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
226 | return STM32_DMA_BYTE; | ||
227 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
228 | return STM32_DMA_HALF_WORD; | ||
229 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
230 | return STM32_DMA_WORD; | ||
231 | default: | ||
232 | dev_err(chan2dev(chan), "Dma bus width not supported\n"); | ||
233 | return -EINVAL; | ||
234 | } | ||
235 | } | ||
236 | |||
237 | static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst) | ||
238 | { | ||
239 | switch (maxburst) { | ||
240 | case 0: | ||
241 | case 1: | ||
242 | return STM32_DMA_BURST_SINGLE; | ||
243 | case 4: | ||
244 | return STM32_DMA_BURST_INCR4; | ||
245 | case 8: | ||
246 | return STM32_DMA_BURST_INCR8; | ||
247 | case 16: | ||
248 | return STM32_DMA_BURST_INCR16; | ||
249 | default: | ||
250 | dev_err(chan2dev(chan), "Dma burst size not supported\n"); | ||
251 | return -EINVAL; | ||
252 | } | ||
253 | } | ||
254 | |||
255 | static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan, | ||
256 | u32 src_maxburst, u32 dst_maxburst) | ||
257 | { | ||
258 | chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK; | ||
259 | chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE; | ||
260 | |||
261 | if ((!src_maxburst) && (!dst_maxburst)) { | ||
262 | /* Using direct mode */ | ||
263 | chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE; | ||
264 | } else { | ||
265 | /* Using FIFO mode */ | ||
266 | chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK; | ||
267 | } | ||
268 | } | ||
269 | |||
270 | static int stm32_dma_slave_config(struct dma_chan *c, | ||
271 | struct dma_slave_config *config) | ||
272 | { | ||
273 | struct stm32_dma_chan *chan = to_stm32_dma_chan(c); | ||
274 | |||
275 | memcpy(&chan->dma_sconfig, config, sizeof(*config)); | ||
276 | |||
277 | chan->config_init = true; | ||
278 | |||
279 | return 0; | ||
280 | } | ||
281 | |||
282 | static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan) | ||
283 | { | ||
284 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); | ||
285 | u32 flags, dma_isr; | ||
286 | |||
287 | /* | ||
288 | * Read "flags" from DMA_xISR register corresponding to the selected | ||
289 | * DMA channel at the correct bit offset inside that register. | ||
290 | * | ||
291 | * If (ch % 4) is 2 or 3, the flags sit 16 bits higher in the register. | ||
292 | * If (ch % 4) is 1 or 3, they sit an additional 6 bits higher. | ||
293 | */ | ||
294 | |||
295 | if (chan->id & 4) | ||
296 | dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR); | ||
297 | else | ||
298 | dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR); | ||
299 | |||
300 | flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6)); | ||
301 | |||
302 | return flags; | ||
303 | } | ||
304 | |||
305 | static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags) | ||
306 | { | ||
307 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); | ||
308 | u32 dma_ifcr; | ||
309 | |||
310 | /* | ||
311 | * Write "flags" to the DMA_xIFCR register corresponding to the selected | ||
312 | * DMA channel at the correct bit offset inside that register. | ||
313 | * | ||
314 | * If (ch % 4) is 2 or 3, left shift the mask by 16 bits. | ||
315 | * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits. | ||
316 | */ | ||
317 | dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6)); | ||
318 | |||
319 | if (chan->id & 4) | ||
320 | stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr); | ||
321 | else | ||
322 | stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr); | ||
323 | } | ||
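[Editor's note] Both helpers rely on the same offset formula: each stream owns a 6-bit flag field, streams 0-3 live in the low register and streams 4-7 in the high one, and fields 2/3 of each register start 16 bits up. A tiny standalone C program (an illustration, not driver code) prints the resulting layout:

	#include <stdio.h>

	int main(void)
	{
		for (unsigned int id = 0; id < 8; id++) {
			/* same math as the driver: (id & 2) << 3 gives 0 or 16,
			 * (id & 1) * 6 adds the odd-stream offset of 6 */
			unsigned int shift = ((id & 2) << 3) | ((id & 1) * 6);

			printf("stream %u: %cISR/%cIFCR, bit offset %2u\n",
			       id, id & 4 ? 'H' : 'L',
			       id & 4 ? 'H' : 'L', shift);
		}
		return 0;
	}

which yields offsets 0, 6, 16 and 22 for streams 0-3, and the same again for streams 4-7.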
324 | |||
325 | static int stm32_dma_disable_chan(struct stm32_dma_chan *chan) | ||
326 | { | ||
327 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); | ||
328 | unsigned long timeout = jiffies + msecs_to_jiffies(5000); | ||
329 | u32 dma_scr, id; | ||
330 | |||
331 | id = chan->id; | ||
332 | dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); | ||
333 | |||
334 | if (dma_scr & STM32_DMA_SCR_EN) { | ||
335 | dma_scr &= ~STM32_DMA_SCR_EN; | ||
336 | stm32_dma_write(dmadev, STM32_DMA_SCR(id), dma_scr); | ||
337 | |||
338 | do { | ||
339 | dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); | ||
340 | dma_scr &= STM32_DMA_SCR_EN; | ||
341 | if (!dma_scr) | ||
342 | break; | ||
343 | |||
344 | if (time_after_eq(jiffies, timeout)) { | ||
345 | dev_err(chan2dev(chan), "%s: timeout!\n", | ||
346 | __func__); | ||
347 | return -EBUSY; | ||
348 | } | ||
349 | cond_resched(); | ||
350 | } while (1); | ||
351 | } | ||
352 | |||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | static void stm32_dma_stop(struct stm32_dma_chan *chan) | ||
357 | { | ||
358 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); | ||
359 | u32 dma_scr, dma_sfcr, status; | ||
360 | int ret; | ||
361 | |||
362 | /* Disable interrupts */ | ||
363 | dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)); | ||
364 | dma_scr &= ~STM32_DMA_SCR_IRQ_MASK; | ||
365 | stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr); | ||
366 | dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id)); | ||
367 | dma_sfcr &= ~STM32_DMA_SFCR_FEIE; | ||
368 | stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr); | ||
369 | |||
370 | /* Disable DMA */ | ||
371 | ret = stm32_dma_disable_chan(chan); | ||
372 | if (ret < 0) | ||
373 | return; | ||
374 | |||
375 | /* Clear interrupt status if it is there */ | ||
376 | status = stm32_dma_irq_status(chan); | ||
377 | if (status) { | ||
378 | dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n", | ||
379 | __func__, status); | ||
380 | stm32_dma_irq_clear(chan, status); | ||
381 | } | ||
382 | |||
383 | chan->busy = false; | ||
384 | } | ||
385 | |||
386 | static int stm32_dma_terminate_all(struct dma_chan *c) | ||
387 | { | ||
388 | struct stm32_dma_chan *chan = to_stm32_dma_chan(c); | ||
389 | unsigned long flags; | ||
390 | LIST_HEAD(head); | ||
391 | |||
392 | spin_lock_irqsave(&chan->vchan.lock, flags); | ||
393 | |||
394 | if (chan->busy) { | ||
395 | stm32_dma_stop(chan); | ||
396 | chan->desc = NULL; | ||
397 | } | ||
398 | |||
399 | vchan_get_all_descriptors(&chan->vchan, &head); | ||
400 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | ||
401 | vchan_dma_desc_free_list(&chan->vchan, &head); | ||
402 | |||
403 | return 0; | ||
404 | } | ||
405 | |||
406 | static void stm32_dma_dump_reg(struct stm32_dma_chan *chan) | ||
407 | { | ||
408 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); | ||
409 | u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)); | ||
410 | u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id)); | ||
411 | u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id)); | ||
412 | u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id)); | ||
413 | u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id)); | ||
414 | u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id)); | ||
415 | |||
416 | dev_dbg(chan2dev(chan), "SCR: 0x%08x\n", scr); | ||
417 | dev_dbg(chan2dev(chan), "NDTR: 0x%08x\n", ndtr); | ||
418 | dev_dbg(chan2dev(chan), "SPAR: 0x%08x\n", spar); | ||
419 | dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar); | ||
420 | dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar); | ||
421 | dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr); | ||
422 | } | ||
423 | |||
424 | static int stm32_dma_start_transfer(struct stm32_dma_chan *chan) | ||
425 | { | ||
426 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); | ||
427 | struct virt_dma_desc *vdesc; | ||
428 | struct stm32_dma_sg_req *sg_req; | ||
429 | struct stm32_dma_chan_reg *reg; | ||
430 | u32 status; | ||
431 | int ret; | ||
432 | |||
433 | ret = stm32_dma_disable_chan(chan); | ||
434 | if (ret < 0) | ||
435 | return ret; | ||
436 | |||
437 | if (!chan->desc) { | ||
438 | vdesc = vchan_next_desc(&chan->vchan); | ||
439 | if (!vdesc) | ||
440 | return -EPERM; | ||
441 | |||
442 | chan->desc = to_stm32_dma_desc(vdesc); | ||
443 | chan->next_sg = 0; | ||
444 | } | ||
445 | |||
446 | if (chan->next_sg == chan->desc->num_sgs) | ||
447 | chan->next_sg = 0; | ||
448 | |||
449 | sg_req = &chan->desc->sg_req[chan->next_sg]; | ||
450 | reg = &sg_req->chan_reg; | ||
451 | |||
452 | stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr); | ||
453 | stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar); | ||
454 | stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar); | ||
455 | stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr); | ||
456 | stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar); | ||
457 | stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr); | ||
458 | |||
459 | chan->next_sg++; | ||
460 | |||
461 | /* Clear interrupt status if it is there */ | ||
462 | status = stm32_dma_irq_status(chan); | ||
463 | if (status) | ||
464 | stm32_dma_irq_clear(chan, status); | ||
465 | |||
466 | stm32_dma_dump_reg(chan); | ||
467 | |||
468 | /* Start DMA */ | ||
469 | reg->dma_scr |= STM32_DMA_SCR_EN; | ||
470 | stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr); | ||
471 | |||
472 | chan->busy = true; | ||
473 | |||
474 | return 0; | ||
475 | } | ||
476 | |||
477 | static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan) | ||
478 | { | ||
479 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); | ||
480 | struct stm32_dma_sg_req *sg_req; | ||
481 | u32 dma_scr, dma_sm0ar, dma_sm1ar, id; | ||
482 | |||
483 | id = chan->id; | ||
484 | dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); | ||
485 | |||
486 | if (dma_scr & STM32_DMA_SCR_DBM) { | ||
487 | if (chan->next_sg == chan->desc->num_sgs) | ||
488 | chan->next_sg = 0; | ||
489 | |||
490 | sg_req = &chan->desc->sg_req[chan->next_sg]; | ||
491 | |||
492 | if (dma_scr & STM32_DMA_SCR_CT) { | ||
493 | dma_sm0ar = sg_req->chan_reg.dma_sm0ar; | ||
494 | stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar); | ||
495 | dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n", | ||
496 | stm32_dma_read(dmadev, STM32_DMA_SM0AR(id))); | ||
497 | } else { | ||
498 | dma_sm1ar = sg_req->chan_reg.dma_sm1ar; | ||
499 | stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar); | ||
500 | dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n", | ||
501 | stm32_dma_read(dmadev, STM32_DMA_SM1AR(id))); | ||
502 | } | ||
503 | |||
504 | chan->next_sg++; | ||
505 | } | ||
506 | } | ||
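[Editor's note] In double-buffer mode the hardware flips CT at each transfer-complete event and immediately starts draining the other memory target, so the helper above always reprograms the address register the hardware is not currently using. The core of that decision, reduced to a sketch (write_sm0ar/write_sm1ar are hypothetical shorthands for the stm32_dma_write() calls):

	if (dma_scr & STM32_DMA_SCR_CT)
		write_sm0ar(next_period_addr);	/* HW is draining M1AR */
	else
		write_sm1ar(next_period_addr);	/* HW is draining M0AR */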
507 | |||
508 | static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan) | ||
509 | { | ||
510 | if (chan->desc) { | ||
511 | if (chan->desc->cyclic) { | ||
512 | vchan_cyclic_callback(&chan->desc->vdesc); | ||
513 | stm32_dma_configure_next_sg(chan); | ||
514 | } else { | ||
515 | chan->busy = false; | ||
516 | if (chan->next_sg == chan->desc->num_sgs) { | ||
517 | list_del(&chan->desc->vdesc.node); | ||
518 | vchan_cookie_complete(&chan->desc->vdesc); | ||
519 | chan->desc = NULL; | ||
520 | } | ||
521 | stm32_dma_start_transfer(chan); | ||
522 | } | ||
523 | } | ||
524 | } | ||
525 | |||
526 | static irqreturn_t stm32_dma_chan_irq(int irq, void *devid) | ||
527 | { | ||
528 | struct stm32_dma_chan *chan = devid; | ||
529 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); | ||
530 | u32 status, scr, sfcr; | ||
531 | |||
532 | spin_lock(&chan->vchan.lock); | ||
533 | |||
534 | status = stm32_dma_irq_status(chan); | ||
535 | scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)); | ||
536 | sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id)); | ||
537 | |||
538 | if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) { | ||
539 | stm32_dma_irq_clear(chan, STM32_DMA_TCI); | ||
540 | stm32_dma_handle_chan_done(chan); | ||
541 | |||
542 | } else { | ||
543 | stm32_dma_irq_clear(chan, status); | ||
544 | dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status); | ||
545 | } | ||
546 | |||
547 | spin_unlock(&chan->vchan.lock); | ||
548 | |||
549 | return IRQ_HANDLED; | ||
550 | } | ||
551 | |||
552 | static void stm32_dma_issue_pending(struct dma_chan *c) | ||
553 | { | ||
554 | struct stm32_dma_chan *chan = to_stm32_dma_chan(c); | ||
555 | unsigned long flags; | ||
556 | int ret; | ||
557 | |||
558 | spin_lock_irqsave(&chan->vchan.lock, flags); | ||
559 | if (!chan->busy) { | ||
560 | if (vchan_issue_pending(&chan->vchan) && !chan->desc) { | ||
561 | ret = stm32_dma_start_transfer(chan); | ||
562 | if ((!ret) && (chan->desc->cyclic)) | ||
563 | stm32_dma_configure_next_sg(chan); | ||
564 | } | ||
565 | } | ||
566 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | ||
567 | } | ||
568 | |||
569 | static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, | ||
570 | enum dma_transfer_direction direction, | ||
571 | enum dma_slave_buswidth *buswidth) | ||
572 | { | ||
573 | enum dma_slave_buswidth src_addr_width, dst_addr_width; | ||
574 | int src_bus_width, dst_bus_width; | ||
575 | int src_burst_size, dst_burst_size; | ||
576 | u32 src_maxburst, dst_maxburst; | ||
577 | dma_addr_t src_addr, dst_addr; | ||
578 | u32 dma_scr = 0; | ||
579 | |||
580 | src_addr_width = chan->dma_sconfig.src_addr_width; | ||
581 | dst_addr_width = chan->dma_sconfig.dst_addr_width; | ||
582 | src_maxburst = chan->dma_sconfig.src_maxburst; | ||
583 | dst_maxburst = chan->dma_sconfig.dst_maxburst; | ||
584 | src_addr = chan->dma_sconfig.src_addr; | ||
585 | dst_addr = chan->dma_sconfig.dst_addr; | ||
586 | |||
587 | switch (direction) { | ||
588 | case DMA_MEM_TO_DEV: | ||
589 | dst_bus_width = stm32_dma_get_width(chan, dst_addr_width); | ||
590 | if (dst_bus_width < 0) | ||
591 | return dst_bus_width; | ||
592 | |||
593 | dst_burst_size = stm32_dma_get_burst(chan, dst_maxburst); | ||
594 | if (dst_burst_size < 0) | ||
595 | return dst_burst_size; | ||
596 | |||
597 | if (!src_addr_width) | ||
598 | src_addr_width = dst_addr_width; | ||
599 | |||
600 | src_bus_width = stm32_dma_get_width(chan, src_addr_width); | ||
601 | if (src_bus_width < 0) | ||
602 | return src_bus_width; | ||
603 | |||
604 | src_burst_size = stm32_dma_get_burst(chan, src_maxburst); | ||
605 | if (src_burst_size < 0) | ||
606 | return src_burst_size; | ||
607 | |||
608 | dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) | | ||
609 | STM32_DMA_SCR_PSIZE(dst_bus_width) | | ||
610 | STM32_DMA_SCR_MSIZE(src_bus_width) | | ||
611 | STM32_DMA_SCR_PBURST(dst_burst_size) | | ||
612 | STM32_DMA_SCR_MBURST(src_burst_size); | ||
613 | |||
614 | chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr; | ||
615 | *buswidth = dst_addr_width; | ||
616 | break; | ||
617 | |||
618 | case DMA_DEV_TO_MEM: | ||
619 | src_bus_width = stm32_dma_get_width(chan, src_addr_width); | ||
620 | if (src_bus_width < 0) | ||
621 | return src_bus_width; | ||
622 | |||
623 | src_burst_size = stm32_dma_get_burst(chan, src_maxburst); | ||
624 | if (src_burst_size < 0) | ||
625 | return src_burst_size; | ||
626 | |||
627 | if (!dst_addr_width) | ||
628 | dst_addr_width = src_addr_width; | ||
629 | |||
630 | dst_bus_width = stm32_dma_get_width(chan, dst_addr_width); | ||
631 | if (dst_bus_width < 0) | ||
632 | return dst_bus_width; | ||
633 | |||
634 | dst_burst_size = stm32_dma_get_burst(chan, dst_maxburst); | ||
635 | if (dst_burst_size < 0) | ||
636 | return dst_burst_size; | ||
637 | |||
638 | dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) | | ||
639 | STM32_DMA_SCR_PSIZE(src_bus_width) | | ||
640 | STM32_DMA_SCR_MSIZE(dst_bus_width) | | ||
641 | STM32_DMA_SCR_PBURST(src_burst_size) | | ||
642 | STM32_DMA_SCR_MBURST(dst_burst_size); | ||
643 | |||
644 | chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr; | ||
645 | *buswidth = chan->dma_sconfig.src_addr_width; | ||
646 | break; | ||
647 | |||
648 | default: | ||
649 | dev_err(chan2dev(chan), "Dma direction is not supported\n"); | ||
650 | return -EINVAL; | ||
651 | } | ||
652 | |||
653 | stm32_dma_set_fifo_config(chan, src_maxburst, dst_maxburst); | ||
654 | |||
655 | chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK | | ||
656 | STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK | | ||
657 | STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK); | ||
658 | chan->chan_reg.dma_scr |= dma_scr; | ||
659 | |||
660 | return 0; | ||
661 | } | ||
662 | |||
663 | static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs) | ||
664 | { | ||
665 | memset(regs, 0, sizeof(struct stm32_dma_chan_reg)); | ||
666 | } | ||
667 | |||
668 | static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg( | ||
669 | struct dma_chan *c, struct scatterlist *sgl, | ||
670 | u32 sg_len, enum dma_transfer_direction direction, | ||
671 | unsigned long flags, void *context) | ||
672 | { | ||
673 | struct stm32_dma_chan *chan = to_stm32_dma_chan(c); | ||
674 | struct stm32_dma_desc *desc; | ||
675 | struct scatterlist *sg; | ||
676 | enum dma_slave_buswidth buswidth; | ||
677 | u32 nb_data_items; | ||
678 | int i, ret; | ||
679 | |||
680 | if (!chan->config_init) { | ||
681 | dev_err(chan2dev(chan), "dma channel is not configured\n"); | ||
682 | return NULL; | ||
683 | } | ||
684 | |||
685 | if (sg_len < 1) { | ||
686 | dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len); | ||
687 | return NULL; | ||
688 | } | ||
689 | |||
690 | desc = stm32_dma_alloc_desc(sg_len); | ||
691 | if (!desc) | ||
692 | return NULL; | ||
693 | |||
694 | ret = stm32_dma_set_xfer_param(chan, direction, &buswidth); | ||
695 | if (ret < 0) | ||
696 | goto err; | ||
697 | |||
698 | /* Set peripheral flow controller */ | ||
699 | if (chan->dma_sconfig.device_fc) | ||
700 | chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL; | ||
701 | else | ||
702 | chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL; | ||
703 | |||
704 | for_each_sg(sgl, sg, sg_len, i) { | ||
705 | desc->sg_req[i].len = sg_dma_len(sg); | ||
706 | |||
707 | nb_data_items = desc->sg_req[i].len / buswidth; | ||
708 | if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS) { | ||
709 | dev_err(chan2dev(chan), "nb items not supported\n"); | ||
710 | goto err; | ||
711 | } | ||
712 | |||
713 | stm32_dma_clear_reg(&desc->sg_req[i].chan_reg); | ||
714 | desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr; | ||
715 | desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr; | ||
716 | desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar; | ||
717 | desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg); | ||
718 | desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg); | ||
719 | desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items; | ||
720 | } | ||
721 | |||
722 | desc->num_sgs = sg_len; | ||
723 | desc->cyclic = false; | ||
724 | |||
725 | return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); | ||
726 | |||
727 | err: | ||
728 | kfree(desc); | ||
729 | return NULL; | ||
730 | } | ||
731 | |||
732 | static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic( | ||
733 | struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len, | ||
734 | size_t period_len, enum dma_transfer_direction direction, | ||
735 | unsigned long flags) | ||
736 | { | ||
737 | struct stm32_dma_chan *chan = to_stm32_dma_chan(c); | ||
738 | struct stm32_dma_desc *desc; | ||
739 | enum dma_slave_buswidth buswidth; | ||
740 | u32 num_periods, nb_data_items; | ||
741 | int i, ret; | ||
742 | |||
743 | if (!buf_len || !period_len) { | ||
744 | dev_err(chan2dev(chan), "Invalid buffer/period len\n"); | ||
745 | return NULL; | ||
746 | } | ||
747 | |||
748 | if (!chan->config_init) { | ||
749 | dev_err(chan2dev(chan), "dma channel is not configured\n"); | ||
750 | return NULL; | ||
751 | } | ||
752 | |||
753 | if (buf_len % period_len) { | ||
754 | dev_err(chan2dev(chan), "buf_len not multiple of period_len\n"); | ||
755 | return NULL; | ||
756 | } | ||
757 | |||
758 | /* | ||
759 | * Multiple requests may be queued as long as the DMA has not | ||
760 | * been started; the driver will then loop over all of them. | ||
761 | * Once the DMA is started, new requests can be queued only | ||
762 | * after terminating the DMA. | ||
763 | */ | ||
764 | if (chan->busy) { | ||
765 | dev_err(chan2dev(chan), "Request not allowed when dma busy\n"); | ||
766 | return NULL; | ||
767 | } | ||
768 | |||
769 | ret = stm32_dma_set_xfer_param(chan, direction, &buswidth); | ||
770 | if (ret < 0) | ||
771 | return NULL; | ||
772 | |||
773 | nb_data_items = period_len / buswidth; | ||
774 | if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS) { | ||
775 | dev_err(chan2dev(chan), "number of items not supported\n"); | ||
776 | return NULL; | ||
777 | } | ||
778 | |||
779 | /* Enable Circular mode or double buffer mode */ | ||
780 | if (buf_len == period_len) | ||
781 | chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC; | ||
782 | else | ||
783 | chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM; | ||
784 | |||
785 | /* Clear periph ctrl if client set it */ | ||
786 | chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL; | ||
787 | |||
788 | num_periods = buf_len / period_len; | ||
789 | |||
790 | desc = stm32_dma_alloc_desc(num_periods); | ||
791 | if (!desc) | ||
792 | return NULL; | ||
793 | |||
794 | for (i = 0; i < num_periods; i++) { | ||
795 | desc->sg_req[i].len = period_len; | ||
796 | |||
797 | stm32_dma_clear_reg(&desc->sg_req[i].chan_reg); | ||
798 | desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr; | ||
799 | desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr; | ||
800 | desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar; | ||
801 | desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr; | ||
802 | desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr; | ||
803 | desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items; | ||
804 | buf_addr += period_len; | ||
805 | } | ||
806 | |||
807 | desc->num_sgs = num_periods; | ||
808 | desc->cyclic = true; | ||
809 | |||
810 | return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); | ||
811 | } | ||
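[Editor's note] From a client's point of view the cyclic path is driven through the standard dmaengine calls; a hedged usage sketch (fifo_phys, buf_phys and period_done are hypothetical):

	struct dma_slave_config cfg = {
		.dst_addr	= fifo_phys,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *txd;

	dmaengine_slave_config(chan, &cfg);
	/* buf_len must be a multiple of period_len, as checked above */
	txd = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (txd) {
		txd->callback = period_done;	/* runs once per period */
		dmaengine_submit(txd);
		dma_async_issue_pending(chan);
	}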
812 | |||
813 | static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy( | ||
814 | struct dma_chan *c, dma_addr_t dest, | ||
815 | dma_addr_t src, size_t len, unsigned long flags) | ||
816 | { | ||
817 | struct stm32_dma_chan *chan = to_stm32_dma_chan(c); | ||
818 | u32 num_sgs; | ||
819 | struct stm32_dma_desc *desc; | ||
820 | size_t xfer_count, offset; | ||
821 | int i; | ||
822 | |||
823 | num_sgs = DIV_ROUND_UP(len, STM32_DMA_MAX_DATA_ITEMS); | ||
824 | desc = stm32_dma_alloc_desc(num_sgs); | ||
825 | if (!desc) | ||
826 | return NULL; | ||
827 | |||
828 | for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) { | ||
829 | xfer_count = min_t(size_t, len - offset, | ||
830 | STM32_DMA_MAX_DATA_ITEMS); | ||
831 | |||
832 | desc->sg_req[i].len = xfer_count; | ||
833 | |||
834 | stm32_dma_clear_reg(&desc->sg_req[i].chan_reg); | ||
835 | desc->sg_req[i].chan_reg.dma_scr = | ||
836 | STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) | | ||
837 | STM32_DMA_SCR_MINC | | ||
838 | STM32_DMA_SCR_PINC | | ||
839 | STM32_DMA_SCR_TCIE | | ||
840 | STM32_DMA_SCR_TEIE; | ||
841 | desc->sg_req[i].chan_reg.dma_sfcr = STM32_DMA_SFCR_DMDIS | | ||
842 | STM32_DMA_SFCR_FTH(STM32_DMA_FIFO_THRESHOLD_FULL) | | ||
843 | STM32_DMA_SFCR_FEIE; | ||
844 | desc->sg_req[i].chan_reg.dma_spar = src + offset; | ||
845 | desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset; | ||
846 | desc->sg_req[i].chan_reg.dma_sndtr = xfer_count; | ||
847 | } | ||
848 | |||
849 | desc->num_sgs = num_sgs; | ||
850 | desc->cyclic = false; | ||
851 | |||
852 | return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); | ||
853 | } | ||
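[Editor's note] Since SNDTR is a 16-bit item counter (and memcpy uses byte-wide transfers, PSIZE/MSIZE left at 0), a long copy is split into back-to-back segments of at most STM32_DMA_MAX_DATA_ITEMS bytes. Worked by hand for an assumed 150000-byte copy:

	num_sgs  = DIV_ROUND_UP(150000, 0xffff) = 3
	segments = 65535 + 65535 + 18930 = 150000 bytes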
854 | |||
855 | static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, | ||
856 | struct stm32_dma_desc *desc, | ||
857 | u32 next_sg) | ||
858 | { | ||
859 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); | ||
860 | u32 dma_scr, width, residue, count; | ||
861 | int i; | ||
862 | |||
863 | residue = 0; | ||
864 | |||
865 | for (i = next_sg; i < desc->num_sgs; i++) | ||
866 | residue += desc->sg_req[i].len; | ||
867 | |||
868 | if (next_sg != 0) { | ||
869 | dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)); | ||
870 | width = STM32_DMA_SCR_PSIZE_GET(dma_scr); | ||
871 | count = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id)); | ||
872 | |||
873 | residue += count << width; | ||
874 | } | ||
875 | |||
876 | return residue; | ||
877 | } | ||
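[Editor's note] The residue is the sum of the untouched segments plus whatever is left of the one in flight, where SNDTR counts items and PSIZE scales items back to bytes. A hypothetical snapshot to make the arithmetic concrete: 4 segments of 4096 bytes, next_sg = 2, PSIZE = 1 (half-word), NDTR = 512:

	residue = (4096 + 4096)	/* sg_req[2] and sg_req[3] */
		+ (512 << 1)	/* 512 half-words still in flight */
		= 9216 bytes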
878 | |||
879 | static enum dma_status stm32_dma_tx_status(struct dma_chan *c, | ||
880 | dma_cookie_t cookie, | ||
881 | struct dma_tx_state *state) | ||
882 | { | ||
883 | struct stm32_dma_chan *chan = to_stm32_dma_chan(c); | ||
884 | struct virt_dma_desc *vdesc; | ||
885 | enum dma_status status; | ||
886 | unsigned long flags; | ||
887 | u32 residue; | ||
888 | |||
889 | status = dma_cookie_status(c, cookie, state); | ||
890 | if ((status == DMA_COMPLETE) || (!state)) | ||
891 | return status; | ||
892 | |||
893 | spin_lock_irqsave(&chan->vchan.lock, flags); | ||
894 | vdesc = vchan_find_desc(&chan->vchan, cookie); | ||
895 | if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) { | ||
896 | residue = stm32_dma_desc_residue(chan, chan->desc, | ||
897 | chan->next_sg); | ||
898 | } else if (vdesc) { | ||
899 | residue = stm32_dma_desc_residue(chan, | ||
900 | to_stm32_dma_desc(vdesc), 0); | ||
901 | } else { | ||
902 | residue = 0; | ||
903 | } | ||
904 | |||
905 | dma_set_residue(state, residue); | ||
906 | |||
907 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | ||
908 | |||
909 | return status; | ||
910 | } | ||
911 | |||
912 | static int stm32_dma_alloc_chan_resources(struct dma_chan *c) | ||
913 | { | ||
914 | struct stm32_dma_chan *chan = to_stm32_dma_chan(c); | ||
915 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); | ||
916 | int ret; | ||
917 | |||
918 | chan->config_init = false; | ||
919 | ret = clk_prepare_enable(dmadev->clk); | ||
920 | if (ret < 0) { | ||
921 | dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret); | ||
922 | return ret; | ||
923 | } | ||
924 | |||
925 | ret = stm32_dma_disable_chan(chan); | ||
926 | if (ret < 0) | ||
927 | clk_disable_unprepare(dmadev->clk); | ||
928 | |||
929 | return ret; | ||
930 | } | ||
931 | |||
932 | static void stm32_dma_free_chan_resources(struct dma_chan *c) | ||
933 | { | ||
934 | struct stm32_dma_chan *chan = to_stm32_dma_chan(c); | ||
935 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); | ||
936 | unsigned long flags; | ||
937 | |||
938 | dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id); | ||
939 | |||
940 | if (chan->busy) { | ||
941 | spin_lock_irqsave(&chan->vchan.lock, flags); | ||
942 | stm32_dma_stop(chan); | ||
943 | chan->desc = NULL; | ||
944 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | ||
945 | } | ||
946 | |||
947 | clk_disable_unprepare(dmadev->clk); | ||
948 | |||
949 | vchan_free_chan_resources(to_virt_chan(c)); | ||
950 | } | ||
951 | |||
952 | static void stm32_dma_desc_free(struct virt_dma_desc *vdesc) | ||
953 | { | ||
954 | kfree(container_of(vdesc, struct stm32_dma_desc, vdesc)); | ||
955 | } | ||
956 | |||
957 | void stm32_dma_set_config(struct stm32_dma_chan *chan, | ||
958 | struct stm32_dma_cfg *cfg) | ||
959 | { | ||
960 | stm32_dma_clear_reg(&chan->chan_reg); | ||
961 | |||
962 | chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK; | ||
963 | chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line); | ||
964 | |||
965 | /* Enable Interrupts */ | ||
966 | chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE; | ||
967 | |||
968 | chan->chan_reg.dma_sfcr = cfg->threshold & STM32_DMA_SFCR_FTH_MASK; | ||
969 | } | ||
970 | |||
971 | static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec, | ||
972 | struct of_dma *ofdma) | ||
973 | { | ||
974 | struct stm32_dma_device *dmadev = ofdma->of_dma_data; | ||
975 | struct stm32_dma_cfg cfg; | ||
976 | struct stm32_dma_chan *chan; | ||
977 | struct dma_chan *c; | ||
978 | |||
979 | if (dma_spec->args_count < 3) | ||
980 | return NULL; | ||
981 | |||
982 | cfg.channel_id = dma_spec->args[0]; | ||
983 | cfg.request_line = dma_spec->args[1]; | ||
984 | cfg.stream_config = dma_spec->args[2]; | ||
985 | cfg.threshold = 0; | ||
986 | |||
987 | if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >= | ||
988 | STM32_DMA_MAX_REQUEST_ID)) | ||
989 | return NULL; | ||
990 | |||
991 | if (dma_spec->args_count > 3) | ||
992 | cfg.threshold = dma_spec->args[3]; | ||
993 | |||
994 | chan = &dmadev->chan[cfg.channel_id]; | ||
995 | |||
996 | c = dma_get_slave_channel(&chan->vchan.chan); | ||
997 | if (c) | ||
998 | stm32_dma_set_config(chan, &cfg); | ||
999 | |||
1000 | return c; | ||
1001 | } | ||
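[Editor's note] The translator consumes at least three cells per request - channel id, request line and the SCR configuration bits - with an optional fourth cell for the FIFO threshold. A client then obtains the channel through the generic helper (dev and the "tx" name are assumptions about the consumer node):

	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "tx");
	if (!chan)
		return -ENODEV;	/* no matching dmas/dma-names entry */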
1002 | |||
1003 | static const struct of_device_id stm32_dma_of_match[] = { | ||
1004 | { .compatible = "st,stm32-dma", }, | ||
1005 | { /* sentinel */ }, | ||
1006 | }; | ||
1007 | MODULE_DEVICE_TABLE(of, stm32_dma_of_match); | ||
1008 | |||
1009 | static int stm32_dma_probe(struct platform_device *pdev) | ||
1010 | { | ||
1011 | struct stm32_dma_chan *chan; | ||
1012 | struct stm32_dma_device *dmadev; | ||
1013 | struct dma_device *dd; | ||
1014 | const struct of_device_id *match; | ||
1015 | struct resource *res; | ||
1016 | int i, ret; | ||
1017 | |||
1018 | match = of_match_device(stm32_dma_of_match, &pdev->dev); | ||
1019 | if (!match) { | ||
1020 | dev_err(&pdev->dev, "Error: No device match found\n"); | ||
1021 | return -ENODEV; | ||
1022 | } | ||
1023 | |||
1024 | dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL); | ||
1025 | if (!dmadev) | ||
1026 | return -ENOMEM; | ||
1027 | |||
1028 | dd = &dmadev->ddev; | ||
1029 | |||
1030 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1031 | dmadev->base = devm_ioremap_resource(&pdev->dev, res); | ||
1032 | if (IS_ERR(dmadev->base)) | ||
1033 | return PTR_ERR(dmadev->base); | ||
1034 | |||
1035 | dmadev->clk = devm_clk_get(&pdev->dev, NULL); | ||
1036 | if (IS_ERR(dmadev->clk)) { | ||
1037 | dev_err(&pdev->dev, "Error: Missing controller clock\n"); | ||
1038 | return PTR_ERR(dmadev->clk); | ||
1039 | } | ||
1040 | |||
1041 | dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node, | ||
1042 | "st,mem2mem"); | ||
1043 | |||
1044 | dmadev->rst = devm_reset_control_get(&pdev->dev, NULL); | ||
1045 | if (!IS_ERR(dmadev->rst)) { | ||
1046 | reset_control_assert(dmadev->rst); | ||
1047 | udelay(2); | ||
1048 | reset_control_deassert(dmadev->rst); | ||
1049 | } | ||
1050 | |||
1051 | dma_cap_set(DMA_SLAVE, dd->cap_mask); | ||
1052 | dma_cap_set(DMA_PRIVATE, dd->cap_mask); | ||
1053 | dma_cap_set(DMA_CYCLIC, dd->cap_mask); | ||
1054 | dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources; | ||
1055 | dd->device_free_chan_resources = stm32_dma_free_chan_resources; | ||
1056 | dd->device_tx_status = stm32_dma_tx_status; | ||
1057 | dd->device_issue_pending = stm32_dma_issue_pending; | ||
1058 | dd->device_prep_slave_sg = stm32_dma_prep_slave_sg; | ||
1059 | dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic; | ||
1060 | dd->device_config = stm32_dma_slave_config; | ||
1061 | dd->device_terminate_all = stm32_dma_terminate_all; | ||
1062 | dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1063 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1064 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
1065 | dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1066 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1067 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
1068 | dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
1069 | dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
1070 | dd->dev = &pdev->dev; | ||
1071 | INIT_LIST_HEAD(&dd->channels); | ||
1072 | |||
1073 | if (dmadev->mem2mem) { | ||
1074 | dma_cap_set(DMA_MEMCPY, dd->cap_mask); | ||
1075 | dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy; | ||
1076 | dd->directions |= BIT(DMA_MEM_TO_MEM); | ||
1077 | } | ||
1078 | |||
1079 | for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) { | ||
1080 | chan = &dmadev->chan[i]; | ||
1081 | chan->id = i; | ||
1082 | chan->vchan.desc_free = stm32_dma_desc_free; | ||
1083 | vchan_init(&chan->vchan, dd); | ||
1084 | } | ||
1085 | |||
1086 | ret = dma_async_device_register(dd); | ||
1087 | if (ret) | ||
1088 | return ret; | ||
1089 | |||
1090 | for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) { | ||
1091 | chan = &dmadev->chan[i]; | ||
1092 | res = platform_get_resource(pdev, IORESOURCE_IRQ, i); | ||
1093 | if (!res) { | ||
1094 | ret = -EINVAL; | ||
1095 | dev_err(&pdev->dev, "No irq resource for chan %d\n", i); | ||
1096 | goto err_unregister; | ||
1097 | } | ||
1098 | chan->irq = res->start; | ||
1099 | ret = devm_request_irq(&pdev->dev, chan->irq, | ||
1100 | stm32_dma_chan_irq, 0, | ||
1101 | dev_name(chan2dev(chan)), chan); | ||
1102 | if (ret) { | ||
1103 | dev_err(&pdev->dev, | ||
1104 | "request_irq failed with err %d channel %d\n", | ||
1105 | ret, i); | ||
1106 | goto err_unregister; | ||
1107 | } | ||
1108 | } | ||
1109 | |||
1110 | ret = of_dma_controller_register(pdev->dev.of_node, | ||
1111 | stm32_dma_of_xlate, dmadev); | ||
1112 | if (ret < 0) { | ||
1113 | dev_err(&pdev->dev, | ||
1114 | "STM32 DMA DMA OF registration failed %d\n", ret); | ||
1115 | goto err_unregister; | ||
1116 | } | ||
1117 | |||
1118 | platform_set_drvdata(pdev, dmadev); | ||
1119 | |||
1120 | dev_info(&pdev->dev, "STM32 DMA driver registered\n"); | ||
1121 | |||
1122 | return 0; | ||
1123 | |||
1124 | err_unregister: | ||
1125 | dma_async_device_unregister(dd); | ||
1126 | |||
1127 | return ret; | ||
1128 | } | ||
1129 | |||
1130 | static struct platform_driver stm32_dma_driver = { | ||
1131 | .driver = { | ||
1132 | .name = "stm32-dma", | ||
1133 | .of_match_table = stm32_dma_of_match, | ||
1134 | }, | ||
1135 | }; | ||
1136 | |||
1137 | static int __init stm32_dma_init(void) | ||
1138 | { | ||
1139 | return platform_driver_probe(&stm32_dma_driver, stm32_dma_probe); | ||
1140 | } | ||
1141 | subsys_initcall(stm32_dma_init); | ||
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index c8f79dcaaee8..935da8192f59 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
@@ -296,7 +296,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get( | |||
296 | spin_unlock_irqrestore(&tdc->lock, flags); | 296 | spin_unlock_irqrestore(&tdc->lock, flags); |
297 | 297 | ||
298 | /* Allocate DMA desc */ | 298 | /* Allocate DMA desc */ |
299 | dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC); | 299 | dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT); |
300 | if (!dma_desc) { | 300 | if (!dma_desc) { |
301 | dev_err(tdc2dev(tdc), "dma_desc alloc failed\n"); | 301 | dev_err(tdc2dev(tdc), "dma_desc alloc failed\n"); |
302 | return NULL; | 302 | return NULL; |
@@ -336,7 +336,7 @@ static struct tegra_dma_sg_req *tegra_dma_sg_req_get( | |||
336 | } | 336 | } |
337 | spin_unlock_irqrestore(&tdc->lock, flags); | 337 | spin_unlock_irqrestore(&tdc->lock, flags); |
338 | 338 | ||
339 | sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC); | 339 | sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_NOWAIT); |
340 | if (!sg_req) | 340 | if (!sg_req) |
341 | dev_err(tdc2dev(tdc), "sg_req alloc failed\n"); | 341 | dev_err(tdc2dev(tdc), "sg_req alloc failed\n"); |
342 | return sg_req; | 342 | return sg_req; |
@@ -1186,10 +1186,12 @@ static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) | |||
1186 | 1186 | ||
1187 | dma_cookie_init(&tdc->dma_chan); | 1187 | dma_cookie_init(&tdc->dma_chan); |
1188 | tdc->config_init = false; | 1188 | tdc->config_init = false; |
1189 | ret = clk_prepare_enable(tdma->dma_clk); | 1189 | |
1190 | ret = pm_runtime_get_sync(tdma->dev); | ||
1190 | if (ret < 0) | 1191 | if (ret < 0) |
1191 | dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret); | 1192 | return ret; |
1192 | return ret; | 1193 | |
1194 | return 0; | ||
1193 | } | 1195 | } |
1194 | 1196 | ||
1195 | static void tegra_dma_free_chan_resources(struct dma_chan *dc) | 1197 | static void tegra_dma_free_chan_resources(struct dma_chan *dc) |
@@ -1232,7 +1234,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) | |||
1232 | list_del(&sg_req->node); | 1234 | list_del(&sg_req->node); |
1233 | kfree(sg_req); | 1235 | kfree(sg_req); |
1234 | } | 1236 | } |
1235 | clk_disable_unprepare(tdma->dma_clk); | 1237 | pm_runtime_put(tdma->dev); |
1236 | 1238 | ||
1237 | tdc->slave_id = 0; | 1239 | tdc->slave_id = 0; |
1238 | } | 1240 | } |
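[Editor's note] The hunks above and below replace open-coded clk_prepare_enable()/clk_disable_unprepare() pairs with runtime-PM references; the clock handling collapses into the runtime callbacks, so each call site just takes and drops a reference. A condensed sketch of the pattern, with hypothetical foo names:

	static int foo_runtime_resume(struct device *dev)
	{
		struct foo *f = dev_get_drvdata(dev);

		/* the clock is managed here, not at each call site */
		return clk_prepare_enable(f->clk);
	}

	static int foo_touch_hw(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);	/* resumes if needed */

		if (ret < 0)
			return ret;
		/* ... access registers ... */
		pm_runtime_put(dev);			/* may suspend again */
		return 0;
	}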
@@ -1356,20 +1358,14 @@ static int tegra_dma_probe(struct platform_device *pdev) | |||
1356 | spin_lock_init(&tdma->global_lock); | 1358 | spin_lock_init(&tdma->global_lock); |
1357 | 1359 | ||
1358 | pm_runtime_enable(&pdev->dev); | 1360 | pm_runtime_enable(&pdev->dev); |
1359 | if (!pm_runtime_enabled(&pdev->dev)) { | 1361 | if (!pm_runtime_enabled(&pdev->dev)) |
1360 | ret = tegra_dma_runtime_resume(&pdev->dev); | 1362 | ret = tegra_dma_runtime_resume(&pdev->dev); |
1361 | if (ret) { | 1363 | else |
1362 | dev_err(&pdev->dev, "dma_runtime_resume failed %d\n", | 1364 | ret = pm_runtime_get_sync(&pdev->dev); |
1363 | ret); | ||
1364 | goto err_pm_disable; | ||
1365 | } | ||
1366 | } | ||
1367 | 1365 | ||
1368 | /* Enable clock before accessing registers */ | ||
1369 | ret = clk_prepare_enable(tdma->dma_clk); | ||
1370 | if (ret < 0) { | 1366 | if (ret < 0) { |
1371 | dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret); | 1367 | pm_runtime_disable(&pdev->dev); |
1372 | goto err_pm_disable; | 1368 | return ret; |
1373 | } | 1369 | } |
1374 | 1370 | ||
1375 | /* Reset DMA controller */ | 1371 | /* Reset DMA controller */ |
@@ -1382,7 +1378,7 @@ static int tegra_dma_probe(struct platform_device *pdev) | |||
1382 | tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); | 1378 | tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); |
1383 | tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); | 1379 | tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); |
1384 | 1380 | ||
1385 | clk_disable_unprepare(tdma->dma_clk); | 1381 | pm_runtime_put(&pdev->dev); |
1386 | 1382 | ||
1387 | INIT_LIST_HEAD(&tdma->dma_dev.channels); | 1383 | INIT_LIST_HEAD(&tdma->dma_dev.channels); |
1388 | for (i = 0; i < cdata->nr_channels; i++) { | 1384 | for (i = 0; i < cdata->nr_channels; i++) { |
@@ -1400,8 +1396,7 @@ static int tegra_dma_probe(struct platform_device *pdev) | |||
1400 | } | 1396 | } |
1401 | tdc->irq = res->start; | 1397 | tdc->irq = res->start; |
1402 | snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i); | 1398 | snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i); |
1403 | ret = devm_request_irq(&pdev->dev, tdc->irq, | 1399 | ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc); |
1404 | tegra_dma_isr, 0, tdc->name, tdc); | ||
1405 | if (ret) { | 1400 | if (ret) { |
1406 | dev_err(&pdev->dev, | 1401 | dev_err(&pdev->dev, |
1407 | "request_irq failed with err %d channel %d\n", | 1402 | "request_irq failed with err %d channel %d\n", |
@@ -1482,10 +1477,11 @@ err_unregister_dma_dev: | |||
1482 | err_irq: | 1477 | err_irq: |
1483 | while (--i >= 0) { | 1478 | while (--i >= 0) { |
1484 | struct tegra_dma_channel *tdc = &tdma->channels[i]; | 1479 | struct tegra_dma_channel *tdc = &tdma->channels[i]; |
1480 | |||
1481 | free_irq(tdc->irq, tdc); | ||
1485 | tasklet_kill(&tdc->tasklet); | 1482 | tasklet_kill(&tdc->tasklet); |
1486 | } | 1483 | } |
1487 | 1484 | ||
1488 | err_pm_disable: | ||
1489 | pm_runtime_disable(&pdev->dev); | 1485 | pm_runtime_disable(&pdev->dev); |
1490 | if (!pm_runtime_status_suspended(&pdev->dev)) | 1486 | if (!pm_runtime_status_suspended(&pdev->dev)) |
1491 | tegra_dma_runtime_suspend(&pdev->dev); | 1487 | tegra_dma_runtime_suspend(&pdev->dev); |
@@ -1502,6 +1498,7 @@ static int tegra_dma_remove(struct platform_device *pdev) | |||
1502 | 1498 | ||
1503 | for (i = 0; i < tdma->chip_data->nr_channels; ++i) { | 1499 | for (i = 0; i < tdma->chip_data->nr_channels; ++i) { |
1504 | tdc = &tdma->channels[i]; | 1500 | tdc = &tdma->channels[i]; |
1501 | free_irq(tdc->irq, tdc); | ||
1505 | tasklet_kill(&tdc->tasklet); | 1502 | tasklet_kill(&tdc->tasklet); |
1506 | } | 1503 | } |
1507 | 1504 | ||
@@ -1514,8 +1511,7 @@ static int tegra_dma_remove(struct platform_device *pdev) | |||
1514 | 1511 | ||
1515 | static int tegra_dma_runtime_suspend(struct device *dev) | 1512 | static int tegra_dma_runtime_suspend(struct device *dev) |
1516 | { | 1513 | { |
1517 | struct platform_device *pdev = to_platform_device(dev); | 1514 | struct tegra_dma *tdma = dev_get_drvdata(dev); |
1518 | struct tegra_dma *tdma = platform_get_drvdata(pdev); | ||
1519 | 1515 | ||
1520 | clk_disable_unprepare(tdma->dma_clk); | 1516 | clk_disable_unprepare(tdma->dma_clk); |
1521 | return 0; | 1517 | return 0; |
@@ -1523,8 +1519,7 @@ static int tegra_dma_runtime_suspend(struct device *dev) | |||
1523 | 1519 | ||
1524 | static int tegra_dma_runtime_resume(struct device *dev) | 1520 | static int tegra_dma_runtime_resume(struct device *dev) |
1525 | { | 1521 | { |
1526 | struct platform_device *pdev = to_platform_device(dev); | 1522 | struct tegra_dma *tdma = dev_get_drvdata(dev); |
1527 | struct tegra_dma *tdma = platform_get_drvdata(pdev); | ||
1528 | int ret; | 1523 | int ret; |
1529 | 1524 | ||
1530 | ret = clk_prepare_enable(tdma->dma_clk); | 1525 | ret = clk_prepare_enable(tdma->dma_clk); |
@@ -1543,7 +1538,7 @@ static int tegra_dma_pm_suspend(struct device *dev) | |||
1543 | int ret; | 1538 | int ret; |
1544 | 1539 | ||
1545 | /* Enable clock before accessing register */ | 1540 | /* Enable clock before accessing register */ |
1546 | ret = tegra_dma_runtime_resume(dev); | 1541 | ret = pm_runtime_get_sync(dev); |
1547 | if (ret < 0) | 1542 | if (ret < 0) |
1548 | return ret; | 1543 | return ret; |
1549 | 1544 | ||
@@ -1552,15 +1547,22 @@ static int tegra_dma_pm_suspend(struct device *dev) | |||
1552 | struct tegra_dma_channel *tdc = &tdma->channels[i]; | 1547 | struct tegra_dma_channel *tdc = &tdma->channels[i]; |
1553 | struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; | 1548 | struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; |
1554 | 1549 | ||
1550 | /* Only save the state of DMA channels that are in use */ | ||
1551 | if (!tdc->config_init) | ||
1552 | continue; | ||
1553 | |||
1555 | ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR); | 1554 | ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR); |
1556 | ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR); | 1555 | ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR); |
1557 | ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR); | 1556 | ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR); |
1558 | ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ); | 1557 | ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ); |
1559 | ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ); | 1558 | ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ); |
1559 | if (tdma->chip_data->support_separate_wcount_reg) | ||
1560 | ch_reg->wcount = tdc_read(tdc, | ||
1561 | TEGRA_APBDMA_CHAN_WCOUNT); | ||
1560 | } | 1562 | } |
1561 | 1563 | ||
1562 | /* Disable clock */ | 1564 | /* Disable clock */ |
1563 | tegra_dma_runtime_suspend(dev); | 1565 | pm_runtime_put(dev); |
1564 | return 0; | 1566 | return 0; |
1565 | } | 1567 | } |
1566 | 1568 | ||
@@ -1571,7 +1573,7 @@ static int tegra_dma_pm_resume(struct device *dev) | |||
1571 | int ret; | 1573 | int ret; |
1572 | 1574 | ||
1573 | /* Enable clock before accessing register */ | 1575 | /* Enable clock before accessing register */ |
1574 | ret = tegra_dma_runtime_resume(dev); | 1576 | ret = pm_runtime_get_sync(dev); |
1575 | if (ret < 0) | 1577 | if (ret < 0) |
1576 | return ret; | 1578 | return ret; |
1577 | 1579 | ||
@@ -1583,6 +1585,13 @@ static int tegra_dma_pm_resume(struct device *dev) | |||
1583 | struct tegra_dma_channel *tdc = &tdma->channels[i]; | 1585 | struct tegra_dma_channel *tdc = &tdma->channels[i]; |
1584 | struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; | 1586 | struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; |
1585 | 1587 | ||
1588 | /* Only restore the state of DMA channels that are in use */ | ||
1589 | if (!tdc->config_init) | ||
1590 | continue; | ||
1591 | |||
1592 | if (tdma->chip_data->support_separate_wcount_reg) | ||
1593 | tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, | ||
1594 | ch_reg->wcount); | ||
1586 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq); | 1595 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq); |
1587 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr); | 1596 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr); |
1588 | tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq); | 1597 | tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq); |
@@ -1592,16 +1601,14 @@ static int tegra_dma_pm_resume(struct device *dev) | |||
1592 | } | 1601 | } |
1593 | 1602 | ||
1594 | /* Disable clock */ | 1603 | /* Disable clock */ |
1595 | tegra_dma_runtime_suspend(dev); | 1604 | pm_runtime_put(dev); |
1596 | return 0; | 1605 | return 0; |
1597 | } | 1606 | } |
1598 | #endif | 1607 | #endif |
1599 | 1608 | ||
1600 | static const struct dev_pm_ops tegra_dma_dev_pm_ops = { | 1609 | static const struct dev_pm_ops tegra_dma_dev_pm_ops = { |
1601 | #ifdef CONFIG_PM | 1610 | SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume, |
1602 | .runtime_suspend = tegra_dma_runtime_suspend, | 1611 | NULL) |
1603 | .runtime_resume = tegra_dma_runtime_resume, | ||
1604 | #endif | ||
1605 | SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume) | 1612 | SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume) |
1606 | }; | 1613 | }; |
1607 | 1614 | ||
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index a415edbe61b1..e107779b1a2e 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/list.h> | 13 | #include <linux/list.h> |
14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
15 | #include <linux/idr.h> | ||
16 | #include <linux/of_address.h> | 15 | #include <linux/of_address.h> |
17 | #include <linux/of_device.h> | 16 | #include <linux/of_device.h> |
18 | #include <linux/of_dma.h> | 17 | #include <linux/of_dma.h> |
@@ -198,7 +197,8 @@ struct ti_dra7_xbar_data { | |||
198 | void __iomem *iomem; | 197 | void __iomem *iomem; |
199 | 198 | ||
200 | struct dma_router dmarouter; | 199 | struct dma_router dmarouter; |
201 | struct idr map_idr; | 200 | struct mutex mutex; |
201 | unsigned long *dma_inuse; | ||
202 | 202 | ||
203 | u16 safe_val; /* Value to reset the crossbar lines */ | 203 | u16 safe_val; /* Value to reset the crossbar lines */ |
204 | u32 xbar_requests; /* number of DMA requests connected to XBAR */ | 204 | u32 xbar_requests; /* number of DMA requests connected to XBAR */ |
@@ -225,7 +225,9 @@ static void ti_dra7_xbar_free(struct device *dev, void *route_data) | |||
225 | map->xbar_in, map->xbar_out); | 225 | map->xbar_in, map->xbar_out); |
226 | 226 | ||
227 | ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val); | 227 | ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val); |
228 | idr_remove(&xbar->map_idr, map->xbar_out); | 228 | mutex_lock(&xbar->mutex); |
229 | clear_bit(map->xbar_out, xbar->dma_inuse); | ||
230 | mutex_unlock(&xbar->mutex); | ||
229 | kfree(map); | 231 | kfree(map); |
230 | } | 232 | } |
231 | 233 | ||
@@ -255,8 +257,17 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, | |||
255 | return ERR_PTR(-ENOMEM); | 257 | return ERR_PTR(-ENOMEM); |
256 | } | 258 | } |
257 | 259 | ||
258 | map->xbar_out = idr_alloc(&xbar->map_idr, NULL, 0, xbar->dma_requests, | 260 | mutex_lock(&xbar->mutex); |
259 | GFP_KERNEL); | 261 | map->xbar_out = find_first_zero_bit(xbar->dma_inuse, |
262 | xbar->dma_requests); | ||
263 | mutex_unlock(&xbar->mutex); | ||
264 | if (map->xbar_out == xbar->dma_requests) { | ||
265 | dev_err(&pdev->dev, "Run out of free DMA requests\n"); | ||
266 | kfree(map); | ||
267 | return ERR_PTR(-ENOMEM); | ||
268 | } | ||
269 | set_bit(map->xbar_out, xbar->dma_inuse); | ||
270 | |||
260 | map->xbar_in = (u16)dma_spec->args[0]; | 271 | map->xbar_in = (u16)dma_spec->args[0]; |
261 | 272 | ||
262 | dma_spec->args[0] = map->xbar_out + xbar->dma_offset; | 273 | dma_spec->args[0] = map->xbar_out + xbar->dma_offset; |
@@ -278,17 +289,29 @@ static const struct of_device_id ti_dra7_master_match[] = { | |||
278 | .compatible = "ti,edma3", | 289 | .compatible = "ti,edma3", |
279 | .data = (void *)TI_XBAR_EDMA_OFFSET, | 290 | .data = (void *)TI_XBAR_EDMA_OFFSET, |
280 | }, | 291 | }, |
292 | { | ||
293 | .compatible = "ti,edma3-tpcc", | ||
294 | .data = (void *)TI_XBAR_EDMA_OFFSET, | ||
295 | }, | ||
281 | {}, | 296 | {}, |
282 | }; | 297 | }; |
283 | 298 | ||
299 | static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p) | ||
300 | { | ||
301 | for (; len > 0; len--) | ||
302 | set_bit(offset + (len - 1), p); | ||
303 | } | ||
304 | |||
284 | static int ti_dra7_xbar_probe(struct platform_device *pdev) | 305 | static int ti_dra7_xbar_probe(struct platform_device *pdev) |
285 | { | 306 | { |
286 | struct device_node *node = pdev->dev.of_node; | 307 | struct device_node *node = pdev->dev.of_node; |
287 | const struct of_device_id *match; | 308 | const struct of_device_id *match; |
288 | struct device_node *dma_node; | 309 | struct device_node *dma_node; |
289 | struct ti_dra7_xbar_data *xbar; | 310 | struct ti_dra7_xbar_data *xbar; |
311 | struct property *prop; | ||
290 | struct resource *res; | 312 | struct resource *res; |
291 | u32 safe_val; | 313 | u32 safe_val; |
314 | size_t sz; | ||
292 | void __iomem *iomem; | 315 | void __iomem *iomem; |
293 | int i, ret; | 316 | int i, ret; |
294 | 317 | ||
@@ -299,8 +322,6 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev) | |||
299 | if (!xbar) | 322 | if (!xbar) |
300 | return -ENOMEM; | 323 | return -ENOMEM; |
301 | 324 | ||
302 | idr_init(&xbar->map_idr); | ||
303 | |||
304 | dma_node = of_parse_phandle(node, "dma-masters", 0); | 325 | dma_node = of_parse_phandle(node, "dma-masters", 0); |
305 | if (!dma_node) { | 326 | if (!dma_node) { |
306 | dev_err(&pdev->dev, "Can't get DMA master node\n"); | 327 | dev_err(&pdev->dev, "Can't get DMA master node\n"); |
@@ -322,6 +343,12 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev) | |||
322 | } | 343 | } |
323 | of_node_put(dma_node); | 344 | of_node_put(dma_node); |
324 | 345 | ||
346 | xbar->dma_inuse = devm_kcalloc(&pdev->dev, | ||
347 | BITS_TO_LONGS(xbar->dma_requests), | ||
348 | sizeof(unsigned long), GFP_KERNEL); | ||
349 | if (!xbar->dma_inuse) | ||
350 | return -ENOMEM; | ||
351 | |||
325 | if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) { | 352 | if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) { |
326 | dev_info(&pdev->dev, | 353 | dev_info(&pdev->dev, |
327 | "Missing XBAR input information, using %u.\n", | 354 | "Missing XBAR input information, using %u.\n", |
@@ -332,6 +359,33 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev) | |||
332 | if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val)) | 359 | if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val)) |
333 | xbar->safe_val = (u16)safe_val; | 360 | xbar->safe_val = (u16)safe_val; |
334 | 361 | ||
362 | |||
363 | prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz); | ||
364 | if (prop) { | ||
365 | const char pname[] = "ti,reserved-dma-request-ranges"; | ||
366 | u32 (*rsv_events)[2]; | ||
367 | size_t nelm = sz / sizeof(*rsv_events); | ||
368 | int i; | ||
369 | |||
370 | if (!nelm) | ||
371 | return -EINVAL; | ||
372 | |||
373 | rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL); | ||
374 | if (!rsv_events) | ||
375 | return -ENOMEM; | ||
376 | |||
377 | ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events, | ||
378 | nelm * 2); | ||
379 | if (ret) | ||
380 | return ret; | ||
381 | |||
382 | for (i = 0; i < nelm; i++) { | ||
383 | ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1], | ||
384 | xbar->dma_inuse); | ||
385 | } | ||
386 | kfree(rsv_events); | ||
387 | } | ||
388 | |||
335 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 389 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
336 | iomem = devm_ioremap_resource(&pdev->dev, res); | 390 | iomem = devm_ioremap_resource(&pdev->dev, res); |
337 | if (IS_ERR(iomem)) | 391 | if (IS_ERR(iomem)) |
@@ -343,18 +397,23 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev) | |||
343 | xbar->dmarouter.route_free = ti_dra7_xbar_free; | 397 | xbar->dmarouter.route_free = ti_dra7_xbar_free; |
344 | xbar->dma_offset = (u32)match->data; | 398 | xbar->dma_offset = (u32)match->data; |
345 | 399 | ||
400 | mutex_init(&xbar->mutex); | ||
346 | platform_set_drvdata(pdev, xbar); | 401 | platform_set_drvdata(pdev, xbar); |
347 | 402 | ||
348 | /* Reset the crossbar */ | 403 | /* Reset the crossbar */ |
349 | for (i = 0; i < xbar->dma_requests; i++) | 404 | for (i = 0; i < xbar->dma_requests; i++) { |
350 | ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val); | 405 | if (!test_bit(i, xbar->dma_inuse)) |
406 | ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val); | ||
407 | } | ||
351 | 408 | ||
352 | ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate, | 409 | ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate, |
353 | &xbar->dmarouter); | 410 | &xbar->dmarouter); |
354 | if (ret) { | 411 | if (ret) { |
355 | /* Restore the defaults for the crossbar */ | 412 | /* Restore the defaults for the crossbar */ |
356 | for (i = 0; i < xbar->dma_requests; i++) | 413 | for (i = 0; i < xbar->dma_requests; i++) { |
357 | ti_dra7_xbar_write(xbar->iomem, i, i); | 414 | if (!test_bit(i, xbar->dma_inuse)) |
415 | ti_dra7_xbar_write(xbar->iomem, i, i); | ||
416 | } | ||
358 | } | 417 | } |
359 | 418 | ||
360 | return ret; | 419 | return ret; |
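
The crossbar switches from an IDR to a plain bitmap guarded by a mutex, which also lets "ti,reserved-dma-request-ranges" be pre-marked as busy so those lines are neither handed out by route_allocate nor rewritten by the reset loops. The allocation scheme reduces to the following sketch (names are illustrative; unlike the driver, the sketch keeps the find and the set under one lock hold):

	#include <linux/bitmap.h>
	#include <linux/errno.h>
	#include <linux/mutex.h>

	struct line_alloc {
		struct mutex lock;
		unsigned long *inuse;	/* one bit per DMA request line */
		unsigned int nr_lines;
	};

	static int line_get(struct line_alloc *la)
	{
		int line;

		mutex_lock(&la->lock);
		line = find_first_zero_bit(la->inuse, la->nr_lines);
		if (line < la->nr_lines)
			set_bit(line, la->inuse);
		mutex_unlock(&la->lock);

		return line < la->nr_lines ? line : -ENOMEM;
	}

	static void line_put(struct line_alloc *la, int line)
	{
		mutex_lock(&la->lock);
		clear_bit(line, la->inuse);
		mutex_unlock(&la->lock);
	}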
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index 6f80432a3f0a..a35c211857dd 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c | |||
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) | |||
29 | spin_lock_irqsave(&vc->lock, flags); | 29 | spin_lock_irqsave(&vc->lock, flags); |
30 | cookie = dma_cookie_assign(tx); | 30 | cookie = dma_cookie_assign(tx); |
31 | 31 | ||
32 | list_add_tail(&vd->node, &vc->desc_submitted); | 32 | list_move_tail(&vd->node, &vc->desc_submitted); |
33 | spin_unlock_irqrestore(&vc->lock, flags); | 33 | spin_unlock_irqrestore(&vc->lock, flags); |
34 | 34 | ||
35 | dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", | 35 | dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", |
@@ -39,6 +39,33 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) | |||
39 | } | 39 | } |
40 | EXPORT_SYMBOL_GPL(vchan_tx_submit); | 40 | EXPORT_SYMBOL_GPL(vchan_tx_submit); |
41 | 41 | ||
42 | /** | ||
43 | * vchan_tx_desc_free - free a reusable descriptor | ||
44 | * @tx: the transfer | ||
45 | * | ||
46 | * This function frees a previously allocated reusable descriptor. The only | ||
47 | * other way to free one is to clear the DMA_CTRL_REUSE flag and submit the | ||
48 | * transfer one last time. | ||
49 | * | ||
50 | * Returns 0 upon success | ||
51 | */ | ||
52 | int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx) | ||
53 | { | ||
54 | struct virt_dma_chan *vc = to_virt_chan(tx->chan); | ||
55 | struct virt_dma_desc *vd = to_virt_desc(tx); | ||
56 | unsigned long flags; | ||
57 | |||
58 | spin_lock_irqsave(&vc->lock, flags); | ||
59 | list_del(&vd->node); | ||
60 | spin_unlock_irqrestore(&vc->lock, flags); | ||
61 | |||
62 | dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n", | ||
63 | vc, vd, vd->tx.cookie); | ||
64 | vc->desc_free(vd); | ||
65 | return 0; | ||
66 | } | ||
67 | EXPORT_SYMBOL_GPL(vchan_tx_desc_free); | ||
68 | |||
42 | struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, | 69 | struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, |
43 | dma_cookie_t cookie) | 70 | dma_cookie_t cookie) |
44 | { | 71 | { |
@@ -83,8 +110,10 @@ static void vchan_complete(unsigned long arg) | |||
83 | cb_data = vd->tx.callback_param; | 110 | cb_data = vd->tx.callback_param; |
84 | 111 | ||
85 | list_del(&vd->node); | 112 | list_del(&vd->node); |
86 | 113 | if (dmaengine_desc_test_reuse(&vd->tx)) | |
87 | vc->desc_free(vd); | 114 | list_add(&vd->node, &vc->desc_allocated); |
115 | else | ||
116 | vc->desc_free(vd); | ||
88 | 117 | ||
89 | if (cb) | 118 | if (cb) |
90 | cb(cb_data); | 119 | cb(cb_data); |
@@ -96,9 +125,13 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) | |||
96 | while (!list_empty(head)) { | 125 | while (!list_empty(head)) { |
97 | struct virt_dma_desc *vd = list_first_entry(head, | 126 | struct virt_dma_desc *vd = list_first_entry(head, |
98 | struct virt_dma_desc, node); | 127 | struct virt_dma_desc, node); |
99 | list_del(&vd->node); | 128 | if (dmaengine_desc_test_reuse(&vd->tx)) { |
100 | dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); | 129 | list_move_tail(&vd->node, &vc->desc_allocated); |
101 | vc->desc_free(vd); | 130 | } else { |
131 | dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); | ||
132 | list_del(&vd->node); | ||
133 | vc->desc_free(vd); | ||
134 | } | ||
102 | } | 135 | } |
103 | } | 136 | } |
104 | EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); | 137 | EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); |
@@ -108,6 +141,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) | |||
108 | dma_cookie_init(&vc->chan); | 141 | dma_cookie_init(&vc->chan); |
109 | 142 | ||
110 | spin_lock_init(&vc->lock); | 143 | spin_lock_init(&vc->lock); |
144 | INIT_LIST_HEAD(&vc->desc_allocated); | ||
111 | INIT_LIST_HEAD(&vc->desc_submitted); | 145 | INIT_LIST_HEAD(&vc->desc_submitted); |
112 | INIT_LIST_HEAD(&vc->desc_issued); | 146 | INIT_LIST_HEAD(&vc->desc_issued); |
113 | INIT_LIST_HEAD(&vc->desc_completed); | 147 | INIT_LIST_HEAD(&vc->desc_completed); |
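
With the new desc_allocated list, a completed descriptor whose DMA_CTRL_REUSE flag is set is parked on that list instead of being freed, so a client can resubmit it without another prep call. A hedged client-side sketch, assuming a channel whose driver opts in through descriptor_reuse (pxa_dma is the in-tree user added by this series); chan, buf and len are placeholders:

	txd = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	ret = dmaengine_desc_set_reuse(txd);	/* -EPERM if unsupported */
	if (ret)
		return ret;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	/* ... transfer completes, descriptor returns to desc_allocated ... */
	dmaengine_submit(txd);			/* resubmit, no re-prep needed */
	dma_async_issue_pending(chan);

	/* when done with it for good: */
	dmaengine_desc_free(txd);		/* or clear reuse and submit once more */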
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 2fa47745a41f..d9731ca5e262 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h | |||
@@ -29,6 +29,7 @@ struct virt_dma_chan { | |||
29 | spinlock_t lock; | 29 | spinlock_t lock; |
30 | 30 | ||
31 | /* protected by vc.lock */ | 31 | /* protected by vc.lock */ |
32 | struct list_head desc_allocated; | ||
32 | struct list_head desc_submitted; | 33 | struct list_head desc_submitted; |
33 | struct list_head desc_issued; | 34 | struct list_head desc_issued; |
34 | struct list_head desc_completed; | 35 | struct list_head desc_completed; |
@@ -55,10 +56,17 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan | |||
55 | struct virt_dma_desc *vd, unsigned long tx_flags) | 56 | struct virt_dma_desc *vd, unsigned long tx_flags) |
56 | { | 57 | { |
57 | extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); | 58 | extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); |
59 | extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *); | ||
60 | unsigned long flags; | ||
58 | 61 | ||
59 | dma_async_tx_descriptor_init(&vd->tx, &vc->chan); | 62 | dma_async_tx_descriptor_init(&vd->tx, &vc->chan); |
60 | vd->tx.flags = tx_flags; | 63 | vd->tx.flags = tx_flags; |
61 | vd->tx.tx_submit = vchan_tx_submit; | 64 | vd->tx.tx_submit = vchan_tx_submit; |
65 | vd->tx.desc_free = vchan_tx_desc_free; | ||
66 | |||
67 | spin_lock_irqsave(&vc->lock, flags); | ||
68 | list_add_tail(&vd->node, &vc->desc_allocated); | ||
69 | spin_unlock_irqrestore(&vc->lock, flags); | ||
62 | 70 | ||
63 | return &vd->tx; | 71 | return &vd->tx; |
64 | } | 72 | } |
@@ -134,6 +142,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) | |||
134 | static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, | 142 | static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, |
135 | struct list_head *head) | 143 | struct list_head *head) |
136 | { | 144 | { |
145 | list_splice_tail_init(&vc->desc_allocated, head); | ||
137 | list_splice_tail_init(&vc->desc_submitted, head); | 146 | list_splice_tail_init(&vc->desc_submitted, head); |
138 | list_splice_tail_init(&vc->desc_issued, head); | 147 | list_splice_tail_init(&vc->desc_issued, head); |
139 | list_splice_tail_init(&vc->desc_completed, head); | 148 | list_splice_tail_init(&vc->desc_completed, head); |
@@ -141,14 +150,30 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, | |||
141 | 150 | ||
142 | static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) | 151 | static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) |
143 | { | 152 | { |
153 | struct virt_dma_desc *vd; | ||
144 | unsigned long flags; | 154 | unsigned long flags; |
145 | LIST_HEAD(head); | 155 | LIST_HEAD(head); |
146 | 156 | ||
147 | spin_lock_irqsave(&vc->lock, flags); | 157 | spin_lock_irqsave(&vc->lock, flags); |
148 | vchan_get_all_descriptors(vc, &head); | 158 | vchan_get_all_descriptors(vc, &head); |
159 | list_for_each_entry(vd, &head, node) | ||
160 | dmaengine_desc_clear_reuse(&vd->tx); | ||
149 | spin_unlock_irqrestore(&vc->lock, flags); | 161 | spin_unlock_irqrestore(&vc->lock, flags); |
150 | 162 | ||
151 | vchan_dma_desc_free_list(vc, &head); | 163 | vchan_dma_desc_free_list(vc, &head); |
152 | } | 164 | } |
153 | 165 | ||
166 | /** | ||
167 | * vchan_synchronize() - synchronize callback execution to the current context | ||
168 | * @vc: virtual channel to synchronize | ||
169 | * | ||
170 | * Makes sure that all scheduled or active callbacks have finished running. For | ||
171 | * proper operation the caller has to ensure that no new callbacks are scheduled | ||
172 | * after this function has been invoked. | ||
173 | */ | ||
174 | static inline void vchan_synchronize(struct virt_dma_chan *vc) | ||
175 | { | ||
176 | tasklet_kill(&vc->task); | ||
177 | } | ||
178 | |||
154 | #endif | 179 | #endif |
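
For a vchan-based driver the new device_synchronize hook is essentially free: vchan_synchronize() kills the per-channel tasklet, which guarantees no completion callback is still running or about to run. A sketch of the wiring, with hypothetical foo_* names:

	static void foo_dma_synchronize(struct dma_chan *chan)
	{
		struct foo_dma_chan *fc = to_foo_dma_chan(chan);

		vchan_synchronize(&fc->vc);
	}

	/* at registration time, next to the other channel ops: */
	dd->device_terminate_all = foo_dma_terminate_all;
	dd->device_synchronize = foo_dma_synchronize;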
diff --git a/include/linux/dca.h b/include/linux/dca.h index d27a7a05718d..ad956c2e07a8 100644 --- a/include/linux/dca.h +++ b/include/linux/dca.h | |||
@@ -34,7 +34,7 @@ void dca_unregister_notify(struct notifier_block *nb); | |||
34 | 34 | ||
35 | struct dca_provider { | 35 | struct dca_provider { |
36 | struct list_head node; | 36 | struct list_head node; |
37 | struct dca_ops *ops; | 37 | const struct dca_ops *ops; |
38 | struct device *cd; | 38 | struct device *cd; |
39 | int id; | 39 | int id; |
40 | }; | 40 | }; |
@@ -53,7 +53,8 @@ struct dca_ops { | |||
53 | int (*dev_managed) (struct dca_provider *, struct device *); | 53 | int (*dev_managed) (struct dca_provider *, struct device *); |
54 | }; | 54 | }; |
55 | 55 | ||
56 | struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); | 56 | struct dca_provider *alloc_dca_provider(const struct dca_ops *ops, |
57 | int priv_size); | ||
57 | void free_dca_provider(struct dca_provider *dca); | 58 | void free_dca_provider(struct dca_provider *dca); |
58 | int register_dca_provider(struct dca_provider *dca, struct device *dev); | 59 | int register_dca_provider(struct dca_provider *dca, struct device *dev); |
59 | void unregister_dca_provider(struct dca_provider *dca, struct device *dev); | 60 | void unregister_dca_provider(struct dca_provider *dca, struct device *dev); |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c47c68e535e8..16a1cad30c33 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -607,11 +607,38 @@ enum dmaengine_alignment { | |||
607 | }; | 607 | }; |
608 | 608 | ||
609 | /** | 609 | /** |
610 | * struct dma_slave_map - associates a slave device and its slave channel with | ||
611 | * the parameter to be used by a filter function | ||
612 | * @devname: name of the device | ||
613 | * @slave: slave channel name | ||
614 | * @param: opaque parameter to pass to struct dma_filter.fn | ||
615 | */ | ||
616 | struct dma_slave_map { | ||
617 | const char *devname; | ||
618 | const char *slave; | ||
619 | void *param; | ||
620 | }; | ||
621 | |||
622 | /** | ||
623 | * struct dma_filter - information for slave device/channel to filter_fn/param | ||
624 | * mapping | ||
625 | * @fn: filter function callback | ||
626 | * @mapcnt: number of slave device/channel in the map | ||
627 | * @map: array of channel to filter mapping data | ||
628 | */ | ||
629 | struct dma_filter { | ||
630 | dma_filter_fn fn; | ||
631 | int mapcnt; | ||
632 | const struct dma_slave_map *map; | ||
633 | }; | ||
634 | |||
635 | /** | ||
610 | * struct dma_device - info on the entity supplying DMA services | 636 | * struct dma_device - info on the entity supplying DMA services |
611 | * @chancnt: how many DMA channels are supported | 637 | * @chancnt: how many DMA channels are supported |
612 | * @privatecnt: how many DMA channels are requested by dma_request_channel | 638 | * @privatecnt: how many DMA channels are requested by dma_request_channel |
613 | * @channels: the list of struct dma_chan | 639 | * @channels: the list of struct dma_chan |
614 | * @global_node: list_head for global dma_device_list | 640 | * @global_node: list_head for global dma_device_list |
641 | * @filter: information for device/slave to filter function/param mapping | ||
615 | * @cap_mask: one or more dma_capability flags | 642 | * @cap_mask: one or more dma_capability flags |
616 | * @max_xor: maximum number of xor sources, 0 if no capability | 643 | * @max_xor: maximum number of xor sources, 0 if no capability |
617 | * @max_pq: maximum number of PQ sources and PQ-continue capability | 644 | * @max_pq: maximum number of PQ sources and PQ-continue capability |
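
These two structures let a dma_device publish, at registration time, a table mapping a (device name, slave channel name) pair to the opaque parameter its filter function expects; dma_request_chan() falls back to this table when neither DT nor ACPI can resolve a request. A hedged provider-side sketch (device names and request IDs are invented):

	static const struct dma_slave_map foo_slave_map[] = {
		{ "foo-uart.0", "rx", (void *)FOO_DMA_REQ_UART0_RX },
		{ "foo-uart.0", "tx", (void *)FOO_DMA_REQ_UART0_TX },
	};

	/* before dma_async_device_register(&dd): */
	dd.filter.map = foo_slave_map;
	dd.filter.mapcnt = ARRAY_SIZE(foo_slave_map);
	dd.filter.fn = foo_dma_filter_fn;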
@@ -654,11 +681,14 @@ enum dmaengine_alignment { | |||
654 | * paused. Returns 0 or an error code | 681 | * paused. Returns 0 or an error code |
655 | * @device_terminate_all: Aborts all transfers on a channel. Returns 0 | 682 | * @device_terminate_all: Aborts all transfers on a channel. Returns 0 |
656 | * or an error code | 683 | * or an error code |
684 | * @device_synchronize: Synchronizes the termination of transfers to the | ||
685 | * current context. | ||
657 | * @device_tx_status: poll for transaction completion, the optional | 686 | * @device_tx_status: poll for transaction completion, the optional |
658 | * txstate parameter can be supplied with a pointer to get a | 687 | * txstate parameter can be supplied with a pointer to get a |
659 | * struct with auxiliary transfer status information, otherwise the call | 688 | * struct with auxiliary transfer status information, otherwise the call |
660 | * will just return a simple status code | 689 | * will just return a simple status code |
661 | * @device_issue_pending: push pending transactions to hardware | 690 | * @device_issue_pending: push pending transactions to hardware |
691 | * @descriptor_reuse: a submitted transfer can be resubmitted after completion | ||
662 | */ | 692 | */ |
663 | struct dma_device { | 693 | struct dma_device { |
664 | 694 | ||
@@ -666,6 +696,7 @@ struct dma_device { | |||
666 | unsigned int privatecnt; | 696 | unsigned int privatecnt; |
667 | struct list_head channels; | 697 | struct list_head channels; |
668 | struct list_head global_node; | 698 | struct list_head global_node; |
699 | struct dma_filter filter; | ||
669 | dma_cap_mask_t cap_mask; | 700 | dma_cap_mask_t cap_mask; |
670 | unsigned short max_xor; | 701 | unsigned short max_xor; |
671 | unsigned short max_pq; | 702 | unsigned short max_pq; |
@@ -681,6 +712,7 @@ struct dma_device { | |||
681 | u32 src_addr_widths; | 712 | u32 src_addr_widths; |
682 | u32 dst_addr_widths; | 713 | u32 dst_addr_widths; |
683 | u32 directions; | 714 | u32 directions; |
715 | bool descriptor_reuse; | ||
684 | enum dma_residue_granularity residue_granularity; | 716 | enum dma_residue_granularity residue_granularity; |
685 | 717 | ||
686 | int (*device_alloc_chan_resources)(struct dma_chan *chan); | 718 | int (*device_alloc_chan_resources)(struct dma_chan *chan); |
@@ -737,6 +769,7 @@ struct dma_device { | |||
737 | int (*device_pause)(struct dma_chan *chan); | 769 | int (*device_pause)(struct dma_chan *chan); |
738 | int (*device_resume)(struct dma_chan *chan); | 770 | int (*device_resume)(struct dma_chan *chan); |
739 | int (*device_terminate_all)(struct dma_chan *chan); | 771 | int (*device_terminate_all)(struct dma_chan *chan); |
772 | void (*device_synchronize)(struct dma_chan *chan); | ||
740 | 773 | ||
741 | enum dma_status (*device_tx_status)(struct dma_chan *chan, | 774 | enum dma_status (*device_tx_status)(struct dma_chan *chan, |
742 | dma_cookie_t cookie, | 775 | dma_cookie_t cookie, |
@@ -828,6 +861,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( | |||
828 | src_sg, src_nents, flags); | 861 | src_sg, src_nents, flags); |
829 | } | 862 | } |
830 | 863 | ||
864 | /** | ||
865 | * dmaengine_terminate_all() - Terminate all active DMA transfers | ||
866 | * @chan: The channel for which to terminate the transfers | ||
867 | * | ||
868 | * This function is DEPRECATED; use either dmaengine_terminate_sync() or | ||
869 | * dmaengine_terminate_async() instead. | ||
870 | */ | ||
831 | static inline int dmaengine_terminate_all(struct dma_chan *chan) | 871 | static inline int dmaengine_terminate_all(struct dma_chan *chan) |
832 | { | 872 | { |
833 | if (chan->device->device_terminate_all) | 873 | if (chan->device->device_terminate_all) |
@@ -836,6 +876,88 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan) | |||
836 | return -ENOSYS; | 876 | return -ENOSYS; |
837 | } | 877 | } |
838 | 878 | ||
879 | /** | ||
880 | * dmaengine_terminate_async() - Terminate all active DMA transfers | ||
881 | * @chan: The channel for which to terminate the transfers | ||
882 | * | ||
883 | * Calling this function will terminate all active and pending descriptors | ||
884 | * that have previously been submitted to the channel. It is not guaranteed | ||
885 | * though that the transfer for the active descriptor has stopped when the | ||
886 | * function returns. Furthermore it is possible that the complete callback of a | ||
887 | * submitted transfer is still running when this function returns. | ||
888 | * | ||
889 | * dmaengine_synchronize() needs to be called before it is safe to free | ||
890 | * any memory that is accessed by previously submitted descriptors or before | ||
891 | * freeing any resources accessed from within the completion callback of any | ||
892 | * previously submitted descriptors. | ||
893 | * | ||
894 | * This function can be called from atomic context as well as from within a | ||
895 | * complete callback of a descriptor submitted on the same channel. | ||
896 | * | ||
897 | * If neither of the two conditions above applies, consider using | ||
898 | * dmaengine_terminate_sync() instead. | ||
899 | */ | ||
900 | static inline int dmaengine_terminate_async(struct dma_chan *chan) | ||
901 | { | ||
902 | if (chan->device->device_terminate_all) | ||
903 | return chan->device->device_terminate_all(chan); | ||
904 | |||
905 | return -EINVAL; | ||
906 | } | ||
907 | |||
908 | /** | ||
909 | * dmaengine_synchronize() - Synchronize DMA channel termination | ||
910 | * @chan: The channel to synchronize | ||
911 | * | ||
912 | * Synchronizes the DMA channel termination to the current context. When this | ||
913 | * function returns it is guaranteed that all transfers for previously issued | ||
914 | * descriptors have stopped and it is safe to free the memory associated | ||
915 | * with them. Furthermore it is guaranteed that all complete callback functions | ||
916 | * for a previously submitted descriptor have finished running and it is safe to | ||
917 | * free resources accessed from within the complete callbacks. | ||
918 | * | ||
919 | * The behavior of this function is undefined if dma_async_issue_pending() has | ||
920 | * been called between dmaengine_terminate_async() and this function. | ||
921 | * | ||
922 | * This function must only be called from non-atomic context and must not be | ||
923 | * called from within a complete callback of a descriptor submitted on the same | ||
924 | * channel. | ||
925 | */ | ||
926 | static inline void dmaengine_synchronize(struct dma_chan *chan) | ||
927 | { | ||
928 | might_sleep(); | ||
929 | |||
930 | if (chan->device->device_synchronize) | ||
931 | chan->device->device_synchronize(chan); | ||
932 | } | ||
933 | |||
934 | /** | ||
935 | * dmaengine_terminate_sync() - Terminate all active DMA transfers | ||
936 | * @chan: The channel for which to terminate the transfers | ||
937 | * | ||
938 | * Calling this function will terminate all active and pending transfers | ||
939 | * that have previously been submitted to the channel. It is similar to | ||
940 | * dmaengine_terminate_async() but guarantees that the DMA transfer has actually | ||
941 | * stopped and that all complete callbacks have finished running when the | ||
942 | * function returns. | ||
943 | * | ||
944 | * This function must only be called from non-atomic context and must not be | ||
945 | * called from within a complete callback of a descriptor submitted on the same | ||
946 | * channel. | ||
947 | */ | ||
948 | static inline int dmaengine_terminate_sync(struct dma_chan *chan) | ||
949 | { | ||
950 | int ret; | ||
951 | |||
952 | ret = dmaengine_terminate_async(chan); | ||
953 | if (ret) | ||
954 | return ret; | ||
955 | |||
956 | dmaengine_synchronize(chan); | ||
957 | |||
958 | return 0; | ||
959 | } | ||
960 | |||
839 | static inline int dmaengine_pause(struct dma_chan *chan) | 961 | static inline int dmaengine_pause(struct dma_chan *chan) |
840 | { | 962 | { |
841 | if (chan->device->device_pause) | 963 | if (chan->device->device_pause) |
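
The async/sync split above is about calling context: dmaengine_terminate_async() only asks the driver to stop and is safe in atomic context, while the synchronization step may sleep. The intended pattern, assuming heap-allocated callback data named cb_data:

	/* atomic path (IRQ handler, ALSA trigger, ...): request the stop */
	dmaengine_terminate_async(chan);

	/*
	 * later, from sleepable context, before freeing anything the
	 * transfer or its completion callback could still touch:
	 */
	dmaengine_synchronize(chan);
	kfree(cb_data);

	/* or, when already in sleepable context, the combined form: */
	ret = dmaengine_terminate_sync(chan);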
@@ -1140,9 +1262,11 @@ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); | |||
1140 | void dma_issue_pending_all(void); | 1262 | void dma_issue_pending_all(void); |
1141 | struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, | 1263 | struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, |
1142 | dma_filter_fn fn, void *fn_param); | 1264 | dma_filter_fn fn, void *fn_param); |
1143 | struct dma_chan *dma_request_slave_channel_reason(struct device *dev, | ||
1144 | const char *name); | ||
1145 | struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); | 1265 | struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); |
1266 | |||
1267 | struct dma_chan *dma_request_chan(struct device *dev, const char *name); | ||
1268 | struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask); | ||
1269 | |||
1146 | void dma_release_channel(struct dma_chan *chan); | 1270 | void dma_release_channel(struct dma_chan *chan); |
1147 | int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps); | 1271 | int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps); |
1148 | #else | 1272 | #else |
@@ -1166,16 +1290,21 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, | |||
1166 | { | 1290 | { |
1167 | return NULL; | 1291 | return NULL; |
1168 | } | 1292 | } |
1169 | static inline struct dma_chan *dma_request_slave_channel_reason( | ||
1170 | struct device *dev, const char *name) | ||
1171 | { | ||
1172 | return ERR_PTR(-ENODEV); | ||
1173 | } | ||
1174 | static inline struct dma_chan *dma_request_slave_channel(struct device *dev, | 1293 | static inline struct dma_chan *dma_request_slave_channel(struct device *dev, |
1175 | const char *name) | 1294 | const char *name) |
1176 | { | 1295 | { |
1177 | return NULL; | 1296 | return NULL; |
1178 | } | 1297 | } |
1298 | static inline struct dma_chan *dma_request_chan(struct device *dev, | ||
1299 | const char *name) | ||
1300 | { | ||
1301 | return ERR_PTR(-ENODEV); | ||
1302 | } | ||
1303 | static inline struct dma_chan *dma_request_chan_by_mask( | ||
1304 | const dma_cap_mask_t *mask) | ||
1305 | { | ||
1306 | return ERR_PTR(-ENODEV); | ||
1307 | } | ||
1179 | static inline void dma_release_channel(struct dma_chan *chan) | 1308 | static inline void dma_release_channel(struct dma_chan *chan) |
1180 | { | 1309 | { |
1181 | } | 1310 | } |
@@ -1186,6 +1315,8 @@ static inline int dma_get_slave_caps(struct dma_chan *chan, | |||
1186 | } | 1315 | } |
1187 | #endif | 1316 | #endif |
1188 | 1317 | ||
1318 | #define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name) | ||
1319 | |||
1189 | static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) | 1320 | static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) |
1190 | { | 1321 | { |
1191 | struct dma_slave_caps caps; | 1322 | struct dma_slave_caps caps; |
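
dma_request_chan() folds the old dma_request_slave_channel_reason() into a single entry point that tries DT, then ACPI, then the new dma_slave_map tables, and reports failure as an ERR_PTR() (notably -EPROBE_DEFER) rather than NULL; dma_request_chan_by_mask() covers capability-only requests. A minimal consumer sketch:

	struct dma_chan *rx, *any;
	dma_cap_mask_t mask;

	rx = dma_request_chan(dev, "rx");
	if (IS_ERR(rx))
		return PTR_ERR(rx);	/* -EPROBE_DEFER propagates cleanly */

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	any = dma_request_chan_by_mask(&mask);
	if (IS_ERR(any))
		dev_warn(dev, "no memcpy-capable channel\n");

	/* ... */
	dma_release_channel(rx);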
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h index 88fa8af2b937..1d99b61adc65 100644 --- a/include/linux/omap-dma.h +++ b/include/linux/omap-dma.h | |||
@@ -267,6 +267,9 @@ struct omap_dma_reg { | |||
267 | u8 type; | 267 | u8 type; |
268 | }; | 268 | }; |
269 | 269 | ||
270 | #define SDMA_FILTER_PARAM(hw_req) ((int[]) { (hw_req) }) | ||
271 | struct dma_slave_map; | ||
272 | |||
270 | /* System DMA platform data structure */ | 273 | /* System DMA platform data structure */ |
271 | struct omap_system_dma_plat_info { | 274 | struct omap_system_dma_plat_info { |
272 | const struct omap_dma_reg *reg_map; | 275 | const struct omap_dma_reg *reg_map; |
@@ -278,6 +281,9 @@ struct omap_system_dma_plat_info { | |||
278 | void (*clear_dma)(int lch); | 281 | void (*clear_dma)(int lch); |
279 | void (*dma_write)(u32 val, int reg, int lch); | 282 | void (*dma_write)(u32 val, int reg, int lch); |
280 | u32 (*dma_read)(int reg, int lch); | 283 | u32 (*dma_read)(int reg, int lch); |
284 | |||
285 | const struct dma_slave_map *slave_map; | ||
286 | int slavecnt; | ||
281 | }; | 287 | }; |
282 | 288 | ||
283 | #ifdef CONFIG_ARCH_OMAP2PLUS | 289 | #ifdef CONFIG_ARCH_OMAP2PLUS |
diff --git a/include/linux/platform_data/dma-rcar-hpbdma.h b/include/linux/platform_data/dma-rcar-hpbdma.h deleted file mode 100644 index 648b8ea61a22..000000000000 --- a/include/linux/platform_data/dma-rcar-hpbdma.h +++ /dev/null | |||
@@ -1,103 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011-2013 Renesas Electronics Corporation | ||
3 | * Copyright (C) 2013 Cogent Embedded, Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #ifndef __DMA_RCAR_HPBDMA_H | ||
11 | #define __DMA_RCAR_HPBDMA_H | ||
12 | |||
13 | #include <linux/bitops.h> | ||
14 | #include <linux/types.h> | ||
15 | |||
16 | /* Transmit sizes and respective register values */ | ||
17 | enum { | ||
18 | XMIT_SZ_8BIT = 0, | ||
19 | XMIT_SZ_16BIT = 1, | ||
20 | XMIT_SZ_32BIT = 2, | ||
21 | XMIT_SZ_MAX | ||
22 | }; | ||
23 | |||
24 | /* DMA control register (DCR) bits */ | ||
25 | #define HPB_DMAE_DCR_DTAMD (1u << 26) | ||
26 | #define HPB_DMAE_DCR_DTAC (1u << 25) | ||
27 | #define HPB_DMAE_DCR_DTAU (1u << 24) | ||
28 | #define HPB_DMAE_DCR_DTAU1 (1u << 23) | ||
29 | #define HPB_DMAE_DCR_SWMD (1u << 22) | ||
30 | #define HPB_DMAE_DCR_BTMD (1u << 21) | ||
31 | #define HPB_DMAE_DCR_PKMD (1u << 20) | ||
32 | #define HPB_DMAE_DCR_CT (1u << 18) | ||
33 | #define HPB_DMAE_DCR_ACMD (1u << 17) | ||
34 | #define HPB_DMAE_DCR_DIP (1u << 16) | ||
35 | #define HPB_DMAE_DCR_SMDL (1u << 13) | ||
36 | #define HPB_DMAE_DCR_SPDAM (1u << 12) | ||
37 | #define HPB_DMAE_DCR_SDRMD_MASK (3u << 10) | ||
38 | #define HPB_DMAE_DCR_SDRMD_MOD (0u << 10) | ||
39 | #define HPB_DMAE_DCR_SDRMD_AUTO (1u << 10) | ||
40 | #define HPB_DMAE_DCR_SDRMD_TIMER (2u << 10) | ||
41 | #define HPB_DMAE_DCR_SPDS_MASK (3u << 8) | ||
42 | #define HPB_DMAE_DCR_SPDS_8BIT (0u << 8) | ||
43 | #define HPB_DMAE_DCR_SPDS_16BIT (1u << 8) | ||
44 | #define HPB_DMAE_DCR_SPDS_32BIT (2u << 8) | ||
45 | #define HPB_DMAE_DCR_DMDL (1u << 5) | ||
46 | #define HPB_DMAE_DCR_DPDAM (1u << 4) | ||
47 | #define HPB_DMAE_DCR_DDRMD_MASK (3u << 2) | ||
48 | #define HPB_DMAE_DCR_DDRMD_MOD (0u << 2) | ||
49 | #define HPB_DMAE_DCR_DDRMD_AUTO (1u << 2) | ||
50 | #define HPB_DMAE_DCR_DDRMD_TIMER (2u << 2) | ||
51 | #define HPB_DMAE_DCR_DPDS_MASK (3u << 0) | ||
52 | #define HPB_DMAE_DCR_DPDS_8BIT (0u << 0) | ||
53 | #define HPB_DMAE_DCR_DPDS_16BIT (1u << 0) | ||
54 | #define HPB_DMAE_DCR_DPDS_32BIT (2u << 0) | ||
55 | |||
56 | /* Asynchronous reset register (ASYNCRSTR) bits */ | ||
57 | #define HPB_DMAE_ASYNCRSTR_ASRST41 BIT(10) | ||
58 | #define HPB_DMAE_ASYNCRSTR_ASRST40 BIT(9) | ||
59 | #define HPB_DMAE_ASYNCRSTR_ASRST39 BIT(8) | ||
60 | #define HPB_DMAE_ASYNCRSTR_ASRST27 BIT(7) | ||
61 | #define HPB_DMAE_ASYNCRSTR_ASRST26 BIT(6) | ||
62 | #define HPB_DMAE_ASYNCRSTR_ASRST25 BIT(5) | ||
63 | #define HPB_DMAE_ASYNCRSTR_ASRST24 BIT(4) | ||
64 | #define HPB_DMAE_ASYNCRSTR_ASRST23 BIT(3) | ||
65 | #define HPB_DMAE_ASYNCRSTR_ASRST22 BIT(2) | ||
66 | #define HPB_DMAE_ASYNCRSTR_ASRST21 BIT(1) | ||
67 | #define HPB_DMAE_ASYNCRSTR_ASRST20 BIT(0) | ||
68 | |||
69 | struct hpb_dmae_slave_config { | ||
70 | unsigned int id; | ||
71 | dma_addr_t addr; | ||
72 | u32 dcr; | ||
73 | u32 port; | ||
74 | u32 rstr; | ||
75 | u32 mdr; | ||
76 | u32 mdm; | ||
77 | u32 flags; | ||
78 | #define HPB_DMAE_SET_ASYNC_RESET BIT(0) | ||
79 | #define HPB_DMAE_SET_ASYNC_MODE BIT(1) | ||
80 | u32 dma_ch; | ||
81 | }; | ||
82 | |||
83 | #define HPB_DMAE_CHANNEL(_irq, _s_id) \ | ||
84 | { \ | ||
85 | .ch_irq = _irq, \ | ||
86 | .s_id = _s_id, \ | ||
87 | } | ||
88 | |||
89 | struct hpb_dmae_channel { | ||
90 | unsigned int ch_irq; | ||
91 | unsigned int s_id; | ||
92 | }; | ||
93 | |||
94 | struct hpb_dmae_pdata { | ||
95 | const struct hpb_dmae_slave_config *slaves; | ||
96 | int num_slaves; | ||
97 | const struct hpb_dmae_channel *channels; | ||
98 | int num_channels; | ||
99 | const unsigned int ts_shift[XMIT_SZ_MAX]; | ||
100 | int num_hw_channels; | ||
101 | }; | ||
102 | |||
103 | #endif | ||
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index 4299f4ba03bd..0a533f94438f 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h | |||
@@ -53,12 +53,16 @@ enum dma_event_q { | |||
53 | #define EDMA_CTLR(i) ((i) >> 16) | 53 | #define EDMA_CTLR(i) ((i) >> 16) |
54 | #define EDMA_CHAN_SLOT(i) ((i) & 0xffff) | 54 | #define EDMA_CHAN_SLOT(i) ((i) & 0xffff) |
55 | 55 | ||
56 | #define EDMA_FILTER_PARAM(ctlr, chan) ((int[]) { EDMA_CTLR_CHAN(ctlr, chan) }) | ||
57 | |||
56 | struct edma_rsv_info { | 58 | struct edma_rsv_info { |
57 | 59 | ||
58 | const s16 (*rsv_chans)[2]; | 60 | const s16 (*rsv_chans)[2]; |
59 | const s16 (*rsv_slots)[2]; | 61 | const s16 (*rsv_slots)[2]; |
60 | }; | 62 | }; |
61 | 63 | ||
64 | struct dma_slave_map; | ||
65 | |||
62 | /* platform_data for EDMA driver */ | 66 | /* platform_data for EDMA driver */ |
63 | struct edma_soc_info { | 67 | struct edma_soc_info { |
64 | /* | 68 | /* |
@@ -76,6 +80,9 @@ struct edma_soc_info { | |||
76 | 80 | ||
77 | s8 (*queue_priority_mapping)[2]; | 81 | s8 (*queue_priority_mapping)[2]; |
78 | const s16 (*xbar_chans)[2]; | 82 | const s16 (*xbar_chans)[2]; |
83 | |||
84 | const struct dma_slave_map *slave_map; | ||
85 | int slavecnt; | ||
79 | }; | 86 | }; |
80 | 87 | ||
81 | #endif | 88 | #endif |
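
EDMA_FILTER_PARAM() packs a controller/channel pair into an anonymous int[] that can be used directly as the .param of a dma_slave_map entry, mirroring SDMA_FILTER_PARAM() on the OMAP side; the eDMA filter function recovers the request number from it. A hedged example of the platform-data wiring (peripheral names and channel numbers are illustrative only):

	static const struct dma_slave_map board_edma_map[] = {
		{ "davinci-mcasp.0", "tx", EDMA_FILTER_PARAM(0, 1) },
		{ "davinci-mcasp.0", "rx", EDMA_FILTER_PARAM(0, 0) },
	};

	static struct edma_soc_info board_edma_info = {
		/* ... */
		.slave_map = board_edma_map,
		.slavecnt = ARRAY_SIZE(board_edma_map),
	};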
diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c index fba365a78390..697c166acf05 100644 --- a/sound/core/pcm_dmaengine.c +++ b/sound/core/pcm_dmaengine.c | |||
@@ -202,13 +202,13 @@ int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | |||
202 | if (runtime->info & SNDRV_PCM_INFO_PAUSE) | 202 | if (runtime->info & SNDRV_PCM_INFO_PAUSE) |
203 | dmaengine_pause(prtd->dma_chan); | 203 | dmaengine_pause(prtd->dma_chan); |
204 | else | 204 | else |
205 | dmaengine_terminate_all(prtd->dma_chan); | 205 | dmaengine_terminate_async(prtd->dma_chan); |
206 | break; | 206 | break; |
207 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: | 207 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: |
208 | dmaengine_pause(prtd->dma_chan); | 208 | dmaengine_pause(prtd->dma_chan); |
209 | break; | 209 | break; |
210 | case SNDRV_PCM_TRIGGER_STOP: | 210 | case SNDRV_PCM_TRIGGER_STOP: |
211 | dmaengine_terminate_all(prtd->dma_chan); | 211 | dmaengine_terminate_async(prtd->dma_chan); |
212 | break; | 212 | break; |
213 | default: | 213 | default: |
214 | return -EINVAL; | 214 | return -EINVAL; |
@@ -346,6 +346,7 @@ int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream) | |||
346 | { | 346 | { |
347 | struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); | 347 | struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); |
348 | 348 | ||
349 | dmaengine_synchronize(prtd->dma_chan); | ||
349 | kfree(prtd); | 350 | kfree(prtd); |
350 | 351 | ||
351 | return 0; | 352 | return 0; |
@@ -362,9 +363,11 @@ int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream) | |||
362 | { | 363 | { |
363 | struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); | 364 | struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); |
364 | 365 | ||
366 | dmaengine_synchronize(prtd->dma_chan); | ||
365 | dma_release_channel(prtd->dma_chan); | 367 | dma_release_channel(prtd->dma_chan); |
368 | kfree(prtd); | ||
366 | 369 | ||
367 | return snd_dmaengine_pcm_close(substream); | 370 | return 0; |
368 | } | 371 | } |
369 | EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan); | 372 | EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan); |
370 | 373 | ||
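
The pcm_dmaengine hunks are the canonical user of the split API: snd_dmaengine_pcm_trigger() runs in atomic context and therefore switches to dmaengine_terminate_async(), while the guarantee that no callback still dereferences prtd is deferred to the close paths, which may sleep. The release-path ordering matters; roughly:

	/* sleepable close path */
	dmaengine_synchronize(prtd->dma_chan);	/* callbacks done, prtd is safe */
	dma_release_channel(prtd->dma_chan);
	kfree(prtd);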