author     Linus Torvalds <torvalds@linux-foundation.org>   2016-05-19 14:47:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-05-19 14:47:18 -0400
commit     a0d3c7c5c07cfbe00ab89438ddf82482f5a99422
tree       560def78af776bef5d0d0202580c4be0fc6219c6
parent     ec67b14c1be4ebe4cf08f06746a8d0313ab85432
parent     f9114a54c1d828abbe87ac446a2da49d9720203f
Merge tag 'dmaengine-4.7-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
"This time round the update brings in following changes:
- new tegra driver for ADMA device
- support for Xilinx AXI Direct Memory Access Engine and Xilinx AXI
Central Direct Memory Access Engine and few updates to this driver
- new cyclic capability to sun6i and few updates
- slave-sg support in bcm2835
- updates to many drivers like designware, hsu, mv_xor, pxa, edma,
qcom_hidma & bam"
* tag 'dmaengine-4.7-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (84 commits)
dmaengine: ioatdma: disable relaxed ordering for ioatdma
dmaengine: of_dma: approximate an average distribution
dmaengine: core: Use IS_ENABLED() instead of checking for built-in or module
dmaengine: edma: Re-evaluate errors when ccerr is triggered w/o error event
dmaengine: qcom_hidma: add support for object hierarchy
dmaengine: qcom_hidma: add debugfs hooks
dmaengine: qcom_hidma: implement lower level hardware interface
dmaengine: vdma: Add clock support
Documentation: DT: vdma: Add clock support for dmas
dmaengine: vdma: Add config structure to differentiate dmas
MAINTAINERS: Update Tegra DMA maintainers
dmaengine: tegra-adma: Add support for Tegra210 ADMA
Documentation: DT: Add binding documentation for NVIDIA ADMA
dmaengine: vdma: Add Support for Xilinx AXI Central Direct Memory Access Engine
Documentation: DT: vdma: update binding doc for AXI CDMA
dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine
Documentation: DT: vdma: update binding doc for AXI DMA
dmaengine: vdma: Rename xilinx_vdma_ prefix to xilinx_dma
dmaengine: slave means at least one of DMA_SLAVE, DMA_CYCLIC
dmaengine: mv_xor: Allow selecting mv_xor for mvebu only compatible SoC
...
53 files changed, 4959 insertions, 1114 deletions
diff --git a/Documentation/ABI/testing/sysfs-platform-hidma b/Documentation/ABI/testing/sysfs-platform-hidma
new file mode 100644
index 000000000000..d36441538660
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-hidma
@@ -0,0 +1,9 @@ | |||
1 | What: /sys/devices/platform/hidma-*/chid | ||
2 | /sys/devices/platform/QCOM8061:*/chid | ||
3 | Date: Dec 2015 | ||
4 | KernelVersion: 4.4 | ||
5 | Contact: "Sinan Kaya <okaya@cudeaurora.org>" | ||
6 | Description: | ||
7 | Contains the ID of the channel within the HIDMA instance. | ||
8 | It is used to associate a given HIDMA channel with the | ||
9 | priority and weight calls in the management interface. | ||
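For context, the attribute above is plain text and can be read like any other sysfs file. A minimal user-space sketch is shown below; the instance name "hidma-0" is an assumption, since real systems enumerate hidma-* or QCOM8061:* instances.

#include <stdio.h>

int main(void)
{
        unsigned int chid;
        /* hypothetical instance name; match it against hidma-* in practice */
        FILE *f = fopen("/sys/devices/platform/hidma-0/chid", "r");

        if (!f)
                return 1;
        if (fscanf(f, "%u", &chid) == 1)
                printf("HIDMA channel id: %u\n", chid);
        fclose(f);
        return 0;
}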
diff --git a/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt b/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt
index 1396078d15ac..baf9b34d20bf 100644
--- a/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt
+++ b/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt
@@ -12,6 +12,10 @@ Required properties: | |||
12 | - reg: Should contain DMA registers location and length. | 12 | - reg: Should contain DMA registers location and length. |
13 | - interrupts: Should contain the DMA interrupts associated | 13 | - interrupts: Should contain the DMA interrupts associated |
14 | to the DMA channels in ascending order. | 14 | to the DMA channels in ascending order. |
15 | - interrupt-names: Should contain the names of the interrupt | ||
16 | in the form "dmaXX". | ||
17 | Use "dma-shared-all" for the common interrupt line | ||
18 | that is shared by all dma channels. | ||
15 | - #dma-cells: Must be <1>, the cell in the dmas property of the | 19 | - #dma-cells: Must be <1>, the cell in the dmas property of the |
16 | client device represents the DREQ number. | 20 | client device represents the DREQ number. |
17 | - brcm,dma-channel-mask: Bit mask representing the channels | 21 | - brcm,dma-channel-mask: Bit mask representing the channels |
@@ -34,13 +38,35 @@ dma: dma@7e007000 { | |||
34 | <1 24>, | 38 | <1 24>, |
35 | <1 25>, | 39 | <1 25>, |
36 | <1 26>, | 40 | <1 26>, |
41 | /* dma channel 11-14 share one irq */ | ||
37 | <1 27>, | 42 | <1 27>, |
43 | <1 27>, | ||
44 | <1 27>, | ||
45 | <1 27>, | ||
46 | /* unused shared irq for all channels */ | ||
38 | <1 28>; | 47 | <1 28>; |
48 | interrupt-names = "dma0", | ||
49 | "dma1", | ||
50 | "dma2", | ||
51 | "dma3", | ||
52 | "dma4", | ||
53 | "dma5", | ||
54 | "dma6", | ||
55 | "dma7", | ||
56 | "dma8", | ||
57 | "dma9", | ||
58 | "dma10", | ||
59 | "dma11", | ||
60 | "dma12", | ||
61 | "dma13", | ||
62 | "dma14", | ||
63 | "dma-shared-all"; | ||
39 | 64 | ||
40 | #dma-cells = <1>; | 65 | #dma-cells = <1>; |
41 | brcm,dma-channel-mask = <0x7f35>; | 66 | brcm,dma-channel-mask = <0x7f35>; |
42 | }; | 67 | }; |
43 | 68 | ||
69 | |||
44 | DMA clients connected to the BCM2835 DMA controller must use the format | 70 | DMA clients connected to the BCM2835 DMA controller must use the format |
45 | described in the dma.txt file, using a two-cell specifier for each channel. | 71 | described in the dma.txt file, using a two-cell specifier for each channel. |
46 | 72 | ||
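The client side referenced above is ordinary dmaengine usage rather than anything this patch adds. A hedged sketch of a driver consuming one of these channels, assuming a client node with dmas = <&dma 5> (the DREQ number 5 is made up) and dma-names = "rx":

#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_request_rx_channel(struct device *dev, dma_addr_t fifo_addr,
                                      struct dma_chan **out)
{
        struct dma_slave_config cfg = {
                .src_addr = fifo_addr,          /* peripheral FIFO address */
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst = 1,
        };
        struct dma_chan *chan;
        int ret;

        chan = dma_request_chan(dev, "rx");     /* resolved via dmas/dma-names */
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        ret = dmaengine_slave_config(chan, &cfg);
        if (ret) {
                dma_release_channel(chan);
                return ret;
        }

        *out = chan;
        return 0;
}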
diff --git a/Documentation/devicetree/bindings/dma/mv-xor.txt b/Documentation/devicetree/bindings/dma/mv-xor.txt
index 276ef815ef32..c075f5988135 100644
--- a/Documentation/devicetree/bindings/dma/mv-xor.txt
+++ b/Documentation/devicetree/bindings/dma/mv-xor.txt
@@ -1,7 +1,10 @@ | |||
1 | * Marvell XOR engines | 1 | * Marvell XOR engines |
2 | 2 | ||
3 | Required properties: | 3 | Required properties: |
4 | - compatible: Should be "marvell,orion-xor" or "marvell,armada-380-xor" | 4 | - compatible: Should be one of the following: |
5 | - "marvell,orion-xor" | ||
6 | - "marvell,armada-380-xor" | ||
7 | - "marvell,armada-3700-xor". | ||
5 | - reg: Should contain registers location and length (two sets) | 8 | - reg: Should contain registers location and length (two sets) |
6 | the first set is the low registers, the second set the high | 9 | the first set is the low registers, the second set the high |
7 | registers for the XOR engine. | 10 | registers for the XOR engine. |
diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt
new file mode 100644
index 000000000000..1e1dc8f972e4
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt
@@ -0,0 +1,55 @@ | |||
1 | * NVIDIA Tegra Audio DMA (ADMA) controller | ||
2 | |||
3 | The Tegra Audio DMA controller that is used for transferring data | ||
4 | between system memory and the Audio Processing Engine (APE). | ||
5 | |||
6 | Required properties: | ||
7 | - compatible: Must be "nvidia,tegra210-adma". | ||
8 | - reg: Should contain DMA registers location and length. This should be | ||
9 | a single entry that includes all of the per-channel registers in one | ||
10 | contiguous bank. | ||
11 | - interrupt-parent: Phandle to the interrupt parent controller. | ||
12 | - interrupts: Should contain all of the per-channel DMA interrupts in | ||
13 | ascending order with respect to the DMA channel index. | ||
14 | - clocks: Must contain one entry for the ADMA module clock | ||
15 | (TEGRA210_CLK_D_AUDIO). | ||
16 | - clock-names: Must contain the name "d_audio" for the corresponding | ||
17 | 'clocks' entry. | ||
18 | - #dma-cells : Must be 1. The first cell denotes the receive/transmit | ||
19 | request number and should be between 1 and the maximum number of | ||
20 | requests supported. This value corresponds to the RX/TX_REQUEST_SELECT | ||
21 | fields in the ADMA_CHn_CTRL register. | ||
22 | |||
23 | |||
24 | Example: | ||
25 | |||
26 | adma: dma@702e2000 { | ||
27 | compatible = "nvidia,tegra210-adma"; | ||
28 | reg = <0x0 0x702e2000 0x0 0x2000>; | ||
29 | interrupt-parent = <&tegra_agic>; | ||
30 | interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, | ||
31 | <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, | ||
32 | <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, | ||
33 | <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, | ||
34 | <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, | ||
35 | <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, | ||
36 | <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, | ||
37 | <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, | ||
38 | <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>, | ||
39 | <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>, | ||
40 | <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, | ||
41 | <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>, | ||
42 | <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, | ||
43 | <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, | ||
44 | <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, | ||
45 | <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, | ||
46 | <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, | ||
47 | <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, | ||
48 | <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>, | ||
49 | <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>, | ||
50 | <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>, | ||
51 | <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>; | ||
52 | clocks = <&tegra_car TEGRA210_CLK_D_AUDIO>; | ||
53 | clock-names = "d_audio"; | ||
54 | #dma-cells = <1>; | ||
55 | }; | ||
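The ADMA channels described above are typically driven with cyclic descriptors by APE audio clients. A rough, hypothetical sketch using the generic dmaengine API (the channel, buffer and period sizes come from elsewhere and are not part of this binding):

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int example_start_cyclic_tx(struct dma_chan *chan, dma_addr_t buf,
                                   size_t buf_len, size_t period_len)
{
        struct dma_async_tx_descriptor *desc;

        desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                         DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
        if (!desc)
                return -EINVAL;

        dmaengine_submit(desc);          /* queue the descriptor */
        dma_async_issue_pending(chan);   /* start/continue the engine */
        return 0;
}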
diff --git a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
index 1c9d48ea4914..9cbf5d9df8fd 100644
--- a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
@@ -13,6 +13,8 @@ Required properties: | |||
13 | - clock-names: must contain "bam_clk" entry | 13 | - clock-names: must contain "bam_clk" entry |
14 | - qcom,ee : indicates the active Execution Environment identifier (0-7) used in | 14 | - qcom,ee : indicates the active Execution Environment identifier (0-7) used in |
15 | the secure world. | 15 | the secure world. |
16 | - qcom,controlled-remotely : optional, indicates that the bam is controlled by | ||
17 | remote proccessor i.e. execution environment. | ||
16 | 18 | ||
17 | Example: | 19 | Example: |
18 | 20 | ||
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index c261598164a7..0f5583293c9c 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -13,6 +13,11 @@ Required properties: | |||
13 | - chan_priority: priority of channels. 0 (default): increase from chan 0->n, 1: | 13 | - chan_priority: priority of channels. 0 (default): increase from chan 0->n, 1: |
14 | increase from chan n->0 | 14 | increase from chan n->0 |
15 | - block_size: Maximum block size supported by the controller | 15 | - block_size: Maximum block size supported by the controller |
16 | - data-width: Maximum data width supported by hardware per AHB master | ||
17 | (in bytes, power of 2) | ||
18 | |||
19 | |||
20 | Deprecated properties: | ||
16 | - data_width: Maximum data width supported by hardware per AHB master | 21 | - data_width: Maximum data width supported by hardware per AHB master |
17 | (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) | 22 | (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) |
18 | 23 | ||
@@ -38,7 +43,7 @@ Example: | |||
38 | chan_allocation_order = <1>; | 43 | chan_allocation_order = <1>; |
39 | chan_priority = <1>; | 44 | chan_priority = <1>; |
40 | block_size = <0xfff>; | 45 | block_size = <0xfff>; |
41 | data_width = <3 3>; | 46 | data-width = <8 8>; |
42 | }; | 47 | }; |
43 | 48 | ||
44 | DMA clients connected to the Designware DMA controller must use the format | 49 | DMA clients connected to the Designware DMA controller must use the format |
@@ -47,8 +52,8 @@ The four cells in order are: | |||
47 | 52 | ||
48 | 1. A phandle pointing to the DMA controller | 53 | 1. A phandle pointing to the DMA controller |
49 | 2. The DMA request line number | 54 | 2. The DMA request line number |
50 | 3. Source master for transfers on allocated channel | 55 | 3. Memory master for transfers on allocated channel |
51 | 4. Destination master for transfers on allocated channel | 56 | 4. Peripheral master for transfers on allocated channel |
52 | 57 | ||
53 | Example: | 58 | Example: |
54 | 59 | ||
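The deprecated encoded data_width and the new byte-based data-width differ only by a power of two, which is why the example switches from <3 3> to <8 8> (and the TB10x and SPEAr13xx device trees later in this series switch from <2> to <4> and from <3 3> to <8 8>). A small illustrative helper for the conversion; the function name is made up:

/*
 * Old encoding: 0 -> 8 bits, 1 -> 16 bits, ..., 5 -> 256 bits.
 * The new property is the width in bytes, so bytes = 1 << encoded;
 * e.g. the encoded value 3 (64-bit masters) becomes 8 bytes.
 */
static inline unsigned int dw_encoded_width_to_bytes(unsigned int encoded)
{
        return 1U << encoded;
}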
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt
index e4c4d47f8137..a1f2683c49bf 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt
@@ -3,18 +3,44 @@ It can be configured to have one channel or two channels. If configured | |||
3 | as two channels, one is to transmit to the video device and another is | 3 | as two channels, one is to transmit to the video device and another is |
4 | to receive from the video device. | 4 | to receive from the video device. |
5 | 5 | ||
6 | Xilinx AXI DMA engine, it does transfers between memory and AXI4 stream | ||
7 | target devices. It can be configured to have one channel or two channels. | ||
8 | If configured as two channels, one is to transmit to the device and another | ||
9 | is to receive from the device. | ||
10 | |||
11 | Xilinx AXI CDMA engine, it does transfers between memory-mapped source | ||
12 | address and a memory-mapped destination address. | ||
13 | |||
6 | Required properties: | 14 | Required properties: |
7 | - compatible: Should be "xlnx,axi-vdma-1.00.a" | 15 | - compatible: Should be "xlnx,axi-vdma-1.00.a" or "xlnx,axi-dma-1.00.a" or |
16 | "xlnx,axi-cdma-1.00.a"" | ||
8 | - #dma-cells: Should be <1>, see "dmas" property below | 17 | - #dma-cells: Should be <1>, see "dmas" property below |
9 | - reg: Should contain VDMA registers location and length. | 18 | - reg: Should contain VDMA registers location and length. |
10 | - xlnx,num-fstores: Should be the number of framebuffers as configured in h/w. | 19 | - xlnx,addrwidth: Should be the vdma addressing size in bits(ex: 32 bits). |
20 | - dma-ranges: Should be as the following <dma_addr cpu_addr max_len>. | ||
11 | - dma-channel child node: Should have at least one channel and can have up to | 21 | - dma-channel child node: Should have at least one channel and can have up to |
12 | two channels per device. This node specifies the properties of each | 22 | two channels per device. This node specifies the properties of each |
13 | DMA channel (see child node properties below). | 23 | DMA channel (see child node properties below). |
24 | - clocks: Input clock specifier. Refer to common clock bindings. | ||
25 | - clock-names: List of input clocks | ||
26 | For VDMA: | ||
27 | Required elements: "s_axi_lite_aclk" | ||
28 | Optional elements: "m_axi_mm2s_aclk" "m_axi_s2mm_aclk", | ||
29 | "m_axis_mm2s_aclk", "s_axis_s2mm_aclk" | ||
30 | For CDMA: | ||
31 | Required elements: "s_axi_lite_aclk", "m_axi_aclk" | ||
32 | FOR AXIDMA: | ||
33 | Required elements: "s_axi_lite_aclk" | ||
34 | Optional elements: "m_axi_mm2s_aclk", "m_axi_s2mm_aclk", | ||
35 | "m_axi_sg_aclk" | ||
36 | |||
37 | Required properties for VDMA: | ||
38 | - xlnx,num-fstores: Should be the number of framebuffers as configured in h/w. | ||
14 | 39 | ||
15 | Optional properties: | 40 | Optional properties: |
16 | - xlnx,include-sg: Tells configured for Scatter-mode in | 41 | - xlnx,include-sg: Tells configured for Scatter-mode in |
17 | the hardware. | 42 | the hardware. |
43 | Optional properties for VDMA: | ||
18 | - xlnx,flush-fsync: Tells which channel to Flush on Frame sync. | 44 | - xlnx,flush-fsync: Tells which channel to Flush on Frame sync. |
19 | It takes following values: | 45 | It takes following values: |
20 | {1}, flush both channels | 46 | {1}, flush both channels |
@@ -31,6 +57,7 @@ Required child node properties: | |||
31 | Optional child node properties: | 57 | Optional child node properties: |
32 | - xlnx,include-dre: Tells hardware is configured for Data | 58 | - xlnx,include-dre: Tells hardware is configured for Data |
33 | Realignment Engine. | 59 | Realignment Engine. |
60 | Optional child node properties for VDMA: | ||
34 | - xlnx,genlock-mode: Tells Genlock synchronization is | 61 | - xlnx,genlock-mode: Tells Genlock synchronization is |
35 | enabled/disabled in hardware. | 62 | enabled/disabled in hardware. |
36 | 63 | ||
@@ -41,8 +68,13 @@ axi_vdma_0: axivdma@40030000 { | |||
41 | compatible = "xlnx,axi-vdma-1.00.a"; | 68 | compatible = "xlnx,axi-vdma-1.00.a"; |
42 | #dma_cells = <1>; | 69 | #dma_cells = <1>; |
43 | reg = < 0x40030000 0x10000 >; | 70 | reg = < 0x40030000 0x10000 >; |
71 | dma-ranges = <0x00000000 0x00000000 0x40000000>; | ||
44 | xlnx,num-fstores = <0x8>; | 72 | xlnx,num-fstores = <0x8>; |
45 | xlnx,flush-fsync = <0x1>; | 73 | xlnx,flush-fsync = <0x1>; |
74 | xlnx,addrwidth = <0x20>; | ||
75 | clocks = <&clk 0>, <&clk 1>, <&clk 2>, <&clk 3>, <&clk 4>; | ||
76 | clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk", "m_axi_s2mm_aclk", | ||
77 | "m_axis_mm2s_aclk", "s_axis_s2mm_aclk"; | ||
46 | dma-channel@40030000 { | 78 | dma-channel@40030000 { |
47 | compatible = "xlnx,axi-vdma-mm2s-channel"; | 79 | compatible = "xlnx,axi-vdma-mm2s-channel"; |
48 | interrupts = < 0 54 4 >; | 80 | interrupts = < 0 54 4 >; |
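A client of the AXI DMA flavour described above submits ordinary slave scatter-gather descriptors through the generic dmaengine API. A hedged sketch follows; the channel is assumed to have been obtained elsewhere via dma_request_chan() against the node's dmas entry.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

static int example_axidma_receive(struct dma_chan *chan,
                                  struct scatterlist *sgl, unsigned int nents)
{
        struct dma_async_tx_descriptor *desc;

        /* stream -> memory (S2MM) direction */
        desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
                                       DMA_PREP_INTERRUPT);
        if (!desc)
                return -EINVAL;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}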
diff --git a/MAINTAINERS b/MAINTAINERS
index ef8a56e04af6..f825014a082a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11017,10 +11017,11 @@ M: Prashant Gaikwad <pgaikwad@nvidia.com> | |||
11017 | S: Supported | 11017 | S: Supported |
11018 | F: drivers/clk/tegra/ | 11018 | F: drivers/clk/tegra/ |
11019 | 11019 | ||
11020 | TEGRA DMA DRIVER | 11020 | TEGRA DMA DRIVERS |
11021 | M: Laxman Dewangan <ldewangan@nvidia.com> | 11021 | M: Laxman Dewangan <ldewangan@nvidia.com> |
11022 | M: Jon Hunter <jonathanh@nvidia.com> | ||
11022 | S: Supported | 11023 | S: Supported |
11023 | F: drivers/dma/tegra20-apb-dma.c | 11024 | F: drivers/dma/tegra* |
11024 | 11025 | ||
11025 | TEGRA I2C DRIVER | 11026 | TEGRA I2C DRIVER |
11026 | M: Laxman Dewangan <ldewangan@nvidia.com> | 11027 | M: Laxman Dewangan <ldewangan@nvidia.com> |
diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi
index 663671f22680..de53f5c3251c 100644
--- a/arch/arc/boot/dts/abilis_tb10x.dtsi
+++ b/arch/arc/boot/dts/abilis_tb10x.dtsi
@@ -126,7 +126,7 @@ | |||
126 | chan_allocation_order = <0>; | 126 | chan_allocation_order = <0>; |
127 | chan_priority = <1>; | 127 | chan_priority = <1>; |
128 | block_size = <0x7ff>; | 128 | block_size = <0x7ff>; |
129 | data_width = <2>; | 129 | data-width = <4>; |
130 | clocks = <&ahb_clk>; | 130 | clocks = <&ahb_clk>; |
131 | clock-names = "hclk"; | 131 | clock-names = "hclk"; |
132 | }; | 132 | }; |
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 31cc2f2ef040..10b27b912bac 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -48,9 +48,29 @@ | |||
48 | <1 24>, | 48 | <1 24>, |
49 | <1 25>, | 49 | <1 25>, |
50 | <1 26>, | 50 | <1 26>, |
51 | /* dma channel 11-14 share one irq */ | ||
51 | <1 27>, | 52 | <1 27>, |
53 | <1 27>, | ||
54 | <1 27>, | ||
55 | <1 27>, | ||
56 | /* unused shared irq for all channels */ | ||
52 | <1 28>; | 57 | <1 28>; |
53 | 58 | interrupt-names = "dma0", | |
59 | "dma1", | ||
60 | "dma2", | ||
61 | "dma3", | ||
62 | "dma4", | ||
63 | "dma5", | ||
64 | "dma6", | ||
65 | "dma7", | ||
66 | "dma8", | ||
67 | "dma9", | ||
68 | "dma10", | ||
69 | "dma11", | ||
70 | "dma12", | ||
71 | "dma13", | ||
72 | "dma14", | ||
73 | "dma-shared-all"; | ||
54 | #dma-cells = <1>; | 74 | #dma-cells = <1>; |
55 | brcm,dma-channel-mask = <0x7f35>; | 75 | brcm,dma-channel-mask = <0x7f35>; |
56 | }; | 76 | }; |
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 14594ce8c18a..449acf0d8272 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -117,7 +117,7 @@ | |||
117 | chan_priority = <1>; | 117 | chan_priority = <1>; |
118 | block_size = <0xfff>; | 118 | block_size = <0xfff>; |
119 | dma-masters = <2>; | 119 | dma-masters = <2>; |
120 | data_width = <3 3>; | 120 | data-width = <8 8>; |
121 | }; | 121 | }; |
122 | 122 | ||
123 | dma@eb000000 { | 123 | dma@eb000000 { |
@@ -133,7 +133,7 @@ | |||
133 | chan_allocation_order = <1>; | 133 | chan_allocation_order = <1>; |
134 | chan_priority = <1>; | 134 | chan_priority = <1>; |
135 | block_size = <0xfff>; | 135 | block_size = <0xfff>; |
136 | data_width = <3 3>; | 136 | data-width = <8 8>; |
137 | }; | 137 | }; |
138 | 138 | ||
139 | fsmc: flash@b0000000 { | 139 | fsmc: flash@b0000000 { |
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index bf445aa48282..00d6dcc1d9b6 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1365,8 +1365,8 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) | |||
1365 | slave->dma_dev = &dw_dmac0_device.dev; | 1365 | slave->dma_dev = &dw_dmac0_device.dev; |
1366 | slave->src_id = 0; | 1366 | slave->src_id = 0; |
1367 | slave->dst_id = 1; | 1367 | slave->dst_id = 1; |
1368 | slave->src_master = 1; | 1368 | slave->m_master = 1; |
1369 | slave->dst_master = 0; | 1369 | slave->p_master = 0; |
1370 | 1370 | ||
1371 | data->dma_slave = slave; | 1371 | data->dma_slave = slave; |
1372 | data->dma_filter = at32_mci_dma_filter; | 1372 | data->dma_filter = at32_mci_dma_filter; |
@@ -2061,16 +2061,16 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data, | |||
2061 | if (flags & AC97C_CAPTURE) { | 2061 | if (flags & AC97C_CAPTURE) { |
2062 | rx_dws->dma_dev = &dw_dmac0_device.dev; | 2062 | rx_dws->dma_dev = &dw_dmac0_device.dev; |
2063 | rx_dws->src_id = 3; | 2063 | rx_dws->src_id = 3; |
2064 | rx_dws->src_master = 0; | 2064 | rx_dws->m_master = 0; |
2065 | rx_dws->dst_master = 1; | 2065 | rx_dws->p_master = 1; |
2066 | } | 2066 | } |
2067 | 2067 | ||
2068 | /* Check if DMA slave interface for playback should be configured. */ | 2068 | /* Check if DMA slave interface for playback should be configured. */ |
2069 | if (flags & AC97C_PLAYBACK) { | 2069 | if (flags & AC97C_PLAYBACK) { |
2070 | tx_dws->dma_dev = &dw_dmac0_device.dev; | 2070 | tx_dws->dma_dev = &dw_dmac0_device.dev; |
2071 | tx_dws->dst_id = 4; | 2071 | tx_dws->dst_id = 4; |
2072 | tx_dws->src_master = 0; | 2072 | tx_dws->m_master = 0; |
2073 | tx_dws->dst_master = 1; | 2073 | tx_dws->p_master = 1; |
2074 | } | 2074 | } |
2075 | 2075 | ||
2076 | if (platform_device_add_data(pdev, data, | 2076 | if (platform_device_add_data(pdev, data, |
@@ -2141,8 +2141,8 @@ at32_add_device_abdac(unsigned int id, struct atmel_abdac_pdata *data) | |||
2141 | 2141 | ||
2142 | dws->dma_dev = &dw_dmac0_device.dev; | 2142 | dws->dma_dev = &dw_dmac0_device.dev; |
2143 | dws->dst_id = 2; | 2143 | dws->dst_id = 2; |
2144 | dws->src_master = 0; | 2144 | dws->m_master = 0; |
2145 | dws->dst_master = 1; | 2145 | dws->p_master = 1; |
2146 | 2146 | ||
2147 | if (platform_device_add_data(pdev, data, | 2147 | if (platform_device_add_data(pdev, data, |
2148 | sizeof(struct atmel_abdac_pdata))) | 2148 | sizeof(struct atmel_abdac_pdata))) |
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 902034991517..2cb6f7e04b5c 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -201,8 +201,8 @@ static struct sata_dwc_host_priv host_pvt; | |||
201 | static struct dw_dma_slave sata_dwc_dma_dws = { | 201 | static struct dw_dma_slave sata_dwc_dma_dws = { |
202 | .src_id = 0, | 202 | .src_id = 0, |
203 | .dst_id = 0, | 203 | .dst_id = 0, |
204 | .src_master = 0, | 204 | .m_master = 1, |
205 | .dst_master = 1, | 205 | .p_master = 0, |
206 | }; | 206 | }; |
207 | 207 | ||
208 | /* | 208 | /* |
@@ -1248,7 +1248,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
1248 | hsdev->dma->dev = &ofdev->dev; | 1248 | hsdev->dma->dev = &ofdev->dev; |
1249 | 1249 | ||
1250 | /* Initialize AHB DMAC */ | 1250 | /* Initialize AHB DMAC */ |
1251 | err = dw_dma_probe(hsdev->dma, NULL); | 1251 | err = dw_dma_probe(hsdev->dma); |
1252 | if (err) | 1252 | if (err) |
1253 | goto error_dma_iomap; | 1253 | goto error_dma_iomap; |
1254 | 1254 | ||
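The two drivers above illustrate the direction of the dw_dmac interface change in this series: the slave parameters now name the memory-side and peripheral-side AHB masters explicitly, and dw_dma_probe() takes only the chip pointer, with platform data discovered from DT/ACPI or attached to the chip structure. A condensed sketch of the new style, reusing the values from the sata_dwc hunk (the helper name is made up and error handling is trimmed):

#include <linux/dma/dw.h>
#include <linux/platform_data/dma-dw.h>

struct dw_dma_slave example_dws = {
        .src_id   = 0,
        .dst_id   = 0,
        .m_master = 1,  /* AHB master used for the memory side */
        .p_master = 0,  /* AHB master used for the peripheral side */
};

static int example_init_dw(struct dw_dma_chip *chip)
{
        /* no second (platform data) argument any more */
        return dw_dma_probe(chip);
}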
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d96d87c56f2e..8c98779a12b1 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -332,7 +332,7 @@ config MPC512X_DMA | |||
332 | 332 | ||
333 | config MV_XOR | 333 | config MV_XOR |
334 | bool "Marvell XOR engine support" | 334 | bool "Marvell XOR engine support" |
335 | depends on PLAT_ORION | 335 | depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST |
336 | select DMA_ENGINE | 336 | select DMA_ENGINE |
337 | select DMA_ENGINE_RAID | 337 | select DMA_ENGINE_RAID |
338 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH | 338 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH |
@@ -467,6 +467,20 @@ config TEGRA20_APB_DMA | |||
467 | This DMA controller transfers data from memory to peripheral fifo | 467 | This DMA controller transfers data from memory to peripheral fifo |
468 | or vice versa. It does not support memory to memory data transfer. | 468 | or vice versa. It does not support memory to memory data transfer. |
469 | 469 | ||
470 | config TEGRA210_ADMA | ||
471 | bool "NVIDIA Tegra210 ADMA support" | ||
472 | depends on ARCH_TEGRA_210_SOC | ||
473 | select DMA_ENGINE | ||
474 | select DMA_VIRTUAL_CHANNELS | ||
475 | select PM_CLK | ||
476 | help | ||
477 | Support for the NVIDIA Tegra210 ADMA controller driver. The | ||
478 | DMA controller has multiple DMA channels and is used to service | ||
479 | various audio clients in the Tegra210 audio processing engine | ||
480 | (APE). This DMA controller transfers data from memory to | ||
481 | peripheral and vice versa. It does not support memory to | ||
482 | memory data transfer. | ||
483 | |||
470 | config TIMB_DMA | 484 | config TIMB_DMA |
471 | tristate "Timberdale FPGA DMA support" | 485 | tristate "Timberdale FPGA DMA support" |
472 | depends on MFD_TIMBERDALE | 486 | depends on MFD_TIMBERDALE |
@@ -507,7 +521,7 @@ config XGENE_DMA | |||
507 | 521 | ||
508 | config XILINX_VDMA | 522 | config XILINX_VDMA |
509 | tristate "Xilinx AXI VDMA Engine" | 523 | tristate "Xilinx AXI VDMA Engine" |
510 | depends on (ARCH_ZYNQ || MICROBLAZE) | 524 | depends on (ARCH_ZYNQ || MICROBLAZE || ARM64) |
511 | select DMA_ENGINE | 525 | select DMA_ENGINE |
512 | help | 526 | help |
513 | Enable support for Xilinx AXI VDMA Soft IP. | 527 | Enable support for Xilinx AXI VDMA Soft IP. |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 6084127c1486..614f28b0b739 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_STM32_DMA) += stm32-dma.o | |||
59 | obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o | 59 | obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o |
60 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o | 60 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o |
61 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o | 61 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o |
62 | obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o | ||
62 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o | 63 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o |
63 | obj-$(CONFIG_TI_CPPI41) += cppi41.o | 64 | obj-$(CONFIG_TI_CPPI41) += cppi41.o |
64 | obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o | 65 | obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 9b42c0588550..81db1c4811ce 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -107,16 +107,20 @@ struct pl08x_driver_data; | |||
107 | /** | 107 | /** |
108 | * struct vendor_data - vendor-specific config parameters for PL08x derivatives | 108 | * struct vendor_data - vendor-specific config parameters for PL08x derivatives |
109 | * @channels: the number of channels available in this variant | 109 | * @channels: the number of channels available in this variant |
110 | * @signals: the number of request signals available from the hardware | ||
110 | * @dualmaster: whether this version supports dual AHB masters or not. | 111 | * @dualmaster: whether this version supports dual AHB masters or not. |
111 | * @nomadik: whether the channels have Nomadik security extension bits | 112 | * @nomadik: whether the channels have Nomadik security extension bits |
112 | * that need to be checked for permission before use and some registers are | 113 | * that need to be checked for permission before use and some registers are |
113 | * missing | 114 | * missing |
114 | * @pl080s: whether this version is a PL080S, which has separate register and | 115 | * @pl080s: whether this version is a PL080S, which has separate register and |
115 | * LLI word for transfer size. | 116 | * LLI word for transfer size. |
117 | * @max_transfer_size: the maximum single element transfer size for this | ||
118 | * PL08x variant. | ||
116 | */ | 119 | */ |
117 | struct vendor_data { | 120 | struct vendor_data { |
118 | u8 config_offset; | 121 | u8 config_offset; |
119 | u8 channels; | 122 | u8 channels; |
123 | u8 signals; | ||
120 | bool dualmaster; | 124 | bool dualmaster; |
121 | bool nomadik; | 125 | bool nomadik; |
122 | bool pl080s; | 126 | bool pl080s; |
@@ -235,7 +239,7 @@ struct pl08x_dma_chan { | |||
235 | struct virt_dma_chan vc; | 239 | struct virt_dma_chan vc; |
236 | struct pl08x_phy_chan *phychan; | 240 | struct pl08x_phy_chan *phychan; |
237 | const char *name; | 241 | const char *name; |
238 | const struct pl08x_channel_data *cd; | 242 | struct pl08x_channel_data *cd; |
239 | struct dma_slave_config cfg; | 243 | struct dma_slave_config cfg; |
240 | struct pl08x_txd *at; | 244 | struct pl08x_txd *at; |
241 | struct pl08x_driver_data *host; | 245 | struct pl08x_driver_data *host; |
@@ -1909,6 +1913,12 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1909 | 1913 | ||
1910 | if (slave) { | 1914 | if (slave) { |
1911 | chan->cd = &pl08x->pd->slave_channels[i]; | 1915 | chan->cd = &pl08x->pd->slave_channels[i]; |
1916 | /* | ||
1917 | * Some implementations have muxed signals, whereas some | ||
1918 | * use a mux in front of the signals and need dynamic | ||
1919 | * assignment of signals. | ||
1920 | */ | ||
1921 | chan->signal = i; | ||
1912 | pl08x_dma_slave_init(chan); | 1922 | pl08x_dma_slave_init(chan); |
1913 | } else { | 1923 | } else { |
1914 | chan->cd = &pl08x->pd->memcpy_channel; | 1924 | chan->cd = &pl08x->pd->memcpy_channel; |
@@ -2050,40 +2060,33 @@ static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec, | |||
2050 | struct of_dma *ofdma) | 2060 | struct of_dma *ofdma) |
2051 | { | 2061 | { |
2052 | struct pl08x_driver_data *pl08x = ofdma->of_dma_data; | 2062 | struct pl08x_driver_data *pl08x = ofdma->of_dma_data; |
2053 | struct pl08x_channel_data *data; | ||
2054 | struct pl08x_dma_chan *chan; | ||
2055 | struct dma_chan *dma_chan; | 2063 | struct dma_chan *dma_chan; |
2064 | struct pl08x_dma_chan *plchan; | ||
2056 | 2065 | ||
2057 | if (!pl08x) | 2066 | if (!pl08x) |
2058 | return NULL; | 2067 | return NULL; |
2059 | 2068 | ||
2060 | if (dma_spec->args_count != 2) | 2069 | if (dma_spec->args_count != 2) { |
2070 | dev_err(&pl08x->adev->dev, | ||
2071 | "DMA channel translation requires two cells\n"); | ||
2061 | return NULL; | 2072 | return NULL; |
2073 | } | ||
2062 | 2074 | ||
2063 | dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]); | 2075 | dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]); |
2064 | if (dma_chan) | 2076 | if (!dma_chan) { |
2065 | return dma_get_slave_channel(dma_chan); | 2077 | dev_err(&pl08x->adev->dev, |
2066 | 2078 | "DMA slave channel not found\n"); | |
2067 | chan = devm_kzalloc(pl08x->slave.dev, sizeof(*chan) + sizeof(*data), | ||
2068 | GFP_KERNEL); | ||
2069 | if (!chan) | ||
2070 | return NULL; | 2079 | return NULL; |
2080 | } | ||
2071 | 2081 | ||
2072 | data = (void *)&chan[1]; | 2082 | plchan = to_pl08x_chan(dma_chan); |
2073 | data->bus_id = "(none)"; | 2083 | dev_dbg(&pl08x->adev->dev, |
2074 | data->periph_buses = dma_spec->args[1]; | 2084 | "translated channel for signal %d\n", |
2075 | 2085 | dma_spec->args[0]); | |
2076 | chan->cd = data; | ||
2077 | chan->host = pl08x; | ||
2078 | chan->slave = true; | ||
2079 | chan->name = data->bus_id; | ||
2080 | chan->state = PL08X_CHAN_IDLE; | ||
2081 | chan->signal = dma_spec->args[0]; | ||
2082 | chan->vc.desc_free = pl08x_desc_free; | ||
2083 | |||
2084 | vchan_init(&chan->vc, &pl08x->slave); | ||
2085 | 2086 | ||
2086 | return dma_get_slave_channel(&chan->vc.chan); | 2087 | /* Augment channel data for applicable AHB buses */ |
2088 | plchan->cd->periph_buses = dma_spec->args[1]; | ||
2089 | return dma_get_slave_channel(dma_chan); | ||
2087 | } | 2090 | } |
2088 | 2091 | ||
2089 | static int pl08x_of_probe(struct amba_device *adev, | 2092 | static int pl08x_of_probe(struct amba_device *adev, |
@@ -2091,9 +2094,11 @@ static int pl08x_of_probe(struct amba_device *adev, | |||
2091 | struct device_node *np) | 2094 | struct device_node *np) |
2092 | { | 2095 | { |
2093 | struct pl08x_platform_data *pd; | 2096 | struct pl08x_platform_data *pd; |
2097 | struct pl08x_channel_data *chanp = NULL; | ||
2094 | u32 cctl_memcpy = 0; | 2098 | u32 cctl_memcpy = 0; |
2095 | u32 val; | 2099 | u32 val; |
2096 | int ret; | 2100 | int ret; |
2101 | int i; | ||
2097 | 2102 | ||
2098 | pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL); | 2103 | pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL); |
2099 | if (!pd) | 2104 | if (!pd) |
@@ -2195,6 +2200,27 @@ static int pl08x_of_probe(struct amba_device *adev, | |||
2195 | /* Use the buses that can access memory, obviously */ | 2200 | /* Use the buses that can access memory, obviously */ |
2196 | pd->memcpy_channel.periph_buses = pd->mem_buses; | 2201 | pd->memcpy_channel.periph_buses = pd->mem_buses; |
2197 | 2202 | ||
2203 | /* | ||
2204 | * Allocate channel data for all possible slave channels (one | ||
2205 | * for each possible signal), channels will then be allocated | ||
2206 | * for a device and have it's AHB interfaces set up at | ||
2207 | * translation time. | ||
2208 | */ | ||
2209 | chanp = devm_kcalloc(&adev->dev, | ||
2210 | pl08x->vd->signals, | ||
2211 | sizeof(struct pl08x_channel_data), | ||
2212 | GFP_KERNEL); | ||
2213 | if (!chanp) | ||
2214 | return -ENOMEM; | ||
2215 | |||
2216 | pd->slave_channels = chanp; | ||
2217 | for (i = 0; i < pl08x->vd->signals; i++) { | ||
2218 | /* chanp->periph_buses will be assigned at translation */ | ||
2219 | chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i); | ||
2220 | chanp++; | ||
2221 | } | ||
2222 | pd->num_slave_channels = pl08x->vd->signals; | ||
2223 | |||
2198 | pl08x->pd = pd; | 2224 | pl08x->pd = pd; |
2199 | 2225 | ||
2200 | return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate, | 2226 | return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate, |
@@ -2234,6 +2260,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
2234 | goto out_no_pl08x; | 2260 | goto out_no_pl08x; |
2235 | } | 2261 | } |
2236 | 2262 | ||
2263 | /* Assign useful pointers to the driver state */ | ||
2264 | pl08x->adev = adev; | ||
2265 | pl08x->vd = vd; | ||
2266 | |||
2237 | /* Initialize memcpy engine */ | 2267 | /* Initialize memcpy engine */ |
2238 | dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); | 2268 | dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); |
2239 | pl08x->memcpy.dev = &adev->dev; | 2269 | pl08x->memcpy.dev = &adev->dev; |
@@ -2284,10 +2314,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
2284 | } | 2314 | } |
2285 | } | 2315 | } |
2286 | 2316 | ||
2287 | /* Assign useful pointers to the driver state */ | ||
2288 | pl08x->adev = adev; | ||
2289 | pl08x->vd = vd; | ||
2290 | |||
2291 | /* By default, AHB1 only. If dualmaster, from platform */ | 2317 | /* By default, AHB1 only. If dualmaster, from platform */ |
2292 | pl08x->lli_buses = PL08X_AHB1; | 2318 | pl08x->lli_buses = PL08X_AHB1; |
2293 | pl08x->mem_buses = PL08X_AHB1; | 2319 | pl08x->mem_buses = PL08X_AHB1; |
@@ -2438,6 +2464,7 @@ out_no_pl08x: | |||
2438 | static struct vendor_data vendor_pl080 = { | 2464 | static struct vendor_data vendor_pl080 = { |
2439 | .config_offset = PL080_CH_CONFIG, | 2465 | .config_offset = PL080_CH_CONFIG, |
2440 | .channels = 8, | 2466 | .channels = 8, |
2467 | .signals = 16, | ||
2441 | .dualmaster = true, | 2468 | .dualmaster = true, |
2442 | .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, | 2469 | .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, |
2443 | }; | 2470 | }; |
@@ -2445,6 +2472,7 @@ static struct vendor_data vendor_pl080 = { | |||
2445 | static struct vendor_data vendor_nomadik = { | 2472 | static struct vendor_data vendor_nomadik = { |
2446 | .config_offset = PL080_CH_CONFIG, | 2473 | .config_offset = PL080_CH_CONFIG, |
2447 | .channels = 8, | 2474 | .channels = 8, |
2475 | .signals = 32, | ||
2448 | .dualmaster = true, | 2476 | .dualmaster = true, |
2449 | .nomadik = true, | 2477 | .nomadik = true, |
2450 | .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, | 2478 | .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, |
@@ -2453,6 +2481,7 @@ static struct vendor_data vendor_nomadik = { | |||
2453 | static struct vendor_data vendor_pl080s = { | 2481 | static struct vendor_data vendor_pl080s = { |
2454 | .config_offset = PL080S_CH_CONFIG, | 2482 | .config_offset = PL080S_CH_CONFIG, |
2455 | .channels = 8, | 2483 | .channels = 8, |
2484 | .signals = 32, | ||
2456 | .pl080s = true, | 2485 | .pl080s = true, |
2457 | .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK, | 2486 | .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK, |
2458 | }; | 2487 | }; |
@@ -2460,6 +2489,7 @@ static struct vendor_data vendor_pl080s = { | |||
2460 | static struct vendor_data vendor_pl081 = { | 2489 | static struct vendor_data vendor_pl081 = { |
2461 | .config_offset = PL080_CH_CONFIG, | 2490 | .config_offset = PL080_CH_CONFIG, |
2462 | .channels = 2, | 2491 | .channels = 2, |
2492 | .signals = 16, | ||
2463 | .dualmaster = false, | 2493 | .dualmaster = false, |
2464 | .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, | 2494 | .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, |
2465 | }; | 2495 | }; |
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 996c4b00d323..6149b27c33ad 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -46,6 +46,9 @@ | |||
46 | 46 | ||
47 | #include "virt-dma.h" | 47 | #include "virt-dma.h" |
48 | 48 | ||
49 | #define BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED 14 | ||
50 | #define BCM2835_DMA_CHAN_NAME_SIZE 8 | ||
51 | |||
49 | struct bcm2835_dmadev { | 52 | struct bcm2835_dmadev { |
50 | struct dma_device ddev; | 53 | struct dma_device ddev; |
51 | spinlock_t lock; | 54 | spinlock_t lock; |
@@ -73,7 +76,6 @@ struct bcm2835_chan { | |||
73 | struct list_head node; | 76 | struct list_head node; |
74 | 77 | ||
75 | struct dma_slave_config cfg; | 78 | struct dma_slave_config cfg; |
76 | bool cyclic; | ||
77 | unsigned int dreq; | 79 | unsigned int dreq; |
78 | 80 | ||
79 | int ch; | 81 | int ch; |
@@ -82,6 +84,9 @@ struct bcm2835_chan { | |||
82 | 84 | ||
83 | void __iomem *chan_base; | 85 | void __iomem *chan_base; |
84 | int irq_number; | 86 | int irq_number; |
87 | unsigned int irq_flags; | ||
88 | |||
89 | bool is_lite_channel; | ||
85 | }; | 90 | }; |
86 | 91 | ||
87 | struct bcm2835_desc { | 92 | struct bcm2835_desc { |
@@ -89,47 +94,104 @@ struct bcm2835_desc { | |||
89 | struct virt_dma_desc vd; | 94 | struct virt_dma_desc vd; |
90 | enum dma_transfer_direction dir; | 95 | enum dma_transfer_direction dir; |
91 | 96 | ||
92 | struct bcm2835_cb_entry *cb_list; | ||
93 | |||
94 | unsigned int frames; | 97 | unsigned int frames; |
95 | size_t size; | 98 | size_t size; |
99 | |||
100 | bool cyclic; | ||
101 | |||
102 | struct bcm2835_cb_entry cb_list[]; | ||
96 | }; | 103 | }; |
97 | 104 | ||
98 | #define BCM2835_DMA_CS 0x00 | 105 | #define BCM2835_DMA_CS 0x00 |
99 | #define BCM2835_DMA_ADDR 0x04 | 106 | #define BCM2835_DMA_ADDR 0x04 |
107 | #define BCM2835_DMA_TI 0x08 | ||
100 | #define BCM2835_DMA_SOURCE_AD 0x0c | 108 | #define BCM2835_DMA_SOURCE_AD 0x0c |
101 | #define BCM2835_DMA_DEST_AD 0x10 | 109 | #define BCM2835_DMA_DEST_AD 0x10 |
102 | #define BCM2835_DMA_NEXTCB 0x1C | 110 | #define BCM2835_DMA_LEN 0x14 |
111 | #define BCM2835_DMA_STRIDE 0x18 | ||
112 | #define BCM2835_DMA_NEXTCB 0x1c | ||
113 | #define BCM2835_DMA_DEBUG 0x20 | ||
103 | 114 | ||
104 | /* DMA CS Control and Status bits */ | 115 | /* DMA CS Control and Status bits */ |
105 | #define BCM2835_DMA_ACTIVE BIT(0) | 116 | #define BCM2835_DMA_ACTIVE BIT(0) /* activate the DMA */ |
106 | #define BCM2835_DMA_INT BIT(2) | 117 | #define BCM2835_DMA_END BIT(1) /* current CB has ended */ |
118 | #define BCM2835_DMA_INT BIT(2) /* interrupt status */ | ||
119 | #define BCM2835_DMA_DREQ BIT(3) /* DREQ state */ | ||
107 | #define BCM2835_DMA_ISPAUSED BIT(4) /* Pause requested or not active */ | 120 | #define BCM2835_DMA_ISPAUSED BIT(4) /* Pause requested or not active */ |
108 | #define BCM2835_DMA_ISHELD BIT(5) /* Is held by DREQ flow control */ | 121 | #define BCM2835_DMA_ISHELD BIT(5) /* Is held by DREQ flow control */ |
109 | #define BCM2835_DMA_ERR BIT(8) | 122 | #define BCM2835_DMA_WAITING_FOR_WRITES BIT(6) /* waiting for last |
123 | * AXI-write to ack | ||
124 | */ | ||
125 | #define BCM2835_DMA_ERR BIT(8) | ||
126 | #define BCM2835_DMA_PRIORITY(x) ((x & 15) << 16) /* AXI priority */ | ||
127 | #define BCM2835_DMA_PANIC_PRIORITY(x) ((x & 15) << 20) /* panic priority */ | ||
128 | /* current value of TI.BCM2835_DMA_WAIT_RESP */ | ||
129 | #define BCM2835_DMA_WAIT_FOR_WRITES BIT(28) | ||
130 | #define BCM2835_DMA_DIS_DEBUG BIT(29) /* disable debug pause signal */ | ||
110 | #define BCM2835_DMA_ABORT BIT(30) /* Stop current CB, go to next, WO */ | 131 | #define BCM2835_DMA_ABORT BIT(30) /* Stop current CB, go to next, WO */ |
111 | #define BCM2835_DMA_RESET BIT(31) /* WO, self clearing */ | 132 | #define BCM2835_DMA_RESET BIT(31) /* WO, self clearing */ |
112 | 133 | ||
134 | /* Transfer information bits - also bcm2835_cb.info field */ | ||
113 | #define BCM2835_DMA_INT_EN BIT(0) | 135 | #define BCM2835_DMA_INT_EN BIT(0) |
136 | #define BCM2835_DMA_TDMODE BIT(1) /* 2D-Mode */ | ||
137 | #define BCM2835_DMA_WAIT_RESP BIT(3) /* wait for AXI-write to be acked */ | ||
114 | #define BCM2835_DMA_D_INC BIT(4) | 138 | #define BCM2835_DMA_D_INC BIT(4) |
115 | #define BCM2835_DMA_D_DREQ BIT(6) | 139 | #define BCM2835_DMA_D_WIDTH BIT(5) /* 128bit writes if set */ |
140 | #define BCM2835_DMA_D_DREQ BIT(6) /* enable DREQ for destination */ | ||
141 | #define BCM2835_DMA_D_IGNORE BIT(7) /* ignore destination writes */ | ||
116 | #define BCM2835_DMA_S_INC BIT(8) | 142 | #define BCM2835_DMA_S_INC BIT(8) |
117 | #define BCM2835_DMA_S_DREQ BIT(10) | 143 | #define BCM2835_DMA_S_WIDTH BIT(9) /* 128bit writes if set */ |
118 | 144 | #define BCM2835_DMA_S_DREQ BIT(10) /* enable SREQ for source */ | |
119 | #define BCM2835_DMA_PER_MAP(x) ((x) << 16) | 145 | #define BCM2835_DMA_S_IGNORE BIT(11) /* ignore source reads - read 0 */ |
146 | #define BCM2835_DMA_BURST_LENGTH(x) ((x & 15) << 12) | ||
147 | #define BCM2835_DMA_PER_MAP(x) ((x & 31) << 16) /* REQ source */ | ||
148 | #define BCM2835_DMA_WAIT(x) ((x & 31) << 21) /* add DMA-wait cycles */ | ||
149 | #define BCM2835_DMA_NO_WIDE_BURSTS BIT(26) /* no 2 beat write bursts */ | ||
150 | |||
151 | /* debug register bits */ | ||
152 | #define BCM2835_DMA_DEBUG_LAST_NOT_SET_ERR BIT(0) | ||
153 | #define BCM2835_DMA_DEBUG_FIFO_ERR BIT(1) | ||
154 | #define BCM2835_DMA_DEBUG_READ_ERR BIT(2) | ||
155 | #define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_SHIFT 4 | ||
156 | #define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_BITS 4 | ||
157 | #define BCM2835_DMA_DEBUG_ID_SHIFT 16 | ||
158 | #define BCM2835_DMA_DEBUG_ID_BITS 9 | ||
159 | #define BCM2835_DMA_DEBUG_STATE_SHIFT 16 | ||
160 | #define BCM2835_DMA_DEBUG_STATE_BITS 9 | ||
161 | #define BCM2835_DMA_DEBUG_VERSION_SHIFT 25 | ||
162 | #define BCM2835_DMA_DEBUG_VERSION_BITS 3 | ||
163 | #define BCM2835_DMA_DEBUG_LITE BIT(28) | ||
164 | |||
165 | /* shared registers for all dma channels */ | ||
166 | #define BCM2835_DMA_INT_STATUS 0xfe0 | ||
167 | #define BCM2835_DMA_ENABLE 0xff0 | ||
120 | 168 | ||
121 | #define BCM2835_DMA_DATA_TYPE_S8 1 | 169 | #define BCM2835_DMA_DATA_TYPE_S8 1 |
122 | #define BCM2835_DMA_DATA_TYPE_S16 2 | 170 | #define BCM2835_DMA_DATA_TYPE_S16 2 |
123 | #define BCM2835_DMA_DATA_TYPE_S32 4 | 171 | #define BCM2835_DMA_DATA_TYPE_S32 4 |
124 | #define BCM2835_DMA_DATA_TYPE_S128 16 | 172 | #define BCM2835_DMA_DATA_TYPE_S128 16 |
125 | 173 | ||
126 | #define BCM2835_DMA_BULK_MASK BIT(0) | ||
127 | #define BCM2835_DMA_FIQ_MASK (BIT(2) | BIT(3)) | ||
128 | |||
129 | /* Valid only for channels 0 - 14, 15 has its own base address */ | 174 | /* Valid only for channels 0 - 14, 15 has its own base address */ |
130 | #define BCM2835_DMA_CHAN(n) ((n) << 8) /* Base address */ | 175 | #define BCM2835_DMA_CHAN(n) ((n) << 8) /* Base address */ |
131 | #define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n)) | 176 | #define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n)) |
132 | 177 | ||
178 | /* the max dma length for different channels */ | ||
179 | #define MAX_DMA_LEN SZ_1G | ||
180 | #define MAX_LITE_DMA_LEN (SZ_64K - 4) | ||
181 | |||
182 | static inline size_t bcm2835_dma_max_frame_length(struct bcm2835_chan *c) | ||
183 | { | ||
184 | /* lite and normal channels have different max frame length */ | ||
185 | return c->is_lite_channel ? MAX_LITE_DMA_LEN : MAX_DMA_LEN; | ||
186 | } | ||
187 | |||
188 | /* how many frames of max_len size do we need to transfer len bytes */ | ||
189 | static inline size_t bcm2835_dma_frames_for_length(size_t len, | ||
190 | size_t max_len) | ||
191 | { | ||
192 | return DIV_ROUND_UP(len, max_len); | ||
193 | } | ||
194 | |||
133 | static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d) | 195 | static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d) |
134 | { | 196 | { |
135 | return container_of(d, struct bcm2835_dmadev, ddev); | 197 | return container_of(d, struct bcm2835_dmadev, ddev); |
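The two helpers introduced above decide how a request is split into control blocks: "lite" channels are capped at SZ_64K - 4 bytes per block, full channels at SZ_1G. A self-contained worked example of the same arithmetic (the sizes and the wrapper name are illustrative):

#include <linux/kernel.h>       /* DIV_ROUND_UP() */
#include <linux/sizes.h>        /* SZ_64K, SZ_1G */

static inline size_t example_bcm2835_frames(size_t len, bool lite_channel)
{
        size_t max_len = lite_channel ? (SZ_64K - 4) : SZ_1G;

        /*
         * e.g. len = 200 KiB (204800 bytes):
         *   lite channel: DIV_ROUND_UP(204800, 65532) = 4 control blocks
         *   full channel: DIV_ROUND_UP(204800, SZ_1G) = 1 control block
         */
        return DIV_ROUND_UP(len, max_len);
}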
@@ -146,19 +208,209 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc( | |||
146 | return container_of(t, struct bcm2835_desc, vd.tx); | 208 | return container_of(t, struct bcm2835_desc, vd.tx); |
147 | } | 209 | } |
148 | 210 | ||
149 | static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) | 211 | static void bcm2835_dma_free_cb_chain(struct bcm2835_desc *desc) |
150 | { | 212 | { |
151 | struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd); | 213 | size_t i; |
152 | int i; | ||
153 | 214 | ||
154 | for (i = 0; i < desc->frames; i++) | 215 | for (i = 0; i < desc->frames; i++) |
155 | dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb, | 216 | dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb, |
156 | desc->cb_list[i].paddr); | 217 | desc->cb_list[i].paddr); |
157 | 218 | ||
158 | kfree(desc->cb_list); | ||
159 | kfree(desc); | 219 | kfree(desc); |
160 | } | 220 | } |
161 | 221 | ||
222 | static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) | ||
223 | { | ||
224 | bcm2835_dma_free_cb_chain( | ||
225 | container_of(vd, struct bcm2835_desc, vd)); | ||
226 | } | ||
227 | |||
228 | static void bcm2835_dma_create_cb_set_length( | ||
229 | struct bcm2835_chan *chan, | ||
230 | struct bcm2835_dma_cb *control_block, | ||
231 | size_t len, | ||
232 | size_t period_len, | ||
233 | size_t *total_len, | ||
234 | u32 finalextrainfo) | ||
235 | { | ||
236 | size_t max_len = bcm2835_dma_max_frame_length(chan); | ||
237 | |||
238 | /* set the length taking lite-channel limitations into account */ | ||
239 | control_block->length = min_t(u32, len, max_len); | ||
240 | |||
241 | /* finished if we have no period_length */ | ||
242 | if (!period_len) | ||
243 | return; | ||
244 | |||
245 | /* | ||
246 | * period_len means: that we need to generate | ||
247 | * transfers that are terminating at every | ||
248 | * multiple of period_len - this is typically | ||
249 | * used to set the interrupt flag in info | ||
250 | * which is required during cyclic transfers | ||
251 | */ | ||
252 | |||
253 | /* have we filled in period_length yet? */ | ||
254 | if (*total_len + control_block->length < period_len) | ||
255 | return; | ||
256 | |||
257 | /* calculate the length that remains to reach period_length */ | ||
258 | control_block->length = period_len - *total_len; | ||
259 | |||
260 | /* reset total_length for next period */ | ||
261 | *total_len = 0; | ||
262 | |||
263 | /* add extrainfo bits in info */ | ||
264 | control_block->info |= finalextrainfo; | ||
265 | } | ||
266 | |||
267 | static inline size_t bcm2835_dma_count_frames_for_sg( | ||
268 | struct bcm2835_chan *c, | ||
269 | struct scatterlist *sgl, | ||
270 | unsigned int sg_len) | ||
271 | { | ||
272 | size_t frames = 0; | ||
273 | struct scatterlist *sgent; | ||
274 | unsigned int i; | ||
275 | size_t plength = bcm2835_dma_max_frame_length(c); | ||
276 | |||
277 | for_each_sg(sgl, sgent, sg_len, i) | ||
278 | frames += bcm2835_dma_frames_for_length( | ||
279 | sg_dma_len(sgent), plength); | ||
280 | |||
281 | return frames; | ||
282 | } | ||
283 | |||
284 | /** | ||
285 | * bcm2835_dma_create_cb_chain - create a control block and fills data in | ||
286 | * | ||
287 | * @chan: the @dma_chan for which we run this | ||
288 | * @direction: the direction in which we transfer | ||
289 | * @cyclic: it is a cyclic transfer | ||
290 | * @info: the default info bits to apply per controlblock | ||
291 | * @frames: number of controlblocks to allocate | ||
292 | * @src: the src address to assign (if the S_INC bit is set | ||
293 | * in @info, then it gets incremented) | ||
294 | * @dst: the dst address to assign (if the D_INC bit is set | ||
295 | * in @info, then it gets incremented) | ||
296 | * @buf_len: the full buffer length (may also be 0) | ||
297 | * @period_len: the period length when to apply @finalextrainfo | ||
298 | * in addition to the last transfer | ||
299 | * this will also break some control-blocks early | ||
300 | * @finalextrainfo: additional bits in last controlblock | ||
301 | * (or when period_len is reached in case of cyclic) | ||
302 | * @gfp: the GFP flag to use for allocation | ||
303 | */ | ||
304 | static struct bcm2835_desc *bcm2835_dma_create_cb_chain( | ||
305 | struct dma_chan *chan, enum dma_transfer_direction direction, | ||
306 | bool cyclic, u32 info, u32 finalextrainfo, size_t frames, | ||
307 | dma_addr_t src, dma_addr_t dst, size_t buf_len, | ||
308 | size_t period_len, gfp_t gfp) | ||
309 | { | ||
310 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | ||
311 | size_t len = buf_len, total_len; | ||
312 | size_t frame; | ||
313 | struct bcm2835_desc *d; | ||
314 | struct bcm2835_cb_entry *cb_entry; | ||
315 | struct bcm2835_dma_cb *control_block; | ||
316 | |||
317 | if (!frames) | ||
318 | return NULL; | ||
319 | |||
320 | /* allocate and setup the descriptor. */ | ||
321 | d = kzalloc(sizeof(*d) + frames * sizeof(struct bcm2835_cb_entry), | ||
322 | gfp); | ||
323 | if (!d) | ||
324 | return NULL; | ||
325 | |||
326 | d->c = c; | ||
327 | d->dir = direction; | ||
328 | d->cyclic = cyclic; | ||
329 | |||
330 | /* | ||
331 | * Iterate over all frames, create a control block | ||
332 | * for each frame and link them together. | ||
333 | */ | ||
334 | for (frame = 0, total_len = 0; frame < frames; d->frames++, frame++) { | ||
335 | cb_entry = &d->cb_list[frame]; | ||
336 | cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp, | ||
337 | &cb_entry->paddr); | ||
338 | if (!cb_entry->cb) | ||
339 | goto error_cb; | ||
340 | |||
341 | /* fill in the control block */ | ||
342 | control_block = cb_entry->cb; | ||
343 | control_block->info = info; | ||
344 | control_block->src = src; | ||
345 | control_block->dst = dst; | ||
346 | control_block->stride = 0; | ||
347 | control_block->next = 0; | ||
348 | /* set up length in control_block if requested */ | ||
349 | if (buf_len) { | ||
350 | /* calculate length honoring period_length */ | ||
351 | bcm2835_dma_create_cb_set_length( | ||
352 | c, control_block, | ||
353 | len, period_len, &total_len, | ||
354 | cyclic ? finalextrainfo : 0); | ||
355 | |||
356 | /* calculate new remaining length */ | ||
357 | len -= control_block->length; | ||
358 | } | ||
359 | |||
360 | /* link this the last controlblock */ | ||
361 | if (frame) | ||
362 | d->cb_list[frame - 1].cb->next = cb_entry->paddr; | ||
363 | |||
364 | /* update src and dst and length */ | ||
365 | if (src && (info & BCM2835_DMA_S_INC)) | ||
366 | src += control_block->length; | ||
367 | if (dst && (info & BCM2835_DMA_D_INC)) | ||
368 | dst += control_block->length; | ||
369 | |||
370 | /* Length of total transfer */ | ||
371 | d->size += control_block->length; | ||
372 | } | ||
373 | |||
374 | /* the last frame requires extra flags */ | ||
375 | d->cb_list[d->frames - 1].cb->info |= finalextrainfo; | ||
376 | |||
377 | /* detect a size missmatch */ | ||
378 | if (buf_len && (d->size != buf_len)) | ||
379 | goto error_cb; | ||
380 | |||
381 | return d; | ||
382 | error_cb: | ||
383 | bcm2835_dma_free_cb_chain(d); | ||
384 | |||
385 | return NULL; | ||
386 | } | ||
387 | |||
388 | static void bcm2835_dma_fill_cb_chain_with_sg( | ||
389 | struct dma_chan *chan, | ||
390 | enum dma_transfer_direction direction, | ||
391 | struct bcm2835_cb_entry *cb, | ||
392 | struct scatterlist *sgl, | ||
393 | unsigned int sg_len) | ||
394 | { | ||
395 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | ||
396 | size_t max_len = bcm2835_dma_max_frame_length(c); | ||
397 | unsigned int i, len; | ||
398 | dma_addr_t addr; | ||
399 | struct scatterlist *sgent; | ||
400 | |||
401 | for_each_sg(sgl, sgent, sg_len, i) { | ||
402 | for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent); | ||
403 | len > 0; | ||
404 | addr += cb->cb->length, len -= cb->cb->length, cb++) { | ||
405 | if (direction == DMA_DEV_TO_MEM) | ||
406 | cb->cb->dst = addr; | ||
407 | else | ||
408 | cb->cb->src = addr; | ||
409 | cb->cb->length = min(len, max_len); | ||
410 | } | ||
411 | } | ||
412 | } | ||
413 | |||
162 | static int bcm2835_dma_abort(void __iomem *chan_base) | 414 | static int bcm2835_dma_abort(void __iomem *chan_base) |
163 | { | 415 | { |
164 | unsigned long cs; | 416 | unsigned long cs; |
@@ -218,6 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) | |||
218 | struct bcm2835_desc *d; | 470 | struct bcm2835_desc *d; |
219 | unsigned long flags; | 471 | unsigned long flags; |
220 | 472 | ||
473 | /* check the shared interrupt */ | ||
474 | if (c->irq_flags & IRQF_SHARED) { | ||
475 | /* check if the interrupt is enabled */ | ||
476 | flags = readl(c->chan_base + BCM2835_DMA_CS); | ||
477 | /* if not set then we are not the reason for the irq */ | ||
478 | if (!(flags & BCM2835_DMA_INT)) | ||
479 | return IRQ_NONE; | ||
480 | } | ||
481 | |||
221 | spin_lock_irqsave(&c->vc.lock, flags); | 482 | spin_lock_irqsave(&c->vc.lock, flags); |
222 | 483 | ||
223 | /* Acknowledge interrupt */ | 484 | /* Acknowledge interrupt */ |
@@ -226,12 +487,18 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) | |||
226 | d = c->desc; | 487 | d = c->desc; |
227 | 488 | ||
228 | if (d) { | 489 | if (d) { |
229 | /* TODO Only works for cyclic DMA */ | 490 | if (d->cyclic) { |
230 | vchan_cyclic_callback(&d->vd); | 491 | /* call the cyclic callback */ |
231 | } | 492 | vchan_cyclic_callback(&d->vd); |
232 | 493 | ||
233 | /* Keep the DMA engine running */ | 494 | /* Keep the DMA engine running */ |
234 | writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); | 495 | writel(BCM2835_DMA_ACTIVE, |
496 | c->chan_base + BCM2835_DMA_CS); | ||
497 | } else { | ||
498 | vchan_cookie_complete(&c->desc->vd); | ||
499 | bcm2835_dma_start_desc(c); | ||
500 | } | ||
501 | } | ||
235 | 502 | ||
236 | spin_unlock_irqrestore(&c->vc.lock, flags); | 503 | spin_unlock_irqrestore(&c->vc.lock, flags); |
237 | 504 | ||
@@ -252,8 +519,8 @@ static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan) | |||
252 | return -ENOMEM; | 519 | return -ENOMEM; |
253 | } | 520 | } |
254 | 521 | ||
255 | return request_irq(c->irq_number, | 522 | return request_irq(c->irq_number, bcm2835_dma_callback, |
256 | bcm2835_dma_callback, 0, "DMA IRQ", c); | 523 | c->irq_flags, "DMA IRQ", c); |
257 | } | 524 | } |
258 | 525 | ||
259 | static void bcm2835_dma_free_chan_resources(struct dma_chan *chan) | 526 | static void bcm2835_dma_free_chan_resources(struct dma_chan *chan) |
@@ -339,8 +606,6 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan) | |||
339 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | 606 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); |
340 | unsigned long flags; | 607 | unsigned long flags; |
341 | 608 | ||
342 | c->cyclic = true; /* Nothing else is implemented */ | ||
343 | |||
344 | spin_lock_irqsave(&c->vc.lock, flags); | 609 | spin_lock_irqsave(&c->vc.lock, flags); |
345 | if (vchan_issue_pending(&c->vc) && !c->desc) | 610 | if (vchan_issue_pending(&c->vc) && !c->desc) |
346 | bcm2835_dma_start_desc(c); | 611 | bcm2835_dma_start_desc(c); |
@@ -348,122 +613,160 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan) | |||
348 | spin_unlock_irqrestore(&c->vc.lock, flags); | 613 | spin_unlock_irqrestore(&c->vc.lock, flags); |
349 | } | 614 | } |
350 | 615 | ||
351 | static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( | 616 | struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy( |
352 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 617 | struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, |
353 | size_t period_len, enum dma_transfer_direction direction, | 618 | size_t len, unsigned long flags) |
354 | unsigned long flags) | ||
355 | { | 619 | { |
356 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | 620 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); |
357 | enum dma_slave_buswidth dev_width; | ||
358 | struct bcm2835_desc *d; | 621 | struct bcm2835_desc *d; |
359 | dma_addr_t dev_addr; | 622 | u32 info = BCM2835_DMA_D_INC | BCM2835_DMA_S_INC; |
360 | unsigned int es, sync_type; | 623 | u32 extra = BCM2835_DMA_INT_EN | BCM2835_DMA_WAIT_RESP; |
361 | unsigned int frame; | 624 | size_t max_len = bcm2835_dma_max_frame_length(c); |
362 | int i; | 625 | size_t frames; |
626 | |||
627 | /* if src, dst or len is not given return with an error */ | ||
628 | if (!src || !dst || !len) | ||
629 | return NULL; | ||
630 | |||
631 | /* calculate number of frames */ | ||
632 | frames = bcm2835_dma_frames_for_length(len, max_len); | ||
633 | |||
634 | /* allocate the CB chain - this also fills in the pointers */ | ||
635 | d = bcm2835_dma_create_cb_chain(chan, DMA_MEM_TO_MEM, false, | ||
636 | info, extra, frames, | ||
637 | src, dst, len, 0, GFP_KERNEL); | ||
638 | if (!d) | ||
639 | return NULL; | ||
640 | |||
641 | return vchan_tx_prep(&c->vc, &d->vd, flags); | ||
642 | } | ||
643 | |||
644 | static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg( | ||
645 | struct dma_chan *chan, | ||
646 | struct scatterlist *sgl, unsigned int sg_len, | ||
647 | enum dma_transfer_direction direction, | ||
648 | unsigned long flags, void *context) | ||
649 | { | ||
650 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | ||
651 | struct bcm2835_desc *d; | ||
652 | dma_addr_t src = 0, dst = 0; | ||
653 | u32 info = BCM2835_DMA_WAIT_RESP; | ||
654 | u32 extra = BCM2835_DMA_INT_EN; | ||
655 | size_t frames; | ||
363 | 656 | ||
364 | /* Grab configuration */ | ||
365 | if (!is_slave_direction(direction)) { | 657 | if (!is_slave_direction(direction)) { |
366 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); | 658 | dev_err(chan->device->dev, |
659 | "%s: bad direction?\n", __func__); | ||
367 | return NULL; | 660 | return NULL; |
368 | } | 661 | } |
369 | 662 | ||
663 | if (c->dreq != 0) | ||
664 | info |= BCM2835_DMA_PER_MAP(c->dreq); | ||
665 | |||
370 | if (direction == DMA_DEV_TO_MEM) { | 666 | if (direction == DMA_DEV_TO_MEM) { |
371 | dev_addr = c->cfg.src_addr; | 667 | if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) |
372 | dev_width = c->cfg.src_addr_width; | 668 | return NULL; |
373 | sync_type = BCM2835_DMA_S_DREQ; | 669 | src = c->cfg.src_addr; |
670 | info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC; | ||
374 | } else { | 671 | } else { |
375 | dev_addr = c->cfg.dst_addr; | 672 | if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) |
376 | dev_width = c->cfg.dst_addr_width; | 673 | return NULL; |
377 | sync_type = BCM2835_DMA_D_DREQ; | 674 | dst = c->cfg.dst_addr; |
675 | info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC; | ||
378 | } | 676 | } |
379 | 677 | ||
380 | /* Bus width translates to the element size (ES) */ | 678 | /* count frames in sg list */ |
381 | switch (dev_width) { | 679 | frames = bcm2835_dma_count_frames_for_sg(c, sgl, sg_len); |
382 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
383 | es = BCM2835_DMA_DATA_TYPE_S32; | ||
384 | break; | ||
385 | default: | ||
386 | return NULL; | ||
387 | } | ||
388 | 680 | ||
389 | /* Now allocate and setup the descriptor. */ | 681 | /* allocate the CB chain */ |
390 | d = kzalloc(sizeof(*d), GFP_NOWAIT); | 682 | d = bcm2835_dma_create_cb_chain(chan, direction, false, |
683 | info, extra, | ||
684 | frames, src, dst, 0, 0, | ||
685 | GFP_KERNEL); | ||
391 | if (!d) | 686 | if (!d) |
392 | return NULL; | 687 | return NULL; |
393 | 688 | ||
394 | d->c = c; | 689 | /* fill in frames with scatterlist pointers */ |
395 | d->dir = direction; | 690 | bcm2835_dma_fill_cb_chain_with_sg(chan, direction, d->cb_list, |
396 | d->frames = buf_len / period_len; | 691 | sgl, sg_len); |
397 | 692 | ||
398 | d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL); | 693 | return vchan_tx_prep(&c->vc, &d->vd, flags); |
399 | if (!d->cb_list) { | 694 | } |
400 | kfree(d); | 695 | |
696 | static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( | ||
697 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | ||
698 | size_t period_len, enum dma_transfer_direction direction, | ||
699 | unsigned long flags) | ||
700 | { | ||
701 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | ||
702 | struct bcm2835_desc *d; | ||
703 | dma_addr_t src, dst; | ||
704 | u32 info = BCM2835_DMA_WAIT_RESP; | ||
705 | u32 extra = BCM2835_DMA_INT_EN; | ||
706 | size_t max_len = bcm2835_dma_max_frame_length(c); | ||
707 | size_t frames; | ||
708 | |||
709 | /* Grab configuration */ | ||
710 | if (!is_slave_direction(direction)) { | ||
711 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); | ||
401 | return NULL; | 712 | return NULL; |
402 | } | 713 | } |
403 | /* Allocate memory for control blocks */ | ||
404 | for (i = 0; i < d->frames; i++) { | ||
405 | struct bcm2835_cb_entry *cb_entry = &d->cb_list[i]; | ||
406 | 714 | ||
407 | cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC, | 715 | if (!buf_len) { |
408 | &cb_entry->paddr); | 716 | dev_err(chan->device->dev, |
409 | if (!cb_entry->cb) | 717 | "%s: bad buffer length (= 0)\n", __func__); |
410 | goto error_cb; | 718 | return NULL; |
411 | } | 719 | } |
412 | 720 | ||
413 | /* | 721 | /* |
414 | * Iterate over all frames, create a control block | 722 | * warn if buf_len is not a multiple of period_len - this may lead
415 | * for each frame and link them together. | 723 | * to unexpected latencies for interrupts and thus audible clicks
416 | */ | 724 | */ |
417 | for (frame = 0; frame < d->frames; frame++) { | 725 | if (buf_len % period_len) |
418 | struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb; | 726 | dev_warn_once(chan->device->dev, |
419 | 727 | "%s: buffer_length (%zd) is not a multiple of period_len (%zd)\n", | |
420 | /* Setup addresses */ | 728 | __func__, buf_len, period_len);
421 | if (d->dir == DMA_DEV_TO_MEM) { | ||
422 | control_block->info = BCM2835_DMA_D_INC; | ||
423 | control_block->src = dev_addr; | ||
424 | control_block->dst = buf_addr + frame * period_len; | ||
425 | } else { | ||
426 | control_block->info = BCM2835_DMA_S_INC; | ||
427 | control_block->src = buf_addr + frame * period_len; | ||
428 | control_block->dst = dev_addr; | ||
429 | } | ||
430 | 729 | ||
431 | /* Enable interrupt */ | 730 | /* Setup DREQ channel */ |
432 | control_block->info |= BCM2835_DMA_INT_EN; | 731 | if (c->dreq != 0) |
732 | info |= BCM2835_DMA_PER_MAP(c->dreq); | ||
433 | 733 | ||
434 | /* Setup synchronization */ | 734 | if (direction == DMA_DEV_TO_MEM) { |
435 | if (sync_type != 0) | 735 | if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) |
436 | control_block->info |= sync_type; | 736 | return NULL; |
737 | src = c->cfg.src_addr; | ||
738 | dst = buf_addr; | ||
739 | info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC; | ||
740 | } else { | ||
741 | if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
742 | return NULL; | ||
743 | dst = c->cfg.dst_addr; | ||
744 | src = buf_addr; | ||
745 | info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC; | ||
746 | } | ||
437 | 747 | ||
438 | /* Setup DREQ channel */ | 748 | /* calculate number of frames */ |
439 | if (c->dreq != 0) | 749 | frames = /* number of periods */ |
440 | control_block->info |= | 750 | DIV_ROUND_UP(buf_len, period_len) * |
441 | BCM2835_DMA_PER_MAP(c->dreq); | 751 | /* number of frames per period */ |
752 | bcm2835_dma_frames_for_length(period_len, max_len); | ||
442 | 753 | ||
443 | /* Length of a frame */ | 754 | /* |
444 | control_block->length = period_len; | 755 | * allocate the CB chain |
445 | d->size += control_block->length; | 756 | * note that we need to use GFP_NOWAIT, as the ALSA i2s dmaengine |
757 | * implementation calls prep_dma_cyclic with interrupts disabled. | ||
758 | */ | ||
759 | d = bcm2835_dma_create_cb_chain(chan, direction, true, | ||
760 | info, extra, | ||
761 | frames, src, dst, buf_len, | ||
762 | period_len, GFP_NOWAIT); | ||
763 | if (!d) | ||
764 | return NULL; | ||
446 | 765 | ||
447 | /* | 766 | /* wrap around into a loop */ |
448 | * Next block is the next frame. | 767 | d->cb_list[d->frames - 1].cb->next = d->cb_list[0].paddr; |
449 | * This DMA engine driver currently only supports cyclic DMA. | ||
450 | * Therefore, wrap around at number of frames. | ||
451 | */ | ||
452 | control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr; | ||
453 | } | ||
454 | 768 | ||
455 | return vchan_tx_prep(&c->vc, &d->vd, flags); | 769 | return vchan_tx_prep(&c->vc, &d->vd, flags); |
456 | error_cb: | ||
457 | i--; | ||
458 | for (; i >= 0; i--) { | ||
459 | struct bcm2835_cb_entry *cb_entry = &d->cb_list[i]; | ||
460 | |||
461 | dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr); | ||
462 | } | ||
463 | |||
464 | kfree(d->cb_list); | ||
465 | kfree(d); | ||
466 | return NULL; | ||
467 | } | 770 | } |
468 | 771 | ||
469 | static int bcm2835_dma_slave_config(struct dma_chan *chan, | 772 | static int bcm2835_dma_slave_config(struct dma_chan *chan, |
@@ -529,7 +832,8 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) | |||
529 | return 0; | 832 | return 0; |
530 | } | 833 | } |
531 | 834 | ||
532 | static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq) | 835 | static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, |
836 | int irq, unsigned int irq_flags) | ||
533 | { | 837 | { |
534 | struct bcm2835_chan *c; | 838 | struct bcm2835_chan *c; |
535 | 839 | ||
@@ -544,6 +848,12 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq) | |||
544 | c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); | 848 | c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); |
545 | c->ch = chan_id; | 849 | c->ch = chan_id; |
546 | c->irq_number = irq; | 850 | c->irq_number = irq; |
851 | c->irq_flags = irq_flags; | ||
852 | |||
853 | /* check in DEBUG register if this is a LITE channel */ | ||
854 | if (readl(c->chan_base + BCM2835_DMA_DEBUG) & | ||
855 | BCM2835_DMA_DEBUG_LITE) | ||
856 | c->is_lite_channel = true; | ||
547 | 857 | ||
548 | return 0; | 858 | return 0; |
549 | } | 859 | } |
@@ -587,9 +897,11 @@ static int bcm2835_dma_probe(struct platform_device *pdev) | |||
587 | struct resource *res; | 897 | struct resource *res; |
588 | void __iomem *base; | 898 | void __iomem *base; |
589 | int rc; | 899 | int rc; |
590 | int i; | 900 | int i, j; |
591 | int irq; | 901 | int irq[BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED + 1]; |
902 | int irq_flags; | ||
592 | uint32_t chans_available; | 903 | uint32_t chans_available; |
904 | char chan_name[BCM2835_DMA_CHAN_NAME_SIZE]; | ||
593 | 905 | ||
594 | if (!pdev->dev.dma_mask) | 906 | if (!pdev->dev.dma_mask) |
595 | pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; | 907 | pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; |
@@ -615,16 +927,22 @@ static int bcm2835_dma_probe(struct platform_device *pdev) | |||
615 | dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); | 927 | dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); |
616 | dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask); | 928 | dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask); |
617 | dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); | 929 | dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); |
930 | dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); | ||
931 | dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask); | ||
618 | od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources; | 932 | od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources; |
619 | od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources; | 933 | od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources; |
620 | od->ddev.device_tx_status = bcm2835_dma_tx_status; | 934 | od->ddev.device_tx_status = bcm2835_dma_tx_status; |
621 | od->ddev.device_issue_pending = bcm2835_dma_issue_pending; | 935 | od->ddev.device_issue_pending = bcm2835_dma_issue_pending; |
622 | od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic; | 936 | od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic; |
937 | od->ddev.device_prep_slave_sg = bcm2835_dma_prep_slave_sg; | ||
938 | od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy; | ||
623 | od->ddev.device_config = bcm2835_dma_slave_config; | 939 | od->ddev.device_config = bcm2835_dma_slave_config; |
624 | od->ddev.device_terminate_all = bcm2835_dma_terminate_all; | 940 | od->ddev.device_terminate_all = bcm2835_dma_terminate_all; |
625 | od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | 941 | od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); |
626 | od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | 942 | od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); |
627 | od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | 943 | od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | |
944 | BIT(DMA_MEM_TO_MEM); | ||
945 | od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
628 | od->ddev.dev = &pdev->dev; | 946 | od->ddev.dev = &pdev->dev; |
629 | INIT_LIST_HEAD(&od->ddev.channels); | 947 | INIT_LIST_HEAD(&od->ddev.channels); |
630 | spin_lock_init(&od->lock); | 948 | spin_lock_init(&od->lock); |
@@ -640,22 +958,48 @@ static int bcm2835_dma_probe(struct platform_device *pdev) | |||
640 | goto err_no_dma; | 958 | goto err_no_dma; |
641 | } | 959 | } |
642 | 960 | ||
643 | /* | 961 | /* get irqs for each channel that we support */ |
644 | * Do not use the FIQ and BULK channels, | 962 | for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) { |
645 | * because they are used by the GPU. | 963 | /* skip masked out channels */ |
646 | */ | 964 | if (!(chans_available & (1 << i))) { |
647 | chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK); | 965 | irq[i] = -1; |
966 | continue; | ||
967 | } | ||
648 | 968 | ||
649 | for (i = 0; i < pdev->num_resources; i++) { | 969 | /* get the named irq */ |
650 | irq = platform_get_irq(pdev, i); | 970 | snprintf(chan_name, sizeof(chan_name), "dma%i", i); |
651 | if (irq < 0) | 971 | irq[i] = platform_get_irq_byname(pdev, chan_name); |
652 | break; | 972 | if (irq[i] >= 0) |
973 | continue; | ||
653 | 974 | ||
654 | if (chans_available & (1 << i)) { | 975 | /* legacy device tree case handling */ |
655 | rc = bcm2835_dma_chan_init(od, i, irq); | 976 | dev_warn_once(&pdev->dev, |
656 | if (rc) | 977 | "missing interrupt-names property in device tree - legacy interpretation is used\n"); |
657 | goto err_no_dma; | 978 | /* |
658 | } | 979 | * in case of channel >= 11 |
980 | * use the 11th interrupt and that is shared | ||
981 | */ | ||
982 | irq[i] = platform_get_irq(pdev, i < 11 ? i : 11); | ||
983 | } | ||
984 | |||
985 | /* get irqs for each channel */ | ||
986 | for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) { | ||
987 | /* skip channels without irq */ | ||
988 | if (irq[i] < 0) | ||
989 | continue; | ||
990 | |||
991 | /* check if there are other channels that also use this irq */ | ||
992 | irq_flags = 0; | ||
993 | for (j = 0; j <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; j++) | ||
994 | if ((i != j) && (irq[j] == irq[i])) { | ||
995 | irq_flags = IRQF_SHARED; | ||
996 | break; | ||
997 | } | ||
998 | |||
999 | /* initialize the channel */ | ||
1000 | rc = bcm2835_dma_chan_init(od, i, irq[i], irq_flags); | ||
1001 | if (rc) | ||
1002 | goto err_no_dma; | ||
659 | } | 1003 | } |
660 | 1004 | ||
661 | dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i); | 1005 | dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i); |
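With the hunks above, bcm2835-dma gains slave scatter-gather and memcpy preparation alongside the existing cyclic support, plus per-channel (optionally shared) interrupts and LITE-channel detection. The following is a minimal client sketch, not part of this series; the channel name "rx", the FIFO address and the already dma_map_sg()-mapped scatterlist are made-up placeholders used only to show how the new bcm2835_dma_prep_slave_sg() path is reached through the generic dmaengine API.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/*
 * Hypothetical example: request a slave channel by name, configure the
 * (assumed 32-bit) device FIFO address and submit a pre-mapped scatterlist
 * for a DMA_DEV_TO_MEM transfer.
 */
static int example_rx_sg(struct device *dev, struct scatterlist *sgl,
			 unsigned int nents, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		/* bcm2835_dma_prep_slave_sg() rejects anything but 4 bytes */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	int ret;

	chan = dma_request_chan(dev, "rx");	/* "dma-names" entry in DT */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto out_release;

	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		ret = -EINVAL;
		goto out_release;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;

out_release:
	dma_release_channel(chan);
	return ret;
}

On this controller the prep call splits each scatterlist entry into frames no larger than the per-channel maximum (smaller on LITE channels) and chains the control blocks as shown in bcm2835_dma_fill_cb_chain_with_sg() above.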
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 0cb259c59916..8c9f45fd55fc 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -289,7 +289,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) | |||
289 | do { | 289 | do { |
290 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); | 290 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
291 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { | 291 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { |
292 | pr_err("%s: timeout!\n", __func__); | 292 | dev_err(chan->device->dev, "%s: timeout!\n", __func__); |
293 | return DMA_ERROR; | 293 | return DMA_ERROR; |
294 | } | 294 | } |
295 | if (status != DMA_IN_PROGRESS) | 295 | if (status != DMA_IN_PROGRESS) |
@@ -482,7 +482,8 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) | |||
482 | device = chan->device; | 482 | device = chan->device; |
483 | 483 | ||
484 | /* check if the channel supports slave transactions */ | 484 | /* check if the channel supports slave transactions */ |
485 | if (!test_bit(DMA_SLAVE, device->cap_mask.bits)) | 485 | if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) || |
486 | test_bit(DMA_CYCLIC, device->cap_mask.bits))) | ||
486 | return -ENXIO; | 487 | return -ENXIO; |
487 | 488 | ||
488 | /* | 489 | /* |
@@ -518,7 +519,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, | |||
518 | struct dma_chan *chan; | 519 | struct dma_chan *chan; |
519 | 520 | ||
520 | if (mask && !__dma_device_satisfies_mask(dev, mask)) { | 521 | if (mask && !__dma_device_satisfies_mask(dev, mask)) { |
521 | pr_debug("%s: wrong capabilities\n", __func__); | 522 | dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__); |
522 | return NULL; | 523 | return NULL; |
523 | } | 524 | } |
524 | /* devices with multiple channels need special handling as we need to | 525 | /* devices with multiple channels need special handling as we need to |
@@ -533,12 +534,12 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, | |||
533 | 534 | ||
534 | list_for_each_entry(chan, &dev->channels, device_node) { | 535 | list_for_each_entry(chan, &dev->channels, device_node) { |
535 | if (chan->client_count) { | 536 | if (chan->client_count) { |
536 | pr_debug("%s: %s busy\n", | 537 | dev_dbg(dev->dev, "%s: %s busy\n", |
537 | __func__, dma_chan_name(chan)); | 538 | __func__, dma_chan_name(chan)); |
538 | continue; | 539 | continue; |
539 | } | 540 | } |
540 | if (fn && !fn(chan, fn_param)) { | 541 | if (fn && !fn(chan, fn_param)) { |
541 | pr_debug("%s: %s filter said false\n", | 542 | dev_dbg(dev->dev, "%s: %s filter said false\n", |
542 | __func__, dma_chan_name(chan)); | 543 | __func__, dma_chan_name(chan)); |
543 | continue; | 544 | continue; |
544 | } | 545 | } |
@@ -567,11 +568,12 @@ static struct dma_chan *find_candidate(struct dma_device *device, | |||
567 | 568 | ||
568 | if (err) { | 569 | if (err) { |
569 | if (err == -ENODEV) { | 570 | if (err == -ENODEV) { |
570 | pr_debug("%s: %s module removed\n", __func__, | 571 | dev_dbg(device->dev, "%s: %s module removed\n", |
571 | dma_chan_name(chan)); | 572 | __func__, dma_chan_name(chan)); |
572 | list_del_rcu(&device->global_node); | 573 | list_del_rcu(&device->global_node); |
573 | } else | 574 | } else |
574 | pr_debug("%s: failed to get %s: (%d)\n", | 575 | dev_dbg(device->dev, |
576 | "%s: failed to get %s: (%d)\n", | ||
575 | __func__, dma_chan_name(chan), err); | 577 | __func__, dma_chan_name(chan), err); |
576 | 578 | ||
577 | if (--device->privatecnt == 0) | 579 | if (--device->privatecnt == 0) |
@@ -602,7 +604,8 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) | |||
602 | device->privatecnt++; | 604 | device->privatecnt++; |
603 | err = dma_chan_get(chan); | 605 | err = dma_chan_get(chan); |
604 | if (err) { | 606 | if (err) { |
605 | pr_debug("%s: failed to get %s: (%d)\n", | 607 | dev_dbg(chan->device->dev, |
608 | "%s: failed to get %s: (%d)\n", | ||
606 | __func__, dma_chan_name(chan), err); | 609 | __func__, dma_chan_name(chan), err); |
607 | chan = NULL; | 610 | chan = NULL; |
608 | if (--device->privatecnt == 0) | 611 | if (--device->privatecnt == 0) |
@@ -814,8 +817,9 @@ void dmaengine_get(void) | |||
814 | list_del_rcu(&device->global_node); | 817 | list_del_rcu(&device->global_node); |
815 | break; | 818 | break; |
816 | } else if (err) | 819 | } else if (err) |
817 | pr_debug("%s: failed to get %s: (%d)\n", | 820 | dev_dbg(chan->device->dev, |
818 | __func__, dma_chan_name(chan), err); | 821 | "%s: failed to get %s: (%d)\n", |
822 | __func__, dma_chan_name(chan), err); | ||
819 | } | 823 | } |
820 | } | 824 | } |
821 | 825 | ||
@@ -862,12 +866,12 @@ static bool device_has_all_tx_types(struct dma_device *device) | |||
862 | return false; | 866 | return false; |
863 | #endif | 867 | #endif |
864 | 868 | ||
865 | #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE) | 869 | #if IS_ENABLED(CONFIG_ASYNC_MEMCPY) |
866 | if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) | 870 | if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) |
867 | return false; | 871 | return false; |
868 | #endif | 872 | #endif |
869 | 873 | ||
870 | #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE) | 874 | #if IS_ENABLED(CONFIG_ASYNC_XOR) |
871 | if (!dma_has_cap(DMA_XOR, device->cap_mask)) | 875 | if (!dma_has_cap(DMA_XOR, device->cap_mask)) |
872 | return false; | 876 | return false; |
873 | 877 | ||
@@ -877,7 +881,7 @@ static bool device_has_all_tx_types(struct dma_device *device) | |||
877 | #endif | 881 | #endif |
878 | #endif | 882 | #endif |
879 | 883 | ||
880 | #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE) | 884 | #if IS_ENABLED(CONFIG_ASYNC_PQ) |
881 | if (!dma_has_cap(DMA_PQ, device->cap_mask)) | 885 | if (!dma_has_cap(DMA_PQ, device->cap_mask)) |
882 | return false; | 886 | return false; |
883 | 887 | ||
@@ -1222,8 +1226,9 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | |||
1222 | 1226 | ||
1223 | while (tx->cookie == -EBUSY) { | 1227 | while (tx->cookie == -EBUSY) { |
1224 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { | 1228 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { |
1225 | pr_err("%s timeout waiting for descriptor submission\n", | 1229 | dev_err(tx->chan->device->dev, |
1226 | __func__); | 1230 | "%s timeout waiting for descriptor submission\n", |
1231 | __func__); | ||
1227 | return DMA_ERROR; | 1232 | return DMA_ERROR; |
1228 | } | 1233 | } |
1229 | cpu_relax(); | 1234 | cpu_relax(); |
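The dmaengine core changes above relax dma_get_slave_caps() so that controllers advertising only DMA_CYCLIC (without DMA_SLAVE) still report their slave capabilities, switch the async-TX capability checks to IS_ENABLED(), and move the remaining pr_*() messages to dev_dbg()/dev_err(). A small illustration of the capability query as a client would issue it follows; the helper is hypothetical and not part of the patch.

#include <linux/dmaengine.h>

/*
 * Illustration only: before the hunk above, this returned false (-ENXIO from
 * dma_get_slave_caps()) on a cyclic-only controller; now the capabilities are
 * reported as long as either DMA_SLAVE or DMA_CYCLIC is set in cap_mask.
 */
static bool example_supports_32bit_dev_to_mem(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	return (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) &&
	       (caps.directions & BIT(DMA_DEV_TO_MEM));
}

IS_ENABLED(CONFIG_FOO) evaluates to 1 when the option is built in or modular, which is exactly what the old defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) tests spelled out by hand.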
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 97199b3c25a2..edf053f73a49 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c | |||
@@ -45,22 +45,19 @@ | |||
45 | DW_DMA_MSIZE_16; \ | 45 | DW_DMA_MSIZE_16; \ |
46 | u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ | 46 | u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ |
47 | DW_DMA_MSIZE_16; \ | 47 | DW_DMA_MSIZE_16; \ |
48 | u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ? \ | ||
49 | _dwc->p_master : _dwc->m_master; \ | ||
50 | u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ? \ | ||
51 | _dwc->p_master : _dwc->m_master; \ | ||
48 | \ | 52 | \ |
49 | (DWC_CTLL_DST_MSIZE(_dmsize) \ | 53 | (DWC_CTLL_DST_MSIZE(_dmsize) \ |
50 | | DWC_CTLL_SRC_MSIZE(_smsize) \ | 54 | | DWC_CTLL_SRC_MSIZE(_smsize) \ |
51 | | DWC_CTLL_LLP_D_EN \ | 55 | | DWC_CTLL_LLP_D_EN \ |
52 | | DWC_CTLL_LLP_S_EN \ | 56 | | DWC_CTLL_LLP_S_EN \ |
53 | | DWC_CTLL_DMS(_dwc->dst_master) \ | 57 | | DWC_CTLL_DMS(_dms) \ |
54 | | DWC_CTLL_SMS(_dwc->src_master)); \ | 58 | | DWC_CTLL_SMS(_sms)); \ |
55 | }) | 59 | }) |
56 | 60 | ||
57 | /* | ||
58 | * Number of descriptors to allocate for each channel. This should be | ||
59 | * made configurable somehow; preferably, the clients (at least the | ||
60 | * ones using slave transfers) should be able to give us a hint. | ||
61 | */ | ||
62 | #define NR_DESCS_PER_CHANNEL 64 | ||
63 | |||
64 | /* The set of bus widths supported by the DMA controller */ | 61 | /* The set of bus widths supported by the DMA controller */ |
65 | #define DW_DMA_BUSWIDTHS \ | 62 | #define DW_DMA_BUSWIDTHS \ |
66 | BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ | 63 | BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ |
@@ -80,51 +77,65 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) | |||
80 | return to_dw_desc(dwc->active_list.next); | 77 | return to_dw_desc(dwc->active_list.next); |
81 | } | 78 | } |
82 | 79 | ||
83 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) | 80 | static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) |
84 | { | 81 | { |
85 | struct dw_desc *desc, *_desc; | 82 | struct dw_desc *desc = txd_to_dw_desc(tx); |
86 | struct dw_desc *ret = NULL; | 83 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); |
87 | unsigned int i = 0; | 84 | dma_cookie_t cookie; |
88 | unsigned long flags; | 85 | unsigned long flags; |
89 | 86 | ||
90 | spin_lock_irqsave(&dwc->lock, flags); | 87 | spin_lock_irqsave(&dwc->lock, flags); |
91 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { | 88 | cookie = dma_cookie_assign(tx); |
92 | i++; | 89 | |
93 | if (async_tx_test_ack(&desc->txd)) { | 90 | /* |
94 | list_del(&desc->desc_node); | 91 | * REVISIT: We should attempt to chain as many descriptors as |
95 | ret = desc; | 92 | * possible, perhaps even appending to those already submitted |
96 | break; | 93 | * for DMA. But this is hard to do in a race-free manner. |
97 | } | 94 | */ |
98 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); | 95 | |
99 | } | 96 | list_add_tail(&desc->desc_node, &dwc->queue); |
100 | spin_unlock_irqrestore(&dwc->lock, flags); | 97 | spin_unlock_irqrestore(&dwc->lock, flags); |
98 | dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", | ||
99 | __func__, desc->txd.cookie); | ||
101 | 100 | ||
102 | dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); | 101 | return cookie; |
102 | } | ||
103 | 103 | ||
104 | return ret; | 104 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) |
105 | { | ||
106 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
107 | struct dw_desc *desc; | ||
108 | dma_addr_t phys; | ||
109 | |||
110 | desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys); | ||
111 | if (!desc) | ||
112 | return NULL; | ||
113 | |||
114 | dwc->descs_allocated++; | ||
115 | INIT_LIST_HEAD(&desc->tx_list); | ||
116 | dma_async_tx_descriptor_init(&desc->txd, &dwc->chan); | ||
117 | desc->txd.tx_submit = dwc_tx_submit; | ||
118 | desc->txd.flags = DMA_CTRL_ACK; | ||
119 | desc->txd.phys = phys; | ||
120 | return desc; | ||
105 | } | 121 | } |
106 | 122 | ||
107 | /* | ||
108 | * Move a descriptor, including any children, to the free list. | ||
109 | * `desc' must not be on any lists. | ||
110 | */ | ||
111 | static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | 123 | static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) |
112 | { | 124 | { |
113 | unsigned long flags; | 125 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
126 | struct dw_desc *child, *_next; | ||
114 | 127 | ||
115 | if (desc) { | 128 | if (unlikely(!desc)) |
116 | struct dw_desc *child; | 129 | return; |
117 | 130 | ||
118 | spin_lock_irqsave(&dwc->lock, flags); | 131 | list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) { |
119 | list_for_each_entry(child, &desc->tx_list, desc_node) | 132 | list_del(&child->desc_node); |
120 | dev_vdbg(chan2dev(&dwc->chan), | 133 | dma_pool_free(dw->desc_pool, child, child->txd.phys); |
121 | "moving child desc %p to freelist\n", | 134 | dwc->descs_allocated--; |
122 | child); | ||
123 | list_splice_init(&desc->tx_list, &dwc->free_list); | ||
124 | dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); | ||
125 | list_add(&desc->desc_node, &dwc->free_list); | ||
126 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
127 | } | 135 | } |
136 | |||
137 | dma_pool_free(dw->desc_pool, desc, desc->txd.phys); | ||
138 | dwc->descs_allocated--; | ||
128 | } | 139 | } |
129 | 140 | ||
130 | static void dwc_initialize(struct dw_dma_chan *dwc) | 141 | static void dwc_initialize(struct dw_dma_chan *dwc) |
@@ -133,7 +144,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc) | |||
133 | u32 cfghi = DWC_CFGH_FIFO_MODE; | 144 | u32 cfghi = DWC_CFGH_FIFO_MODE; |
134 | u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); | 145 | u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); |
135 | 146 | ||
136 | if (dwc->initialized == true) | 147 | if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags)) |
137 | return; | 148 | return; |
138 | 149 | ||
139 | cfghi |= DWC_CFGH_DST_PER(dwc->dst_id); | 150 | cfghi |= DWC_CFGH_DST_PER(dwc->dst_id); |
@@ -146,26 +157,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc) | |||
146 | channel_set_bit(dw, MASK.XFER, dwc->mask); | 157 | channel_set_bit(dw, MASK.XFER, dwc->mask); |
147 | channel_set_bit(dw, MASK.ERROR, dwc->mask); | 158 | channel_set_bit(dw, MASK.ERROR, dwc->mask); |
148 | 159 | ||
149 | dwc->initialized = true; | 160 | set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); |
150 | } | 161 | } |
151 | 162 | ||
152 | /*----------------------------------------------------------------------*/ | 163 | /*----------------------------------------------------------------------*/ |
153 | 164 | ||
154 | static inline unsigned int dwc_fast_ffs(unsigned long long v) | ||
155 | { | ||
156 | /* | ||
157 | * We can be a lot more clever here, but this should take care | ||
158 | * of the most common optimization. | ||
159 | */ | ||
160 | if (!(v & 7)) | ||
161 | return 3; | ||
162 | else if (!(v & 3)) | ||
163 | return 2; | ||
164 | else if (!(v & 1)) | ||
165 | return 1; | ||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) | 165 | static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) |
170 | { | 166 | { |
171 | dev_err(chan2dev(&dwc->chan), | 167 | dev_err(chan2dev(&dwc->chan), |
@@ -197,12 +193,12 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc, | |||
197 | * Software emulation of LLP mode relies on interrupts to continue | 193 | * Software emulation of LLP mode relies on interrupts to continue |
198 | * multi block transfer. | 194 | * multi block transfer. |
199 | */ | 195 | */ |
200 | ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; | 196 | ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN; |
201 | 197 | ||
202 | channel_writel(dwc, SAR, desc->lli.sar); | 198 | channel_writel(dwc, SAR, lli_read(desc, sar)); |
203 | channel_writel(dwc, DAR, desc->lli.dar); | 199 | channel_writel(dwc, DAR, lli_read(desc, dar)); |
204 | channel_writel(dwc, CTL_LO, ctllo); | 200 | channel_writel(dwc, CTL_LO, ctllo); |
205 | channel_writel(dwc, CTL_HI, desc->lli.ctlhi); | 201 | channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi)); |
206 | channel_set_bit(dw, CH_EN, dwc->mask); | 202 | channel_set_bit(dw, CH_EN, dwc->mask); |
207 | 203 | ||
208 | /* Move pointer to next descriptor */ | 204 | /* Move pointer to next descriptor */ |
@@ -213,6 +209,7 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc, | |||
213 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | 209 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) |
214 | { | 210 | { |
215 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 211 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
212 | u8 lms = DWC_LLP_LMS(dwc->m_master); | ||
216 | unsigned long was_soft_llp; | 213 | unsigned long was_soft_llp; |
217 | 214 | ||
218 | /* ASSERT: channel is idle */ | 215 | /* ASSERT: channel is idle */ |
@@ -237,7 +234,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |||
237 | 234 | ||
238 | dwc_initialize(dwc); | 235 | dwc_initialize(dwc); |
239 | 236 | ||
240 | dwc->residue = first->total_len; | 237 | first->residue = first->total_len; |
241 | dwc->tx_node_active = &first->tx_list; | 238 | dwc->tx_node_active = &first->tx_list; |
242 | 239 | ||
243 | /* Submit first block */ | 240 | /* Submit first block */ |
@@ -248,9 +245,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |||
248 | 245 | ||
249 | dwc_initialize(dwc); | 246 | dwc_initialize(dwc); |
250 | 247 | ||
251 | channel_writel(dwc, LLP, first->txd.phys); | 248 | channel_writel(dwc, LLP, first->txd.phys | lms); |
252 | channel_writel(dwc, CTL_LO, | 249 | channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); |
253 | DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | ||
254 | channel_writel(dwc, CTL_HI, 0); | 250 | channel_writel(dwc, CTL_HI, 0); |
255 | channel_set_bit(dw, CH_EN, dwc->mask); | 251 | channel_set_bit(dw, CH_EN, dwc->mask); |
256 | } | 252 | } |
@@ -293,11 +289,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, | |||
293 | list_for_each_entry(child, &desc->tx_list, desc_node) | 289 | list_for_each_entry(child, &desc->tx_list, desc_node) |
294 | async_tx_ack(&child->txd); | 290 | async_tx_ack(&child->txd); |
295 | async_tx_ack(&desc->txd); | 291 | async_tx_ack(&desc->txd); |
296 | 292 | dwc_desc_put(dwc, desc); | |
297 | list_splice_init(&desc->tx_list, &dwc->free_list); | ||
298 | list_move(&desc->desc_node, &dwc->free_list); | ||
299 | |||
300 | dma_descriptor_unmap(txd); | ||
301 | spin_unlock_irqrestore(&dwc->lock, flags); | 293 | spin_unlock_irqrestore(&dwc->lock, flags); |
302 | 294 | ||
303 | if (callback) | 295 | if (callback) |
@@ -368,11 +360,11 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
368 | 360 | ||
369 | head = &desc->tx_list; | 361 | head = &desc->tx_list; |
370 | if (active != head) { | 362 | if (active != head) { |
371 | /* Update desc to reflect last sent one */ | 363 | /* Update residue to reflect last sent descriptor */ |
372 | if (active != head->next) | 364 | if (active == head->next) |
373 | desc = to_dw_desc(active->prev); | 365 | desc->residue -= desc->len; |
374 | 366 | else | |
375 | dwc->residue -= desc->len; | 367 | desc->residue -= to_dw_desc(active->prev)->len; |
376 | 368 | ||
377 | child = to_dw_desc(active); | 369 | child = to_dw_desc(active); |
378 | 370 | ||
@@ -387,8 +379,6 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
387 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); | 379 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); |
388 | } | 380 | } |
389 | 381 | ||
390 | dwc->residue = 0; | ||
391 | |||
392 | spin_unlock_irqrestore(&dwc->lock, flags); | 382 | spin_unlock_irqrestore(&dwc->lock, flags); |
393 | 383 | ||
394 | dwc_complete_all(dw, dwc); | 384 | dwc_complete_all(dw, dwc); |
@@ -396,7 +386,6 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
396 | } | 386 | } |
397 | 387 | ||
398 | if (list_empty(&dwc->active_list)) { | 388 | if (list_empty(&dwc->active_list)) { |
399 | dwc->residue = 0; | ||
400 | spin_unlock_irqrestore(&dwc->lock, flags); | 389 | spin_unlock_irqrestore(&dwc->lock, flags); |
401 | return; | 390 | return; |
402 | } | 391 | } |
@@ -411,31 +400,31 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
411 | 400 | ||
412 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | 401 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { |
413 | /* Initial residue value */ | 402 | /* Initial residue value */ |
414 | dwc->residue = desc->total_len; | 403 | desc->residue = desc->total_len; |
415 | 404 | ||
416 | /* Check first descriptors addr */ | 405 | /* Check first descriptors addr */ |
417 | if (desc->txd.phys == llp) { | 406 | if (desc->txd.phys == DWC_LLP_LOC(llp)) { |
418 | spin_unlock_irqrestore(&dwc->lock, flags); | 407 | spin_unlock_irqrestore(&dwc->lock, flags); |
419 | return; | 408 | return; |
420 | } | 409 | } |
421 | 410 | ||
422 | /* Check first descriptors llp */ | 411 | /* Check first descriptors llp */ |
423 | if (desc->lli.llp == llp) { | 412 | if (lli_read(desc, llp) == llp) { |
424 | /* This one is currently in progress */ | 413 | /* This one is currently in progress */ |
425 | dwc->residue -= dwc_get_sent(dwc); | 414 | desc->residue -= dwc_get_sent(dwc); |
426 | spin_unlock_irqrestore(&dwc->lock, flags); | 415 | spin_unlock_irqrestore(&dwc->lock, flags); |
427 | return; | 416 | return; |
428 | } | 417 | } |
429 | 418 | ||
430 | dwc->residue -= desc->len; | 419 | desc->residue -= desc->len; |
431 | list_for_each_entry(child, &desc->tx_list, desc_node) { | 420 | list_for_each_entry(child, &desc->tx_list, desc_node) { |
432 | if (child->lli.llp == llp) { | 421 | if (lli_read(child, llp) == llp) { |
433 | /* Currently in progress */ | 422 | /* Currently in progress */ |
434 | dwc->residue -= dwc_get_sent(dwc); | 423 | desc->residue -= dwc_get_sent(dwc); |
435 | spin_unlock_irqrestore(&dwc->lock, flags); | 424 | spin_unlock_irqrestore(&dwc->lock, flags); |
436 | return; | 425 | return; |
437 | } | 426 | } |
438 | dwc->residue -= child->len; | 427 | desc->residue -= child->len; |
439 | } | 428 | } |
440 | 429 | ||
441 | /* | 430 | /* |
@@ -457,10 +446,14 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
457 | spin_unlock_irqrestore(&dwc->lock, flags); | 446 | spin_unlock_irqrestore(&dwc->lock, flags); |
458 | } | 447 | } |
459 | 448 | ||
460 | static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) | 449 | static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc) |
461 | { | 450 | { |
462 | dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", | 451 | dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", |
463 | lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); | 452 | lli_read(desc, sar), |
453 | lli_read(desc, dar), | ||
454 | lli_read(desc, llp), | ||
455 | lli_read(desc, ctlhi), | ||
456 | lli_read(desc, ctllo)); | ||
464 | } | 457 | } |
465 | 458 | ||
466 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | 459 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) |
@@ -496,9 +489,9 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
496 | */ | 489 | */ |
497 | dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" | 490 | dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" |
498 | " cookie: %d\n", bad_desc->txd.cookie); | 491 | " cookie: %d\n", bad_desc->txd.cookie); |
499 | dwc_dump_lli(dwc, &bad_desc->lli); | 492 | dwc_dump_lli(dwc, bad_desc); |
500 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) | 493 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
501 | dwc_dump_lli(dwc, &child->lli); | 494 | dwc_dump_lli(dwc, child); |
502 | 495 | ||
503 | spin_unlock_irqrestore(&dwc->lock, flags); | 496 | spin_unlock_irqrestore(&dwc->lock, flags); |
504 | 497 | ||
@@ -549,7 +542,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |||
549 | */ | 542 | */ |
550 | if (unlikely(status_err & dwc->mask) || | 543 | if (unlikely(status_err & dwc->mask) || |
551 | unlikely(status_xfer & dwc->mask)) { | 544 | unlikely(status_xfer & dwc->mask)) { |
552 | int i; | 545 | unsigned int i; |
553 | 546 | ||
554 | dev_err(chan2dev(&dwc->chan), | 547 | dev_err(chan2dev(&dwc->chan), |
555 | "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n", | 548 | "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n", |
@@ -571,7 +564,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |||
571 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 564 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
572 | 565 | ||
573 | for (i = 0; i < dwc->cdesc->periods; i++) | 566 | for (i = 0; i < dwc->cdesc->periods; i++) |
574 | dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); | 567 | dwc_dump_lli(dwc, dwc->cdesc->desc[i]); |
575 | 568 | ||
576 | spin_unlock_irqrestore(&dwc->lock, flags); | 569 | spin_unlock_irqrestore(&dwc->lock, flags); |
577 | } | 570 | } |
@@ -589,7 +582,7 @@ static void dw_dma_tasklet(unsigned long data) | |||
589 | u32 status_block; | 582 | u32 status_block; |
590 | u32 status_xfer; | 583 | u32 status_xfer; |
591 | u32 status_err; | 584 | u32 status_err; |
592 | int i; | 585 | unsigned int i; |
593 | 586 | ||
594 | status_block = dma_readl(dw, RAW.BLOCK); | 587 | status_block = dma_readl(dw, RAW.BLOCK); |
595 | status_xfer = dma_readl(dw, RAW.XFER); | 588 | status_xfer = dma_readl(dw, RAW.XFER); |
@@ -658,30 +651,6 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) | |||
658 | 651 | ||
659 | /*----------------------------------------------------------------------*/ | 652 | /*----------------------------------------------------------------------*/ |
660 | 653 | ||
661 | static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | ||
662 | { | ||
663 | struct dw_desc *desc = txd_to_dw_desc(tx); | ||
664 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); | ||
665 | dma_cookie_t cookie; | ||
666 | unsigned long flags; | ||
667 | |||
668 | spin_lock_irqsave(&dwc->lock, flags); | ||
669 | cookie = dma_cookie_assign(tx); | ||
670 | |||
671 | /* | ||
672 | * REVISIT: We should attempt to chain as many descriptors as | ||
673 | * possible, perhaps even appending to those already submitted | ||
674 | * for DMA. But this is hard to do in a race-free manner. | ||
675 | */ | ||
676 | |||
677 | dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie); | ||
678 | list_add_tail(&desc->desc_node, &dwc->queue); | ||
679 | |||
680 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
681 | |||
682 | return cookie; | ||
683 | } | ||
684 | |||
685 | static struct dma_async_tx_descriptor * | 654 | static struct dma_async_tx_descriptor * |
686 | dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | 655 | dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
687 | size_t len, unsigned long flags) | 656 | size_t len, unsigned long flags) |
@@ -693,10 +662,12 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
693 | struct dw_desc *prev; | 662 | struct dw_desc *prev; |
694 | size_t xfer_count; | 663 | size_t xfer_count; |
695 | size_t offset; | 664 | size_t offset; |
665 | u8 m_master = dwc->m_master; | ||
696 | unsigned int src_width; | 666 | unsigned int src_width; |
697 | unsigned int dst_width; | 667 | unsigned int dst_width; |
698 | unsigned int data_width; | 668 | unsigned int data_width = dw->pdata->data_width[m_master]; |
699 | u32 ctllo; | 669 | u32 ctllo; |
670 | u8 lms = DWC_LLP_LMS(m_master); | ||
700 | 671 | ||
701 | dev_vdbg(chan2dev(chan), | 672 | dev_vdbg(chan2dev(chan), |
702 | "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__, | 673 | "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__, |
@@ -709,11 +680,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
709 | 680 | ||
710 | dwc->direction = DMA_MEM_TO_MEM; | 681 | dwc->direction = DMA_MEM_TO_MEM; |
711 | 682 | ||
712 | data_width = min_t(unsigned int, dw->data_width[dwc->src_master], | 683 | src_width = dst_width = __ffs(data_width | src | dest | len); |
713 | dw->data_width[dwc->dst_master]); | ||
714 | |||
715 | src_width = dst_width = min_t(unsigned int, data_width, | ||
716 | dwc_fast_ffs(src | dest | len)); | ||
717 | 684 | ||
718 | ctllo = DWC_DEFAULT_CTLLO(chan) | 685 | ctllo = DWC_DEFAULT_CTLLO(chan) |
719 | | DWC_CTLL_DST_WIDTH(dst_width) | 686 | | DWC_CTLL_DST_WIDTH(dst_width) |
@@ -731,27 +698,27 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
731 | if (!desc) | 698 | if (!desc) |
732 | goto err_desc_get; | 699 | goto err_desc_get; |
733 | 700 | ||
734 | desc->lli.sar = src + offset; | 701 | lli_write(desc, sar, src + offset); |
735 | desc->lli.dar = dest + offset; | 702 | lli_write(desc, dar, dest + offset); |
736 | desc->lli.ctllo = ctllo; | 703 | lli_write(desc, ctllo, ctllo); |
737 | desc->lli.ctlhi = xfer_count; | 704 | lli_write(desc, ctlhi, xfer_count); |
738 | desc->len = xfer_count << src_width; | 705 | desc->len = xfer_count << src_width; |
739 | 706 | ||
740 | if (!first) { | 707 | if (!first) { |
741 | first = desc; | 708 | first = desc; |
742 | } else { | 709 | } else { |
743 | prev->lli.llp = desc->txd.phys; | 710 | lli_write(prev, llp, desc->txd.phys | lms); |
744 | list_add_tail(&desc->desc_node, | 711 | list_add_tail(&desc->desc_node, &first->tx_list); |
745 | &first->tx_list); | ||
746 | } | 712 | } |
747 | prev = desc; | 713 | prev = desc; |
748 | } | 714 | } |
749 | 715 | ||
750 | if (flags & DMA_PREP_INTERRUPT) | 716 | if (flags & DMA_PREP_INTERRUPT) |
751 | /* Trigger interrupt after last block */ | 717 | /* Trigger interrupt after last block */ |
752 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | 718 | lli_set(prev, ctllo, DWC_CTLL_INT_EN); |
753 | 719 | ||
754 | prev->lli.llp = 0; | 720 | prev->lli.llp = 0; |
721 | lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | ||
755 | first->txd.flags = flags; | 722 | first->txd.flags = flags; |
756 | first->total_len = len; | 723 | first->total_len = len; |
757 | 724 | ||
@@ -773,10 +740,12 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
773 | struct dw_desc *prev; | 740 | struct dw_desc *prev; |
774 | struct dw_desc *first; | 741 | struct dw_desc *first; |
775 | u32 ctllo; | 742 | u32 ctllo; |
743 | u8 m_master = dwc->m_master; | ||
744 | u8 lms = DWC_LLP_LMS(m_master); | ||
776 | dma_addr_t reg; | 745 | dma_addr_t reg; |
777 | unsigned int reg_width; | 746 | unsigned int reg_width; |
778 | unsigned int mem_width; | 747 | unsigned int mem_width; |
779 | unsigned int data_width; | 748 | unsigned int data_width = dw->pdata->data_width[m_master]; |
780 | unsigned int i; | 749 | unsigned int i; |
781 | struct scatterlist *sg; | 750 | struct scatterlist *sg; |
782 | size_t total_len = 0; | 751 | size_t total_len = 0; |
@@ -802,8 +771,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
802 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | 771 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : |
803 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | 772 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); |
804 | 773 | ||
805 | data_width = dw->data_width[dwc->src_master]; | ||
806 | |||
807 | for_each_sg(sgl, sg, sg_len, i) { | 774 | for_each_sg(sgl, sg, sg_len, i) { |
808 | struct dw_desc *desc; | 775 | struct dw_desc *desc; |
809 | u32 len, dlen, mem; | 776 | u32 len, dlen, mem; |
@@ -811,17 +778,16 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
811 | mem = sg_dma_address(sg); | 778 | mem = sg_dma_address(sg); |
812 | len = sg_dma_len(sg); | 779 | len = sg_dma_len(sg); |
813 | 780 | ||
814 | mem_width = min_t(unsigned int, | 781 | mem_width = __ffs(data_width | mem | len); |
815 | data_width, dwc_fast_ffs(mem | len)); | ||
816 | 782 | ||
817 | slave_sg_todev_fill_desc: | 783 | slave_sg_todev_fill_desc: |
818 | desc = dwc_desc_get(dwc); | 784 | desc = dwc_desc_get(dwc); |
819 | if (!desc) | 785 | if (!desc) |
820 | goto err_desc_get; | 786 | goto err_desc_get; |
821 | 787 | ||
822 | desc->lli.sar = mem; | 788 | lli_write(desc, sar, mem); |
823 | desc->lli.dar = reg; | 789 | lli_write(desc, dar, reg); |
824 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); | 790 | lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width)); |
825 | if ((len >> mem_width) > dwc->block_size) { | 791 | if ((len >> mem_width) > dwc->block_size) { |
826 | dlen = dwc->block_size << mem_width; | 792 | dlen = dwc->block_size << mem_width; |
827 | mem += dlen; | 793 | mem += dlen; |
@@ -831,15 +797,14 @@ slave_sg_todev_fill_desc: | |||
831 | len = 0; | 797 | len = 0; |
832 | } | 798 | } |
833 | 799 | ||
834 | desc->lli.ctlhi = dlen >> mem_width; | 800 | lli_write(desc, ctlhi, dlen >> mem_width); |
835 | desc->len = dlen; | 801 | desc->len = dlen; |
836 | 802 | ||
837 | if (!first) { | 803 | if (!first) { |
838 | first = desc; | 804 | first = desc; |
839 | } else { | 805 | } else { |
840 | prev->lli.llp = desc->txd.phys; | 806 | lli_write(prev, llp, desc->txd.phys | lms); |
841 | list_add_tail(&desc->desc_node, | 807 | list_add_tail(&desc->desc_node, &first->tx_list); |
842 | &first->tx_list); | ||
843 | } | 808 | } |
844 | prev = desc; | 809 | prev = desc; |
845 | total_len += dlen; | 810 | total_len += dlen; |
@@ -859,8 +824,6 @@ slave_sg_todev_fill_desc: | |||
859 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | 824 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : |
860 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | 825 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); |
861 | 826 | ||
862 | data_width = dw->data_width[dwc->dst_master]; | ||
863 | |||
864 | for_each_sg(sgl, sg, sg_len, i) { | 827 | for_each_sg(sgl, sg, sg_len, i) { |
865 | struct dw_desc *desc; | 828 | struct dw_desc *desc; |
866 | u32 len, dlen, mem; | 829 | u32 len, dlen, mem; |
@@ -868,17 +831,16 @@ slave_sg_todev_fill_desc: | |||
868 | mem = sg_dma_address(sg); | 831 | mem = sg_dma_address(sg); |
869 | len = sg_dma_len(sg); | 832 | len = sg_dma_len(sg); |
870 | 833 | ||
871 | mem_width = min_t(unsigned int, | 834 | mem_width = __ffs(data_width | mem | len); |
872 | data_width, dwc_fast_ffs(mem | len)); | ||
873 | 835 | ||
874 | slave_sg_fromdev_fill_desc: | 836 | slave_sg_fromdev_fill_desc: |
875 | desc = dwc_desc_get(dwc); | 837 | desc = dwc_desc_get(dwc); |
876 | if (!desc) | 838 | if (!desc) |
877 | goto err_desc_get; | 839 | goto err_desc_get; |
878 | 840 | ||
879 | desc->lli.sar = reg; | 841 | lli_write(desc, sar, reg); |
880 | desc->lli.dar = mem; | 842 | lli_write(desc, dar, mem); |
881 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); | 843 | lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width)); |
882 | if ((len >> reg_width) > dwc->block_size) { | 844 | if ((len >> reg_width) > dwc->block_size) { |
883 | dlen = dwc->block_size << reg_width; | 845 | dlen = dwc->block_size << reg_width; |
884 | mem += dlen; | 846 | mem += dlen; |
@@ -887,15 +849,14 @@ slave_sg_fromdev_fill_desc: | |||
887 | dlen = len; | 849 | dlen = len; |
888 | len = 0; | 850 | len = 0; |
889 | } | 851 | } |
890 | desc->lli.ctlhi = dlen >> reg_width; | 852 | lli_write(desc, ctlhi, dlen >> reg_width); |
891 | desc->len = dlen; | 853 | desc->len = dlen; |
892 | 854 | ||
893 | if (!first) { | 855 | if (!first) { |
894 | first = desc; | 856 | first = desc; |
895 | } else { | 857 | } else { |
896 | prev->lli.llp = desc->txd.phys; | 858 | lli_write(prev, llp, desc->txd.phys | lms); |
897 | list_add_tail(&desc->desc_node, | 859 | list_add_tail(&desc->desc_node, &first->tx_list); |
898 | &first->tx_list); | ||
899 | } | 860 | } |
900 | prev = desc; | 861 | prev = desc; |
901 | total_len += dlen; | 862 | total_len += dlen; |
@@ -910,9 +871,10 @@ slave_sg_fromdev_fill_desc: | |||
910 | 871 | ||
911 | if (flags & DMA_PREP_INTERRUPT) | 872 | if (flags & DMA_PREP_INTERRUPT) |
912 | /* Trigger interrupt after last block */ | 873 | /* Trigger interrupt after last block */ |
913 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | 874 | lli_set(prev, ctllo, DWC_CTLL_INT_EN); |
914 | 875 | ||
915 | prev->lli.llp = 0; | 876 | prev->lli.llp = 0; |
877 | lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | ||
916 | first->total_len = total_len; | 878 | first->total_len = total_len; |
917 | 879 | ||
918 | return &first->txd; | 880 | return &first->txd; |
@@ -937,8 +899,8 @@ bool dw_dma_filter(struct dma_chan *chan, void *param) | |||
937 | dwc->src_id = dws->src_id; | 899 | dwc->src_id = dws->src_id; |
938 | dwc->dst_id = dws->dst_id; | 900 | dwc->dst_id = dws->dst_id; |
939 | 901 | ||
940 | dwc->src_master = dws->src_master; | 902 | dwc->m_master = dws->m_master; |
941 | dwc->dst_master = dws->dst_master; | 903 | dwc->p_master = dws->p_master; |
942 | 904 | ||
943 | return true; | 905 | return true; |
944 | } | 906 | } |
@@ -991,7 +953,7 @@ static int dwc_pause(struct dma_chan *chan) | |||
991 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) | 953 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) |
992 | udelay(2); | 954 | udelay(2); |
993 | 955 | ||
994 | dwc->paused = true; | 956 | set_bit(DW_DMA_IS_PAUSED, &dwc->flags); |
995 | 957 | ||
996 | spin_unlock_irqrestore(&dwc->lock, flags); | 958 | spin_unlock_irqrestore(&dwc->lock, flags); |
997 | 959 | ||
@@ -1004,7 +966,7 @@ static inline void dwc_chan_resume(struct dw_dma_chan *dwc) | |||
1004 | 966 | ||
1005 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); | 967 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); |
1006 | 968 | ||
1007 | dwc->paused = false; | 969 | clear_bit(DW_DMA_IS_PAUSED, &dwc->flags); |
1008 | } | 970 | } |
1009 | 971 | ||
1010 | static int dwc_resume(struct dma_chan *chan) | 972 | static int dwc_resume(struct dma_chan *chan) |
@@ -1012,12 +974,10 @@ static int dwc_resume(struct dma_chan *chan) | |||
1012 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 974 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1013 | unsigned long flags; | 975 | unsigned long flags; |
1014 | 976 | ||
1015 | if (!dwc->paused) | ||
1016 | return 0; | ||
1017 | |||
1018 | spin_lock_irqsave(&dwc->lock, flags); | 977 | spin_lock_irqsave(&dwc->lock, flags); |
1019 | 978 | ||
1020 | dwc_chan_resume(dwc); | 979 | if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags)) |
980 | dwc_chan_resume(dwc); | ||
1021 | 981 | ||
1022 | spin_unlock_irqrestore(&dwc->lock, flags); | 982 | spin_unlock_irqrestore(&dwc->lock, flags); |
1023 | 983 | ||
@@ -1053,16 +1013,37 @@ static int dwc_terminate_all(struct dma_chan *chan) | |||
1053 | return 0; | 1013 | return 0; |
1054 | } | 1014 | } |
1055 | 1015 | ||
1056 | static inline u32 dwc_get_residue(struct dw_dma_chan *dwc) | 1016 | static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c) |
1057 | { | 1017 | { |
1018 | struct dw_desc *desc; | ||
1019 | |||
1020 | list_for_each_entry(desc, &dwc->active_list, desc_node) | ||
1021 | if (desc->txd.cookie == c) | ||
1022 | return desc; | ||
1023 | |||
1024 | return NULL; | ||
1025 | } | ||
1026 | |||
1027 | static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie) | ||
1028 | { | ||
1029 | struct dw_desc *desc; | ||
1058 | unsigned long flags; | 1030 | unsigned long flags; |
1059 | u32 residue; | 1031 | u32 residue; |
1060 | 1032 | ||
1061 | spin_lock_irqsave(&dwc->lock, flags); | 1033 | spin_lock_irqsave(&dwc->lock, flags); |
1062 | 1034 | ||
1063 | residue = dwc->residue; | 1035 | desc = dwc_find_desc(dwc, cookie); |
1064 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) | 1036 | if (desc) { |
1065 | residue -= dwc_get_sent(dwc); | 1037 | if (desc == dwc_first_active(dwc)) { |
1038 | residue = desc->residue; | ||
1039 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) | ||
1040 | residue -= dwc_get_sent(dwc); | ||
1041 | } else { | ||
1042 | residue = desc->total_len; | ||
1043 | } | ||
1044 | } else { | ||
1045 | residue = 0; | ||
1046 | } | ||
1066 | 1047 | ||
1067 | spin_unlock_irqrestore(&dwc->lock, flags); | 1048 | spin_unlock_irqrestore(&dwc->lock, flags); |
1068 | return residue; | 1049 | return residue; |
@@ -1083,10 +1064,12 @@ dwc_tx_status(struct dma_chan *chan, | |||
1083 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | 1064 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
1084 | 1065 | ||
1085 | ret = dma_cookie_status(chan, cookie, txstate); | 1066 | ret = dma_cookie_status(chan, cookie, txstate); |
1086 | if (ret != DMA_COMPLETE) | 1067 | if (ret == DMA_COMPLETE) |
1087 | dma_set_residue(txstate, dwc_get_residue(dwc)); | 1068 | return ret; |
1069 | |||
1070 | dma_set_residue(txstate, dwc_get_residue(dwc, cookie)); | ||
1088 | 1071 | ||
1089 | if (dwc->paused && ret == DMA_IN_PROGRESS) | 1072 | if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS) |
1090 | return DMA_PAUSED; | 1073 | return DMA_PAUSED; |
1091 | 1074 | ||
1092 | return ret; | 1075 | return ret; |
@@ -1107,7 +1090,7 @@ static void dwc_issue_pending(struct dma_chan *chan) | |||
1107 | 1090 | ||
1108 | static void dw_dma_off(struct dw_dma *dw) | 1091 | static void dw_dma_off(struct dw_dma *dw) |
1109 | { | 1092 | { |
1110 | int i; | 1093 | unsigned int i; |
1111 | 1094 | ||
1112 | dma_writel(dw, CFG, 0); | 1095 | dma_writel(dw, CFG, 0); |
1113 | 1096 | ||
@@ -1121,7 +1104,7 @@ static void dw_dma_off(struct dw_dma *dw) | |||
1121 | cpu_relax(); | 1104 | cpu_relax(); |
1122 | 1105 | ||
1123 | for (i = 0; i < dw->dma.chancnt; i++) | 1106 | for (i = 0; i < dw->dma.chancnt; i++) |
1124 | dw->chan[i].initialized = false; | 1107 | clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags); |
1125 | } | 1108 | } |
1126 | 1109 | ||
1127 | static void dw_dma_on(struct dw_dma *dw) | 1110 | static void dw_dma_on(struct dw_dma *dw) |
@@ -1133,9 +1116,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
1133 | { | 1116 | { |
1134 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1117 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1135 | struct dw_dma *dw = to_dw_dma(chan->device); | 1118 | struct dw_dma *dw = to_dw_dma(chan->device); |
1136 | struct dw_desc *desc; | ||
1137 | int i; | ||
1138 | unsigned long flags; | ||
1139 | 1119 | ||
1140 | dev_vdbg(chan2dev(chan), "%s\n", __func__); | 1120 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
1141 | 1121 | ||
@@ -1166,48 +1146,13 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
1166 | dw_dma_on(dw); | 1146 | dw_dma_on(dw); |
1167 | dw->in_use |= dwc->mask; | 1147 | dw->in_use |= dwc->mask; |
1168 | 1148 | ||
1169 | spin_lock_irqsave(&dwc->lock, flags); | 1149 | return 0; |
1170 | i = dwc->descs_allocated; | ||
1171 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { | ||
1172 | dma_addr_t phys; | ||
1173 | |||
1174 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
1175 | |||
1176 | desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys); | ||
1177 | if (!desc) | ||
1178 | goto err_desc_alloc; | ||
1179 | |||
1180 | memset(desc, 0, sizeof(struct dw_desc)); | ||
1181 | |||
1182 | INIT_LIST_HEAD(&desc->tx_list); | ||
1183 | dma_async_tx_descriptor_init(&desc->txd, chan); | ||
1184 | desc->txd.tx_submit = dwc_tx_submit; | ||
1185 | desc->txd.flags = DMA_CTRL_ACK; | ||
1186 | desc->txd.phys = phys; | ||
1187 | |||
1188 | dwc_desc_put(dwc, desc); | ||
1189 | |||
1190 | spin_lock_irqsave(&dwc->lock, flags); | ||
1191 | i = ++dwc->descs_allocated; | ||
1192 | } | ||
1193 | |||
1194 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
1195 | |||
1196 | dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); | ||
1197 | |||
1198 | return i; | ||
1199 | |||
1200 | err_desc_alloc: | ||
1201 | dev_info(chan2dev(chan), "only allocated %d descriptors\n", i); | ||
1202 | |||
1203 | return i; | ||
1204 | } | 1150 | } |
1205 | 1151 | ||
1206 | static void dwc_free_chan_resources(struct dma_chan *chan) | 1152 | static void dwc_free_chan_resources(struct dma_chan *chan) |
1207 | { | 1153 | { |
1208 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1154 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1209 | struct dw_dma *dw = to_dw_dma(chan->device); | 1155 | struct dw_dma *dw = to_dw_dma(chan->device); |
1210 | struct dw_desc *desc, *_desc; | ||
1211 | unsigned long flags; | 1156 | unsigned long flags; |
1212 | LIST_HEAD(list); | 1157 | LIST_HEAD(list); |
1213 | 1158 | ||
@@ -1220,17 +1165,15 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1220 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); | 1165 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); |
1221 | 1166 | ||
1222 | spin_lock_irqsave(&dwc->lock, flags); | 1167 | spin_lock_irqsave(&dwc->lock, flags); |
1223 | list_splice_init(&dwc->free_list, &list); | ||
1224 | dwc->descs_allocated = 0; | ||
1225 | 1168 | ||
1226 | /* Clear custom channel configuration */ | 1169 | /* Clear custom channel configuration */ |
1227 | dwc->src_id = 0; | 1170 | dwc->src_id = 0; |
1228 | dwc->dst_id = 0; | 1171 | dwc->dst_id = 0; |
1229 | 1172 | ||
1230 | dwc->src_master = 0; | 1173 | dwc->m_master = 0; |
1231 | dwc->dst_master = 0; | 1174 | dwc->p_master = 0; |
1232 | 1175 | ||
1233 | dwc->initialized = false; | 1176 | clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); |
1234 | 1177 | ||
1235 | /* Disable interrupts */ | 1178 | /* Disable interrupts */ |
1236 | channel_clear_bit(dw, MASK.XFER, dwc->mask); | 1179 | channel_clear_bit(dw, MASK.XFER, dwc->mask); |
@@ -1244,11 +1187,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1244 | if (!dw->in_use) | 1187 | if (!dw->in_use) |
1245 | dw_dma_off(dw); | 1188 | dw_dma_off(dw); |
1246 | 1189 | ||
1247 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { | ||
1248 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); | ||
1249 | dma_pool_free(dw->desc_pool, desc, desc->txd.phys); | ||
1250 | } | ||
1251 | |||
1252 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); | 1190 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); |
1253 | } | 1191 | } |
1254 | 1192 | ||
@@ -1326,6 +1264,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1326 | struct dw_cyclic_desc *retval = NULL; | 1264 | struct dw_cyclic_desc *retval = NULL; |
1327 | struct dw_desc *desc; | 1265 | struct dw_desc *desc; |
1328 | struct dw_desc *last = NULL; | 1266 | struct dw_desc *last = NULL; |
1267 | u8 lms = DWC_LLP_LMS(dwc->m_master); | ||
1329 | unsigned long was_cyclic; | 1268 | unsigned long was_cyclic; |
1330 | unsigned int reg_width; | 1269 | unsigned int reg_width; |
1331 | unsigned int periods; | 1270 | unsigned int periods; |
@@ -1379,9 +1318,6 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1379 | 1318 | ||
1380 | retval = ERR_PTR(-ENOMEM); | 1319 | retval = ERR_PTR(-ENOMEM); |
1381 | 1320 | ||
1382 | if (periods > NR_DESCS_PER_CHANNEL) | ||
1383 | goto out_err; | ||
1384 | |||
1385 | cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); | 1321 | cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); |
1386 | if (!cdesc) | 1322 | if (!cdesc) |
1387 | goto out_err; | 1323 | goto out_err; |
@@ -1397,50 +1333,50 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1397 | 1333 | ||
1398 | switch (direction) { | 1334 | switch (direction) { |
1399 | case DMA_MEM_TO_DEV: | 1335 | case DMA_MEM_TO_DEV: |
1400 | desc->lli.dar = sconfig->dst_addr; | 1336 | lli_write(desc, dar, sconfig->dst_addr); |
1401 | desc->lli.sar = buf_addr + (period_len * i); | 1337 | lli_write(desc, sar, buf_addr + period_len * i); |
1402 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) | 1338 | lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan) |
1403 | | DWC_CTLL_DST_WIDTH(reg_width) | 1339 | | DWC_CTLL_DST_WIDTH(reg_width) |
1404 | | DWC_CTLL_SRC_WIDTH(reg_width) | 1340 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1405 | | DWC_CTLL_DST_FIX | 1341 | | DWC_CTLL_DST_FIX |
1406 | | DWC_CTLL_SRC_INC | 1342 | | DWC_CTLL_SRC_INC |
1407 | | DWC_CTLL_INT_EN); | 1343 | | DWC_CTLL_INT_EN)); |
1408 | 1344 | ||
1409 | desc->lli.ctllo |= sconfig->device_fc ? | 1345 | lli_set(desc, ctllo, sconfig->device_fc ? |
1410 | DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | 1346 | DWC_CTLL_FC(DW_DMA_FC_P_M2P) : |
1411 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | 1347 | DWC_CTLL_FC(DW_DMA_FC_D_M2P)); |
1412 | 1348 | ||
1413 | break; | 1349 | break; |
1414 | case DMA_DEV_TO_MEM: | 1350 | case DMA_DEV_TO_MEM: |
1415 | desc->lli.dar = buf_addr + (period_len * i); | 1351 | lli_write(desc, dar, buf_addr + period_len * i); |
1416 | desc->lli.sar = sconfig->src_addr; | 1352 | lli_write(desc, sar, sconfig->src_addr); |
1417 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) | 1353 | lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan) |
1418 | | DWC_CTLL_SRC_WIDTH(reg_width) | 1354 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1419 | | DWC_CTLL_DST_WIDTH(reg_width) | 1355 | | DWC_CTLL_DST_WIDTH(reg_width) |
1420 | | DWC_CTLL_DST_INC | 1356 | | DWC_CTLL_DST_INC |
1421 | | DWC_CTLL_SRC_FIX | 1357 | | DWC_CTLL_SRC_FIX |
1422 | | DWC_CTLL_INT_EN); | 1358 | | DWC_CTLL_INT_EN)); |
1423 | 1359 | ||
1424 | desc->lli.ctllo |= sconfig->device_fc ? | 1360 | lli_set(desc, ctllo, sconfig->device_fc ? |
1425 | DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | 1361 | DWC_CTLL_FC(DW_DMA_FC_P_P2M) : |
1426 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | 1362 | DWC_CTLL_FC(DW_DMA_FC_D_P2M)); |
1427 | 1363 | ||
1428 | break; | 1364 | break; |
1429 | default: | 1365 | default: |
1430 | break; | 1366 | break; |
1431 | } | 1367 | } |
1432 | 1368 | ||
1433 | desc->lli.ctlhi = (period_len >> reg_width); | 1369 | lli_write(desc, ctlhi, period_len >> reg_width); |
1434 | cdesc->desc[i] = desc; | 1370 | cdesc->desc[i] = desc; |
1435 | 1371 | ||
1436 | if (last) | 1372 | if (last) |
1437 | last->lli.llp = desc->txd.phys; | 1373 | lli_write(last, llp, desc->txd.phys | lms); |
1438 | 1374 | ||
1439 | last = desc; | 1375 | last = desc; |
1440 | } | 1376 | } |
1441 | 1377 | ||
1442 | /* Let's make a cyclic list */ | 1378 | /* Let's make a cyclic list */ |
1443 | last->lli.llp = cdesc->desc[0]->txd.phys; | 1379 | lli_write(last, llp, cdesc->desc[0]->txd.phys | lms); |
1444 | 1380 | ||
1445 | dev_dbg(chan2dev(&dwc->chan), | 1381 | dev_dbg(chan2dev(&dwc->chan), |
1446 | "cyclic prepared buf %pad len %zu period %zu periods %d\n", | 1382 | "cyclic prepared buf %pad len %zu period %zu periods %d\n", |
@@ -1471,7 +1407,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan) | |||
1471 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1407 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1472 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 1408 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
1473 | struct dw_cyclic_desc *cdesc = dwc->cdesc; | 1409 | struct dw_cyclic_desc *cdesc = dwc->cdesc; |
1474 | int i; | 1410 | unsigned int i; |
1475 | unsigned long flags; | 1411 | unsigned long flags; |
1476 | 1412 | ||
1477 | dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); | 1413 | dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); |
@@ -1495,32 +1431,38 @@ void dw_dma_cyclic_free(struct dma_chan *chan) | |||
1495 | kfree(cdesc->desc); | 1431 | kfree(cdesc->desc); |
1496 | kfree(cdesc); | 1432 | kfree(cdesc); |
1497 | 1433 | ||
1434 | dwc->cdesc = NULL; | ||
1435 | |||
1498 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | 1436 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); |
1499 | } | 1437 | } |
1500 | EXPORT_SYMBOL(dw_dma_cyclic_free); | 1438 | EXPORT_SYMBOL(dw_dma_cyclic_free); |
1501 | 1439 | ||
1502 | /*----------------------------------------------------------------------*/ | 1440 | /*----------------------------------------------------------------------*/ |
1503 | 1441 | ||
1504 | int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | 1442 | int dw_dma_probe(struct dw_dma_chip *chip) |
1505 | { | 1443 | { |
1444 | struct dw_dma_platform_data *pdata; | ||
1506 | struct dw_dma *dw; | 1445 | struct dw_dma *dw; |
1507 | bool autocfg = false; | 1446 | bool autocfg = false; |
1508 | unsigned int dw_params; | 1447 | unsigned int dw_params; |
1509 | unsigned int max_blk_size = 0; | 1448 | unsigned int i; |
1510 | int err; | 1449 | int err; |
1511 | int i; | ||
1512 | 1450 | ||
1513 | dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); | 1451 | dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); |
1514 | if (!dw) | 1452 | if (!dw) |
1515 | return -ENOMEM; | 1453 | return -ENOMEM; |
1516 | 1454 | ||
1455 | dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL); | ||
1456 | if (!dw->pdata) | ||
1457 | return -ENOMEM; | ||
1458 | |||
1517 | dw->regs = chip->regs; | 1459 | dw->regs = chip->regs; |
1518 | chip->dw = dw; | 1460 | chip->dw = dw; |
1519 | 1461 | ||
1520 | pm_runtime_get_sync(chip->dev); | 1462 | pm_runtime_get_sync(chip->dev); |
1521 | 1463 | ||
1522 | if (!pdata) { | 1464 | if (!chip->pdata) { |
1523 | dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); | 1465 | dw_params = dma_readl(dw, DW_PARAMS); |
1524 | dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); | 1466 | dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); |
1525 | 1467 | ||
1526 | autocfg = dw_params >> DW_PARAMS_EN & 1; | 1468 | autocfg = dw_params >> DW_PARAMS_EN & 1; |
@@ -1529,29 +1471,31 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1529 | goto err_pdata; | 1471 | goto err_pdata; |
1530 | } | 1472 | } |
1531 | 1473 | ||
1532 | pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); | 1474 | /* Reassign the platform data pointer */ |
1533 | if (!pdata) { | 1475 | pdata = dw->pdata; |
1534 | err = -ENOMEM; | ||
1535 | goto err_pdata; | ||
1536 | } | ||
1537 | 1476 | ||
1538 | /* Get hardware configuration parameters */ | 1477 | /* Get hardware configuration parameters */ |
1539 | pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1; | 1478 | pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1; |
1540 | pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; | 1479 | pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; |
1541 | for (i = 0; i < pdata->nr_masters; i++) { | 1480 | for (i = 0; i < pdata->nr_masters; i++) { |
1542 | pdata->data_width[i] = | 1481 | pdata->data_width[i] = |
1543 | (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; | 1482 | 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3); |
1544 | } | 1483 | } |
1545 | max_blk_size = dma_readl(dw, MAX_BLK_SIZE); | 1484 | pdata->block_size = dma_readl(dw, MAX_BLK_SIZE); |
1546 | 1485 | ||
1547 | /* Fill platform data with the default values */ | 1486 | /* Fill platform data with the default values */ |
1548 | pdata->is_private = true; | 1487 | pdata->is_private = true; |
1549 | pdata->is_memcpy = true; | 1488 | pdata->is_memcpy = true; |
1550 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; | 1489 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; |
1551 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; | 1490 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; |
1552 | } else if (pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { | 1491 | } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { |
1553 | err = -EINVAL; | 1492 | err = -EINVAL; |
1554 | goto err_pdata; | 1493 | goto err_pdata; |
1494 | } else { | ||
1495 | memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata)); | ||
1496 | |||
1497 | /* Reassign the platform data pointer */ | ||
1498 | pdata = dw->pdata; | ||
1555 | } | 1499 | } |
1556 | 1500 | ||
1557 | dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan), | 1501 | dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan), |
@@ -1561,11 +1505,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1561 | goto err_pdata; | 1505 | goto err_pdata; |
1562 | } | 1506 | } |
1563 | 1507 | ||
1564 | /* Get hardware configuration parameters */ | ||
1565 | dw->nr_masters = pdata->nr_masters; | ||
1566 | for (i = 0; i < dw->nr_masters; i++) | ||
1567 | dw->data_width[i] = pdata->data_width[i]; | ||
1568 | |||
1569 | /* Calculate all channel mask before DMA setup */ | 1508 | /* Calculate all channel mask before DMA setup */ |
1570 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; | 1509 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; |
1571 | 1510 | ||
@@ -1612,7 +1551,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1612 | 1551 | ||
1613 | INIT_LIST_HEAD(&dwc->active_list); | 1552 | INIT_LIST_HEAD(&dwc->active_list); |
1614 | INIT_LIST_HEAD(&dwc->queue); | 1553 | INIT_LIST_HEAD(&dwc->queue); |
1615 | INIT_LIST_HEAD(&dwc->free_list); | ||
1616 | 1554 | ||
1617 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1555 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1618 | 1556 | ||
@@ -1620,11 +1558,9 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1620 | 1558 | ||
1621 | /* Hardware configuration */ | 1559 | /* Hardware configuration */ |
1622 | if (autocfg) { | 1560 | if (autocfg) { |
1623 | unsigned int dwc_params; | ||
1624 | unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; | 1561 | unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; |
1625 | void __iomem *addr = chip->regs + r * sizeof(u32); | 1562 | void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r]; |
1626 | 1563 | unsigned int dwc_params = dma_readl_native(addr); | |
1627 | dwc_params = dma_read_byaddr(addr, DWC_PARAMS); | ||
1628 | 1564 | ||
1629 | dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, | 1565 | dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, |
1630 | dwc_params); | 1566 | dwc_params); |
@@ -1635,16 +1571,15 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1635 | * up to 0x0a for 4095. | 1571 | * up to 0x0a for 4095. |
1636 | */ | 1572 | */ |
1637 | dwc->block_size = | 1573 | dwc->block_size = |
1638 | (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; | 1574 | (4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1; |
1639 | dwc->nollp = | 1575 | dwc->nollp = |
1640 | (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; | 1576 | (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; |
1641 | } else { | 1577 | } else { |
1642 | dwc->block_size = pdata->block_size; | 1578 | dwc->block_size = pdata->block_size; |
1643 | 1579 | ||
1644 | /* Check if channel supports multi block transfer */ | 1580 | /* Check if channel supports multi block transfer */ |
1645 | channel_writel(dwc, LLP, 0xfffffffc); | 1581 | channel_writel(dwc, LLP, DWC_LLP_LOC(0xffffffff)); |
1646 | dwc->nollp = | 1582 | dwc->nollp = DWC_LLP_LOC(channel_readl(dwc, LLP)) == 0; |
1647 | (channel_readl(dwc, LLP) & 0xfffffffc) == 0; | ||
1648 | channel_writel(dwc, LLP, 0); | 1583 | channel_writel(dwc, LLP, 0); |
1649 | } | 1584 | } |
1650 | } | 1585 | } |
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index 358f9689a3f5..0ae6c3b1d34e 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c | |||
@@ -17,8 +17,8 @@ | |||
17 | 17 | ||
18 | static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) | 18 | static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) |
19 | { | 19 | { |
20 | const struct dw_dma_platform_data *pdata = (void *)pid->driver_data; | ||
20 | struct dw_dma_chip *chip; | 21 | struct dw_dma_chip *chip; |
21 | struct dw_dma_platform_data *pdata = (void *)pid->driver_data; | ||
22 | int ret; | 22 | int ret; |
23 | 23 | ||
24 | ret = pcim_enable_device(pdev); | 24 | ret = pcim_enable_device(pdev); |
@@ -49,8 +49,9 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
49 | chip->dev = &pdev->dev; | 49 | chip->dev = &pdev->dev; |
50 | chip->regs = pcim_iomap_table(pdev)[0]; | 50 | chip->regs = pcim_iomap_table(pdev)[0]; |
51 | chip->irq = pdev->irq; | 51 | chip->irq = pdev->irq; |
52 | chip->pdata = pdata; | ||
52 | 53 | ||
53 | ret = dw_dma_probe(chip, pdata); | 54 | ret = dw_dma_probe(chip); |
54 | if (ret) | 55 | if (ret) |
55 | return ret; | 56 | return ret; |
56 | 57 | ||
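The probe interface change threads the platform data through `chip->pdata` instead of a separate argument, and the core keeps its own copy in `dw->pdata`, falling back to hardware auto-configuration when no platform data is supplied. Here is a rough standalone model of that flow; the structures are simplified and the auto-configured values are a stand-in for the DW_PARAMS decode, not real hardware behaviour.

#include <stdio.h>
#include <string.h>

struct pdata {
	unsigned int nr_channels;
	unsigned int nr_masters;
};

struct chip {
	const struct pdata *pdata;	/* optional; NULL means auto-configure */
};

struct dw {
	struct pdata pdata;		/* the driver always owns a private copy */
};

static int probe(struct chip *chip, struct dw *dw)
{
	if (!chip->pdata) {
		/* No platform data: stand-in for decoding DW_PARAMS. */
		dw->pdata.nr_channels = 8;
		dw->pdata.nr_masters = 2;
	} else {
		/* Copy, so later code never touches caller-owned memory. */
		memcpy(&dw->pdata, chip->pdata, sizeof(dw->pdata));
	}
	return 0;
}

int main(void)
{
	static const struct pdata board = { .nr_channels = 4, .nr_masters = 1 };
	struct chip with_pdata = { .pdata = &board };
	struct chip autocfg = { .pdata = NULL };
	struct dw dw;

	probe(&with_pdata, &dw);
	printf("pdata:   %u channels\n", dw.pdata.nr_channels);	/* 4 */
	probe(&autocfg, &dw);
	printf("autocfg: %u channels\n", dw.pdata.nr_channels);	/* 8 */
	return 0;
}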
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index 26edbe3a27ac..5bda0eb9f393 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c | |||
@@ -42,13 +42,13 @@ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, | |||
42 | 42 | ||
43 | slave.src_id = dma_spec->args[0]; | 43 | slave.src_id = dma_spec->args[0]; |
44 | slave.dst_id = dma_spec->args[0]; | 44 | slave.dst_id = dma_spec->args[0]; |
45 | slave.src_master = dma_spec->args[1]; | 45 | slave.m_master = dma_spec->args[1]; |
46 | slave.dst_master = dma_spec->args[2]; | 46 | slave.p_master = dma_spec->args[2]; |
47 | 47 | ||
48 | if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS || | 48 | if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS || |
49 | slave.dst_id >= DW_DMA_MAX_NR_REQUESTS || | 49 | slave.dst_id >= DW_DMA_MAX_NR_REQUESTS || |
50 | slave.src_master >= dw->nr_masters || | 50 | slave.m_master >= dw->pdata->nr_masters || |
51 | slave.dst_master >= dw->nr_masters)) | 51 | slave.p_master >= dw->pdata->nr_masters)) |
52 | return NULL; | 52 | return NULL; |
53 | 53 | ||
54 | dma_cap_zero(cap); | 54 | dma_cap_zero(cap); |
@@ -66,8 +66,8 @@ static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) | |||
66 | .dma_dev = dma_spec->dev, | 66 | .dma_dev = dma_spec->dev, |
67 | .src_id = dma_spec->slave_id, | 67 | .src_id = dma_spec->slave_id, |
68 | .dst_id = dma_spec->slave_id, | 68 | .dst_id = dma_spec->slave_id, |
69 | .src_master = 1, | 69 | .m_master = 0, |
70 | .dst_master = 0, | 70 | .p_master = 1, |
71 | }; | 71 | }; |
72 | 72 | ||
73 | return dw_dma_filter(chan, &slave); | 73 | return dw_dma_filter(chan, &slave); |
@@ -103,6 +103,7 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
103 | struct device_node *np = pdev->dev.of_node; | 103 | struct device_node *np = pdev->dev.of_node; |
104 | struct dw_dma_platform_data *pdata; | 104 | struct dw_dma_platform_data *pdata; |
105 | u32 tmp, arr[DW_DMA_MAX_NR_MASTERS]; | 105 | u32 tmp, arr[DW_DMA_MAX_NR_MASTERS]; |
106 | u32 nr_masters; | ||
106 | u32 nr_channels; | 107 | u32 nr_channels; |
107 | 108 | ||
108 | if (!np) { | 109 | if (!np) { |
@@ -110,6 +111,11 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
110 | return NULL; | 111 | return NULL; |
111 | } | 112 | } |
112 | 113 | ||
114 | if (of_property_read_u32(np, "dma-masters", &nr_masters)) | ||
115 | return NULL; | ||
116 | if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS) | ||
117 | return NULL; | ||
118 | |||
113 | if (of_property_read_u32(np, "dma-channels", &nr_channels)) | 119 | if (of_property_read_u32(np, "dma-channels", &nr_channels)) |
114 | return NULL; | 120 | return NULL; |
115 | 121 | ||
@@ -117,6 +123,7 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
117 | if (!pdata) | 123 | if (!pdata) |
118 | return NULL; | 124 | return NULL; |
119 | 125 | ||
126 | pdata->nr_masters = nr_masters; | ||
120 | pdata->nr_channels = nr_channels; | 127 | pdata->nr_channels = nr_channels; |
121 | 128 | ||
122 | if (of_property_read_bool(np, "is_private")) | 129 | if (of_property_read_bool(np, "is_private")) |
@@ -131,17 +138,13 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
131 | if (!of_property_read_u32(np, "block_size", &tmp)) | 138 | if (!of_property_read_u32(np, "block_size", &tmp)) |
132 | pdata->block_size = tmp; | 139 | pdata->block_size = tmp; |
133 | 140 | ||
134 | if (!of_property_read_u32(np, "dma-masters", &tmp)) { | 141 | if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) { |
135 | if (tmp > DW_DMA_MAX_NR_MASTERS) | 142 | for (tmp = 0; tmp < nr_masters; tmp++) |
136 | return NULL; | ||
137 | |||
138 | pdata->nr_masters = tmp; | ||
139 | } | ||
140 | |||
141 | if (!of_property_read_u32_array(np, "data_width", arr, | ||
142 | pdata->nr_masters)) | ||
143 | for (tmp = 0; tmp < pdata->nr_masters; tmp++) | ||
144 | pdata->data_width[tmp] = arr[tmp]; | 143 | pdata->data_width[tmp] = arr[tmp]; |
144 | } else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) { | ||
145 | for (tmp = 0; tmp < nr_masters; tmp++) | ||
146 | pdata->data_width[tmp] = BIT(arr[tmp] & 0x07); | ||
147 | } | ||
145 | 148 | ||
146 | return pdata; | 149 | return pdata; |
147 | } | 150 | } |
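The DT parser now requires `dma-masters`, prefers the new `data-width` property given directly in bytes, and still accepts the legacy `data_width` property, whose cells encode the width as log2(bytes) and are converted with `BIT(arr[tmp] & 0x07)`. A small standalone sketch of that legacy conversion follows; the cell values are samples chosen for illustration.

#include <stdio.h>

#define BIT(n)	(1u << (n))

/*
 * Legacy "data_width" cells encode the width as log2(bytes): 0 -> 1 byte,
 * 2 -> 4 bytes, and so on. The new "data-width" property is plain bytes.
 */
static unsigned int legacy_cell_to_bytes(unsigned int cell)
{
	return BIT(cell & 0x07);
}

int main(void)
{
	unsigned int cells[] = { 0, 1, 2, 3 };	/* sample legacy cell values */
	unsigned int i;

	for (i = 0; i < sizeof(cells) / sizeof(cells[0]); i++)
		printf("cell %u -> %u bytes\n", cells[i],
		       legacy_cell_to_bytes(cells[i]));
	return 0;
}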
@@ -158,7 +161,7 @@ static int dw_probe(struct platform_device *pdev) | |||
158 | struct dw_dma_chip *chip; | 161 | struct dw_dma_chip *chip; |
159 | struct device *dev = &pdev->dev; | 162 | struct device *dev = &pdev->dev; |
160 | struct resource *mem; | 163 | struct resource *mem; |
161 | struct dw_dma_platform_data *pdata; | 164 | const struct dw_dma_platform_data *pdata; |
162 | int err; | 165 | int err; |
163 | 166 | ||
164 | chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); | 167 | chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); |
@@ -183,6 +186,7 @@ static int dw_probe(struct platform_device *pdev) | |||
183 | pdata = dw_dma_parse_dt(pdev); | 186 | pdata = dw_dma_parse_dt(pdev); |
184 | 187 | ||
185 | chip->dev = dev; | 188 | chip->dev = dev; |
189 | chip->pdata = pdata; | ||
186 | 190 | ||
187 | chip->clk = devm_clk_get(chip->dev, "hclk"); | 191 | chip->clk = devm_clk_get(chip->dev, "hclk"); |
188 | if (IS_ERR(chip->clk)) | 192 | if (IS_ERR(chip->clk)) |
@@ -193,7 +197,7 @@ static int dw_probe(struct platform_device *pdev) | |||
193 | 197 | ||
194 | pm_runtime_enable(&pdev->dev); | 198 | pm_runtime_enable(&pdev->dev); |
195 | 199 | ||
196 | err = dw_dma_probe(chip, pdata); | 200 | err = dw_dma_probe(chip); |
197 | if (err) | 201 | if (err) |
198 | goto err_dw_dma_probe; | 202 | goto err_dw_dma_probe; |
199 | 203 | ||
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index 0a50c18d85b8..4b7bd7834046 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h | |||
@@ -114,10 +114,6 @@ struct dw_dma_regs { | |||
114 | #define dma_writel_native writel | 114 | #define dma_writel_native writel |
115 | #endif | 115 | #endif |
116 | 116 | ||
117 | /* To access the registers in early stage of probe */ | ||
118 | #define dma_read_byaddr(addr, name) \ | ||
119 | dma_readl_native((addr) + offsetof(struct dw_dma_regs, name)) | ||
120 | |||
121 | /* Bitfields in DW_PARAMS */ | 117 | /* Bitfields in DW_PARAMS */ |
122 | #define DW_PARAMS_NR_CHAN 8 /* number of channels */ | 118 | #define DW_PARAMS_NR_CHAN 8 /* number of channels */ |
123 | #define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */ | 119 | #define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */ |
@@ -143,6 +139,10 @@ enum dw_dma_msize { | |||
143 | DW_DMA_MSIZE_256, | 139 | DW_DMA_MSIZE_256, |
144 | }; | 140 | }; |
145 | 141 | ||
142 | /* Bitfields in LLP */ | ||
143 | #define DWC_LLP_LMS(x) ((x) & 3) /* list master select */ | ||
144 | #define DWC_LLP_LOC(x) ((x) & ~3) /* next lli */ | ||
145 | |||
146 | /* Bitfields in CTL_LO */ | 146 | /* Bitfields in CTL_LO */ |
147 | #define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */ | 147 | #define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */ |
148 | #define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ | 148 | #define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ |
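The new LLP bitfield helpers reflect how the hardware packs the link-list pointer: the low two bits select the list master (LMS) and the remaining bits hold the 4-byte-aligned address of the next LLI, which is also why the probe path writes `DWC_LLP_LOC(0xffffffff)` and reads it back to detect multi-block support. The standalone sketch below shows the split; the address and master index are example values only.

#include <stdio.h>

#define LLP_LMS(x)	((x) & 3u)	/* list master select, low two bits */
#define LLP_LOC(x)	((x) & ~3u)	/* next LLI address, 4-byte aligned  */

int main(void)
{
	unsigned int next_lli = 0x12345670u;	/* example descriptor address */
	unsigned int master = 1u;		/* example AHB master index   */
	unsigned int llp = LLP_LOC(next_lli) | LLP_LMS(master);

	printf("llp      = 0x%08x\n", llp);		/* 0x12345671 */
	printf("next lli = 0x%08x\n", LLP_LOC(llp));	/* 0x12345670 */
	printf("master   = %u\n", LLP_LMS(llp));	/* 1 */
	return 0;
}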
@@ -216,6 +216,8 @@ enum dw_dma_msize { | |||
216 | enum dw_dmac_flags { | 216 | enum dw_dmac_flags { |
217 | DW_DMA_IS_CYCLIC = 0, | 217 | DW_DMA_IS_CYCLIC = 0, |
218 | DW_DMA_IS_SOFT_LLP = 1, | 218 | DW_DMA_IS_SOFT_LLP = 1, |
219 | DW_DMA_IS_PAUSED = 2, | ||
220 | DW_DMA_IS_INITIALIZED = 3, | ||
219 | }; | 221 | }; |
220 | 222 | ||
221 | struct dw_dma_chan { | 223 | struct dw_dma_chan { |
@@ -224,8 +226,6 @@ struct dw_dma_chan { | |||
224 | u8 mask; | 226 | u8 mask; |
225 | u8 priority; | 227 | u8 priority; |
226 | enum dma_transfer_direction direction; | 228 | enum dma_transfer_direction direction; |
227 | bool paused; | ||
228 | bool initialized; | ||
229 | 229 | ||
230 | /* software emulation of the LLP transfers */ | 230 | /* software emulation of the LLP transfers */ |
231 | struct list_head *tx_node_active; | 231 | struct list_head *tx_node_active; |
@@ -236,8 +236,6 @@ struct dw_dma_chan { | |||
236 | unsigned long flags; | 236 | unsigned long flags; |
237 | struct list_head active_list; | 237 | struct list_head active_list; |
238 | struct list_head queue; | 238 | struct list_head queue; |
239 | struct list_head free_list; | ||
240 | u32 residue; | ||
241 | struct dw_cyclic_desc *cdesc; | 239 | struct dw_cyclic_desc *cdesc; |
242 | 240 | ||
243 | unsigned int descs_allocated; | 241 | unsigned int descs_allocated; |
@@ -249,8 +247,8 @@ struct dw_dma_chan { | |||
249 | /* custom slave configuration */ | 247 | /* custom slave configuration */ |
250 | u8 src_id; | 248 | u8 src_id; |
251 | u8 dst_id; | 249 | u8 dst_id; |
252 | u8 src_master; | 250 | u8 m_master; |
253 | u8 dst_master; | 251 | u8 p_master; |
254 | 252 | ||
255 | /* configuration passed via .device_config */ | 253 | /* configuration passed via .device_config */ |
256 | struct dma_slave_config dma_sconfig; | 254 | struct dma_slave_config dma_sconfig; |
@@ -283,9 +281,8 @@ struct dw_dma { | |||
283 | u8 all_chan_mask; | 281 | u8 all_chan_mask; |
284 | u8 in_use; | 282 | u8 in_use; |
285 | 283 | ||
286 | /* hardware configuration */ | 284 | /* platform data */ |
287 | unsigned char nr_masters; | 285 | struct dw_dma_platform_data *pdata; |
288 | unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; | ||
289 | }; | 286 | }; |
290 | 287 | ||
291 | static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) | 288 | static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) |
@@ -308,32 +305,51 @@ static inline struct dw_dma *to_dw_dma(struct dma_device *ddev) | |||
308 | return container_of(ddev, struct dw_dma, dma); | 305 | return container_of(ddev, struct dw_dma, dma); |
309 | } | 306 | } |
310 | 307 | ||
308 | #ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO | ||
309 | typedef __be32 __dw32; | ||
310 | #else | ||
311 | typedef __le32 __dw32; | ||
312 | #endif | ||
313 | |||
311 | /* LLI == Linked List Item; a.k.a. DMA block descriptor */ | 314 | /* LLI == Linked List Item; a.k.a. DMA block descriptor */ |
312 | struct dw_lli { | 315 | struct dw_lli { |
313 | /* values that are not changed by hardware */ | 316 | /* values that are not changed by hardware */ |
314 | u32 sar; | 317 | __dw32 sar; |
315 | u32 dar; | 318 | __dw32 dar; |
316 | u32 llp; /* chain to next lli */ | 319 | __dw32 llp; /* chain to next lli */ |
317 | u32 ctllo; | 320 | __dw32 ctllo; |
318 | /* values that may get written back: */ | 321 | /* values that may get written back: */ |
319 | u32 ctlhi; | 322 | __dw32 ctlhi; |
320 | /* sstat and dstat can snapshot peripheral register state. | 323 | /* sstat and dstat can snapshot peripheral register state. |
321 | * silicon config may discard either or both... | 324 | * silicon config may discard either or both... |
322 | */ | 325 | */ |
323 | u32 sstat; | 326 | __dw32 sstat; |
324 | u32 dstat; | 327 | __dw32 dstat; |
325 | }; | 328 | }; |
326 | 329 | ||
327 | struct dw_desc { | 330 | struct dw_desc { |
328 | /* FIRST values the hardware uses */ | 331 | /* FIRST values the hardware uses */ |
329 | struct dw_lli lli; | 332 | struct dw_lli lli; |
330 | 333 | ||
334 | #ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO | ||
335 | #define lli_set(d, reg, v) ((d)->lli.reg |= cpu_to_be32(v)) | ||
336 | #define lli_clear(d, reg, v) ((d)->lli.reg &= ~cpu_to_be32(v)) | ||
337 | #define lli_read(d, reg) be32_to_cpu((d)->lli.reg) | ||
338 | #define lli_write(d, reg, v) ((d)->lli.reg = cpu_to_be32(v)) | ||
339 | #else | ||
340 | #define lli_set(d, reg, v) ((d)->lli.reg |= cpu_to_le32(v)) | ||
341 | #define lli_clear(d, reg, v) ((d)->lli.reg &= ~cpu_to_le32(v)) | ||
342 | #define lli_read(d, reg) le32_to_cpu((d)->lli.reg) | ||
343 | #define lli_write(d, reg, v) ((d)->lli.reg = cpu_to_le32(v)) | ||
344 | #endif | ||
345 | |||
331 | /* THEN values for driver housekeeping */ | 346 | /* THEN values for driver housekeeping */ |
332 | struct list_head desc_node; | 347 | struct list_head desc_node; |
333 | struct list_head tx_list; | 348 | struct list_head tx_list; |
334 | struct dma_async_tx_descriptor txd; | 349 | struct dma_async_tx_descriptor txd; |
335 | size_t len; | 350 | size_t len; |
336 | size_t total_len; | 351 | size_t total_len; |
352 | u32 residue; | ||
337 | }; | 353 | }; |
338 | 354 | ||
339 | #define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node) | 355 | #define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node) |
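The `__dw32` typedef and the `lli_read`/`lli_write`/`lli_set`/`lli_clear` macros keep the in-memory LLI fields in the byte order the controller expects (little endian by default, big endian when CONFIG_DW_DMAC_BIG_ENDIAN_IO is set), independent of CPU endianness. The standalone sketch below models the same accessor pattern against an explicit little-endian byte layout; it is an illustration of the idea, not the driver's implementation.

#include <stdio.h>
#include <stdint.h>

/* Store/load a CPU value to/from a fixed little-endian 32-bit field. */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v;
	p[1] = v >> 8;
	p[2] = v >> 16;
	p[3] = v >> 24;
}

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

struct lli {
	uint8_t sar[4];		/* hardware-defined byte order */
	uint8_t dar[4];
	uint8_t llp[4];
	uint8_t ctllo[4];
};

#define lli_write(d, reg, v)	put_le32((d)->reg, (v))
#define lli_read(d, reg)	get_le32((d)->reg)
#define lli_set(d, reg, v)	lli_write(d, reg, lli_read(d, reg) | (v))

int main(void)
{
	struct lli desc = { { 0 } };

	lli_write(&desc, ctllo, 0x00000001u);	/* write one control bit      */
	lli_set(&desc, ctllo, 0x00000100u);	/* OR in another control bit  */
	printf("ctllo = 0x%08x\n", (unsigned int)lli_read(&desc, ctllo));
	return 0;
}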
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 04070baab78a..8181ed131386 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -1537,8 +1537,17 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data) | |||
1537 | 1537 | ||
1538 | dev_vdbg(ecc->dev, "dma_ccerr_handler\n"); | 1538 | dev_vdbg(ecc->dev, "dma_ccerr_handler\n"); |
1539 | 1539 | ||
1540 | if (!edma_error_pending(ecc)) | 1540 | if (!edma_error_pending(ecc)) { |
1541 | /* | ||
1542 | * The registers indicate no pending error event but the irq | ||
1543 | * handler has been called. | ||
1544 | * Ask eDMA to re-evaluate the error registers. | ||
1545 | */ | ||
1546 | dev_err(ecc->dev, "%s: Error interrupt without error event!\n", | ||
1547 | __func__); | ||
1548 | edma_write(ecc, EDMA_EEVAL, 1); | ||
1541 | return IRQ_NONE; | 1549 | return IRQ_NONE; |
1550 | } | ||
1542 | 1551 | ||
1543 | while (1) { | 1552 | while (1) { |
1544 | /* Event missed register(s) */ | 1553 | /* Event missed register(s) */ |
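The eDMA error handler now copes with a spurious error interrupt by logging it, writing EEVAL so the controller re-samples its error inputs, and still returning IRQ_NONE. Below is a toy standalone model of that handler shape; the register struct and values are invented for the example and do not correspond to real eDMA registers.

#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

struct err_regs {
	unsigned int pending;	/* latched error events                  */
	unsigned int eeval;	/* write 1: re-evaluate the error inputs */
};

static enum irqreturn ccerr_handler(struct err_regs *regs)
{
	if (!regs->pending) {
		/*
		 * The interrupt fired but nothing is latched: ask the
		 * controller to re-sample its error lines so an event
		 * that raced with the last acknowledge is not lost.
		 */
		regs->eeval = 1;
		return IRQ_NONE;
	}

	regs->pending = 0;	/* a real driver dispatches per channel here */
	return IRQ_HANDLED;
}

int main(void)
{
	struct err_regs regs = { 0, 0 };
	enum irqreturn ret;

	ret = ccerr_handler(&regs);		/* spurious: nothing pending */
	printf("spurious: ret=%d eeval=%u\n", ret, regs.eeval);

	regs.pending = 0x4;
	ret = ccerr_handler(&regs);		/* a real error event */
	printf("real:     ret=%d\n", ret);
	return 0;
}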
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index aac85c30c2cf..a8828ed639b3 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -462,13 +462,12 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) | |||
462 | struct fsl_desc_sw *desc; | 462 | struct fsl_desc_sw *desc; |
463 | dma_addr_t pdesc; | 463 | dma_addr_t pdesc; |
464 | 464 | ||
465 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); | 465 | desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc); |
466 | if (!desc) { | 466 | if (!desc) { |
467 | chan_dbg(chan, "out of memory for link descriptor\n"); | 467 | chan_dbg(chan, "out of memory for link descriptor\n"); |
468 | return NULL; | 468 | return NULL; |
469 | } | 469 | } |
470 | 470 | ||
471 | memset(desc, 0, sizeof(*desc)); | ||
472 | INIT_LIST_HEAD(&desc->tx_list); | 471 | INIT_LIST_HEAD(&desc->tx_list); |
473 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); | 472 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); |
474 | desc->async_tx.tx_submit = fsl_dma_tx_submit; | 473 | desc->async_tx.tx_submit = fsl_dma_tx_submit; |
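This is the first of several identical conversions in this pull (ioat and mmp_pdma below follow the same pattern): `dma_pool_zalloc()` hands back already-cleared memory, so the explicit `memset()` after `dma_pool_alloc()` can go. A trivial standalone model of the alloc-then-clear helper being folded away, using plain malloc rather than a DMA pool:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Model of the alloc-then-clear pattern that the *_zalloc() helpers replace. */
static void *pool_alloc(size_t size)
{
	return malloc(size);
}

static void *pool_zalloc(size_t size)
{
	void *p = pool_alloc(size);

	if (p)
		memset(p, 0, size);	/* the caller no longer has to do this */
	return p;
}

struct desc {
	int cookie;
	struct desc *next;
};

int main(void)
{
	struct desc *d = pool_zalloc(sizeof(*d));

	if (!d)
		return 1;
	printf("cookie=%d next=%p\n", d->cookie, (void *)d->next);	/* zeroed */
	free(d);
	return 0;
}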
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c index ee510515ce18..f8c5cd53307c 100644 --- a/drivers/dma/hsu/hsu.c +++ b/drivers/dma/hsu/hsu.c | |||
@@ -77,8 +77,8 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc) | |||
77 | hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr); | 77 | hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr); |
78 | 78 | ||
79 | /* Set descriptors */ | 79 | /* Set descriptors */ |
80 | count = (desc->nents - desc->active) % HSU_DMA_CHAN_NR_DESC; | 80 | count = desc->nents - desc->active; |
81 | for (i = 0; i < count; i++) { | 81 | for (i = 0; i < count && i < HSU_DMA_CHAN_NR_DESC; i++) { |
82 | hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr); | 82 | hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr); |
83 | hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len); | 83 | hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len); |
84 | 84 | ||
@@ -160,7 +160,7 @@ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr) | |||
160 | return IRQ_NONE; | 160 | return IRQ_NONE; |
161 | 161 | ||
162 | /* Timeout IRQ, need wait some time, see Errata 2 */ | 162 | /* Timeout IRQ, need wait some time, see Errata 2 */ |
163 | if (hsuc->direction == DMA_DEV_TO_MEM && (sr & HSU_CH_SR_DESCTO_ANY)) | 163 | if (sr & HSU_CH_SR_DESCTO_ANY) |
164 | udelay(2); | 164 | udelay(2); |
165 | 165 | ||
166 | sr &= ~HSU_CH_SR_DESCTO_ANY; | 166 | sr &= ~HSU_CH_SR_DESCTO_ANY; |
@@ -420,6 +420,8 @@ int hsu_dma_probe(struct hsu_dma_chip *chip) | |||
420 | 420 | ||
421 | hsu->dma.dev = chip->dev; | 421 | hsu->dma.dev = chip->dev; |
422 | 422 | ||
423 | dma_set_max_seg_size(hsu->dma.dev, HSU_CH_DxTSR_MASK); | ||
424 | |||
423 | ret = dma_async_device_register(&hsu->dma); | 425 | ret = dma_async_device_register(&hsu->dma); |
424 | if (ret) | 426 | if (ret) |
425 | return ret; | 427 | return ret; |
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h index 6b070c22b1df..486b023b3af0 100644 --- a/drivers/dma/hsu/hsu.h +++ b/drivers/dma/hsu/hsu.h | |||
@@ -58,6 +58,10 @@ | |||
58 | #define HSU_CH_DCR_CHEI BIT(23) | 58 | #define HSU_CH_DCR_CHEI BIT(23) |
59 | #define HSU_CH_DCR_CHTOI(x) BIT(24 + (x)) | 59 | #define HSU_CH_DCR_CHTOI(x) BIT(24 + (x)) |
60 | 60 | ||
61 | /* Bits in HSU_CH_DxTSR */ | ||
62 | #define HSU_CH_DxTSR_MASK GENMASK(15, 0) | ||
63 | #define HSU_CH_DxTSR_TSR(x) ((x) & HSU_CH_DxTSR_MASK) | ||
64 | |||
61 | struct hsu_dma_sg { | 65 | struct hsu_dma_sg { |
62 | dma_addr_t addr; | 66 | dma_addr_t addr; |
63 | unsigned int len; | 67 | unsigned int len; |
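HSU_CH_DxTSR_MASK documents that the per-descriptor transfer size field is 16 bits wide, and the probe change above advertises exactly that limit through `dma_set_max_seg_size()` so clients never submit a segment the field would truncate. A standalone sketch of the mask arithmetic, with GENMASK written out by hand and an example length chosen to show the overflow:

#include <stdio.h>

/* GENMASK(h, l): a contiguous mask covering bits h..l, written out by hand. */
#define GENMASK_U32(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))

#define DxTSR_MASK	GENMASK_U32(15, 0)	/* 16-bit transfer size field */

int main(void)
{
	unsigned int want = 70000u;		/* longer than the field holds */
	unsigned int stored = want & DxTSR_MASK;

	printf("mask   = 0x%08x\n", DxTSR_MASK);	/* 0x0000ffff */
	printf("stored = %u\n", stored);		/* 4464: silently wrapped */
	return 0;
}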
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index efdee1a69fc4..d406056e8892 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c | |||
@@ -690,12 +690,11 @@ static int ioat_alloc_chan_resources(struct dma_chan *c) | |||
690 | /* allocate a completion writeback area */ | 690 | /* allocate a completion writeback area */ |
691 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ | 691 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ |
692 | ioat_chan->completion = | 692 | ioat_chan->completion = |
693 | dma_pool_alloc(ioat_chan->ioat_dma->completion_pool, | 693 | dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool, |
694 | GFP_KERNEL, &ioat_chan->completion_dma); | 694 | GFP_KERNEL, &ioat_chan->completion_dma); |
695 | if (!ioat_chan->completion) | 695 | if (!ioat_chan->completion) |
696 | return -ENOMEM; | 696 | return -ENOMEM; |
697 | 697 | ||
698 | memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion)); | ||
699 | writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF, | 698 | writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF, |
700 | ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); | 699 | ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); |
701 | writel(((u64)ioat_chan->completion_dma) >> 32, | 700 | writel(((u64)ioat_chan->completion_dma) >> 32, |
@@ -1074,6 +1073,7 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) | |||
1074 | struct ioatdma_chan *ioat_chan; | 1073 | struct ioatdma_chan *ioat_chan; |
1075 | bool is_raid_device = false; | 1074 | bool is_raid_device = false; |
1076 | int err; | 1075 | int err; |
1076 | u16 val16; | ||
1077 | 1077 | ||
1078 | dma = &ioat_dma->dma_dev; | 1078 | dma = &ioat_dma->dma_dev; |
1079 | dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock; | 1079 | dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock; |
@@ -1173,6 +1173,17 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) | |||
1173 | if (dca) | 1173 | if (dca) |
1174 | ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base); | 1174 | ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base); |
1175 | 1175 | ||
1176 | /* disable relaxed ordering */ | ||
1177 | err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16); | ||
1178 | if (err) | ||
1179 | return err; | ||
1180 | |||
1181 | /* clear relaxed ordering enable */ | ||
1182 | val16 &= ~IOAT_DEVCTRL_ROE; | ||
1183 | err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16); | ||
1184 | if (err) | ||
1185 | return err; | ||
1186 | |||
1176 | return 0; | 1187 | return 0; |
1177 | } | 1188 | } |
1178 | 1189 | ||
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 4994a3623aee..70534981a49b 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h | |||
@@ -26,6 +26,13 @@ | |||
26 | #define IOAT_PCI_CHANERR_INT_OFFSET 0x180 | 26 | #define IOAT_PCI_CHANERR_INT_OFFSET 0x180 |
27 | #define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184 | 27 | #define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184 |
28 | 28 | ||
29 | /* PCIe config registers */ | ||
30 | |||
31 | /* EXPCAPID + N */ | ||
32 | #define IOAT_DEVCTRL_OFFSET 0x8 | ||
33 | /* relaxed ordering enable */ | ||
34 | #define IOAT_DEVCTRL_ROE 0x10 | ||
35 | |||
29 | /* MMIO Device Registers */ | 36 | /* MMIO Device Registers */ |
30 | #define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */ | 37 | #define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */ |
31 | 38 | ||
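The ioatdma fix reads the PCIe Device Control word at IOAT_DEVCTRL_OFFSET, clears the Relaxed Ordering Enable bit (0x10) and writes it back, a plain read-modify-write of one capability register. The standalone sketch below models that sequence against a fake config word; the starting register value is invented for the example.

#include <stdio.h>
#include <stdint.h>

#define DEVCTL_RELAX_EN	0x0010u		/* Relaxed Ordering Enable */

/* Stand-in for one function's PCIe Device Control register. */
static uint16_t devctl = 0x2810;	/* invented value with the bit set */

static int cap_read_word(uint16_t *val)
{
	*val = devctl;
	return 0;
}

static int cap_write_word(uint16_t val)
{
	devctl = val;
	return 0;
}

int main(void)
{
	uint16_t val;

	if (cap_read_word(&val))
		return 1;
	val &= ~DEVCTL_RELAX_EN;	/* clear only the one bit */
	if (cap_write_word(val))
		return 1;

	printf("devctl = 0x%04x, relaxed ordering %s\n", (unsigned int)devctl,
	       (devctl & DEVCTL_RELAX_EN) ? "on" : "off");
	return 0;
}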
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index e39457f13d4d..56f1fd68b620 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c | |||
@@ -364,13 +364,12 @@ mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan) | |||
364 | struct mmp_pdma_desc_sw *desc; | 364 | struct mmp_pdma_desc_sw *desc; |
365 | dma_addr_t pdesc; | 365 | dma_addr_t pdesc; |
366 | 366 | ||
367 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); | 367 | desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc); |
368 | if (!desc) { | 368 | if (!desc) { |
369 | dev_err(chan->dev, "out of memory for link descriptor\n"); | 369 | dev_err(chan->dev, "out of memory for link descriptor\n"); |
370 | return NULL; | 370 | return NULL; |
371 | } | 371 | } |
372 | 372 | ||
373 | memset(desc, 0, sizeof(*desc)); | ||
374 | INIT_LIST_HEAD(&desc->tx_list); | 373 | INIT_LIST_HEAD(&desc->tx_list); |
375 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); | 374 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); |
376 | /* each desc has submit */ | 375 | /* each desc has submit */ |
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index aae76fb39adc..ccadafa51d5e 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * Copyright (C) Semihalf 2009 | 3 | * Copyright (C) Semihalf 2009 |
4 | * Copyright (C) Ilya Yanok, Emcraft Systems 2010 | 4 | * Copyright (C) Ilya Yanok, Emcraft Systems 2010 |
5 | * Copyright (C) Alexander Popov, Promcontroller 2014 | 5 | * Copyright (C) Alexander Popov, Promcontroller 2014 |
6 | * Copyright (C) Mario Six, Guntermann & Drunck GmbH, 2016 | ||
6 | * | 7 | * |
7 | * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description | 8 | * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description |
8 | * (defines, structures and comments) was taken from MPC5121 DMA driver | 9 | * (defines, structures and comments) was taken from MPC5121 DMA driver |
@@ -26,18 +27,19 @@ | |||
26 | */ | 27 | */ |
27 | 28 | ||
28 | /* | 29 | /* |
29 | * MPC512x and MPC8308 DMA driver. It supports | 30 | * MPC512x and MPC8308 DMA driver. It supports memory to memory data transfers |
30 | * memory to memory data transfers (tested using dmatest module) and | 31 | * (tested using dmatest module) and data transfers between memory and |
31 | * data transfers between memory and peripheral I/O memory | 32 | * peripheral I/O memory by means of slave scatter/gather with these |
32 | * by means of slave scatter/gather with these limitations: | 33 | * limitations: |
33 | * - chunked transfers (described by s/g lists with more than one item) | 34 | * - chunked transfers (described by s/g lists with more than one item) are |
34 | * are refused as long as proper support for scatter/gather is missing; | 35 | * refused as long as proper support for scatter/gather is missing |
35 | * - transfers on MPC8308 always start from software as this SoC appears | 36 | * - transfers on MPC8308 always start from software as this SoC does not have |
36 | * not to have external request lines for peripheral flow control; | 37 | * external request lines for peripheral flow control |
37 | * - only peripheral devices with 4-byte FIFO access register are supported; | 38 | * - memory <-> I/O memory transfer chunks of sizes of 1, 2, 4, 16 (for |
38 | * - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently | 39 | * MPC512x), and 32 bytes are supported, and, consequently, source |
39 | * source and destination addresses must be 4-byte aligned | 40 | * addresses and destination addresses must be aligned accordingly; |
40 | * and transfer size must be aligned on (4 * maxburst) boundary; | 41 | * furthermore, for MPC512x SoCs, the transfer size must be aligned on |
42 | * (chunk size * maxburst) | ||
41 | */ | 43 | */ |
42 | 44 | ||
43 | #include <linux/module.h> | 45 | #include <linux/module.h> |
@@ -213,8 +215,10 @@ struct mpc_dma_chan { | |||
213 | /* Settings for access to peripheral FIFO */ | 215 | /* Settings for access to peripheral FIFO */ |
214 | dma_addr_t src_per_paddr; | 216 | dma_addr_t src_per_paddr; |
215 | u32 src_tcd_nunits; | 217 | u32 src_tcd_nunits; |
218 | u8 swidth; | ||
216 | dma_addr_t dst_per_paddr; | 219 | dma_addr_t dst_per_paddr; |
217 | u32 dst_tcd_nunits; | 220 | u32 dst_tcd_nunits; |
221 | u8 dwidth; | ||
218 | 222 | ||
219 | /* Lock for this structure */ | 223 | /* Lock for this structure */ |
220 | spinlock_t lock; | 224 | spinlock_t lock; |
@@ -247,6 +251,7 @@ static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c) | |||
247 | static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c) | 251 | static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c) |
248 | { | 252 | { |
249 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c); | 253 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c); |
254 | |||
250 | return container_of(mchan, struct mpc_dma, channels[c->chan_id]); | 255 | return container_of(mchan, struct mpc_dma, channels[c->chan_id]); |
251 | } | 256 | } |
252 | 257 | ||
@@ -254,9 +259,9 @@ static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c) | |||
254 | * Execute all queued DMA descriptors. | 259 | * Execute all queued DMA descriptors. |
255 | * | 260 | * |
256 | * Following requirements must be met while calling mpc_dma_execute(): | 261 | * Following requirements must be met while calling mpc_dma_execute(): |
257 | * a) mchan->lock is acquired, | 262 | * a) mchan->lock is acquired, |
258 | * b) mchan->active list is empty, | 263 | * b) mchan->active list is empty, |
259 | * c) mchan->queued list contains at least one entry. | 264 | * c) mchan->queued list contains at least one entry. |
260 | */ | 265 | */ |
261 | static void mpc_dma_execute(struct mpc_dma_chan *mchan) | 266 | static void mpc_dma_execute(struct mpc_dma_chan *mchan) |
262 | { | 267 | { |
@@ -446,20 +451,15 @@ static void mpc_dma_tasklet(unsigned long data) | |||
446 | if (es & MPC_DMA_DMAES_SAE) | 451 | if (es & MPC_DMA_DMAES_SAE) |
447 | dev_err(mdma->dma.dev, "- Source Address Error\n"); | 452 | dev_err(mdma->dma.dev, "- Source Address Error\n"); |
448 | if (es & MPC_DMA_DMAES_SOE) | 453 | if (es & MPC_DMA_DMAES_SOE) |
449 | dev_err(mdma->dma.dev, "- Source Offset" | 454 | dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n"); |
450 | " Configuration Error\n"); | ||
451 | if (es & MPC_DMA_DMAES_DAE) | 455 | if (es & MPC_DMA_DMAES_DAE) |
452 | dev_err(mdma->dma.dev, "- Destination Address" | 456 | dev_err(mdma->dma.dev, "- Destination Address Error\n"); |
453 | " Error\n"); | ||
454 | if (es & MPC_DMA_DMAES_DOE) | 457 | if (es & MPC_DMA_DMAES_DOE) |
455 | dev_err(mdma->dma.dev, "- Destination Offset" | 458 | dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n"); |
456 | " Configuration Error\n"); | ||
457 | if (es & MPC_DMA_DMAES_NCE) | 459 | if (es & MPC_DMA_DMAES_NCE) |
458 | dev_err(mdma->dma.dev, "- NBytes/Citter" | 460 | dev_err(mdma->dma.dev, "- NBytes/Citter Configuration Error\n"); |
459 | " Configuration Error\n"); | ||
460 | if (es & MPC_DMA_DMAES_SGE) | 461 | if (es & MPC_DMA_DMAES_SGE) |
461 | dev_err(mdma->dma.dev, "- Scatter/Gather" | 462 | dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n"); |
462 | " Configuration Error\n"); | ||
463 | if (es & MPC_DMA_DMAES_SBE) | 463 | if (es & MPC_DMA_DMAES_SBE) |
464 | dev_err(mdma->dma.dev, "- Source Bus Error\n"); | 464 | dev_err(mdma->dma.dev, "- Source Bus Error\n"); |
465 | if (es & MPC_DMA_DMAES_DBE) | 465 | if (es & MPC_DMA_DMAES_DBE) |
@@ -518,8 +518,8 @@ static int mpc_dma_alloc_chan_resources(struct dma_chan *chan) | |||
518 | for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) { | 518 | for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) { |
519 | mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL); | 519 | mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL); |
520 | if (!mdesc) { | 520 | if (!mdesc) { |
521 | dev_notice(mdma->dma.dev, "Memory allocation error. " | 521 | dev_notice(mdma->dma.dev, |
522 | "Allocated only %u descriptors\n", i); | 522 | "Memory allocation error. Allocated only %u descriptors\n", i); |
523 | break; | 523 | break; |
524 | } | 524 | } |
525 | 525 | ||
@@ -684,6 +684,15 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | |||
684 | return &mdesc->desc; | 684 | return &mdesc->desc; |
685 | } | 685 | } |
686 | 686 | ||
687 | inline u8 buswidth_to_dmatsize(u8 buswidth) | ||
688 | { | ||
689 | u8 res; | ||
690 | |||
691 | for (res = 0; buswidth > 1; buswidth /= 2) | ||
692 | res++; | ||
693 | return res; | ||
694 | } | ||
695 | |||
687 | static struct dma_async_tx_descriptor * | 696 | static struct dma_async_tx_descriptor * |
688 | mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 697 | mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
689 | unsigned int sg_len, enum dma_transfer_direction direction, | 698 | unsigned int sg_len, enum dma_transfer_direction direction, |
@@ -742,39 +751,54 @@ mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
742 | 751 | ||
743 | memset(tcd, 0, sizeof(struct mpc_dma_tcd)); | 752 | memset(tcd, 0, sizeof(struct mpc_dma_tcd)); |
744 | 753 | ||
745 | if (!IS_ALIGNED(sg_dma_address(sg), 4)) | ||
746 | goto err_prep; | ||
747 | |||
748 | if (direction == DMA_DEV_TO_MEM) { | 754 | if (direction == DMA_DEV_TO_MEM) { |
749 | tcd->saddr = per_paddr; | 755 | tcd->saddr = per_paddr; |
750 | tcd->daddr = sg_dma_address(sg); | 756 | tcd->daddr = sg_dma_address(sg); |
757 | |||
758 | if (!IS_ALIGNED(sg_dma_address(sg), mchan->dwidth)) | ||
759 | goto err_prep; | ||
760 | |||
751 | tcd->soff = 0; | 761 | tcd->soff = 0; |
752 | tcd->doff = 4; | 762 | tcd->doff = mchan->dwidth; |
753 | } else { | 763 | } else { |
754 | tcd->saddr = sg_dma_address(sg); | 764 | tcd->saddr = sg_dma_address(sg); |
755 | tcd->daddr = per_paddr; | 765 | tcd->daddr = per_paddr; |
756 | tcd->soff = 4; | 766 | |
767 | if (!IS_ALIGNED(sg_dma_address(sg), mchan->swidth)) | ||
768 | goto err_prep; | ||
769 | |||
770 | tcd->soff = mchan->swidth; | ||
757 | tcd->doff = 0; | 771 | tcd->doff = 0; |
758 | } | 772 | } |
759 | 773 | ||
760 | tcd->ssize = MPC_DMA_TSIZE_4; | 774 | tcd->ssize = buswidth_to_dmatsize(mchan->swidth); |
761 | tcd->dsize = MPC_DMA_TSIZE_4; | 775 | tcd->dsize = buswidth_to_dmatsize(mchan->dwidth); |
762 | 776 | ||
763 | len = sg_dma_len(sg); | 777 | if (mdma->is_mpc8308) { |
764 | tcd->nbytes = tcd_nunits * 4; | 778 | tcd->nbytes = sg_dma_len(sg); |
765 | if (!IS_ALIGNED(len, tcd->nbytes)) | 779 | if (!IS_ALIGNED(tcd->nbytes, mchan->swidth)) |
766 | goto err_prep; | 780 | goto err_prep; |
767 | 781 | ||
768 | iter = len / tcd->nbytes; | 782 | /* No major loops for MPC8303 */ |
769 | if (iter >= 1 << 15) { | 783 | tcd->biter = 1; |
770 | /* len is too big */ | 784 | tcd->citer = 1; |
771 | goto err_prep; | 785 | } else { |
786 | len = sg_dma_len(sg); | ||
787 | tcd->nbytes = tcd_nunits * tcd->ssize; | ||
788 | if (!IS_ALIGNED(len, tcd->nbytes)) | ||
789 | goto err_prep; | ||
790 | |||
791 | iter = len / tcd->nbytes; | ||
792 | if (iter >= 1 << 15) { | ||
793 | /* len is too big */ | ||
794 | goto err_prep; | ||
795 | } | ||
796 | /* citer_linkch contains the high bits of iter */ | ||
797 | tcd->biter = iter & 0x1ff; | ||
798 | tcd->biter_linkch = iter >> 9; | ||
799 | tcd->citer = tcd->biter; | ||
800 | tcd->citer_linkch = tcd->biter_linkch; | ||
772 | } | 801 | } |
773 | /* citer_linkch contains the high bits of iter */ | ||
774 | tcd->biter = iter & 0x1ff; | ||
775 | tcd->biter_linkch = iter >> 9; | ||
776 | tcd->citer = tcd->biter; | ||
777 | tcd->citer_linkch = tcd->biter_linkch; | ||
778 | 802 | ||
779 | tcd->e_sg = 0; | 803 | tcd->e_sg = 0; |
780 | tcd->d_req = 1; | 804 | tcd->d_req = 1; |
@@ -796,40 +820,62 @@ err_prep: | |||
796 | return NULL; | 820 | return NULL; |
797 | } | 821 | } |
798 | 822 | ||
823 | inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308) | ||
824 | { | ||
825 | switch (buswidth) { | ||
826 | case 16: | ||
827 | if (is_mpc8308) | ||
828 | return false; | ||
829 | case 1: | ||
830 | case 2: | ||
831 | case 4: | ||
832 | case 32: | ||
833 | break; | ||
834 | default: | ||
835 | return false; | ||
836 | } | ||
837 | |||
838 | return true; | ||
839 | } | ||
840 | |||
799 | static int mpc_dma_device_config(struct dma_chan *chan, | 841 | static int mpc_dma_device_config(struct dma_chan *chan, |
800 | struct dma_slave_config *cfg) | 842 | struct dma_slave_config *cfg) |
801 | { | 843 | { |
802 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); | 844 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); |
845 | struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan); | ||
803 | unsigned long flags; | 846 | unsigned long flags; |
804 | 847 | ||
805 | /* | 848 | /* |
806 | * Software constraints: | 849 | * Software constraints: |
807 | * - only transfers between a peripheral device and | 850 | * - only transfers between a peripheral device and memory are |
808 | * memory are supported; | 851 | * supported |
809 | * - only peripheral devices with 4-byte FIFO access register | 852 | * - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes |
810 | * are supported; | 853 | * are supported, and, consequently, source addresses and |
811 | * - minimal transfer chunk is 4 bytes and consequently | 854 | * destination addresses; must be aligned accordingly; furthermore, |
812 | * source and destination addresses must be 4-byte aligned | 855 | * for MPC512x SoCs, the transfer size must be aligned on (chunk |
813 | * and transfer size must be aligned on (4 * maxburst) | 856 | * size * maxburst) |
814 | * boundary; | 857 | * - during the transfer, the RAM address is incremented by the size |
815 | * - during the transfer RAM address is being incremented by | 858 | * of transfer chunk |
816 | * the size of minimal transfer chunk; | 859 | * - the peripheral port's address is constant during the transfer. |
817 | * - peripheral port's address is constant during the transfer. | ||
818 | */ | 860 | */ |
819 | 861 | ||
820 | if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || | 862 | if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) || |
821 | cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || | 863 | !IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width)) { |
822 | !IS_ALIGNED(cfg->src_addr, 4) || | ||
823 | !IS_ALIGNED(cfg->dst_addr, 4)) { | ||
824 | return -EINVAL; | 864 | return -EINVAL; |
825 | } | 865 | } |
826 | 866 | ||
867 | if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) || | ||
868 | !is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308)) | ||
869 | return -EINVAL; | ||
870 | |||
827 | spin_lock_irqsave(&mchan->lock, flags); | 871 | spin_lock_irqsave(&mchan->lock, flags); |
828 | 872 | ||
829 | mchan->src_per_paddr = cfg->src_addr; | 873 | mchan->src_per_paddr = cfg->src_addr; |
830 | mchan->src_tcd_nunits = cfg->src_maxburst; | 874 | mchan->src_tcd_nunits = cfg->src_maxburst; |
875 | mchan->swidth = cfg->src_addr_width; | ||
831 | mchan->dst_per_paddr = cfg->dst_addr; | 876 | mchan->dst_per_paddr = cfg->dst_addr; |
832 | mchan->dst_tcd_nunits = cfg->dst_maxburst; | 877 | mchan->dst_tcd_nunits = cfg->dst_maxburst; |
878 | mchan->dwidth = cfg->dst_addr_width; | ||
833 | 879 | ||
834 | /* Apply defaults */ | 880 | /* Apply defaults */ |
835 | if (mchan->src_tcd_nunits == 0) | 881 | if (mchan->src_tcd_nunits == 0) |
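`mpc_dma_device_config()` now checks both the address alignment against the configured widths and the width values themselves; `is_buswidth_valid()` accepts 1, 2, 4, 16 and 32 bytes, falling through so that 16 is rejected on MPC8308 only. A standalone copy of that check with a few probes:

#include <stdbool.h>
#include <stdio.h>

/* Accepted FIFO access widths; 16 bytes is MPC512x-only. */
static bool is_buswidth_valid(unsigned char buswidth, bool is_mpc8308)
{
	switch (buswidth) {
	case 16:
		if (is_mpc8308)
			return false;
		/* fall through: 16 is fine on MPC512x */
	case 1:
	case 2:
	case 4:
	case 32:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("16 on mpc8308: %d\n", is_buswidth_valid(16, true));	/* 0 */
	printf("16 on mpc512x: %d\n", is_buswidth_valid(16, false));	/* 1 */
	printf(" 8 anywhere:   %d\n", is_buswidth_valid(8, false));	/* 0 */
	return 0;
}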
@@ -875,7 +921,6 @@ static int mpc_dma_probe(struct platform_device *op) | |||
875 | 921 | ||
876 | mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL); | 922 | mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL); |
877 | if (!mdma) { | 923 | if (!mdma) { |
878 | dev_err(dev, "Memory exhausted!\n"); | ||
879 | retval = -ENOMEM; | 924 | retval = -ENOMEM; |
880 | goto err; | 925 | goto err; |
881 | } | 926 | } |
@@ -999,7 +1044,8 @@ static int mpc_dma_probe(struct platform_device *op) | |||
999 | out_be32(&mdma->regs->dmaerrl, 0xFFFF); | 1044 | out_be32(&mdma->regs->dmaerrl, 0xFFFF); |
1000 | } else { | 1045 | } else { |
1001 | out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | | 1046 | out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | |
1002 | MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); | 1047 | MPC_DMA_DMACR_ERGA | |
1048 | MPC_DMA_DMACR_ERCA); | ||
1003 | 1049 | ||
1004 | /* Disable hardware DMA requests */ | 1050 | /* Disable hardware DMA requests */ |
1005 | out_be32(&mdma->regs->dmaerqh, 0); | 1051 | out_be32(&mdma->regs->dmaerqh, 0); |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 3922a5d56806..25d1dadcddd1 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -31,6 +31,12 @@ | |||
31 | #include "dmaengine.h" | 31 | #include "dmaengine.h" |
32 | #include "mv_xor.h" | 32 | #include "mv_xor.h" |
33 | 33 | ||
34 | enum mv_xor_type { | ||
35 | XOR_ORION, | ||
36 | XOR_ARMADA_38X, | ||
37 | XOR_ARMADA_37XX, | ||
38 | }; | ||
39 | |||
34 | enum mv_xor_mode { | 40 | enum mv_xor_mode { |
35 | XOR_MODE_IN_REG, | 41 | XOR_MODE_IN_REG, |
36 | XOR_MODE_IN_DESC, | 42 | XOR_MODE_IN_DESC, |
@@ -477,7 +483,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | |||
477 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); | 483 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); |
478 | 484 | ||
479 | dev_dbg(mv_chan_to_devp(mv_chan), | 485 | dev_dbg(mv_chan_to_devp(mv_chan), |
480 | "%s src_cnt: %d len: %u dest %pad flags: %ld\n", | 486 | "%s src_cnt: %d len: %zu dest %pad flags: %ld\n", |
481 | __func__, src_cnt, len, &dest, flags); | 487 | __func__, src_cnt, len, &dest, flags); |
482 | 488 | ||
483 | sw_desc = mv_chan_alloc_slot(mv_chan); | 489 | sw_desc = mv_chan_alloc_slot(mv_chan); |
@@ -933,7 +939,7 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) | |||
933 | static struct mv_xor_chan * | 939 | static struct mv_xor_chan * |
934 | mv_xor_channel_add(struct mv_xor_device *xordev, | 940 | mv_xor_channel_add(struct mv_xor_device *xordev, |
935 | struct platform_device *pdev, | 941 | struct platform_device *pdev, |
936 | int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc) | 942 | int idx, dma_cap_mask_t cap_mask, int irq) |
937 | { | 943 | { |
938 | int ret = 0; | 944 | int ret = 0; |
939 | struct mv_xor_chan *mv_chan; | 945 | struct mv_xor_chan *mv_chan; |
@@ -945,7 +951,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
945 | 951 | ||
946 | mv_chan->idx = idx; | 952 | mv_chan->idx = idx; |
947 | mv_chan->irq = irq; | 953 | mv_chan->irq = irq; |
948 | mv_chan->op_in_desc = op_in_desc; | 954 | if (xordev->xor_type == XOR_ORION) |
955 | mv_chan->op_in_desc = XOR_MODE_IN_REG; | ||
956 | else | ||
957 | mv_chan->op_in_desc = XOR_MODE_IN_DESC; | ||
949 | 958 | ||
950 | dma_dev = &mv_chan->dmadev; | 959 | dma_dev = &mv_chan->dmadev; |
951 | 960 | ||
@@ -1085,6 +1094,33 @@ mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, | |||
1085 | writel(0, base + WINDOW_OVERRIDE_CTRL(1)); | 1094 | writel(0, base + WINDOW_OVERRIDE_CTRL(1)); |
1086 | } | 1095 | } |
1087 | 1096 | ||
1097 | static void | ||
1098 | mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev) | ||
1099 | { | ||
1100 | void __iomem *base = xordev->xor_high_base; | ||
1101 | u32 win_enable = 0; | ||
1102 | int i; | ||
1103 | |||
1104 | for (i = 0; i < 8; i++) { | ||
1105 | writel(0, base + WINDOW_BASE(i)); | ||
1106 | writel(0, base + WINDOW_SIZE(i)); | ||
1107 | if (i < 4) | ||
1108 | writel(0, base + WINDOW_REMAP_HIGH(i)); | ||
1109 | } | ||
1110 | /* | ||
1111 | * For Armada3700, open a default 4GB MBus window. The DRAM ||
1112 | * related configuration is done at the AXIS level. ||
1113 | */ | ||
1114 | writel(0xffff0000, base + WINDOW_SIZE(0)); | ||
1115 | win_enable |= 1; | ||
1116 | win_enable |= 3 << 16; | ||
1117 | |||
1118 | writel(win_enable, base + WINDOW_BAR_ENABLE(0)); | ||
1119 | writel(win_enable, base + WINDOW_BAR_ENABLE(1)); | ||
1120 | writel(0, base + WINDOW_OVERRIDE_CTRL(0)); | ||
1121 | writel(0, base + WINDOW_OVERRIDE_CTRL(1)); | ||
1122 | } | ||
1123 | |||
1088 | /* | 1124 | /* |
1089 | * Since this XOR driver is basically used only for RAID5, we don't | 1125 | * Since this XOR driver is basically used only for RAID5, we don't |
1090 | * need to care about synchronizing ->suspend with DMA activity, | 1126 | * need to care about synchronizing ->suspend with DMA activity, |
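The new mv_xor_conf_mbus_windows_a3700() above clears every window and then opens a single window covering the full 4GB, leaving DRAM decode to the AXI interconnect. Assuming the usual mvebu MBus convention that bits [31:16] of the size register hold (window size / 64K) - 1 (an assumption, not something this diff spells out), the 0xffff0000 constant falls out of a one-line computation:

        #include <stdio.h>
        #include <stdint.h>

        /* Assumed encoding: bits [31:16] = (size / 64K) - 1, low bits zero. */
        static uint32_t mbus_window_size(uint64_t size)
        {
                return (uint32_t)(((size >> 16) - 1) << 16);
        }

        int main(void)
        {
                /* prints 0xffff0000, the value written to WINDOW_SIZE(0) */
                printf("4GB window -> 0x%08x\n",
                       (unsigned int)mbus_window_size(4ULL << 30));
                return 0;
        }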
@@ -1129,6 +1165,11 @@ static int mv_xor_resume(struct platform_device *dev) | |||
1129 | XOR_INTR_MASK(mv_chan)); | 1165 | XOR_INTR_MASK(mv_chan)); |
1130 | } | 1166 | } |
1131 | 1167 | ||
1168 | if (xordev->xor_type == XOR_ARMADA_37XX) { | ||
1169 | mv_xor_conf_mbus_windows_a3700(xordev); | ||
1170 | return 0; | ||
1171 | } | ||
1172 | |||
1132 | dram = mv_mbus_dram_info(); | 1173 | dram = mv_mbus_dram_info(); |
1133 | if (dram) | 1174 | if (dram) |
1134 | mv_xor_conf_mbus_windows(xordev, dram); | 1175 | mv_xor_conf_mbus_windows(xordev, dram); |
@@ -1137,8 +1178,9 @@ static int mv_xor_resume(struct platform_device *dev) | |||
1137 | } | 1178 | } |
1138 | 1179 | ||
1139 | static const struct of_device_id mv_xor_dt_ids[] = { | 1180 | static const struct of_device_id mv_xor_dt_ids[] = { |
1140 | { .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG }, | 1181 | { .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION }, |
1141 | { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC }, | 1182 | { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X }, |
1183 | { .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX }, | ||
1142 | {}, | 1184 | {}, |
1143 | }; | 1185 | }; |
1144 | 1186 | ||
@@ -1152,7 +1194,6 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1152 | struct resource *res; | 1194 | struct resource *res; |
1153 | unsigned int max_engines, max_channels; | 1195 | unsigned int max_engines, max_channels; |
1154 | int i, ret; | 1196 | int i, ret; |
1155 | int op_in_desc; | ||
1156 | 1197 | ||
1157 | dev_notice(&pdev->dev, "Marvell shared XOR driver\n"); | 1198 | dev_notice(&pdev->dev, "Marvell shared XOR driver\n"); |
1158 | 1199 | ||
@@ -1180,12 +1221,30 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1180 | 1221 | ||
1181 | platform_set_drvdata(pdev, xordev); | 1222 | platform_set_drvdata(pdev, xordev); |
1182 | 1223 | ||
1224 | |||
1225 | /* | ||
1226 | * We need to know which type of XOR device we use before | ||
1227 | * setting up. In non-dt case it can only be the legacy one. | ||
1228 | */ | ||
1229 | xordev->xor_type = XOR_ORION; | ||
1230 | if (pdev->dev.of_node) { | ||
1231 | const struct of_device_id *of_id = | ||
1232 | of_match_device(mv_xor_dt_ids, | ||
1233 | &pdev->dev); | ||
1234 | |||
1235 | xordev->xor_type = (uintptr_t)of_id->data; | ||
1236 | } | ||
1237 | |||
1183 | /* | 1238 | /* |
1184 | * (Re-)program MBUS remapping windows if we are asked to. | 1239 | * (Re-)program MBUS remapping windows if we are asked to. |
1185 | */ | 1240 | */ |
1186 | dram = mv_mbus_dram_info(); | 1241 | if (xordev->xor_type == XOR_ARMADA_37XX) { |
1187 | if (dram) | 1242 | mv_xor_conf_mbus_windows_a3700(xordev); |
1188 | mv_xor_conf_mbus_windows(xordev, dram); | 1243 | } else { |
1244 | dram = mv_mbus_dram_info(); | ||
1245 | if (dram) | ||
1246 | mv_xor_conf_mbus_windows(xordev, dram); | ||
1247 | } | ||
1189 | 1248 | ||
1190 | /* Not all platforms can gate the clock, so it is not | 1249 | /* Not all platforms can gate the clock, so it is not |
1191 | * an error if the clock does not exist. | 1250 | * an error if the clock does not exist.
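The probe now resolves the hardware flavour once, up front: match the node's compatible string against mv_xor_dt_ids[] and fall back to the legacy Orion type when there is no DT node, then derive each channel's op_in_desc from that type instead of threading it through mv_xor_channel_add(). A table-lookup sketch of the selection (plain strings instead of struct of_device_id) is:

        #include <stdio.h>
        #include <string.h>

        enum mv_xor_type { XOR_ORION, XOR_ARMADA_38X, XOR_ARMADA_37XX };

        struct of_id { const char *compatible; enum mv_xor_type type; };

        static const struct of_id mv_xor_dt_ids[] = {
                { "marvell,orion-xor",       XOR_ORION },
                { "marvell,armada-380-xor",  XOR_ARMADA_38X },
                { "marvell,armada-3700-xor", XOR_ARMADA_37XX },
        };

        /* NULL compatible models the non-DT (legacy platform data) case. */
        static enum mv_xor_type lookup_xor_type(const char *compatible)
        {
                size_t n = sizeof(mv_xor_dt_ids) / sizeof(mv_xor_dt_ids[0]);

                for (size_t i = 0; compatible && i < n; i++)
                        if (!strcmp(compatible, mv_xor_dt_ids[i].compatible))
                                return mv_xor_dt_ids[i].type;
                return XOR_ORION;       /* legacy default */
        }

        int main(void)
        {
                printf("%d\n", lookup_xor_type("marvell,armada-3700-xor")); /* 2 */
                printf("%d\n", lookup_xor_type(NULL));                      /* 0 */
                return 0;
        }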
@@ -1199,12 +1258,16 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1199 | * order for async_tx to perform well. So we limit the number | 1258 | * order for async_tx to perform well. So we limit the number |
1200 | * of engines and channels so that we take into account this | 1259 | * of engines and channels so that we take into account this |
1201 | * constraint. Note that we also want to use channels from | 1260 | * constraint. Note that we also want to use channels from |
1202 | * separate engines when possible. | 1261 | * separate engines when possible. On the dual-CPU Armada 3700
1262 | * SoC, which has a single XOR engine, allow using both of its channels. ||
1203 | */ | 1263 | */ |
1204 | max_engines = num_present_cpus(); | 1264 | max_engines = num_present_cpus(); |
1205 | max_channels = min_t(unsigned int, | 1265 | if (xordev->xor_type == XOR_ARMADA_37XX) |
1206 | MV_XOR_MAX_CHANNELS, | 1266 | max_channels = num_present_cpus(); |
1207 | DIV_ROUND_UP(num_present_cpus(), 2)); | 1267 | else |
1268 | max_channels = min_t(unsigned int, | ||
1269 | MV_XOR_MAX_CHANNELS, | ||
1270 | DIV_ROUND_UP(num_present_cpus(), 2)); | ||
1208 | 1271 | ||
1209 | if (mv_xor_engine_count >= max_engines) | 1272 | if (mv_xor_engine_count >= max_engines) |
1210 | return 0; | 1273 | return 0; |
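The channel-count policy above keeps the old rule for Orion and Armada 38x (at most MV_XOR_MAX_CHANNELS, one channel per two CPUs) but lets the single-engine Armada 3700 have one channel per CPU, so both of its channels are used on the dual-core SoC. A sketch of the calculation, with num_present_cpus() replaced by a plain parameter:

        #include <stdio.h>

        #define MV_XOR_MAX_CHANNELS 2
        #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
        #define MIN(a, b)           ((a) < (b) ? (a) : (b))

        enum mv_xor_type { XOR_ORION, XOR_ARMADA_38X, XOR_ARMADA_37XX };

        static unsigned int max_channels(enum mv_xor_type type, unsigned int ncpus)
        {
                if (type == XOR_ARMADA_37XX)
                        return ncpus;   /* single engine: use all of its channels */
                return MIN(MV_XOR_MAX_CHANNELS, DIV_ROUND_UP(ncpus, 2));
        }

        int main(void)
        {
                printf("38x,  4 CPUs -> %u\n", max_channels(XOR_ARMADA_38X, 4));  /* 2 */
                printf("3700, 2 CPUs -> %u\n", max_channels(XOR_ARMADA_37XX, 2)); /* 2 */
                printf("38x,  2 CPUs -> %u\n", max_channels(XOR_ARMADA_38X, 2));  /* 1 */
                return 0;
        }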
@@ -1212,15 +1275,11 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1212 | if (pdev->dev.of_node) { | 1275 | if (pdev->dev.of_node) { |
1213 | struct device_node *np; | 1276 | struct device_node *np; |
1214 | int i = 0; | 1277 | int i = 0; |
1215 | const struct of_device_id *of_id = | ||
1216 | of_match_device(mv_xor_dt_ids, | ||
1217 | &pdev->dev); | ||
1218 | 1278 | ||
1219 | for_each_child_of_node(pdev->dev.of_node, np) { | 1279 | for_each_child_of_node(pdev->dev.of_node, np) { |
1220 | struct mv_xor_chan *chan; | 1280 | struct mv_xor_chan *chan; |
1221 | dma_cap_mask_t cap_mask; | 1281 | dma_cap_mask_t cap_mask; |
1222 | int irq; | 1282 | int irq; |
1223 | op_in_desc = (int)of_id->data; | ||
1224 | 1283 | ||
1225 | if (i >= max_channels) | 1284 | if (i >= max_channels) |
1226 | continue; | 1285 | continue; |
@@ -1237,7 +1296,7 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1237 | } | 1296 | } |
1238 | 1297 | ||
1239 | chan = mv_xor_channel_add(xordev, pdev, i, | 1298 | chan = mv_xor_channel_add(xordev, pdev, i, |
1240 | cap_mask, irq, op_in_desc); | 1299 | cap_mask, irq); |
1241 | if (IS_ERR(chan)) { | 1300 | if (IS_ERR(chan)) { |
1242 | ret = PTR_ERR(chan); | 1301 | ret = PTR_ERR(chan); |
1243 | irq_dispose_mapping(irq); | 1302 | irq_dispose_mapping(irq); |
@@ -1266,8 +1325,7 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1266 | } | 1325 | } |
1267 | 1326 | ||
1268 | chan = mv_xor_channel_add(xordev, pdev, i, | 1327 | chan = mv_xor_channel_add(xordev, pdev, i, |
1269 | cd->cap_mask, irq, | 1328 | cd->cap_mask, irq); |
1270 | XOR_MODE_IN_REG); | ||
1271 | if (IS_ERR(chan)) { | 1329 | if (IS_ERR(chan)) { |
1272 | ret = PTR_ERR(chan); | 1330 | ret = PTR_ERR(chan); |
1273 | goto err_channel_add; | 1331 | goto err_channel_add; |
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index c19fe30e5ae9..bf56e082e7cd 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h | |||
@@ -85,6 +85,7 @@ struct mv_xor_device { | |||
85 | void __iomem *xor_high_base; | 85 | void __iomem *xor_high_base; |
86 | struct clk *clk; | 86 | struct clk *clk; |
87 | struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS]; | 87 | struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS]; |
88 | int xor_type; | ||
88 | }; | 89 | }; |
89 | 90 | ||
90 | /** | 91 | /** |
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 1e1f2986eba8..faae0bfe1109 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c | |||
@@ -240,8 +240,9 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | |||
240 | struct of_phandle_args dma_spec; | 240 | struct of_phandle_args dma_spec; |
241 | struct of_dma *ofdma; | 241 | struct of_dma *ofdma; |
242 | struct dma_chan *chan; | 242 | struct dma_chan *chan; |
243 | int count, i; | 243 | int count, i, start; |
244 | int ret_no_channel = -ENODEV; | 244 | int ret_no_channel = -ENODEV; |
245 | static atomic_t last_index; | ||
245 | 246 | ||
246 | if (!np || !name) { | 247 | if (!np || !name) { |
247 | pr_err("%s: not enough information provided\n", __func__); | 248 | pr_err("%s: not enough information provided\n", __func__); |
@@ -259,8 +260,15 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | |||
259 | return ERR_PTR(-ENODEV); | 260 | return ERR_PTR(-ENODEV); |
260 | } | 261 | } |
261 | 262 | ||
263 | /* | ||
264 | * approximate an average distribution across multiple | ||
265 | * entries with the same name | ||
266 | */ | ||
267 | start = atomic_inc_return(&last_index); | ||
262 | for (i = 0; i < count; i++) { | 268 | for (i = 0; i < count; i++) { |
263 | if (of_dma_match_channel(np, name, i, &dma_spec)) | 269 | if (of_dma_match_channel(np, name, |
270 | (i + start) % count, | ||
271 | &dma_spec)) | ||
264 | continue; | 272 | continue; |
265 | 273 | ||
266 | mutex_lock(&of_dma_lock); | 274 | mutex_lock(&of_dma_lock); |
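The new code rotates the starting index with a shared atomic counter, so repeated requests for the same channel name are spread across equivalent "dmas" entries instead of always landing on the first match; a candidate that cannot be acquired is simply skipped and the next one is tried. A userspace sketch of the rotation, with the atomic replaced by a plain static and acquisition stubbed out:

        #include <stdio.h>

        /* Candidate DT entries that share one channel name (illustrative). */
        static const char *entries[] = { "dma0", "dma1", "dma2" };

        static int try_acquire(const char *name)
        {
                (void)name;
                return 1;       /* pretend every request succeeds */
        }

        static const char *pick_entry(void)
        {
                static unsigned int last_index; /* atomic_inc_return() in the driver */
                unsigned int count = sizeof(entries) / sizeof(entries[0]);
                unsigned int start = ++last_index;

                for (unsigned int i = 0; i < count; i++) {
                        const char *candidate = entries[(i + start) % count];

                        if (try_acquire(candidate))
                                return candidate;
                }
                return NULL;
        }

        int main(void)
        {
                for (int i = 0; i < 4; i++)
                        printf("request %d -> %s\n", i, pick_entry());
                return 0;
        }

Successive calls return dma1, dma2, dma0, dma1, which is the "approximate average distribution" the comment describes.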
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 77c1c44009d8..e756a30ccba2 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c | |||
@@ -117,6 +117,7 @@ struct pxad_chan { | |||
117 | /* protected by vc->lock */ | 117 | /* protected by vc->lock */ |
118 | struct pxad_phy *phy; | 118 | struct pxad_phy *phy; |
119 | struct dma_pool *desc_pool; /* Descriptors pool */ | 119 | struct dma_pool *desc_pool; /* Descriptors pool */ |
120 | dma_cookie_t bus_error; | ||
120 | }; | 121 | }; |
121 | 122 | ||
122 | struct pxad_device { | 123 | struct pxad_device { |
@@ -563,6 +564,7 @@ static void pxad_launch_chan(struct pxad_chan *chan, | |||
563 | return; | 564 | return; |
564 | } | 565 | } |
565 | } | 566 | } |
567 | chan->bus_error = 0; | ||
566 | 568 | ||
567 | /* | 569 | /* |
568 | * Program the descriptor's address into the DMA controller, | 570 | * Program the descriptor's address into the DMA controller, |
@@ -666,6 +668,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id) | |||
666 | struct virt_dma_desc *vd, *tmp; | 668 | struct virt_dma_desc *vd, *tmp; |
667 | unsigned int dcsr; | 669 | unsigned int dcsr; |
668 | unsigned long flags; | 670 | unsigned long flags; |
671 | dma_cookie_t last_started = 0; | ||
669 | 672 | ||
670 | BUG_ON(!chan); | 673 | BUG_ON(!chan); |
671 | 674 | ||
@@ -678,6 +681,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id) | |||
678 | dev_dbg(&chan->vc.chan.dev->device, | 681 | dev_dbg(&chan->vc.chan.dev->device, |
679 | "%s(): checking txd %p[%x]: completed=%d\n", | 682 | "%s(): checking txd %p[%x]: completed=%d\n", |
680 | __func__, vd, vd->tx.cookie, is_desc_completed(vd)); | 683 | __func__, vd, vd->tx.cookie, is_desc_completed(vd)); |
684 | last_started = vd->tx.cookie; | ||
681 | if (to_pxad_sw_desc(vd)->cyclic) { | 685 | if (to_pxad_sw_desc(vd)->cyclic) { |
682 | vchan_cyclic_callback(vd); | 686 | vchan_cyclic_callback(vd); |
683 | break; | 687 | break; |
@@ -690,7 +694,12 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id) | |||
690 | } | 694 | } |
691 | } | 695 | } |
692 | 696 | ||
693 | if (dcsr & PXA_DCSR_STOPSTATE) { | 697 | if (dcsr & PXA_DCSR_BUSERR) { |
698 | chan->bus_error = last_started; | ||
699 | phy_disable(phy); | ||
700 | } | ||
701 | |||
702 | if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) { | ||
694 | dev_dbg(&chan->vc.chan.dev->device, | 703 | dev_dbg(&chan->vc.chan.dev->device, |
695 | "%s(): channel stopped, submitted_empty=%d issued_empty=%d", | 704 | "%s(): channel stopped, submitted_empty=%d issued_empty=%d", |
696 | __func__, | 705 | __func__, |
@@ -1249,6 +1258,9 @@ static enum dma_status pxad_tx_status(struct dma_chan *dchan, | |||
1249 | struct pxad_chan *chan = to_pxad_chan(dchan); | 1258 | struct pxad_chan *chan = to_pxad_chan(dchan); |
1250 | enum dma_status ret; | 1259 | enum dma_status ret; |
1251 | 1260 | ||
1261 | if (cookie == chan->bus_error) | ||
1262 | return DMA_ERROR; | ||
1263 | |||
1252 | ret = dma_cookie_status(dchan, cookie, txstate); | 1264 | ret = dma_cookie_status(dchan, cookie, txstate); |
1253 | if (likely(txstate && (ret != DMA_ERROR))) | 1265 | if (likely(txstate && (ret != DMA_ERROR))) |
1254 | dma_set_residue(txstate, pxad_residue(chan, cookie)); | 1266 | dma_set_residue(txstate, pxad_residue(chan, cookie)); |
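The pxa_dma hunks above record the cookie of the descriptor being walked when DCSR reports a bus error, disable the physical channel, and make tx_status() return DMA_ERROR for exactly that cookie (the STOPSTATE handling is also skipped once a bus error is latched). A stripped-down model of the bookkeeping, with cookies reduced to plain ints:

        #include <stdio.h>

        enum dma_status { DMA_COMPLETE, DMA_IN_PROGRESS, DMA_ERROR };

        struct chan {
                int bus_error;          /* cookie hit by the bus error, 0 if none */
                int last_completed;     /* highest cookie known to be done */
        };

        /* Interrupt path: remember which descriptor the error hit. */
        static void handle_bus_error(struct chan *c, int last_started)
        {
                c->bus_error = last_started;
                /* the driver also disables the physical channel here */
        }

        static enum dma_status tx_status(const struct chan *c, int cookie)
        {
                if (cookie == c->bus_error)
                        return DMA_ERROR;
                return cookie <= c->last_completed ? DMA_COMPLETE : DMA_IN_PROGRESS;
        }

        int main(void)
        {
                struct chan c = { .bus_error = 0, .last_completed = 2 };

                handle_bus_error(&c, 3);                        /* descriptor 3 failed */
                printf("cookie 2 -> %d\n", tx_status(&c, 2));   /* DMA_COMPLETE */
                printf("cookie 3 -> %d\n", tx_status(&c, 3));   /* DMA_ERROR */
                return 0;
        }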
@@ -1321,7 +1333,7 @@ static int pxad_init_phys(struct platform_device *op, | |||
1321 | return 0; | 1333 | return 0; |
1322 | } | 1334 | } |
1323 | 1335 | ||
1324 | static const struct of_device_id const pxad_dt_ids[] = { | 1336 | static const struct of_device_id pxad_dt_ids[] = { |
1325 | { .compatible = "marvell,pdma-1.0", }, | 1337 | { .compatible = "marvell,pdma-1.0", }, |
1326 | {} | 1338 | {} |
1327 | }; | 1339 | }; |
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile index bfea6990229f..4bfc38b45220 100644 --- a/drivers/dma/qcom/Makefile +++ b/drivers/dma/qcom/Makefile | |||
@@ -1,3 +1,5 @@ | |||
1 | obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o | 1 | obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o |
2 | obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o | 2 | obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o |
3 | hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o | 3 | hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o |
4 | obj-$(CONFIG_QCOM_HIDMA) += hdma.o | ||
5 | hdma-objs := hidma_ll.o hidma.o hidma_dbg.o | ||
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index d5e0a9c3ad5d..969b48176745 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c | |||
@@ -342,7 +342,7 @@ static const struct reg_offset_data bam_v1_7_reg_info[] = { | |||
342 | 342 | ||
343 | #define BAM_DESC_FIFO_SIZE SZ_32K | 343 | #define BAM_DESC_FIFO_SIZE SZ_32K |
344 | #define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1) | 344 | #define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1) |
345 | #define BAM_MAX_DATA_SIZE (SZ_32K - 8) | 345 | #define BAM_FIFO_SIZE (SZ_32K - 8) |
346 | 346 | ||
347 | struct bam_chan { | 347 | struct bam_chan { |
348 | struct virt_dma_chan vc; | 348 | struct virt_dma_chan vc; |
@@ -387,6 +387,7 @@ struct bam_device { | |||
387 | 387 | ||
388 | /* execution environment ID, from DT */ | 388 | /* execution environment ID, from DT */ |
389 | u32 ee; | 389 | u32 ee; |
390 | bool controlled_remotely; | ||
390 | 391 | ||
391 | const struct reg_offset_data *layout; | 392 | const struct reg_offset_data *layout; |
392 | 393 | ||
@@ -458,7 +459,7 @@ static void bam_chan_init_hw(struct bam_chan *bchan, | |||
458 | */ | 459 | */ |
459 | writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)), | 460 | writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)), |
460 | bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR)); | 461 | bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR)); |
461 | writel_relaxed(BAM_DESC_FIFO_SIZE, | 462 | writel_relaxed(BAM_FIFO_SIZE, |
462 | bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES)); | 463 | bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES)); |
463 | 464 | ||
464 | /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */ | 465 | /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */ |
@@ -604,7 +605,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, | |||
604 | 605 | ||
605 | /* calculate number of required entries */ | 606 | /* calculate number of required entries */ |
606 | for_each_sg(sgl, sg, sg_len, i) | 607 | for_each_sg(sgl, sg, sg_len, i) |
607 | num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE); | 608 | num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE); |
608 | 609 | ||
609 | /* allocate enough room to accommodate the number of entries */ | 610 | /* allocate enough room to accommodate the number of entries */
610 | async_desc = kzalloc(sizeof(*async_desc) + | 611 | async_desc = kzalloc(sizeof(*async_desc) + |
@@ -635,10 +636,10 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, | |||
635 | desc->addr = cpu_to_le32(sg_dma_address(sg) + | 636 | desc->addr = cpu_to_le32(sg_dma_address(sg) + |
636 | curr_offset); | 637 | curr_offset); |
637 | 638 | ||
638 | if (remainder > BAM_MAX_DATA_SIZE) { | 639 | if (remainder > BAM_FIFO_SIZE) { |
639 | desc->size = cpu_to_le16(BAM_MAX_DATA_SIZE); | 640 | desc->size = cpu_to_le16(BAM_FIFO_SIZE); |
640 | remainder -= BAM_MAX_DATA_SIZE; | 641 | remainder -= BAM_FIFO_SIZE; |
641 | curr_offset += BAM_MAX_DATA_SIZE; | 642 | curr_offset += BAM_FIFO_SIZE; |
642 | } else { | 643 | } else { |
643 | desc->size = cpu_to_le16(remainder); | 644 | desc->size = cpu_to_le16(remainder); |
644 | remainder = 0; | 645 | remainder = 0; |
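Apart from the BAM_MAX_DATA_SIZE -> BAM_FIFO_SIZE rename, the splitting logic above is unchanged: each scatterlist entry is carved into hardware descriptors of at most BAM_FIFO_SIZE bytes, with DIV_ROUND_UP() counting descriptors up front and the inner loop slicing the entry. The same splitting for one buffer, as a sketch:

        #include <stdio.h>
        #include <stdint.h>

        #define SZ_32K             (32 * 1024)
        #define BAM_FIFO_SIZE      (SZ_32K - 8)
        #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

        static void split_into_descriptors(uint32_t addr, uint32_t len)
        {
                uint32_t remainder = len;
                uint32_t offset = 0;

                printf("need %u descriptors\n", DIV_ROUND_UP(len, BAM_FIFO_SIZE));

                while (remainder) {
                        uint32_t chunk = remainder > BAM_FIFO_SIZE ?
                                         BAM_FIFO_SIZE : remainder;

                        printf("  desc addr=0x%x size=%u\n", addr + offset, chunk);
                        offset += chunk;
                        remainder -= chunk;
                }
        }

        int main(void)
        {
                split_into_descriptors(0x1000, 70000);  /* 3 descriptors */
                return 0;
        }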
@@ -801,13 +802,17 @@ static irqreturn_t bam_dma_irq(int irq, void *data) | |||
801 | if (srcs & P_IRQ) | 802 | if (srcs & P_IRQ) |
802 | tasklet_schedule(&bdev->task); | 803 | tasklet_schedule(&bdev->task); |
803 | 804 | ||
804 | if (srcs & BAM_IRQ) | 805 | if (srcs & BAM_IRQ) { |
805 | clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS)); | 806 | clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS)); |
806 | 807 | ||
807 | /* don't allow reorder of the various accesses to the BAM registers */ | 808 | /* |
808 | mb(); | 809 | * don't allow reorder of the various accesses to the BAM |
810 | * registers | ||
811 | */ | ||
812 | mb(); | ||
809 | 813 | ||
810 | writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR)); | 814 | writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR)); |
815 | } | ||
811 | 816 | ||
812 | return IRQ_HANDLED; | 817 | return IRQ_HANDLED; |
813 | } | 818 | } |
@@ -1038,6 +1043,9 @@ static int bam_init(struct bam_device *bdev) | |||
1038 | val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); | 1043 | val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); |
1039 | bdev->num_channels = val & BAM_NUM_PIPES_MASK; | 1044 | bdev->num_channels = val & BAM_NUM_PIPES_MASK; |
1040 | 1045 | ||
1046 | if (bdev->controlled_remotely) | ||
1047 | return 0; | ||
1048 | |||
1041 | /* s/w reset bam */ | 1049 | /* s/w reset bam */ |
1042 | /* after reset all pipes are disabled and idle */ | 1050 | /* after reset all pipes are disabled and idle */ |
1043 | val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL)); | 1051 | val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL)); |
@@ -1125,6 +1133,9 @@ static int bam_dma_probe(struct platform_device *pdev) | |||
1125 | return ret; | 1133 | return ret; |
1126 | } | 1134 | } |
1127 | 1135 | ||
1136 | bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node, | ||
1137 | "qcom,controlled-remotely"); | ||
1138 | |||
1128 | bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk"); | 1139 | bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk"); |
1129 | if (IS_ERR(bdev->bamclk)) | 1140 | if (IS_ERR(bdev->bamclk)) |
1130 | return PTR_ERR(bdev->bamclk); | 1141 | return PTR_ERR(bdev->bamclk); |
@@ -1163,7 +1174,7 @@ static int bam_dma_probe(struct platform_device *pdev) | |||
1163 | /* set max dma segment size */ | 1174 | /* set max dma segment size */ |
1164 | bdev->common.dev = bdev->dev; | 1175 | bdev->common.dev = bdev->dev; |
1165 | bdev->common.dev->dma_parms = &bdev->dma_parms; | 1176 | bdev->common.dev->dma_parms = &bdev->dma_parms; |
1166 | ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE); | 1177 | ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE); |
1167 | if (ret) { | 1178 | if (ret) { |
1168 | dev_err(bdev->dev, "cannot set maximum segment size\n"); | 1179 | dev_err(bdev->dev, "cannot set maximum segment size\n"); |
1169 | goto err_bam_channel_exit; | 1180 | goto err_bam_channel_exit; |
@@ -1234,6 +1245,9 @@ static int bam_dma_remove(struct platform_device *pdev) | |||
1234 | bam_dma_terminate_all(&bdev->channels[i].vc.chan); | 1245 | bam_dma_terminate_all(&bdev->channels[i].vc.chan); |
1235 | tasklet_kill(&bdev->channels[i].vc.task); | 1246 | tasklet_kill(&bdev->channels[i].vc.task); |
1236 | 1247 | ||
1248 | if (!bdev->channels[i].fifo_virt) | ||
1249 | continue; | ||
1250 | |||
1237 | dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, | 1251 | dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, |
1238 | bdev->channels[i].fifo_virt, | 1252 | bdev->channels[i].fifo_virt, |
1239 | bdev->channels[i].fifo_phys); | 1253 | bdev->channels[i].fifo_phys); |
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index cccc78efbca9..41b5c6dee713 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Qualcomm Technologies HIDMA DMA engine interface | 2 | * Qualcomm Technologies HIDMA DMA engine interface |
3 | * | 3 | * |
4 | * Copyright (c) 2015, The Linux Foundation. All rights reserved. | 4 | * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 and | 7 | * it under the terms of the GNU General Public License version 2 and |
@@ -404,7 +404,7 @@ static int hidma_terminate_channel(struct dma_chan *chan) | |||
404 | spin_unlock_irqrestore(&mchan->lock, irqflags); | 404 | spin_unlock_irqrestore(&mchan->lock, irqflags); |
405 | 405 | ||
406 | /* this suspends the existing transfer */ | 406 | /* this suspends the existing transfer */ |
407 | rc = hidma_ll_pause(dmadev->lldev); | 407 | rc = hidma_ll_disable(dmadev->lldev); |
408 | if (rc) { | 408 | if (rc) { |
409 | dev_err(dmadev->ddev.dev, "channel did not pause\n"); | 409 | dev_err(dmadev->ddev.dev, "channel did not pause\n"); |
410 | goto out; | 410 | goto out; |
@@ -427,7 +427,7 @@ static int hidma_terminate_channel(struct dma_chan *chan) | |||
427 | list_move(&mdesc->node, &mchan->free); | 427 | list_move(&mdesc->node, &mchan->free); |
428 | } | 428 | } |
429 | 429 | ||
430 | rc = hidma_ll_resume(dmadev->lldev); | 430 | rc = hidma_ll_enable(dmadev->lldev); |
431 | out: | 431 | out: |
432 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | 432 | pm_runtime_mark_last_busy(dmadev->ddev.dev); |
433 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | 433 | pm_runtime_put_autosuspend(dmadev->ddev.dev); |
@@ -488,7 +488,7 @@ static int hidma_pause(struct dma_chan *chan) | |||
488 | dmadev = to_hidma_dev(mchan->chan.device); | 488 | dmadev = to_hidma_dev(mchan->chan.device); |
489 | if (!mchan->paused) { | 489 | if (!mchan->paused) { |
490 | pm_runtime_get_sync(dmadev->ddev.dev); | 490 | pm_runtime_get_sync(dmadev->ddev.dev); |
491 | if (hidma_ll_pause(dmadev->lldev)) | 491 | if (hidma_ll_disable(dmadev->lldev)) |
492 | dev_warn(dmadev->ddev.dev, "channel did not stop\n"); | 492 | dev_warn(dmadev->ddev.dev, "channel did not stop\n"); |
493 | mchan->paused = true; | 493 | mchan->paused = true; |
494 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | 494 | pm_runtime_mark_last_busy(dmadev->ddev.dev); |
@@ -507,7 +507,7 @@ static int hidma_resume(struct dma_chan *chan) | |||
507 | dmadev = to_hidma_dev(mchan->chan.device); | 507 | dmadev = to_hidma_dev(mchan->chan.device); |
508 | if (mchan->paused) { | 508 | if (mchan->paused) { |
509 | pm_runtime_get_sync(dmadev->ddev.dev); | 509 | pm_runtime_get_sync(dmadev->ddev.dev); |
510 | rc = hidma_ll_resume(dmadev->lldev); | 510 | rc = hidma_ll_enable(dmadev->lldev); |
511 | if (!rc) | 511 | if (!rc) |
512 | mchan->paused = false; | 512 | mchan->paused = false; |
513 | else | 513 | else |
@@ -530,6 +530,43 @@ static irqreturn_t hidma_chirq_handler(int chirq, void *arg) | |||
530 | return hidma_ll_inthandler(chirq, lldev); | 530 | return hidma_ll_inthandler(chirq, lldev); |
531 | } | 531 | } |
532 | 532 | ||
533 | static ssize_t hidma_show_values(struct device *dev, | ||
534 | struct device_attribute *attr, char *buf) | ||
535 | { | ||
536 | struct platform_device *pdev = to_platform_device(dev); | ||
537 | struct hidma_dev *mdev = platform_get_drvdata(pdev); | ||
538 | |||
539 | buf[0] = 0; | ||
540 | |||
541 | if (strcmp(attr->attr.name, "chid") == 0) | ||
542 | sprintf(buf, "%d\n", mdev->chidx); | ||
543 | |||
544 | return strlen(buf); | ||
545 | } | ||
546 | |||
547 | static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, | ||
548 | int mode) | ||
549 | { | ||
550 | struct device_attribute *attrs; | ||
551 | char *name_copy; | ||
552 | |||
553 | attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute), | ||
554 | GFP_KERNEL); | ||
555 | if (!attrs) | ||
556 | return -ENOMEM; | ||
557 | |||
558 | name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL); | ||
559 | if (!name_copy) | ||
560 | return -ENOMEM; | ||
561 | |||
562 | attrs->attr.name = name_copy; | ||
563 | attrs->attr.mode = mode; | ||
564 | attrs->show = hidma_show_values; | ||
565 | sysfs_attr_init(&attrs->attr); | ||
566 | |||
567 | return device_create_file(dev->ddev.dev, attrs); | ||
568 | } | ||
569 | |||
533 | static int hidma_probe(struct platform_device *pdev) | 570 | static int hidma_probe(struct platform_device *pdev) |
534 | { | 571 | { |
535 | struct hidma_dev *dmadev; | 572 | struct hidma_dev *dmadev; |
@@ -644,6 +681,8 @@ static int hidma_probe(struct platform_device *pdev) | |||
644 | 681 | ||
645 | dmadev->irq = chirq; | 682 | dmadev->irq = chirq; |
646 | tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev); | 683 | tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev); |
684 | hidma_debug_init(dmadev); | ||
685 | hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO); | ||
647 | dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n"); | 686 | dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n"); |
648 | platform_set_drvdata(pdev, dmadev); | 687 | platform_set_drvdata(pdev, dmadev); |
649 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | 688 | pm_runtime_mark_last_busy(dmadev->ddev.dev); |
@@ -651,6 +690,7 @@ static int hidma_probe(struct platform_device *pdev) | |||
651 | return 0; | 690 | return 0; |
652 | 691 | ||
653 | uninit: | 692 | uninit: |
693 | hidma_debug_uninit(dmadev); | ||
654 | hidma_ll_uninit(dmadev->lldev); | 694 | hidma_ll_uninit(dmadev->lldev); |
655 | dmafree: | 695 | dmafree: |
656 | if (dmadev) | 696 | if (dmadev) |
@@ -668,6 +708,7 @@ static int hidma_remove(struct platform_device *pdev) | |||
668 | pm_runtime_get_sync(dmadev->ddev.dev); | 708 | pm_runtime_get_sync(dmadev->ddev.dev); |
669 | dma_async_device_unregister(&dmadev->ddev); | 709 | dma_async_device_unregister(&dmadev->ddev); |
670 | devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev); | 710 | devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev); |
711 | hidma_debug_uninit(dmadev); | ||
671 | hidma_ll_uninit(dmadev->lldev); | 712 | hidma_ll_uninit(dmadev->lldev); |
672 | hidma_free(dmadev); | 713 | hidma_free(dmadev); |
673 | 714 | ||
@@ -689,7 +730,6 @@ static const struct of_device_id hidma_match[] = { | |||
689 | {.compatible = "qcom,hidma-1.0",}, | 730 | {.compatible = "qcom,hidma-1.0",}, |
690 | {}, | 731 | {}, |
691 | }; | 732 | }; |
692 | |||
693 | MODULE_DEVICE_TABLE(of, hidma_match); | 733 | MODULE_DEVICE_TABLE(of, hidma_match); |
694 | 734 | ||
695 | static struct platform_driver hidma_driver = { | 735 | static struct platform_driver hidma_driver = { |
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h index 231e306f6d87..db413a5efc4e 100644 --- a/drivers/dma/qcom/hidma.h +++ b/drivers/dma/qcom/hidma.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Qualcomm Technologies HIDMA data structures | 2 | * Qualcomm Technologies HIDMA data structures |
3 | * | 3 | * |
4 | * Copyright (c) 2014, The Linux Foundation. All rights reserved. | 4 | * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 and | 7 | * it under the terms of the GNU General Public License version 2 and |
@@ -20,32 +20,29 @@ | |||
20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
21 | #include <linux/dmaengine.h> | 21 | #include <linux/dmaengine.h> |
22 | 22 | ||
23 | #define TRE_SIZE 32 /* each TRE is 32 bytes */ | 23 | #define HIDMA_TRE_SIZE 32 /* each TRE is 32 bytes */ |
24 | #define TRE_CFG_IDX 0 | 24 | #define HIDMA_TRE_CFG_IDX 0 |
25 | #define TRE_LEN_IDX 1 | 25 | #define HIDMA_TRE_LEN_IDX 1 |
26 | #define TRE_SRC_LOW_IDX 2 | 26 | #define HIDMA_TRE_SRC_LOW_IDX 2 |
27 | #define TRE_SRC_HI_IDX 3 | 27 | #define HIDMA_TRE_SRC_HI_IDX 3 |
28 | #define TRE_DEST_LOW_IDX 4 | 28 | #define HIDMA_TRE_DEST_LOW_IDX 4 |
29 | #define TRE_DEST_HI_IDX 5 | 29 | #define HIDMA_TRE_DEST_HI_IDX 5 |
30 | |||
31 | struct hidma_tx_status { | ||
32 | u8 err_info; /* error record in this transfer */ | ||
33 | u8 err_code; /* completion code */ | ||
34 | }; | ||
35 | 30 | ||
36 | struct hidma_tre { | 31 | struct hidma_tre { |
37 | atomic_t allocated; /* if this channel is allocated */ | 32 | atomic_t allocated; /* if this channel is allocated */ |
38 | bool queued; /* flag whether this is pending */ | 33 | bool queued; /* flag whether this is pending */ |
39 | u16 status; /* status */ | 34 | u16 status; /* status */ |
40 | u32 chidx; /* index of the tre */ | 35 | u32 idx; /* index of the tre */ |
41 | u32 dma_sig; /* signature of the tre */ | 36 | u32 dma_sig; /* signature of the tre */ |
42 | const char *dev_name; /* name of the device */ | 37 | const char *dev_name; /* name of the device */ |
43 | void (*callback)(void *data); /* requester callback */ | 38 | void (*callback)(void *data); /* requester callback */ |
44 | void *data; /* Data associated with this channel*/ | 39 | void *data; /* Data associated with this channel*/ |
45 | struct hidma_lldev *lldev; /* lldma device pointer */ | 40 | struct hidma_lldev *lldev; /* lldma device pointer */ |
46 | u32 tre_local[TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */ | 41 | u32 tre_local[HIDMA_TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */ |
47 | u32 tre_index; /* the offset where this was written*/ | 42 | u32 tre_index; /* the offset where this was written*/ |
48 | u32 int_flags; /* interrupt flags */ | 43 | u32 int_flags; /* interrupt flags */ |
44 | u8 err_info; /* error record in this transfer */ | ||
45 | u8 err_code; /* completion code */ | ||
49 | }; | 46 | }; |
50 | 47 | ||
51 | struct hidma_lldev { | 48 | struct hidma_lldev { |
@@ -61,22 +58,21 @@ struct hidma_lldev { | |||
61 | void __iomem *evca; /* Event Channel address */ | 58 | void __iomem *evca; /* Event Channel address */ |
62 | struct hidma_tre | 59 | struct hidma_tre |
63 | **pending_tre_list; /* Pointers to pending TREs */ | 60 | **pending_tre_list; /* Pointers to pending TREs */ |
64 | struct hidma_tx_status | ||
65 | *tx_status_list; /* Pointers to pending TREs status*/ | ||
66 | s32 pending_tre_count; /* Number of TREs pending */ | 61 | s32 pending_tre_count; /* Number of TREs pending */ |
67 | 62 | ||
68 | void *tre_ring; /* TRE ring */ | 63 | void *tre_ring; /* TRE ring */ |
69 | dma_addr_t tre_ring_handle; /* TRE ring to be shared with HW */ | 64 | dma_addr_t tre_dma; /* TRE ring to be shared with HW */ |
70 | u32 tre_ring_size; /* Byte size of the ring */ | 65 | u32 tre_ring_size; /* Byte size of the ring */ |
71 | u32 tre_processed_off; /* last processed TRE */ | 66 | u32 tre_processed_off; /* last processed TRE */ |
72 | 67 | ||
73 | void *evre_ring; /* EVRE ring */ | 68 | void *evre_ring; /* EVRE ring */ |
74 | dma_addr_t evre_ring_handle; /* EVRE ring to be shared with HW */ | 69 | dma_addr_t evre_dma; /* EVRE ring to be shared with HW */ |
75 | u32 evre_ring_size; /* Byte size of the ring */ | 70 | u32 evre_ring_size; /* Byte size of the ring */ |
76 | u32 evre_processed_off; /* last processed EVRE */ | 71 | u32 evre_processed_off; /* last processed EVRE */ |
77 | 72 | ||
78 | u32 tre_write_offset; /* TRE write location */ | 73 | u32 tre_write_offset; /* TRE write location */ |
79 | struct tasklet_struct task; /* task delivering notifications */ | 74 | struct tasklet_struct task; /* task delivering notifications */ |
75 | struct tasklet_struct rst_task; /* task to reset HW */ | ||
80 | DECLARE_KFIFO_PTR(handoff_fifo, | 76 | DECLARE_KFIFO_PTR(handoff_fifo, |
81 | struct hidma_tre *); /* pending TREs FIFO */ | 77 | struct hidma_tre *); /* pending TREs FIFO */ |
82 | }; | 78 | }; |
@@ -145,8 +141,8 @@ enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch); | |||
145 | bool hidma_ll_isenabled(struct hidma_lldev *llhndl); | 141 | bool hidma_ll_isenabled(struct hidma_lldev *llhndl); |
146 | void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch); | 142 | void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch); |
147 | void hidma_ll_start(struct hidma_lldev *llhndl); | 143 | void hidma_ll_start(struct hidma_lldev *llhndl); |
148 | int hidma_ll_pause(struct hidma_lldev *llhndl); | 144 | int hidma_ll_disable(struct hidma_lldev *lldev); |
149 | int hidma_ll_resume(struct hidma_lldev *llhndl); | 145 | int hidma_ll_enable(struct hidma_lldev *llhndl); |
150 | void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch, | 146 | void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch, |
151 | dma_addr_t src, dma_addr_t dest, u32 len, u32 flags); | 147 | dma_addr_t src, dma_addr_t dest, u32 len, u32 flags); |
152 | int hidma_ll_setup(struct hidma_lldev *lldev); | 148 | int hidma_ll_setup(struct hidma_lldev *lldev); |
@@ -157,4 +153,6 @@ int hidma_ll_uninit(struct hidma_lldev *llhndl); | |||
157 | irqreturn_t hidma_ll_inthandler(int irq, void *arg); | 153 | irqreturn_t hidma_ll_inthandler(int irq, void *arg); |
158 | void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info, | 154 | void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info, |
159 | u8 err_code); | 155 | u8 err_code); |
156 | int hidma_debug_init(struct hidma_dev *dmadev); | ||
157 | void hidma_debug_uninit(struct hidma_dev *dmadev); | ||
160 | #endif | 158 | #endif |
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c new file mode 100644 index 000000000000..fa827e5ffd68 --- /dev/null +++ b/drivers/dma/qcom/hidma_dbg.c | |||
@@ -0,0 +1,217 @@ | |||
1 | /* | ||
2 | * Qualcomm Technologies HIDMA debug file | ||
3 | * | ||
4 | * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 and | ||
8 | * only version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/debugfs.h> | ||
17 | #include <linux/device.h> | ||
18 | #include <linux/list.h> | ||
19 | #include <linux/pm_runtime.h> | ||
20 | |||
21 | #include "hidma.h" | ||
22 | |||
23 | static void hidma_ll_chstats(struct seq_file *s, void *llhndl, u32 tre_ch) | ||
24 | { | ||
25 | struct hidma_lldev *lldev = llhndl; | ||
26 | struct hidma_tre *tre; | ||
27 | u32 length; | ||
28 | dma_addr_t src_start; | ||
29 | dma_addr_t dest_start; | ||
30 | u32 *tre_local; | ||
31 | |||
32 | if (tre_ch >= lldev->nr_tres) { | ||
33 | dev_err(lldev->dev, "invalid TRE number in chstats:%d", tre_ch); | ||
34 | return; | ||
35 | } | ||
36 | tre = &lldev->trepool[tre_ch]; | ||
37 | seq_printf(s, "------Channel %d -----\n", tre_ch); | ||
38 | seq_printf(s, "allocated=%d\n", atomic_read(&tre->allocated)); | ||
39 | seq_printf(s, "queued = 0x%x\n", tre->queued); | ||
40 | seq_printf(s, "err_info = 0x%x\n", tre->err_info); | ||
41 | seq_printf(s, "err_code = 0x%x\n", tre->err_code); | ||
42 | seq_printf(s, "status = 0x%x\n", tre->status); | ||
43 | seq_printf(s, "idx = 0x%x\n", tre->idx); | ||
44 | seq_printf(s, "dma_sig = 0x%x\n", tre->dma_sig); | ||
45 | seq_printf(s, "dev_name=%s\n", tre->dev_name); | ||
46 | seq_printf(s, "callback=%p\n", tre->callback); | ||
47 | seq_printf(s, "data=%p\n", tre->data); | ||
48 | seq_printf(s, "tre_index = 0x%x\n", tre->tre_index); | ||
49 | |||
50 | tre_local = &tre->tre_local[0]; | ||
51 | src_start = tre_local[HIDMA_TRE_SRC_LOW_IDX]; | ||
52 | src_start = ((u64) (tre_local[HIDMA_TRE_SRC_HI_IDX]) << 32) + src_start; | ||
53 | dest_start = tre_local[HIDMA_TRE_DEST_LOW_IDX]; | ||
54 | dest_start += ((u64) (tre_local[HIDMA_TRE_DEST_HI_IDX]) << 32); | ||
55 | length = tre_local[HIDMA_TRE_LEN_IDX]; | ||
56 | |||
57 | seq_printf(s, "src=%pap\n", &src_start); | ||
58 | seq_printf(s, "dest=%pap\n", &dest_start); | ||
59 | seq_printf(s, "length = 0x%x\n", length); | ||
60 | } | ||
61 | |||
62 | static void hidma_ll_devstats(struct seq_file *s, void *llhndl) | ||
63 | { | ||
64 | struct hidma_lldev *lldev = llhndl; | ||
65 | |||
66 | seq_puts(s, "------Device -----\n"); | ||
67 | seq_printf(s, "lldev init = 0x%x\n", lldev->initialized); | ||
68 | seq_printf(s, "trch_state = 0x%x\n", lldev->trch_state); | ||
69 | seq_printf(s, "evch_state = 0x%x\n", lldev->evch_state); | ||
70 | seq_printf(s, "chidx = 0x%x\n", lldev->chidx); | ||
71 | seq_printf(s, "nr_tres = 0x%x\n", lldev->nr_tres); | ||
72 | seq_printf(s, "trca=%p\n", lldev->trca); | ||
73 | seq_printf(s, "tre_ring=%p\n", lldev->tre_ring); | ||
74 | seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma); | ||
75 | seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size); | ||
76 | seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off); | ||
77 | seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count); | ||
78 | seq_printf(s, "evca=%p\n", lldev->evca); | ||
79 | seq_printf(s, "evre_ring=%p\n", lldev->evre_ring); | ||
80 | seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma); | ||
81 | seq_printf(s, "evre_ring_size = 0x%x\n", lldev->evre_ring_size); | ||
82 | seq_printf(s, "evre_processed_off = 0x%x\n", lldev->evre_processed_off); | ||
83 | seq_printf(s, "tre_write_offset = 0x%x\n", lldev->tre_write_offset); | ||
84 | } | ||
85 | |||
86 | /* | ||
87 | * hidma_chan_stats: display HIDMA channel statistics | ||
88 | * | ||
89 | * Display the statistics for the current HIDMA virtual channel device. | ||
90 | */ | ||
91 | static int hidma_chan_stats(struct seq_file *s, void *unused) | ||
92 | { | ||
93 | struct hidma_chan *mchan = s->private; | ||
94 | struct hidma_desc *mdesc; | ||
95 | struct hidma_dev *dmadev = mchan->dmadev; | ||
96 | |||
97 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
98 | seq_printf(s, "paused=%u\n", mchan->paused); | ||
99 | seq_printf(s, "dma_sig=%u\n", mchan->dma_sig); | ||
100 | seq_puts(s, "prepared\n"); | ||
101 | list_for_each_entry(mdesc, &mchan->prepared, node) | ||
102 | hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch); | ||
103 | |||
104 | seq_puts(s, "active\n"); | ||
105 | list_for_each_entry(mdesc, &mchan->active, node) | ||
106 | hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch); | ||
107 | |||
108 | seq_puts(s, "completed\n"); | ||
109 | list_for_each_entry(mdesc, &mchan->completed, node) | ||
110 | hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch); | ||
111 | |||
112 | hidma_ll_devstats(s, mchan->dmadev->lldev); | ||
113 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
114 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * hidma_dma_info: display HIDMA device info | ||
120 | * | ||
121 | * Display the info for the current HIDMA device. | ||
122 | */ | ||
123 | static int hidma_dma_info(struct seq_file *s, void *unused) | ||
124 | { | ||
125 | struct hidma_dev *dmadev = s->private; | ||
126 | resource_size_t sz; | ||
127 | |||
128 | seq_printf(s, "nr_descriptors=%d\n", dmadev->nr_descriptors); | ||
129 | seq_printf(s, "dev_trca=%p\n", &dmadev->dev_trca); | ||
130 | seq_printf(s, "dev_trca_phys=%pa\n", &dmadev->trca_resource->start); | ||
131 | sz = resource_size(dmadev->trca_resource); | ||
132 | seq_printf(s, "dev_trca_size=%pa\n", &sz); | ||
133 | seq_printf(s, "dev_evca=%p\n", &dmadev->dev_evca); | ||
134 | seq_printf(s, "dev_evca_phys=%pa\n", &dmadev->evca_resource->start); | ||
135 | sz = resource_size(dmadev->evca_resource); | ||
136 | seq_printf(s, "dev_evca_size=%pa\n", &sz); | ||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | static int hidma_chan_stats_open(struct inode *inode, struct file *file) | ||
141 | { | ||
142 | return single_open(file, hidma_chan_stats, inode->i_private); | ||
143 | } | ||
144 | |||
145 | static int hidma_dma_info_open(struct inode *inode, struct file *file) | ||
146 | { | ||
147 | return single_open(file, hidma_dma_info, inode->i_private); | ||
148 | } | ||
149 | |||
150 | static const struct file_operations hidma_chan_fops = { | ||
151 | .open = hidma_chan_stats_open, | ||
152 | .read = seq_read, | ||
153 | .llseek = seq_lseek, | ||
154 | .release = single_release, | ||
155 | }; | ||
156 | |||
157 | static const struct file_operations hidma_dma_fops = { | ||
158 | .open = hidma_dma_info_open, | ||
159 | .read = seq_read, | ||
160 | .llseek = seq_lseek, | ||
161 | .release = single_release, | ||
162 | }; | ||
163 | |||
164 | void hidma_debug_uninit(struct hidma_dev *dmadev) | ||
165 | { | ||
166 | debugfs_remove_recursive(dmadev->debugfs); | ||
167 | debugfs_remove_recursive(dmadev->stats); | ||
168 | } | ||
169 | |||
170 | int hidma_debug_init(struct hidma_dev *dmadev) | ||
171 | { | ||
172 | int rc = 0; | ||
173 | int chidx = 0; | ||
174 | struct list_head *position = NULL; | ||
175 | |||
176 | dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL); | ||
177 | if (!dmadev->debugfs) { | ||
178 | rc = -ENODEV; | ||
179 | return rc; | ||
180 | } | ||
181 | |||
182 | /* walk through the virtual channel list */ | ||
183 | list_for_each(position, &dmadev->ddev.channels) { | ||
184 | struct hidma_chan *chan; | ||
185 | |||
186 | chan = list_entry(position, struct hidma_chan, | ||
187 | chan.device_node); | ||
188 | sprintf(chan->dbg_name, "chan%d", chidx); | ||
189 | chan->debugfs = debugfs_create_dir(chan->dbg_name, | ||
190 | dmadev->debugfs); | ||
191 | if (!chan->debugfs) { | ||
192 | rc = -ENOMEM; | ||
193 | goto cleanup; | ||
194 | } | ||
195 | chan->stats = debugfs_create_file("stats", S_IRUGO, | ||
196 | chan->debugfs, chan, | ||
197 | &hidma_chan_fops); | ||
198 | if (!chan->stats) { | ||
199 | rc = -ENOMEM; | ||
200 | goto cleanup; | ||
201 | } | ||
202 | chidx++; | ||
203 | } | ||
204 | |||
205 | dmadev->stats = debugfs_create_file("stats", S_IRUGO, | ||
206 | dmadev->debugfs, dmadev, | ||
207 | &hidma_dma_fops); | ||
208 | if (!dmadev->stats) { | ||
209 | rc = -ENOMEM; | ||
210 | goto cleanup; | ||
211 | } | ||
212 | |||
213 | return 0; | ||
214 | cleanup: | ||
215 | hidma_debug_uninit(dmadev); | ||
216 | return rc; | ||
217 | } | ||
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c new file mode 100644 index 000000000000..f3929001539b --- /dev/null +++ b/drivers/dma/qcom/hidma_ll.c | |||
@@ -0,0 +1,872 @@ | |||
1 | /* | ||
2 | * Qualcomm Technologies HIDMA DMA engine low level code | ||
3 | * | ||
4 | * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 and | ||
8 | * only version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/dmaengine.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/highmem.h> | ||
21 | #include <linux/dma-mapping.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/atomic.h> | ||
24 | #include <linux/iopoll.h> | ||
25 | #include <linux/kfifo.h> | ||
26 | #include <linux/bitops.h> | ||
27 | |||
28 | #include "hidma.h" | ||
29 | |||
30 | #define HIDMA_EVRE_SIZE 16 /* each EVRE is 16 bytes */ | ||
31 | |||
32 | #define HIDMA_TRCA_CTRLSTS_REG 0x000 | ||
33 | #define HIDMA_TRCA_RING_LOW_REG 0x008 | ||
34 | #define HIDMA_TRCA_RING_HIGH_REG 0x00C | ||
35 | #define HIDMA_TRCA_RING_LEN_REG 0x010 | ||
36 | #define HIDMA_TRCA_DOORBELL_REG 0x400 | ||
37 | |||
38 | #define HIDMA_EVCA_CTRLSTS_REG 0x000 | ||
39 | #define HIDMA_EVCA_INTCTRL_REG 0x004 | ||
40 | #define HIDMA_EVCA_RING_LOW_REG 0x008 | ||
41 | #define HIDMA_EVCA_RING_HIGH_REG 0x00C | ||
42 | #define HIDMA_EVCA_RING_LEN_REG 0x010 | ||
43 | #define HIDMA_EVCA_WRITE_PTR_REG 0x020 | ||
44 | #define HIDMA_EVCA_DOORBELL_REG 0x400 | ||
45 | |||
46 | #define HIDMA_EVCA_IRQ_STAT_REG 0x100 | ||
47 | #define HIDMA_EVCA_IRQ_CLR_REG 0x108 | ||
48 | #define HIDMA_EVCA_IRQ_EN_REG 0x110 | ||
49 | |||
50 | #define HIDMA_EVRE_CFG_IDX 0 | ||
51 | |||
52 | #define HIDMA_EVRE_ERRINFO_BIT_POS 24 | ||
53 | #define HIDMA_EVRE_CODE_BIT_POS 28 | ||
54 | |||
55 | #define HIDMA_EVRE_ERRINFO_MASK GENMASK(3, 0) | ||
56 | #define HIDMA_EVRE_CODE_MASK GENMASK(3, 0) | ||
57 | |||
58 | #define HIDMA_CH_CONTROL_MASK GENMASK(7, 0) | ||
59 | #define HIDMA_CH_STATE_MASK GENMASK(7, 0) | ||
60 | #define HIDMA_CH_STATE_BIT_POS 0x8 | ||
61 | |||
62 | #define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS 0 | ||
63 | #define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS 1 | ||
64 | #define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS 9 | ||
65 | #define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS 10 | ||
66 | #define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS 11 | ||
67 | #define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS 14 | ||
68 | |||
69 | #define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS) | \ | ||
70 | BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \ | ||
71 | BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \ | ||
72 | BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \ | ||
73 | BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS) | \ | ||
74 | BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS)) | ||
75 | |||
76 | #define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size) \ | ||
77 | do { \ | ||
78 | iter += size; \ | ||
79 | if (iter >= ring_size) \ | ||
80 | iter -= ring_size; \ | ||
81 | } while (0) | ||
82 | |||
83 | #define HIDMA_CH_STATE(val) \ | ||
84 | ((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK) | ||
85 | |||
86 | #define HIDMA_ERR_INT_MASK \ | ||
87 | (BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS) | \ | ||
88 | BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \ | ||
89 | BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \ | ||
90 | BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \ | ||
91 | BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS)) | ||
92 | |||
93 | enum ch_command { | ||
94 | HIDMA_CH_DISABLE = 0, | ||
95 | HIDMA_CH_ENABLE = 1, | ||
96 | HIDMA_CH_SUSPEND = 2, | ||
97 | HIDMA_CH_RESET = 9, | ||
98 | }; | ||
99 | |||
100 | enum ch_state { | ||
101 | HIDMA_CH_DISABLED = 0, | ||
102 | HIDMA_CH_ENABLED = 1, | ||
103 | HIDMA_CH_RUNNING = 2, | ||
104 | HIDMA_CH_SUSPENDED = 3, | ||
105 | HIDMA_CH_STOPPED = 4, | ||
106 | }; | ||
107 | |||
108 | enum tre_type { | ||
109 | HIDMA_TRE_MEMCPY = 3, | ||
110 | }; | ||
111 | |||
112 | enum err_code { | ||
113 | HIDMA_EVRE_STATUS_COMPLETE = 1, | ||
114 | HIDMA_EVRE_STATUS_ERROR = 4, | ||
115 | }; | ||
116 | |||
117 | static int hidma_is_chan_enabled(int state) | ||
118 | { | ||
119 | switch (state) { | ||
120 | case HIDMA_CH_ENABLED: | ||
121 | case HIDMA_CH_RUNNING: | ||
122 | return true; | ||
123 | default: | ||
124 | return false; | ||
125 | } | ||
126 | } | ||
127 | |||
128 | void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch) | ||
129 | { | ||
130 | struct hidma_tre *tre; | ||
131 | |||
132 | if (tre_ch >= lldev->nr_tres) { | ||
133 | dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch); | ||
134 | return; | ||
135 | } | ||
136 | |||
137 | tre = &lldev->trepool[tre_ch]; | ||
138 | if (atomic_read(&tre->allocated) != true) { | ||
139 | dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch); | ||
140 | return; | ||
141 | } | ||
142 | |||
143 | atomic_set(&tre->allocated, 0); | ||
144 | } | ||
145 | |||
146 | int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name, | ||
147 | void (*callback)(void *data), void *data, u32 *tre_ch) | ||
148 | { | ||
149 | unsigned int i; | ||
150 | struct hidma_tre *tre; | ||
151 | u32 *tre_local; | ||
152 | |||
153 | if (!tre_ch || !lldev) | ||
154 | return -EINVAL; | ||
155 | |||
156 | /* need to have at least one empty spot in the queue */ | ||
157 | for (i = 0; i < lldev->nr_tres - 1; i++) { | ||
158 | if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1)) | ||
159 | break; | ||
160 | } | ||
161 | |||
162 | if (i == (lldev->nr_tres - 1)) | ||
163 | return -ENOMEM; | ||
164 | |||
165 | tre = &lldev->trepool[i]; | ||
166 | tre->dma_sig = sig; | ||
167 | tre->dev_name = dev_name; | ||
168 | tre->callback = callback; | ||
169 | tre->data = data; | ||
170 | tre->idx = i; | ||
171 | tre->status = 0; | ||
172 | tre->queued = 0; | ||
173 | tre->err_code = 0; | ||
174 | tre->err_info = 0; | ||
175 | tre->lldev = lldev; | ||
176 | tre_local = &tre->tre_local[0]; | ||
177 | tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY; | ||
178 | tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8; | ||
179 | tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16); /* set IEOB */ | ||
180 | *tre_ch = i; | ||
181 | if (callback) | ||
182 | callback(data); | ||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | /* | ||
187 | * Multiple TREs may be queued and waiting in the pending queue. | ||
188 | */ | ||
189 | static void hidma_ll_tre_complete(unsigned long arg) | ||
190 | { | ||
191 | struct hidma_lldev *lldev = (struct hidma_lldev *)arg; | ||
192 | struct hidma_tre *tre; | ||
193 | |||
194 | while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) { | ||
195 | /* call the user if it has been read by the hardware */ | ||
196 | if (tre->callback) | ||
197 | tre->callback(tre->data); | ||
198 | } | ||
199 | } | ||
200 | |||
201 | static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator, | ||
202 | u8 err_info, u8 err_code) | ||
203 | { | ||
204 | struct hidma_tre *tre; | ||
205 | unsigned long flags; | ||
206 | |||
207 | spin_lock_irqsave(&lldev->lock, flags); | ||
208 | tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE]; | ||
209 | if (!tre) { | ||
210 | spin_unlock_irqrestore(&lldev->lock, flags); | ||
211 | dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n", | ||
212 | tre_iterator / HIDMA_TRE_SIZE); | ||
213 | return -EINVAL; | ||
214 | } | ||
215 | lldev->pending_tre_list[tre->tre_index] = NULL; | ||
216 | |||
217 | /* | ||
218 | * Keep track of pending TREs that SW is expecting to receive | ||
219 | * from HW. We got one now. Decrement our counter. | ||
220 | */ | ||
221 | lldev->pending_tre_count--; | ||
222 | if (lldev->pending_tre_count < 0) { | ||
223 | dev_warn(lldev->dev, "tre count mismatch on completion"); | ||
224 | lldev->pending_tre_count = 0; | ||
225 | } | ||
226 | |||
227 | spin_unlock_irqrestore(&lldev->lock, flags); | ||
228 | |||
229 | tre->err_info = err_info; | ||
230 | tre->err_code = err_code; | ||
231 | tre->queued = 0; | ||
232 | |||
233 | kfifo_put(&lldev->handoff_fifo, tre); | ||
234 | tasklet_schedule(&lldev->task); | ||
235 | |||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | /* | ||
240 | * Called to handle the interrupt for the channel. | ||
241 | * Return a positive number if TRE or EVRE were consumed on this run. | ||
242 | * Return a positive number if there are pending TREs or EVREs. | ||
243 | * Return 0 if there is nothing to consume or no pending TREs/EVREs found. | ||
244 | */ | ||
245 | static int hidma_handle_tre_completion(struct hidma_lldev *lldev) | ||
246 | { | ||
247 | u32 evre_ring_size = lldev->evre_ring_size; | ||
248 | u32 tre_ring_size = lldev->tre_ring_size; | ||
249 | u32 err_info, err_code, evre_write_off; | ||
250 | u32 tre_iterator, evre_iterator; | ||
251 | u32 num_completed = 0; | ||
252 | |||
253 | evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG); | ||
254 | tre_iterator = lldev->tre_processed_off; | ||
255 | evre_iterator = lldev->evre_processed_off; | ||
256 | |||
257 | if ((evre_write_off > evre_ring_size) || | ||
258 | (evre_write_off % HIDMA_EVRE_SIZE)) { | ||
259 | dev_err(lldev->dev, "HW reports invalid EVRE write offset\n"); | ||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | /* | ||
264 | * By the time control reaches here the number of EVREs and TREs | ||
265 | * may not match. Only consume the ones that hardware told us. | ||
266 | */ | ||
267 | while ((evre_iterator != evre_write_off)) { | ||
268 | u32 *current_evre = lldev->evre_ring + evre_iterator; | ||
269 | u32 cfg; | ||
270 | |||
271 | cfg = current_evre[HIDMA_EVRE_CFG_IDX]; | ||
272 | err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS; | ||
273 | err_info &= HIDMA_EVRE_ERRINFO_MASK; | ||
274 | err_code = | ||
275 | (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK; | ||
276 | |||
277 | if (hidma_post_completed(lldev, tre_iterator, err_info, | ||
278 | err_code)) | ||
279 | break; | ||
280 | |||
281 | HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE, | ||
282 | tre_ring_size); | ||
283 | HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE, | ||
284 | evre_ring_size); | ||
285 | |||
286 | /* | ||
287 | * Read the new event descriptor written by the HW. | ||
288 | * As we are processing the delivered events, other events | ||
289 | * get queued to the SW for processing. | ||
290 | */ | ||
291 | evre_write_off = | ||
292 | readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG); | ||
293 | num_completed++; | ||
294 | } | ||
295 | |||
296 | if (num_completed) { | ||
297 | u32 evre_read_off = (lldev->evre_processed_off + | ||
298 | HIDMA_EVRE_SIZE * num_completed); | ||
299 | u32 tre_read_off = (lldev->tre_processed_off + | ||
300 | HIDMA_TRE_SIZE * num_completed); | ||
301 | |||
302 | evre_read_off = evre_read_off % evre_ring_size; | ||
303 | tre_read_off = tre_read_off % tre_ring_size; | ||
304 | |||
305 | writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG); | ||
306 | |||
307 | /* record the last processed tre offset */ | ||
308 | lldev->tre_processed_off = tre_read_off; | ||
309 | lldev->evre_processed_off = evre_read_off; | ||
310 | } | ||
311 | |||
312 | return num_completed; | ||
313 | } | ||
314 | |||
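Both loops above advance byte offsets around fixed-size rings, wrapping back to zero via HIDMA_INCREMENT_ITERATOR. The macro is defined elsewhere in hidma_ll.c and is not part of this hunk; as a minimal sketch, assuming a definition along these lines:

/* Sketch only: advance a byte offset around a ring of ring_size bytes. */
#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size)	\
do {							\
	(iter) += (size);				\
	if ((iter) >= (ring_size))			\
		(iter) -= (ring_size);			\
} while (0)

Because the TRE and EVRE rings hold the same number of elements, stepping both iterators in lock-step keeps each EVRE paired with the TRE whose completion it reports.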
315 | void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info, | ||
316 | u8 err_code) | ||
317 | { | ||
318 | u32 tre_iterator; | ||
319 | u32 tre_ring_size = lldev->tre_ring_size; | ||
320 | int num_completed = 0; | ||
321 | u32 tre_read_off; | ||
322 | |||
323 | tre_iterator = lldev->tre_processed_off; | ||
324 | while (lldev->pending_tre_count) { | ||
325 | if (hidma_post_completed(lldev, tre_iterator, err_info, | ||
326 | err_code)) | ||
327 | break; | ||
328 | HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE, | ||
329 | tre_ring_size); | ||
330 | num_completed++; | ||
331 | } | ||
332 | tre_read_off = (lldev->tre_processed_off + | ||
333 | HIDMA_TRE_SIZE * num_completed); | ||
334 | |||
335 | tre_read_off = tre_read_off % tre_ring_size; | ||
336 | |||
337 | /* record the last processed tre offset */ | ||
338 | lldev->tre_processed_off = tre_read_off; | ||
339 | } | ||
340 | |||
341 | static int hidma_ll_reset(struct hidma_lldev *lldev) | ||
342 | { | ||
343 | u32 val; | ||
344 | int ret; | ||
345 | |||
346 | val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
347 | val &= ~(HIDMA_CH_CONTROL_MASK << 16); | ||
348 | val |= HIDMA_CH_RESET << 16; | ||
349 | writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
350 | |||
351 | /* | ||
352 | * Wait for the reset to take effect and the DMA logic to quiesce: | ||
353 | * poll the channel state every 1 ms, up to a 10 ms timeout. | ||
354 | */ | ||
355 | ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val, | ||
356 | HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED, | ||
357 | 1000, 10000); | ||
358 | if (ret) { | ||
359 | dev_err(lldev->dev, "transfer channel did not reset\n"); | ||
360 | return ret; | ||
361 | } | ||
362 | |||
363 | val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
364 | val &= ~(HIDMA_CH_CONTROL_MASK << 16); | ||
365 | val |= HIDMA_CH_RESET << 16; | ||
366 | writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
367 | |||
368 | /* | ||
369 | * Wait for the reset to take effect and the DMA logic to quiesce: | ||
370 | * poll the channel state every 1 ms, up to a 10 ms timeout. | ||
371 | */ | ||
372 | ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val, | ||
373 | HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED, | ||
374 | 1000, 10000); | ||
375 | if (ret) | ||
376 | return ret; | ||
377 | |||
378 | lldev->trch_state = HIDMA_CH_DISABLED; | ||
379 | lldev->evch_state = HIDMA_CH_DISABLED; | ||
380 | return 0; | ||
381 | } | ||
382 | |||
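Both readl_poll_timeout() calls above come from <linux/iopoll.h>: they re-read the control/status register until the channel reports HIDMA_CH_DISABLED or the timeout expires. Roughly, one such wait open-codes to the sketch below (hidma_wait_channel_disabled is a made-up name, not the kernel helper):

static int hidma_wait_channel_disabled(void __iomem *ctrlsts)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(10000);
	u32 val;

	for (;;) {
		val = readl(ctrlsts);
		if (HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED)
			return 0;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(1000, 1100);	/* roughly 1 ms between reads */
	}
}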
383 | /* | ||
384 | * Abort all transactions and perform a reset. | ||
385 | */ | ||
386 | static void hidma_ll_abort(unsigned long arg) | ||
387 | { | ||
388 | struct hidma_lldev *lldev = (struct hidma_lldev *)arg; | ||
389 | u8 err_code = HIDMA_EVRE_STATUS_ERROR; | ||
390 | u8 err_info = 0xFF; | ||
391 | int rc; | ||
392 | |||
393 | hidma_cleanup_pending_tre(lldev, err_info, err_code); | ||
394 | |||
395 | /* reset the channel for recovery */ | ||
396 | rc = hidma_ll_setup(lldev); | ||
397 | if (rc) { | ||
398 | dev_err(lldev->dev, "channel reinitialize failed after error\n"); | ||
399 | return; | ||
400 | } | ||
401 | writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
402 | } | ||
403 | |||
404 | /* | ||
405 | * The interrupt handler for HIDMA will try to consume as many pending | ||
406 | * EVRE from the event queue as possible. Each EVRE has an associated | ||
407 | * TRE that holds the user interface parameters. EVRE reports the | ||
408 | * result of the transaction. Hardware guarantees ordering between EVREs | ||
409 | * and TREs. We use last processed offset to figure out which TRE is | ||
410 | * associated with which EVRE. If two TREs are consumed by HW, the EVREs | ||
411 | * are in order in the event ring. | ||
412 | * | ||
413 | * This handler does one pass to consume EVREs. Other EVREs may be | ||
414 | * delivered while it is working; it tries to consume those incoming | ||
415 | * EVREs one more time and then returns. | ||
416 | * | ||
417 | * For unprocessed EVREs, hardware will trigger another interrupt until | ||
418 | * all the interrupt bits are cleared. | ||
419 | * | ||
420 | * Hardware guarantees that by the time interrupt is observed, all data | ||
421 | * transactions in flight are delivered to their respective places and | ||
422 | * are visible to the CPU. | ||
423 | * | ||
424 | * On demand paging for IOMMU is only supported for PCIe via PRI | ||
425 | * (Page Request Interface) not for HIDMA. All other hardware instances | ||
426 | * including HIDMA work on pinned DMA addresses. | ||
427 | * | ||
428 | * HIDMA is not aware of IOMMU presence since it follows the DMA API. All | ||
429 | * IOMMU latency will be built into the data movement time. By the time | ||
430 | * the interrupt fires, IOMMU lookups and data movement have already taken place. | ||
431 | * | ||
432 | * In a typical PCI endpoint ISR, the first read traditionally flushes all | ||
433 | * outstanding writes to their destination; that concept does not apply | ||
434 | * to this hardware. | ||
435 | */ | ||
436 | irqreturn_t hidma_ll_inthandler(int chirq, void *arg) | ||
437 | { | ||
438 | struct hidma_lldev *lldev = arg; | ||
439 | u32 status; | ||
440 | u32 enable; | ||
441 | u32 cause; | ||
442 | |||
443 | /* | ||
444 | * Fine tuned for this HW... | ||
445 | * | ||
446 | * This ISR has been designed for this particular hardware. Relaxed | ||
447 | * read and write accessors are used for performance reasons due to | ||
448 | * interrupt delivery guarantees. Do not copy this code blindly and | ||
449 | * expect that to work. | ||
450 | */ | ||
451 | status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); | ||
452 | enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
453 | cause = status & enable; | ||
454 | |||
455 | while (cause) { | ||
456 | if (cause & HIDMA_ERR_INT_MASK) { | ||
457 | dev_err(lldev->dev, "error 0x%x, resetting...\n", | ||
458 | cause); | ||
459 | |||
460 | /* Clear out pending interrupts */ | ||
461 | writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
462 | |||
463 | tasklet_schedule(&lldev->rst_task); | ||
464 | goto out; | ||
465 | } | ||
466 | |||
467 | /* | ||
468 | * Try to consume as many EVREs as possible. | ||
469 | */ | ||
470 | hidma_handle_tre_completion(lldev); | ||
471 | |||
472 | /* We consumed TREs or there are pending TREs or EVREs. */ | ||
473 | writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
474 | |||
475 | /* | ||
476 | * Another interrupt might have arrived while we are | ||
477 | * processing this one. Read the new cause. | ||
478 | */ | ||
479 | status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); | ||
480 | enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
481 | cause = status & enable; | ||
482 | } | ||
483 | |||
484 | out: | ||
485 | return IRQ_HANDLED; | ||
486 | } | ||
487 | |||
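Stripped of the HIDMA specifics, the handler follows the familiar read-status, mask-with-enable, handle, acknowledge, re-read loop. A generic sketch of that pattern (IRQ_STAT, IRQ_EN, IRQ_CLR and handle_events are placeholders, not this driver's symbols):

static irqreturn_t generic_level_isr(void __iomem *base, void *ctx)
{
	u32 cause = readl(base + IRQ_STAT) & readl(base + IRQ_EN);

	while (cause) {
		handle_events(ctx);			/* consume completed work */
		writel(cause, base + IRQ_CLR);		/* acknowledge what we saw */
		cause = readl(base + IRQ_STAT) & readl(base + IRQ_EN);
	}

	return IRQ_HANDLED;
}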
488 | int hidma_ll_enable(struct hidma_lldev *lldev) | ||
489 | { | ||
490 | u32 val; | ||
491 | int ret; | ||
492 | |||
493 | val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
494 | val &= ~(HIDMA_CH_CONTROL_MASK << 16); | ||
495 | val |= HIDMA_CH_ENABLE << 16; | ||
496 | writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
497 | |||
498 | ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val, | ||
499 | hidma_is_chan_enabled(HIDMA_CH_STATE(val)), | ||
500 | 1000, 10000); | ||
501 | if (ret) { | ||
502 | dev_err(lldev->dev, "event channel did not get enabled\n"); | ||
503 | return ret; | ||
504 | } | ||
505 | |||
506 | val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
507 | val &= ~(HIDMA_CH_CONTROL_MASK << 16); | ||
508 | val |= HIDMA_CH_ENABLE << 16; | ||
509 | writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
510 | |||
511 | ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val, | ||
512 | hidma_is_chan_enabled(HIDMA_CH_STATE(val)), | ||
513 | 1000, 10000); | ||
514 | if (ret) { | ||
515 | dev_err(lldev->dev, "transfer channel did not get enabled\n"); | ||
516 | return ret; | ||
517 | } | ||
518 | |||
519 | lldev->trch_state = HIDMA_CH_ENABLED; | ||
520 | lldev->evch_state = HIDMA_CH_ENABLED; | ||
521 | |||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | void hidma_ll_start(struct hidma_lldev *lldev) | ||
526 | { | ||
527 | unsigned long irqflags; | ||
528 | |||
529 | spin_lock_irqsave(&lldev->lock, irqflags); | ||
530 | writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG); | ||
531 | spin_unlock_irqrestore(&lldev->lock, irqflags); | ||
532 | } | ||
533 | |||
534 | bool hidma_ll_isenabled(struct hidma_lldev *lldev) | ||
535 | { | ||
536 | u32 val; | ||
537 | |||
538 | val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
539 | lldev->trch_state = HIDMA_CH_STATE(val); | ||
540 | val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
541 | lldev->evch_state = HIDMA_CH_STATE(val); | ||
542 | |||
543 | /* both channels have to be enabled before calling this function */ | ||
544 | if (hidma_is_chan_enabled(lldev->trch_state) && | ||
545 | hidma_is_chan_enabled(lldev->evch_state)) | ||
546 | return true; | ||
547 | |||
548 | return false; | ||
549 | } | ||
550 | |||
551 | void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch) | ||
552 | { | ||
553 | struct hidma_tre *tre; | ||
554 | unsigned long flags; | ||
555 | |||
556 | tre = &lldev->trepool[tre_ch]; | ||
557 | |||
558 | /* copy the TRE into its location in the TRE ring */ | ||
559 | spin_lock_irqsave(&lldev->lock, flags); | ||
560 | tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE; | ||
561 | lldev->pending_tre_list[tre->tre_index] = tre; | ||
562 | memcpy(lldev->tre_ring + lldev->tre_write_offset, | ||
563 | &tre->tre_local[0], HIDMA_TRE_SIZE); | ||
564 | tre->err_code = 0; | ||
565 | tre->err_info = 0; | ||
566 | tre->queued = 1; | ||
567 | lldev->pending_tre_count++; | ||
568 | lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE) | ||
569 | % lldev->tre_ring_size; | ||
570 | spin_unlock_irqrestore(&lldev->lock, flags); | ||
571 | } | ||
572 | |||
573 | /* | ||
574 | * Note that even though we stop this channel, a transaction already in | ||
575 | * flight will still complete and invoke its callback. This request only | ||
576 | * prevents further requests from being issued. | ||
577 | */ | ||
578 | int hidma_ll_disable(struct hidma_lldev *lldev) | ||
579 | { | ||
580 | u32 val; | ||
581 | int ret; | ||
582 | |||
583 | val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
584 | lldev->evch_state = HIDMA_CH_STATE(val); | ||
585 | val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
586 | lldev->trch_state = HIDMA_CH_STATE(val); | ||
587 | |||
588 | /* already suspended by this OS */ | ||
589 | if ((lldev->trch_state == HIDMA_CH_SUSPENDED) || | ||
590 | (lldev->evch_state == HIDMA_CH_SUSPENDED)) | ||
591 | return 0; | ||
592 | |||
593 | /* already stopped by the manager */ | ||
594 | if ((lldev->trch_state == HIDMA_CH_STOPPED) || | ||
595 | (lldev->evch_state == HIDMA_CH_STOPPED)) | ||
596 | return 0; | ||
597 | |||
598 | val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
599 | val &= ~(HIDMA_CH_CONTROL_MASK << 16); | ||
600 | val |= HIDMA_CH_SUSPEND << 16; | ||
601 | writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
602 | |||
603 | /* | ||
604 | * Wait for the suspend request to take effect on the transfer channel: | ||
605 | * poll the channel state every 1 ms, up to a 10 ms timeout. | ||
606 | */ | ||
607 | ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val, | ||
608 | HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED, | ||
609 | 1000, 10000); | ||
610 | if (ret) | ||
611 | return ret; | ||
612 | |||
613 | val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
614 | val &= ~(HIDMA_CH_CONTROL_MASK << 16); | ||
615 | val |= HIDMA_CH_SUSPEND << 16; | ||
616 | writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
617 | |||
618 | /* | ||
619 | * Wait for the suspend request to take effect on the event channel: | ||
620 | * poll the channel state every 1 ms, up to a 10 ms timeout. | ||
621 | */ | ||
622 | ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val, | ||
623 | HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED, | ||
624 | 1000, 10000); | ||
625 | if (ret) | ||
626 | return ret; | ||
627 | |||
628 | lldev->trch_state = HIDMA_CH_SUSPENDED; | ||
629 | lldev->evch_state = HIDMA_CH_SUSPENDED; | ||
630 | return 0; | ||
631 | } | ||
632 | |||
633 | void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch, | ||
634 | dma_addr_t src, dma_addr_t dest, u32 len, | ||
635 | u32 flags) | ||
636 | { | ||
637 | struct hidma_tre *tre; | ||
638 | u32 *tre_local; | ||
639 | |||
640 | if (tre_ch >= lldev->nr_tres) { | ||
641 | dev_err(lldev->dev, "invalid TRE number in transfer params:%d", | ||
642 | tre_ch); | ||
643 | return; | ||
644 | } | ||
645 | |||
646 | tre = &lldev->trepool[tre_ch]; | ||
647 | if (atomic_read(&tre->allocated) != true) { | ||
648 | dev_err(lldev->dev, "trying to set params on an unused TRE:%d", | ||
649 | tre_ch); | ||
650 | return; | ||
651 | } | ||
652 | |||
653 | tre_local = &tre->tre_local[0]; | ||
654 | tre_local[HIDMA_TRE_LEN_IDX] = len; | ||
655 | tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src); | ||
656 | tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src); | ||
657 | tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest); | ||
658 | tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest); | ||
659 | tre->int_flags = flags; | ||
660 | } | ||
661 | |||
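Taken together with hidma_ll_queue_request() and hidma_ll_start() above, a caller submits a copy in three steps. A hypothetical sequence (the real upper layer lives in hidma.c and is not part of this hunk; the interrupt flags are left at 0 as a placeholder):

static void example_submit_copy(struct hidma_lldev *lldev, u32 tre_ch,
				dma_addr_t src, dma_addr_t dest, u32 len)
{
	hidma_ll_set_transfer_params(lldev, tre_ch, src, dest, len, 0);
	hidma_ll_queue_request(lldev, tre_ch);	/* copy the TRE into the ring */
	hidma_ll_start(lldev);			/* ring the transfer doorbell */
}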
662 | /* | ||
663 | * Called during initialization and after an error condition | ||
664 | * to restore hardware state. | ||
665 | */ | ||
666 | int hidma_ll_setup(struct hidma_lldev *lldev) | ||
667 | { | ||
668 | int rc; | ||
669 | u64 addr; | ||
670 | u32 val; | ||
671 | u32 nr_tres = lldev->nr_tres; | ||
672 | |||
673 | lldev->pending_tre_count = 0; | ||
674 | lldev->tre_processed_off = 0; | ||
675 | lldev->evre_processed_off = 0; | ||
676 | lldev->tre_write_offset = 0; | ||
677 | |||
678 | /* disable interrupts */ | ||
679 | writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
680 | |||
681 | /* clear all pending interrupts */ | ||
682 | val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); | ||
683 | writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
684 | |||
685 | rc = hidma_ll_reset(lldev); | ||
686 | if (rc) | ||
687 | return rc; | ||
688 | |||
689 | /* | ||
690 | * Clear all pending interrupts again. | ||
691 | * Otherwise, we observe reset complete interrupts. | ||
692 | */ | ||
693 | val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); | ||
694 | writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
695 | |||
696 | /* disable interrupts again after reset */ | ||
697 | writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
698 | |||
699 | addr = lldev->tre_dma; | ||
700 | writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG); | ||
701 | writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG); | ||
702 | writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG); | ||
703 | |||
704 | addr = lldev->evre_dma; | ||
705 | writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG); | ||
706 | writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG); | ||
707 | writel(HIDMA_EVRE_SIZE * nr_tres, | ||
708 | lldev->evca + HIDMA_EVCA_RING_LEN_REG); | ||
709 | |||
710 | /* support IRQ only for now */ | ||
711 | val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG); | ||
712 | val &= ~0xF; | ||
713 | val |= 0x1; | ||
714 | writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG); | ||
715 | |||
716 | /* clear all pending interrupts and enable them */ | ||
717 | writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
718 | writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
719 | |||
720 | return hidma_ll_enable(lldev); | ||
721 | } | ||
722 | |||
723 | struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres, | ||
724 | void __iomem *trca, void __iomem *evca, | ||
725 | u8 chidx) | ||
726 | { | ||
727 | u32 required_bytes; | ||
728 | struct hidma_lldev *lldev; | ||
729 | int rc; | ||
730 | size_t sz; | ||
731 | |||
732 | if (!trca || !evca || !dev || !nr_tres) | ||
733 | return NULL; | ||
734 | |||
735 | /* need at least four TREs */ | ||
736 | if (nr_tres < 4) | ||
737 | return NULL; | ||
738 | |||
739 | /* need an extra space */ | ||
740 | nr_tres += 1; | ||
741 | |||
742 | lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL); | ||
743 | if (!lldev) | ||
744 | return NULL; | ||
745 | |||
746 | lldev->evca = evca; | ||
747 | lldev->trca = trca; | ||
748 | lldev->dev = dev; | ||
749 | sz = sizeof(struct hidma_tre); | ||
750 | lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL); | ||
751 | if (!lldev->trepool) | ||
752 | return NULL; | ||
753 | |||
754 | required_bytes = sizeof(lldev->pending_tre_list[0]); | ||
755 | lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes, | ||
756 | GFP_KERNEL); | ||
757 | if (!lldev->pending_tre_list) | ||
758 | return NULL; | ||
759 | |||
760 | sz = (HIDMA_TRE_SIZE + 1) * nr_tres; | ||
761 | lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma, | ||
762 | GFP_KERNEL); | ||
763 | if (!lldev->tre_ring) | ||
764 | return NULL; | ||
765 | |||
766 | memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres); | ||
767 | lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres; | ||
768 | lldev->nr_tres = nr_tres; | ||
769 | |||
770 | /* the TRE ring has to be TRE_SIZE aligned */ | ||
771 | if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) { | ||
772 | u8 tre_ring_shift; | ||
773 | |||
774 | tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE; | ||
775 | tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift; | ||
776 | lldev->tre_dma += tre_ring_shift; | ||
777 | lldev->tre_ring += tre_ring_shift; | ||
778 | } | ||
779 | |||
780 | sz = (HIDMA_EVRE_SIZE + 1) * nr_tres; | ||
781 | lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma, | ||
782 | GFP_KERNEL); | ||
783 | if (!lldev->evre_ring) | ||
784 | return NULL; | ||
785 | |||
786 | memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres); | ||
787 | lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres; | ||
788 | |||
789 | /* the EVRE ring has to be EVRE_SIZE aligned */ | ||
790 | if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) { | ||
791 | u8 evre_ring_shift; | ||
792 | |||
793 | evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE; | ||
794 | evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift; | ||
795 | lldev->evre_dma += evre_ring_shift; | ||
796 | lldev->evre_ring += evre_ring_shift; | ||
797 | } | ||
798 | lldev->nr_tres = nr_tres; | ||
799 | lldev->chidx = chidx; | ||
800 | |||
801 | sz = nr_tres * sizeof(struct hidma_tre *); | ||
802 | rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL); | ||
803 | if (rc) | ||
804 | return NULL; | ||
805 | |||
806 | rc = hidma_ll_setup(lldev); | ||
807 | if (rc) | ||
808 | return NULL; | ||
809 | |||
810 | spin_lock_init(&lldev->lock); | ||
811 | tasklet_init(&lldev->rst_task, hidma_ll_abort, (unsigned long)lldev); | ||
812 | tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev); | ||
813 | lldev->initialized = 1; | ||
814 | writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
815 | return lldev; | ||
816 | } | ||
817 | |||
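Both rings above are over-allocated so the driver can round the returned DMA address up to the element size by hand when it is not already aligned. In isolation the idiom looks roughly like this (a sketch, not this driver's code; alloc_aligned_ring is a made-up name):

static void *alloc_aligned_ring(struct device *dev, size_t size, size_t align,
				dma_addr_t *dma)
{
	void *virt = dmam_alloc_coherent(dev, size + align, dma, GFP_KERNEL);

	if (virt && !IS_ALIGNED(*dma, align)) {
		size_t shift = align - (*dma % align);

		*dma += shift;		/* aligned bus address handed to the HW */
		virt += shift;		/* matching CPU pointer */
	}

	return virt;
}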
818 | int hidma_ll_uninit(struct hidma_lldev *lldev) | ||
819 | { | ||
820 | u32 required_bytes; | ||
821 | int rc = 0; | ||
822 | u32 val; | ||
823 | |||
824 | if (!lldev) | ||
825 | return -ENODEV; | ||
826 | |||
827 | if (!lldev->initialized) | ||
828 | return 0; | ||
829 | |||
830 | lldev->initialized = 0; | ||
831 | |||
832 | required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres; | ||
833 | tasklet_kill(&lldev->task); | ||
834 | memset(lldev->trepool, 0, required_bytes); | ||
835 | lldev->trepool = NULL; | ||
836 | lldev->pending_tre_count = 0; | ||
837 | lldev->tre_write_offset = 0; | ||
838 | |||
839 | rc = hidma_ll_reset(lldev); | ||
840 | |||
841 | /* | ||
842 | * Clear all pending interrupts again. | ||
843 | * Otherwise, we observe reset complete interrupts. | ||
844 | */ | ||
845 | val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); | ||
846 | writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
847 | writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
848 | return rc; | ||
849 | } | ||
850 | |||
851 | enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch) | ||
852 | { | ||
853 | enum dma_status ret = DMA_ERROR; | ||
854 | struct hidma_tre *tre; | ||
855 | unsigned long flags; | ||
856 | u8 err_code; | ||
857 | |||
858 | spin_lock_irqsave(&lldev->lock, flags); | ||
859 | |||
860 | tre = &lldev->trepool[tre_ch]; | ||
861 | err_code = tre->err_code; | ||
862 | |||
863 | if (err_code & HIDMA_EVRE_STATUS_COMPLETE) | ||
864 | ret = DMA_COMPLETE; | ||
865 | else if (err_code & HIDMA_EVRE_STATUS_ERROR) | ||
866 | ret = DMA_ERROR; | ||
867 | else | ||
868 | ret = DMA_IN_PROGRESS; | ||
869 | spin_unlock_irqrestore(&lldev->lock, flags); | ||
870 | |||
871 | return ret; | ||
872 | } | ||
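From a dmaengine client's point of view, the status computed here surfaces through the usual cookie query. A hypothetical client-side check (chan and cookie are whatever the client obtained when submitting):

static bool example_transfer_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	return dmaengine_tx_status(chan, cookie, &state) == DMA_COMPLETE;
}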
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c index ef491b893f40..c0e365321310 100644 --- a/drivers/dma/qcom/hidma_mgmt.c +++ b/drivers/dma/qcom/hidma_mgmt.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Qualcomm Technologies HIDMA DMA engine Management interface | 2 | * Qualcomm Technologies HIDMA DMA engine Management interface |
3 | * | 3 | * |
4 | * Copyright (c) 2015, The Linux Foundation. All rights reserved. | 4 | * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 and | 7 | * it under the terms of the GNU General Public License version 2 and |
@@ -17,13 +17,14 @@ | |||
17 | #include <linux/acpi.h> | 17 | #include <linux/acpi.h> |
18 | #include <linux/of.h> | 18 | #include <linux/of.h> |
19 | #include <linux/property.h> | 19 | #include <linux/property.h> |
20 | #include <linux/interrupt.h> | 20 | #include <linux/of_irq.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/of_platform.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/pm_runtime.h> | 25 | #include <linux/pm_runtime.h> |
26 | #include <linux/bitops.h> | 26 | #include <linux/bitops.h> |
27 | #include <linux/dma-mapping.h> | ||
27 | 28 | ||
28 | #include "hidma_mgmt.h" | 29 | #include "hidma_mgmt.h" |
29 | 30 | ||
@@ -298,5 +299,109 @@ static struct platform_driver hidma_mgmt_driver = { | |||
298 | }, | 299 | }, |
299 | }; | 300 | }; |
300 | 301 | ||
301 | module_platform_driver(hidma_mgmt_driver); | 302 | #if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) |
303 | static int object_counter; | ||
304 | |||
305 | static int __init hidma_mgmt_of_populate_channels(struct device_node *np) | ||
306 | { | ||
307 | struct platform_device *pdev_parent = of_find_device_by_node(np); | ||
308 | struct platform_device_info pdevinfo; | ||
309 | struct of_phandle_args out_irq; | ||
310 | struct device_node *child; | ||
311 | struct resource *res; | ||
312 | const __be32 *cell; | ||
313 | int ret = 0, size, i, num; | ||
314 | u64 addr, addr_size; | ||
315 | |||
316 | for_each_available_child_of_node(np, child) { | ||
317 | struct resource *res_iter; | ||
318 | struct platform_device *new_pdev; | ||
319 | |||
320 | cell = of_get_property(child, "reg", &size); | ||
321 | if (!cell) { | ||
322 | ret = -EINVAL; | ||
323 | goto out; | ||
324 | } | ||
325 | |||
326 | size /= sizeof(*cell); | ||
327 | num = size / | ||
328 | (of_n_addr_cells(child) + of_n_size_cells(child)) + 1; | ||
329 | |||
330 | /* allocate a resource array */ | ||
331 | res = kcalloc(num, sizeof(*res), GFP_KERNEL); | ||
332 | if (!res) { | ||
333 | ret = -ENOMEM; | ||
334 | goto out; | ||
335 | } | ||
336 | |||
337 | /* read each reg value */ | ||
338 | i = 0; | ||
339 | res_iter = res; | ||
340 | while (i < size) { | ||
341 | addr = of_read_number(&cell[i], | ||
342 | of_n_addr_cells(child)); | ||
343 | i += of_n_addr_cells(child); | ||
344 | |||
345 | addr_size = of_read_number(&cell[i], | ||
346 | of_n_size_cells(child)); | ||
347 | i += of_n_size_cells(child); | ||
348 | |||
349 | res_iter->start = addr; | ||
350 | res_iter->end = res_iter->start + addr_size - 1; | ||
351 | res_iter->flags = IORESOURCE_MEM; | ||
352 | res_iter++; | ||
353 | } | ||
354 | |||
355 | ret = of_irq_parse_one(child, 0, &out_irq); | ||
356 | if (ret) | ||
357 | goto out; | ||
358 | |||
359 | res_iter->start = irq_create_of_mapping(&out_irq); | ||
360 | res_iter->name = "hidma event irq"; | ||
361 | res_iter->flags = IORESOURCE_IRQ; | ||
362 | |||
363 | memset(&pdevinfo, 0, sizeof(pdevinfo)); | ||
364 | pdevinfo.fwnode = &child->fwnode; | ||
365 | pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL; | ||
366 | pdevinfo.name = child->name; | ||
367 | pdevinfo.id = object_counter++; | ||
368 | pdevinfo.res = res; | ||
369 | pdevinfo.num_res = num; | ||
370 | pdevinfo.data = NULL; | ||
371 | pdevinfo.size_data = 0; | ||
372 | pdevinfo.dma_mask = DMA_BIT_MASK(64); | ||
373 | new_pdev = platform_device_register_full(&pdevinfo); | ||
374 | if (!new_pdev) { | ||
375 | ret = -ENODEV; | ||
376 | goto out; | ||
377 | } | ||
378 | of_dma_configure(&new_pdev->dev, child); | ||
379 | |||
380 | kfree(res); | ||
381 | res = NULL; | ||
382 | } | ||
383 | out: | ||
384 | kfree(res); | ||
385 | |||
386 | return ret; | ||
387 | } | ||
388 | #endif | ||
389 | |||
390 | static int __init hidma_mgmt_init(void) | ||
391 | { | ||
392 | #if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) | ||
393 | struct device_node *child; | ||
394 | |||
395 | for (child = of_find_matching_node(NULL, hidma_mgmt_match); child; | ||
396 | child = of_find_matching_node(child, hidma_mgmt_match)) { | ||
397 | /* device-tree based firmware: populate the HIDMA channel devices */ | ||
398 | hidma_mgmt_of_populate_channels(child); | ||
399 | of_node_put(child); | ||
400 | } | ||
401 | #endif | ||
402 | platform_driver_register(&hidma_mgmt_driver); | ||
403 | |||
404 | return 0; | ||
405 | } | ||
406 | module_init(hidma_mgmt_init); | ||
302 | MODULE_LICENSE("GPL v2"); | 407 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 2db12e493c53..5065ca43face 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c | |||
@@ -146,6 +146,8 @@ struct sun6i_vchan { | |||
146 | struct dma_slave_config cfg; | 146 | struct dma_slave_config cfg; |
147 | struct sun6i_pchan *phy; | 147 | struct sun6i_pchan *phy; |
148 | u8 port; | 148 | u8 port; |
149 | u8 irq_type; | ||
150 | bool cyclic; | ||
149 | }; | 151 | }; |
150 | 152 | ||
151 | struct sun6i_dma_dev { | 153 | struct sun6i_dma_dev { |
@@ -254,6 +256,30 @@ static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width) | |||
254 | return addr_width >> 1; | 256 | return addr_width >> 1; |
255 | } | 257 | } |
256 | 258 | ||
259 | static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan) | ||
260 | { | ||
261 | struct sun6i_desc *txd = pchan->desc; | ||
262 | struct sun6i_dma_lli *lli; | ||
263 | size_t bytes; | ||
264 | dma_addr_t pos; | ||
265 | |||
266 | pos = readl(pchan->base + DMA_CHAN_LLI_ADDR); | ||
267 | bytes = readl(pchan->base + DMA_CHAN_CUR_CNT); | ||
268 | |||
269 | if (pos == LLI_LAST_ITEM) | ||
270 | return bytes; | ||
271 | |||
272 | for (lli = txd->v_lli; lli; lli = lli->v_lli_next) { | ||
273 | if (lli->p_lli_next == pos) { | ||
274 | for (lli = lli->v_lli_next; lli; lli = lli->v_lli_next) | ||
275 | bytes += lli->len; | ||
276 | break; | ||
277 | } | ||
278 | } | ||
279 | |||
280 | return bytes; | ||
281 | } | ||
282 | |||
257 | static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, | 283 | static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, |
258 | struct sun6i_dma_lli *next, | 284 | struct sun6i_dma_lli *next, |
259 | dma_addr_t next_phy, | 285 | dma_addr_t next_phy, |
@@ -276,45 +302,6 @@ static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, | |||
276 | return next; | 302 | return next; |
277 | } | 303 | } |
278 | 304 | ||
279 | static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli, | ||
280 | dma_addr_t src, | ||
281 | dma_addr_t dst, u32 len, | ||
282 | struct dma_slave_config *config) | ||
283 | { | ||
284 | u8 src_width, dst_width, src_burst, dst_burst; | ||
285 | |||
286 | if (!config) | ||
287 | return -EINVAL; | ||
288 | |||
289 | src_burst = convert_burst(config->src_maxburst); | ||
290 | if (src_burst) | ||
291 | return src_burst; | ||
292 | |||
293 | dst_burst = convert_burst(config->dst_maxburst); | ||
294 | if (dst_burst) | ||
295 | return dst_burst; | ||
296 | |||
297 | src_width = convert_buswidth(config->src_addr_width); | ||
298 | if (src_width) | ||
299 | return src_width; | ||
300 | |||
301 | dst_width = convert_buswidth(config->dst_addr_width); | ||
302 | if (dst_width) | ||
303 | return dst_width; | ||
304 | |||
305 | lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) | | ||
306 | DMA_CHAN_CFG_SRC_WIDTH(src_width) | | ||
307 | DMA_CHAN_CFG_DST_BURST(dst_burst) | | ||
308 | DMA_CHAN_CFG_DST_WIDTH(dst_width); | ||
309 | |||
310 | lli->src = src; | ||
311 | lli->dst = dst; | ||
312 | lli->len = len; | ||
313 | lli->para = NORMAL_WAIT; | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan, | 305 | static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan, |
319 | struct sun6i_dma_lli *lli) | 306 | struct sun6i_dma_lli *lli) |
320 | { | 307 | { |
@@ -381,9 +368,13 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) | |||
381 | irq_reg = pchan->idx / DMA_IRQ_CHAN_NR; | 368 | irq_reg = pchan->idx / DMA_IRQ_CHAN_NR; |
382 | irq_offset = pchan->idx % DMA_IRQ_CHAN_NR; | 369 | irq_offset = pchan->idx % DMA_IRQ_CHAN_NR; |
383 | 370 | ||
384 | irq_val = readl(sdev->base + DMA_IRQ_EN(irq_offset)); | 371 | vchan->irq_type = vchan->cyclic ? DMA_IRQ_PKG : DMA_IRQ_QUEUE; |
385 | irq_val |= DMA_IRQ_QUEUE << (irq_offset * DMA_IRQ_CHAN_WIDTH); | 372 | |
386 | writel(irq_val, sdev->base + DMA_IRQ_EN(irq_offset)); | 373 | irq_val = readl(sdev->base + DMA_IRQ_EN(irq_reg)); |
374 | irq_val &= ~((DMA_IRQ_HALF | DMA_IRQ_PKG | DMA_IRQ_QUEUE) << | ||
375 | (irq_offset * DMA_IRQ_CHAN_WIDTH)); | ||
376 | irq_val |= vchan->irq_type << (irq_offset * DMA_IRQ_CHAN_WIDTH); | ||
377 | writel(irq_val, sdev->base + DMA_IRQ_EN(irq_reg)); | ||
387 | 378 | ||
388 | writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR); | 379 | writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR); |
389 | writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE); | 380 | writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE); |
@@ -479,11 +470,12 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id) | |||
479 | writel(status, sdev->base + DMA_IRQ_STAT(i)); | 470 | writel(status, sdev->base + DMA_IRQ_STAT(i)); |
480 | 471 | ||
481 | for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) { | 472 | for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) { |
482 | if (status & DMA_IRQ_QUEUE) { | 473 | pchan = sdev->pchans + j; |
483 | pchan = sdev->pchans + j; | 474 | vchan = pchan->vchan; |
484 | vchan = pchan->vchan; | 475 | if (vchan && (status & vchan->irq_type)) { |
485 | 476 | if (vchan->cyclic) { | |
486 | if (vchan) { | 477 | vchan_cyclic_callback(&pchan->desc->vd); |
478 | } else { | ||
487 | spin_lock(&vchan->vc.lock); | 479 | spin_lock(&vchan->vc.lock); |
488 | vchan_cookie_complete(&pchan->desc->vd); | 480 | vchan_cookie_complete(&pchan->desc->vd); |
489 | pchan->done = pchan->desc; | 481 | pchan->done = pchan->desc; |
@@ -502,6 +494,55 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id) | |||
502 | return ret; | 494 | return ret; |
503 | } | 495 | } |
504 | 496 | ||
497 | static int set_config(struct sun6i_dma_dev *sdev, | ||
498 | struct dma_slave_config *sconfig, | ||
499 | enum dma_transfer_direction direction, | ||
500 | u32 *p_cfg) | ||
501 | { | ||
502 | s8 src_width, dst_width, src_burst, dst_burst; | ||
503 | |||
504 | switch (direction) { | ||
505 | case DMA_MEM_TO_DEV: | ||
506 | src_burst = convert_burst(sconfig->src_maxburst ? | ||
507 | sconfig->src_maxburst : 8); | ||
508 | src_width = convert_buswidth(sconfig->src_addr_width != | ||
509 | DMA_SLAVE_BUSWIDTH_UNDEFINED ? | ||
510 | sconfig->src_addr_width : | ||
511 | DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
512 | dst_burst = convert_burst(sconfig->dst_maxburst); | ||
513 | dst_width = convert_buswidth(sconfig->dst_addr_width); | ||
514 | break; | ||
515 | case DMA_DEV_TO_MEM: | ||
516 | src_burst = convert_burst(sconfig->src_maxburst); | ||
517 | src_width = convert_buswidth(sconfig->src_addr_width); | ||
518 | dst_burst = convert_burst(sconfig->dst_maxburst ? | ||
519 | sconfig->dst_maxburst : 8); | ||
520 | dst_width = convert_buswidth(sconfig->dst_addr_width != | ||
521 | DMA_SLAVE_BUSWIDTH_UNDEFINED ? | ||
522 | sconfig->dst_addr_width : | ||
523 | DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
524 | break; | ||
525 | default: | ||
526 | return -EINVAL; | ||
527 | } | ||
528 | |||
529 | if (src_burst < 0) | ||
530 | return src_burst; | ||
531 | if (src_width < 0) | ||
532 | return src_width; | ||
533 | if (dst_burst < 0) | ||
534 | return dst_burst; | ||
535 | if (dst_width < 0) | ||
536 | return dst_width; | ||
537 | |||
538 | *p_cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) | | ||
539 | DMA_CHAN_CFG_SRC_WIDTH(src_width) | | ||
540 | DMA_CHAN_CFG_DST_BURST(dst_burst) | | ||
541 | DMA_CHAN_CFG_DST_WIDTH(dst_width); | ||
542 | |||
543 | return 0; | ||
544 | } | ||
545 | |||
505 | static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( | 546 | static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( |
506 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | 547 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
507 | size_t len, unsigned long flags) | 548 | size_t len, unsigned long flags) |
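set_config() consumes the dma_slave_config that the client supplied earlier; for the memory-side parameters it falls back to a burst of 8 and a 4-byte width when the client left them unset. A hypothetical device-to-memory configuration (fifo_addr stands in for the peripheral's FIFO bus address):

static int example_configure_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,
	};

	return dmaengine_slave_config(chan, &cfg);
}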
@@ -569,13 +610,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( | |||
569 | struct sun6i_desc *txd; | 610 | struct sun6i_desc *txd; |
570 | struct scatterlist *sg; | 611 | struct scatterlist *sg; |
571 | dma_addr_t p_lli; | 612 | dma_addr_t p_lli; |
613 | u32 lli_cfg; | ||
572 | int i, ret; | 614 | int i, ret; |
573 | 615 | ||
574 | if (!sgl) | 616 | if (!sgl) |
575 | return NULL; | 617 | return NULL; |
576 | 618 | ||
577 | if (!is_slave_direction(dir)) { | 619 | ret = set_config(sdev, sconfig, dir, &lli_cfg); |
578 | dev_err(chan2dev(chan), "Invalid DMA direction\n"); | 620 | if (ret) { |
621 | dev_err(chan2dev(chan), "Invalid DMA configuration\n"); | ||
579 | return NULL; | 622 | return NULL; |
580 | } | 623 | } |
581 | 624 | ||
@@ -588,14 +631,14 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( | |||
588 | if (!v_lli) | 631 | if (!v_lli) |
589 | goto err_lli_free; | 632 | goto err_lli_free; |
590 | 633 | ||
591 | if (dir == DMA_MEM_TO_DEV) { | 634 | v_lli->len = sg_dma_len(sg); |
592 | ret = sun6i_dma_cfg_lli(v_lli, sg_dma_address(sg), | 635 | v_lli->para = NORMAL_WAIT; |
593 | sconfig->dst_addr, sg_dma_len(sg), | ||
594 | sconfig); | ||
595 | if (ret) | ||
596 | goto err_cur_lli_free; | ||
597 | 636 | ||
598 | v_lli->cfg |= DMA_CHAN_CFG_DST_IO_MODE | | 637 | if (dir == DMA_MEM_TO_DEV) { |
638 | v_lli->src = sg_dma_address(sg); | ||
639 | v_lli->dst = sconfig->dst_addr; | ||
640 | v_lli->cfg = lli_cfg | | ||
641 | DMA_CHAN_CFG_DST_IO_MODE | | ||
599 | DMA_CHAN_CFG_SRC_LINEAR_MODE | | 642 | DMA_CHAN_CFG_SRC_LINEAR_MODE | |
600 | DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | | 643 | DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | |
601 | DMA_CHAN_CFG_DST_DRQ(vchan->port); | 644 | DMA_CHAN_CFG_DST_DRQ(vchan->port); |
@@ -607,13 +650,10 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( | |||
607 | sg_dma_len(sg), flags); | 650 | sg_dma_len(sg), flags); |
608 | 651 | ||
609 | } else { | 652 | } else { |
610 | ret = sun6i_dma_cfg_lli(v_lli, sconfig->src_addr, | 653 | v_lli->src = sconfig->src_addr; |
611 | sg_dma_address(sg), sg_dma_len(sg), | 654 | v_lli->dst = sg_dma_address(sg); |
612 | sconfig); | 655 | v_lli->cfg = lli_cfg | |
613 | if (ret) | 656 | DMA_CHAN_CFG_DST_LINEAR_MODE | |
614 | goto err_cur_lli_free; | ||
615 | |||
616 | v_lli->cfg |= DMA_CHAN_CFG_DST_LINEAR_MODE | | ||
617 | DMA_CHAN_CFG_SRC_IO_MODE | | 657 | DMA_CHAN_CFG_SRC_IO_MODE | |
618 | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | | 658 | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | |
619 | DMA_CHAN_CFG_SRC_DRQ(vchan->port); | 659 | DMA_CHAN_CFG_SRC_DRQ(vchan->port); |
@@ -634,8 +674,78 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( | |||
634 | 674 | ||
635 | return vchan_tx_prep(&vchan->vc, &txd->vd, flags); | 675 | return vchan_tx_prep(&vchan->vc, &txd->vd, flags); |
636 | 676 | ||
637 | err_cur_lli_free: | 677 | err_lli_free: |
638 | dma_pool_free(sdev->pool, v_lli, p_lli); | 678 | for (prev = txd->v_lli; prev; prev = prev->v_lli_next) |
679 | dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); | ||
680 | kfree(txd); | ||
681 | return NULL; | ||
682 | } | ||
683 | |||
684 | static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( | ||
685 | struct dma_chan *chan, | ||
686 | dma_addr_t buf_addr, | ||
687 | size_t buf_len, | ||
688 | size_t period_len, | ||
689 | enum dma_transfer_direction dir, | ||
690 | unsigned long flags) | ||
691 | { | ||
692 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); | ||
693 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); | ||
694 | struct dma_slave_config *sconfig = &vchan->cfg; | ||
695 | struct sun6i_dma_lli *v_lli, *prev = NULL; | ||
696 | struct sun6i_desc *txd; | ||
697 | dma_addr_t p_lli; | ||
698 | u32 lli_cfg; | ||
699 | unsigned int i, periods = buf_len / period_len; | ||
700 | int ret; | ||
701 | |||
702 | ret = set_config(sdev, sconfig, dir, &lli_cfg); | ||
703 | if (ret) { | ||
704 | dev_err(chan2dev(chan), "Invalid DMA configuration\n"); | ||
705 | return NULL; | ||
706 | } | ||
707 | |||
708 | txd = kzalloc(sizeof(*txd), GFP_NOWAIT); | ||
709 | if (!txd) | ||
710 | return NULL; | ||
711 | |||
712 | for (i = 0; i < periods; i++) { | ||
713 | v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); | ||
714 | if (!v_lli) { | ||
715 | dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); | ||
716 | goto err_lli_free; | ||
717 | } | ||
718 | |||
719 | v_lli->len = period_len; | ||
720 | v_lli->para = NORMAL_WAIT; | ||
721 | |||
722 | if (dir == DMA_MEM_TO_DEV) { | ||
723 | v_lli->src = buf_addr + period_len * i; | ||
724 | v_lli->dst = sconfig->dst_addr; | ||
725 | v_lli->cfg = lli_cfg | | ||
726 | DMA_CHAN_CFG_DST_IO_MODE | | ||
727 | DMA_CHAN_CFG_SRC_LINEAR_MODE | | ||
728 | DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | | ||
729 | DMA_CHAN_CFG_DST_DRQ(vchan->port); | ||
730 | } else { | ||
731 | v_lli->src = sconfig->src_addr; | ||
732 | v_lli->dst = buf_addr + period_len * i; | ||
733 | v_lli->cfg = lli_cfg | | ||
734 | DMA_CHAN_CFG_DST_LINEAR_MODE | | ||
735 | DMA_CHAN_CFG_SRC_IO_MODE | | ||
736 | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | | ||
737 | DMA_CHAN_CFG_SRC_DRQ(vchan->port); | ||
738 | } | ||
739 | |||
740 | prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd); | ||
741 | } | ||
742 | |||
743 | prev->p_lli_next = txd->p_lli; /* cyclic list */ | ||
744 | |||
745 | vchan->cyclic = true; | ||
746 | |||
747 | return vchan_tx_prep(&vchan->vc, &txd->vd, flags); | ||
748 | |||
639 | err_lli_free: | 749 | err_lli_free: |
640 | for (prev = txd->v_lli; prev; prev = prev->v_lli_next) | 750 | for (prev = txd->v_lli; prev; prev = prev->v_lli_next) |
641 | dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); | 751 | dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); |
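With device_prep_dma_cyclic wired up further down, a client can now request a circular (audio-style) transfer through the standard dmaengine API. A hypothetical usage, where the buffer, period length and callback are the client's own:

static int example_start_cyclic_tx(struct dma_chan *chan, dma_addr_t buf_dma,
				   size_t buf_len, size_t period_len,
				   dma_async_tx_callback period_cb, void *ctx)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	desc->callback = period_cb;	/* invoked once per completed period */
	desc->callback_param = ctx;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}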
@@ -712,6 +822,16 @@ static int sun6i_dma_terminate_all(struct dma_chan *chan) | |||
712 | 822 | ||
713 | spin_lock_irqsave(&vchan->vc.lock, flags); | 823 | spin_lock_irqsave(&vchan->vc.lock, flags); |
714 | 824 | ||
825 | if (vchan->cyclic) { | ||
826 | vchan->cyclic = false; | ||
827 | if (pchan && pchan->desc) { | ||
828 | struct virt_dma_desc *vd = &pchan->desc->vd; | ||
829 | struct virt_dma_chan *vc = &vchan->vc; | ||
830 | |||
831 | list_add_tail(&vd->node, &vc->desc_completed); | ||
832 | } | ||
833 | } | ||
834 | |||
715 | vchan_get_all_descriptors(&vchan->vc, &head); | 835 | vchan_get_all_descriptors(&vchan->vc, &head); |
716 | 836 | ||
717 | if (pchan) { | 837 | if (pchan) { |
@@ -759,7 +879,7 @@ static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan, | |||
759 | } else if (!pchan || !pchan->desc) { | 879 | } else if (!pchan || !pchan->desc) { |
760 | bytes = 0; | 880 | bytes = 0; |
761 | } else { | 881 | } else { |
762 | bytes = readl(pchan->base + DMA_CHAN_CUR_CNT); | 882 | bytes = sun6i_get_chan_size(pchan); |
763 | } | 883 | } |
764 | 884 | ||
765 | spin_unlock_irqrestore(&vchan->vc.lock, flags); | 885 | spin_unlock_irqrestore(&vchan->vc.lock, flags); |
@@ -963,6 +1083,7 @@ static int sun6i_dma_probe(struct platform_device *pdev) | |||
963 | dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask); | 1083 | dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask); |
964 | dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask); | 1084 | dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask); |
965 | dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask); | 1085 | dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask); |
1086 | dma_cap_set(DMA_CYCLIC, sdc->slave.cap_mask); | ||
966 | 1087 | ||
967 | INIT_LIST_HEAD(&sdc->slave.channels); | 1088 | INIT_LIST_HEAD(&sdc->slave.channels); |
968 | sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources; | 1089 | sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources; |
@@ -970,6 +1091,7 @@ static int sun6i_dma_probe(struct platform_device *pdev) | |||
970 | sdc->slave.device_issue_pending = sun6i_dma_issue_pending; | 1091 | sdc->slave.device_issue_pending = sun6i_dma_issue_pending; |
971 | sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg; | 1092 | sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg; |
972 | sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; | 1093 | sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; |
1094 | sdc->slave.device_prep_dma_cyclic = sun6i_dma_prep_dma_cyclic; | ||
973 | sdc->slave.copy_align = DMAENGINE_ALIGN_4_BYTES; | 1095 | sdc->slave.copy_align = DMAENGINE_ALIGN_4_BYTES; |
974 | sdc->slave.device_config = sun6i_dma_config; | 1096 | sdc->slave.device_config = sun6i_dma_config; |
975 | sdc->slave.device_pause = sun6i_dma_pause; | 1097 | sdc->slave.device_pause = sun6i_dma_pause; |
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 3871f29e523d..01e316f73559 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #define TEGRA_APBDMA_CSR_ONCE BIT(27) | 54 | #define TEGRA_APBDMA_CSR_ONCE BIT(27) |
55 | #define TEGRA_APBDMA_CSR_FLOW BIT(21) | 55 | #define TEGRA_APBDMA_CSR_FLOW BIT(21) |
56 | #define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT 16 | 56 | #define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT 16 |
57 | #define TEGRA_APBDMA_CSR_REQ_SEL_MASK 0x1F | ||
57 | #define TEGRA_APBDMA_CSR_WCOUNT_MASK 0xFFFC | 58 | #define TEGRA_APBDMA_CSR_WCOUNT_MASK 0xFFFC |
58 | 59 | ||
59 | /* STATUS register */ | 60 | /* STATUS register */ |
@@ -114,6 +115,8 @@ | |||
114 | /* Channel base address offset from APBDMA base address */ | 115 | /* Channel base address offset from APBDMA base address */ |
115 | #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000 | 116 | #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000 |
116 | 117 | ||
118 | #define TEGRA_APBDMA_SLAVE_ID_INVALID (TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1) | ||
119 | |||
117 | struct tegra_dma; | 120 | struct tegra_dma; |
118 | 121 | ||
119 | /* | 122 | /* |
@@ -353,8 +356,11 @@ static int tegra_dma_slave_config(struct dma_chan *dc, | |||
353 | } | 356 | } |
354 | 357 | ||
355 | memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); | 358 | memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); |
356 | if (!tdc->slave_id) | 359 | if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID) { |
360 | if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK) | ||
361 | return -EINVAL; | ||
357 | tdc->slave_id = sconfig->slave_id; | 362 | tdc->slave_id = sconfig->slave_id; |
363 | } | ||
358 | tdc->config_init = true; | 364 | tdc->config_init = true; |
359 | return 0; | 365 | return 0; |
360 | } | 366 | } |
@@ -1236,7 +1242,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) | |||
1236 | } | 1242 | } |
1237 | pm_runtime_put(tdma->dev); | 1243 | pm_runtime_put(tdma->dev); |
1238 | 1244 | ||
1239 | tdc->slave_id = 0; | 1245 | tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID; |
1240 | } | 1246 | } |
1241 | 1247 | ||
1242 | static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec, | 1248 | static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec, |
@@ -1246,6 +1252,11 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec, | |||
1246 | struct dma_chan *chan; | 1252 | struct dma_chan *chan; |
1247 | struct tegra_dma_channel *tdc; | 1253 | struct tegra_dma_channel *tdc; |
1248 | 1254 | ||
1255 | if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) { | ||
1256 | dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]); | ||
1257 | return NULL; | ||
1258 | } | ||
1259 | |||
1249 | chan = dma_get_any_slave_channel(&tdma->dma_dev); | 1260 | chan = dma_get_any_slave_channel(&tdma->dma_dev); |
1250 | if (!chan) | 1261 | if (!chan) |
1251 | return NULL; | 1262 | return NULL; |
@@ -1389,6 +1400,7 @@ static int tegra_dma_probe(struct platform_device *pdev) | |||
1389 | &tdma->dma_dev.channels); | 1400 | &tdma->dma_dev.channels); |
1390 | tdc->tdma = tdma; | 1401 | tdc->tdma = tdma; |
1391 | tdc->id = i; | 1402 | tdc->id = i; |
1403 | tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID; | ||
1392 | 1404 | ||
1393 | tasklet_init(&tdc->tasklet, tegra_dma_tasklet, | 1405 | tasklet_init(&tdc->tasklet, tegra_dma_tasklet, |
1394 | (unsigned long)tdc); | 1406 | (unsigned long)tdc); |
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c new file mode 100644 index 000000000000..c4b121c4559d --- /dev/null +++ b/drivers/dma/tegra210-adma.c | |||
@@ -0,0 +1,840 @@ | |||
1 | /* | ||
2 | * ADMA driver for Nvidia's Tegra210 ADMA controller. | ||
3 | * | ||
4 | * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <linux/clk.h> | ||
20 | #include <linux/iopoll.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/of_device.h> | ||
23 | #include <linux/of_dma.h> | ||
24 | #include <linux/of_irq.h> | ||
25 | #include <linux/pm_clock.h> | ||
26 | #include <linux/pm_runtime.h> | ||
27 | #include <linux/slab.h> | ||
28 | |||
29 | #include "virt-dma.h" | ||
30 | |||
31 | #define ADMA_CH_CMD 0x00 | ||
32 | #define ADMA_CH_STATUS 0x0c | ||
33 | #define ADMA_CH_STATUS_XFER_EN BIT(0) | ||
34 | |||
35 | #define ADMA_CH_INT_STATUS 0x10 | ||
36 | #define ADMA_CH_INT_STATUS_XFER_DONE BIT(0) | ||
37 | |||
38 | #define ADMA_CH_INT_CLEAR 0x1c | ||
39 | #define ADMA_CH_CTRL 0x24 | ||
40 | #define ADMA_CH_CTRL_TX_REQ(val) (((val) & 0xf) << 28) | ||
41 | #define ADMA_CH_CTRL_TX_REQ_MAX 10 | ||
42 | #define ADMA_CH_CTRL_RX_REQ(val) (((val) & 0xf) << 24) | ||
43 | #define ADMA_CH_CTRL_RX_REQ_MAX 10 | ||
44 | #define ADMA_CH_CTRL_DIR(val) (((val) & 0xf) << 12) | ||
45 | #define ADMA_CH_CTRL_DIR_AHUB2MEM 2 | ||
46 | #define ADMA_CH_CTRL_DIR_MEM2AHUB 4 | ||
47 | #define ADMA_CH_CTRL_MODE_CONTINUOUS (2 << 8) | ||
48 | #define ADMA_CH_CTRL_FLOWCTRL_EN BIT(1) | ||
49 | |||
50 | #define ADMA_CH_CONFIG 0x28 | ||
51 | #define ADMA_CH_CONFIG_SRC_BUF(val) (((val) & 0x7) << 28) | ||
52 | #define ADMA_CH_CONFIG_TRG_BUF(val) (((val) & 0x7) << 24) | ||
53 | #define ADMA_CH_CONFIG_BURST_SIZE(val) (((val) & 0x7) << 20) | ||
54 | #define ADMA_CH_CONFIG_BURST_16 5 | ||
55 | #define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf) | ||
56 | #define ADMA_CH_CONFIG_MAX_BUFS 8 | ||
57 | |||
58 | #define ADMA_CH_FIFO_CTRL 0x2c | ||
59 | #define ADMA_CH_FIFO_CTRL_OVRFW_THRES(val) (((val) & 0xf) << 24) | ||
60 | #define ADMA_CH_FIFO_CTRL_STARV_THRES(val) (((val) & 0xf) << 16) | ||
61 | #define ADMA_CH_FIFO_CTRL_TX_SIZE(val) (((val) & 0xf) << 8) | ||
62 | #define ADMA_CH_FIFO_CTRL_RX_SIZE(val) ((val) & 0xf) | ||
63 | |||
64 | #define ADMA_CH_LOWER_SRC_ADDR 0x34 | ||
65 | #define ADMA_CH_LOWER_TRG_ADDR 0x3c | ||
66 | #define ADMA_CH_TC 0x44 | ||
67 | #define ADMA_CH_TC_COUNT_MASK 0x3ffffffc | ||
68 | |||
69 | #define ADMA_CH_XFER_STATUS 0x54 | ||
70 | #define ADMA_CH_XFER_STATUS_COUNT_MASK 0xffff | ||
71 | |||
72 | #define ADMA_GLOBAL_CMD 0xc00 | ||
73 | #define ADMA_GLOBAL_SOFT_RESET 0xc04 | ||
74 | #define ADMA_GLOBAL_INT_CLEAR 0xc20 | ||
75 | #define ADMA_GLOBAL_CTRL 0xc24 | ||
76 | |||
77 | #define ADMA_CH_REG_OFFSET(a) (a * 0x80) | ||
78 | |||
79 | #define ADMA_CH_FIFO_CTRL_DEFAULT (ADMA_CH_FIFO_CTRL_OVRFW_THRES(1) | \ | ||
80 | ADMA_CH_FIFO_CTRL_STARV_THRES(1) | \ | ||
81 | ADMA_CH_FIFO_CTRL_TX_SIZE(3) | \ | ||
82 | ADMA_CH_FIFO_CTRL_RX_SIZE(3)) | ||
83 | struct tegra_adma; | ||
84 | |||
85 | /* | ||
86 | * struct tegra_adma_chip_data - Tegra chip specific data | ||
87 | * @nr_channels: Number of DMA channels available. | ||
88 | */ | ||
89 | struct tegra_adma_chip_data { | ||
90 | int nr_channels; | ||
91 | }; | ||
92 | |||
93 | /* | ||
94 | * struct tegra_adma_chan_regs - Tegra ADMA channel registers | ||
95 | */ | ||
96 | struct tegra_adma_chan_regs { | ||
97 | unsigned int ctrl; | ||
98 | unsigned int config; | ||
99 | unsigned int src_addr; | ||
100 | unsigned int trg_addr; | ||
101 | unsigned int fifo_ctrl; | ||
102 | unsigned int tc; | ||
103 | }; | ||
104 | |||
105 | /* | ||
106 | * struct tegra_adma_desc - Tegra ADMA descriptor to manage transfer requests. | ||
107 | */ | ||
108 | struct tegra_adma_desc { | ||
109 | struct virt_dma_desc vd; | ||
110 | struct tegra_adma_chan_regs ch_regs; | ||
111 | size_t buf_len; | ||
112 | size_t period_len; | ||
113 | size_t num_periods; | ||
114 | }; | ||
115 | |||
116 | /* | ||
117 | * struct tegra_adma_chan - Tegra ADMA channel information | ||
118 | */ | ||
119 | struct tegra_adma_chan { | ||
120 | struct virt_dma_chan vc; | ||
121 | struct tegra_adma_desc *desc; | ||
122 | struct tegra_adma *tdma; | ||
123 | int irq; | ||
124 | void __iomem *chan_addr; | ||
125 | |||
126 | /* Slave channel configuration info */ | ||
127 | struct dma_slave_config sconfig; | ||
128 | enum dma_transfer_direction sreq_dir; | ||
129 | unsigned int sreq_index; | ||
130 | bool sreq_reserved; | ||
131 | |||
132 | /* Transfer count and position info */ | ||
133 | unsigned int tx_buf_count; | ||
134 | unsigned int tx_buf_pos; | ||
135 | }; | ||
136 | |||
137 | /* | ||
138 | * struct tegra_adma - Tegra ADMA controller information | ||
139 | */ | ||
140 | struct tegra_adma { | ||
141 | struct dma_device dma_dev; | ||
142 | struct device *dev; | ||
143 | void __iomem *base_addr; | ||
144 | unsigned int nr_channels; | ||
145 | unsigned long rx_requests_reserved; | ||
146 | unsigned long tx_requests_reserved; | ||
147 | |||
148 | /* Used to store global command register state when suspending */ | ||
149 | unsigned int global_cmd; | ||
150 | |||
151 | /* Last member of the structure */ | ||
152 | struct tegra_adma_chan channels[0]; | ||
153 | }; | ||
154 | |||
155 | static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val) | ||
156 | { | ||
157 | writel(val, tdma->base_addr + reg); | ||
158 | } | ||
159 | |||
160 | static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg) | ||
161 | { | ||
162 | return readl(tdma->base_addr + reg); | ||
163 | } | ||
164 | |||
165 | static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val) | ||
166 | { | ||
167 | writel(val, tdc->chan_addr + reg); | ||
168 | } | ||
169 | |||
170 | static inline u32 tdma_ch_read(struct tegra_adma_chan *tdc, u32 reg) | ||
171 | { | ||
172 | return readl(tdc->chan_addr + reg); | ||
173 | } | ||
174 | |||
175 | static inline struct tegra_adma_chan *to_tegra_adma_chan(struct dma_chan *dc) | ||
176 | { | ||
177 | return container_of(dc, struct tegra_adma_chan, vc.chan); | ||
178 | } | ||
179 | |||
180 | static inline struct tegra_adma_desc *to_tegra_adma_desc( | ||
181 | struct dma_async_tx_descriptor *td) | ||
182 | { | ||
183 | return container_of(td, struct tegra_adma_desc, vd.tx); | ||
184 | } | ||
185 | |||
186 | static inline struct device *tdc2dev(struct tegra_adma_chan *tdc) | ||
187 | { | ||
188 | return tdc->tdma->dev; | ||
189 | } | ||
190 | |||
191 | static void tegra_adma_desc_free(struct virt_dma_desc *vd) | ||
192 | { | ||
193 | kfree(container_of(vd, struct tegra_adma_desc, vd)); | ||
194 | } | ||
195 | |||
196 | static int tegra_adma_slave_config(struct dma_chan *dc, | ||
197 | struct dma_slave_config *sconfig) | ||
198 | { | ||
199 | struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc); | ||
200 | |||
201 | memcpy(&tdc->sconfig, sconfig, sizeof(*sconfig)); | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static int tegra_adma_init(struct tegra_adma *tdma) | ||
207 | { | ||
208 | u32 status; | ||
209 | int ret; | ||
210 | |||
211 | /* Clear any interrupts */ | ||
212 | tdma_write(tdma, ADMA_GLOBAL_INT_CLEAR, 0x1); | ||
213 | |||
214 | /* Assert soft reset */ | ||
215 | tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1); | ||
216 | |||
217 | /* Wait for reset to clear */ | ||
218 | ret = readx_poll_timeout(readl, | ||
219 | tdma->base_addr + ADMA_GLOBAL_SOFT_RESET, | ||
220 | status, status == 0, 20, 10000); | ||
221 | if (ret) | ||
222 | return ret; | ||
223 | |||
224 | /* Enable global ADMA registers */ | ||
225 | tdma_write(tdma, ADMA_GLOBAL_CMD, 1); | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc, | ||
231 | enum dma_transfer_direction direction) | ||
232 | { | ||
233 | struct tegra_adma *tdma = tdc->tdma; | ||
234 | unsigned int sreq_index = tdc->sreq_index; | ||
235 | |||
236 | if (tdc->sreq_reserved) | ||
237 | return tdc->sreq_dir == direction ? 0 : -EINVAL; | ||
238 | |||
239 | switch (direction) { | ||
240 | case DMA_MEM_TO_DEV: | ||
241 | if (sreq_index > ADMA_CH_CTRL_TX_REQ_MAX) { | ||
242 | dev_err(tdma->dev, "invalid DMA request\n"); | ||
243 | return -EINVAL; | ||
244 | } | ||
245 | |||
246 | if (test_and_set_bit(sreq_index, &tdma->tx_requests_reserved)) { | ||
247 | dev_err(tdma->dev, "DMA request reserved\n"); | ||
248 | return -EINVAL; | ||
249 | } | ||
250 | break; | ||
251 | |||
252 | case DMA_DEV_TO_MEM: | ||
253 | if (sreq_index > ADMA_CH_CTRL_RX_REQ_MAX) { | ||
254 | dev_err(tdma->dev, "invalid DMA request\n"); | ||
255 | return -EINVAL; | ||
256 | } | ||
257 | |||
258 | if (test_and_set_bit(sreq_index, &tdma->rx_requests_reserved)) { | ||
259 | dev_err(tdma->dev, "DMA request reserved\n"); | ||
260 | return -EINVAL; | ||
261 | } | ||
262 | break; | ||
263 | |||
264 | default: | ||
265 | dev_WARN(tdma->dev, "channel %s has invalid transfer type\n", | ||
266 | dma_chan_name(&tdc->vc.chan)); | ||
267 | return -EINVAL; | ||
268 | } | ||
269 | |||
270 | tdc->sreq_dir = direction; | ||
271 | tdc->sreq_reserved = true; | ||
272 | |||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static void tegra_adma_request_free(struct tegra_adma_chan *tdc) | ||
277 | { | ||
278 | struct tegra_adma *tdma = tdc->tdma; | ||
279 | |||
280 | if (!tdc->sreq_reserved) | ||
281 | return; | ||
282 | |||
283 | switch (tdc->sreq_dir) { | ||
284 | case DMA_MEM_TO_DEV: | ||
285 | clear_bit(tdc->sreq_index, &tdma->tx_requests_reserved); | ||
286 | break; | ||
287 | |||
288 | case DMA_DEV_TO_MEM: | ||
289 | clear_bit(tdc->sreq_index, &tdma->rx_requests_reserved); | ||
290 | break; | ||
291 | |||
292 | default: | ||
293 | dev_WARN(tdma->dev, "channel %s has invalid transfer type\n", | ||
294 | dma_chan_name(&tdc->vc.chan)); | ||
295 | return; | ||
296 | } | ||
297 | |||
298 | tdc->sreq_reserved = false; | ||
299 | } | ||
300 | |||
301 | static u32 tegra_adma_irq_status(struct tegra_adma_chan *tdc) | ||
302 | { | ||
303 | u32 status = tdma_ch_read(tdc, ADMA_CH_INT_STATUS); | ||
304 | |||
305 | return status & ADMA_CH_INT_STATUS_XFER_DONE; | ||
306 | } | ||
307 | |||
308 | static u32 tegra_adma_irq_clear(struct tegra_adma_chan *tdc) | ||
309 | { | ||
310 | u32 status = tegra_adma_irq_status(tdc); | ||
311 | |||
312 | if (status) | ||
313 | tdma_ch_write(tdc, ADMA_CH_INT_CLEAR, status); | ||
314 | |||
315 | return status; | ||
316 | } | ||
317 | |||
318 | static void tegra_adma_stop(struct tegra_adma_chan *tdc) | ||
319 | { | ||
320 | unsigned int status; | ||
321 | |||
322 | /* Disable ADMA */ | ||
323 | tdma_ch_write(tdc, ADMA_CH_CMD, 0); | ||
324 | |||
325 | /* Clear interrupt status */ | ||
326 | tegra_adma_irq_clear(tdc); | ||
327 | |||
328 | if (readx_poll_timeout_atomic(readl, tdc->chan_addr + ADMA_CH_STATUS, | ||
329 | status, !(status & ADMA_CH_STATUS_XFER_EN), | ||
330 | 20, 10000)) { | ||
331 | dev_err(tdc2dev(tdc), "unable to stop DMA channel\n"); | ||
332 | return; | ||
333 | } | ||
334 | |||
335 | kfree(tdc->desc); | ||
336 | tdc->desc = NULL; | ||
337 | } | ||
338 | |||
339 | static void tegra_adma_start(struct tegra_adma_chan *tdc) | ||
340 | { | ||
341 | struct virt_dma_desc *vd = vchan_next_desc(&tdc->vc); | ||
342 | struct tegra_adma_chan_regs *ch_regs; | ||
343 | struct tegra_adma_desc *desc; | ||
344 | |||
345 | if (!vd) | ||
346 | return; | ||
347 | |||
348 | list_del(&vd->node); | ||
349 | |||
350 | desc = to_tegra_adma_desc(&vd->tx); | ||
351 | |||
352 | if (!desc) { | ||
353 | dev_warn(tdc2dev(tdc), "unable to start DMA, no descriptor\n"); | ||
354 | return; | ||
355 | } | ||
356 | |||
357 | ch_regs = &desc->ch_regs; | ||
358 | |||
359 | tdc->tx_buf_pos = 0; | ||
360 | tdc->tx_buf_count = 0; | ||
361 | tdma_ch_write(tdc, ADMA_CH_TC, ch_regs->tc); | ||
362 | tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl); | ||
363 | tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_regs->src_addr); | ||
364 | tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_regs->trg_addr); | ||
365 | tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl); | ||
366 | tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_regs->config); | ||
367 | |||
368 | /* Start ADMA */ | ||
369 | tdma_ch_write(tdc, ADMA_CH_CMD, 1); | ||
370 | |||
371 | tdc->desc = desc; | ||
372 | } | ||
373 | |||
374 | static unsigned int tegra_adma_get_residue(struct tegra_adma_chan *tdc) | ||
375 | { | ||
376 | struct tegra_adma_desc *desc = tdc->desc; | ||
377 | unsigned int max = ADMA_CH_XFER_STATUS_COUNT_MASK + 1; | ||
378 | unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS); | ||
379 | unsigned int periods_remaining; | ||
380 | |||
381 | /* | ||
382 | * Handle wrap around of buffer count register | ||
383 | */ | ||
384 | if (pos < tdc->tx_buf_pos) | ||
385 | tdc->tx_buf_count += pos + (max - tdc->tx_buf_pos); | ||
386 | else | ||
387 | tdc->tx_buf_count += pos - tdc->tx_buf_pos; | ||
388 | |||
389 | periods_remaining = tdc->tx_buf_count % desc->num_periods; | ||
390 | tdc->tx_buf_pos = pos; | ||
391 | |||
392 | return desc->buf_len - (periods_remaining * desc->period_len); | ||
393 | } | ||
394 | |||
395 | static irqreturn_t tegra_adma_isr(int irq, void *dev_id) | ||
396 | { | ||
397 | struct tegra_adma_chan *tdc = dev_id; | ||
398 | unsigned long status; | ||
399 | unsigned long flags; | ||
400 | |||
401 | spin_lock_irqsave(&tdc->vc.lock, flags); | ||
402 | |||
403 | status = tegra_adma_irq_clear(tdc); | ||
404 | if (status == 0 || !tdc->desc) { | ||
405 | spin_unlock_irqrestore(&tdc->vc.lock, flags); | ||
406 | return IRQ_NONE; | ||
407 | } | ||
408 | |||
409 | vchan_cyclic_callback(&tdc->desc->vd); | ||
410 | |||
411 | spin_unlock_irqrestore(&tdc->vc.lock, flags); | ||
412 | |||
413 | return IRQ_HANDLED; | ||
414 | } | ||
415 | |||
416 | static void tegra_adma_issue_pending(struct dma_chan *dc) | ||
417 | { | ||
418 | struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc); | ||
419 | unsigned long flags; | ||
420 | |||
421 | spin_lock_irqsave(&tdc->vc.lock, flags); | ||
422 | |||
423 | if (vchan_issue_pending(&tdc->vc)) { | ||
424 | if (!tdc->desc) | ||
425 | tegra_adma_start(tdc); | ||
426 | } | ||
427 | |||
428 | spin_unlock_irqrestore(&tdc->vc.lock, flags); | ||
429 | } | ||
430 | |||
431 | static int tegra_adma_terminate_all(struct dma_chan *dc) | ||
432 | { | ||
433 | struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc); | ||
434 | unsigned long flags; | ||
435 | LIST_HEAD(head); | ||
436 | |||
437 | spin_lock_irqsave(&tdc->vc.lock, flags); | ||
438 | |||
439 | if (tdc->desc) | ||
440 | tegra_adma_stop(tdc); | ||
441 | |||
442 | tegra_adma_request_free(tdc); | ||
443 | vchan_get_all_descriptors(&tdc->vc, &head); | ||
444 | spin_unlock_irqrestore(&tdc->vc.lock, flags); | ||
445 | vchan_dma_desc_free_list(&tdc->vc, &head); | ||
446 | |||
447 | return 0; | ||
448 | } | ||
449 | |||
450 | static enum dma_status tegra_adma_tx_status(struct dma_chan *dc, | ||
451 | dma_cookie_t cookie, | ||
452 | struct dma_tx_state *txstate) | ||
453 | { | ||
454 | struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc); | ||
455 | struct tegra_adma_desc *desc; | ||
456 | struct virt_dma_desc *vd; | ||
457 | enum dma_status ret; | ||
458 | unsigned long flags; | ||
459 | unsigned int residual; | ||
460 | |||
461 | ret = dma_cookie_status(dc, cookie, txstate); | ||
462 | if (ret == DMA_COMPLETE || !txstate) | ||
463 | return ret; | ||
464 | |||
465 | spin_lock_irqsave(&tdc->vc.lock, flags); | ||
466 | |||
467 | vd = vchan_find_desc(&tdc->vc, cookie); | ||
468 | if (vd) { | ||
469 | desc = to_tegra_adma_desc(&vd->tx); | ||
470 | residual = desc->ch_regs.tc; | ||
471 | } else if (tdc->desc && tdc->desc->vd.tx.cookie == cookie) { | ||
472 | residual = tegra_adma_get_residue(tdc); | ||
473 | } else { | ||
474 | residual = 0; | ||
475 | } | ||
476 | |||
477 | spin_unlock_irqrestore(&tdc->vc.lock, flags); | ||
478 | |||
479 | dma_set_residue(txstate, residual); | ||
480 | |||
481 | return ret; | ||
482 | } | ||
483 | |||
484 | static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, | ||
485 | struct tegra_adma_desc *desc, | ||
486 | dma_addr_t buf_addr, | ||
487 | enum dma_transfer_direction direction) | ||
488 | { | ||
489 | struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs; | ||
490 | unsigned int burst_size, adma_dir; | ||
491 | |||
492 | if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS) | ||
493 | return -EINVAL; | ||
494 | |||
495 | switch (direction) { | ||
496 | case DMA_MEM_TO_DEV: | ||
497 | adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB; | ||
498 | burst_size = fls(tdc->sconfig.dst_maxburst); | ||
499 | ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1); | ||
500 | ch_regs->ctrl = ADMA_CH_CTRL_TX_REQ(tdc->sreq_index); | ||
501 | ch_regs->src_addr = buf_addr; | ||
502 | break; | ||
503 | |||
504 | case DMA_DEV_TO_MEM: | ||
505 | adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM; | ||
506 | burst_size = fls(tdc->sconfig.src_maxburst); | ||
507 | ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1); | ||
508 | ch_regs->ctrl = ADMA_CH_CTRL_RX_REQ(tdc->sreq_index); | ||
509 | ch_regs->trg_addr = buf_addr; | ||
510 | break; | ||
511 | |||
512 | default: | ||
513 | dev_err(tdc2dev(tdc), "DMA direction is not supported\n"); | ||
514 | return -EINVAL; | ||
515 | } | ||
516 | |||
517 | if (!burst_size || burst_size > ADMA_CH_CONFIG_BURST_16) | ||
518 | burst_size = ADMA_CH_CONFIG_BURST_16; | ||
519 | |||
520 | ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) | | ||
521 | ADMA_CH_CTRL_MODE_CONTINUOUS | | ||
522 | ADMA_CH_CTRL_FLOWCTRL_EN; | ||
523 | ch_regs->config |= ADMA_CH_CONFIG_BURST_SIZE(burst_size); | ||
524 | ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1); | ||
525 | ch_regs->fifo_ctrl = ADMA_CH_FIFO_CTRL_DEFAULT; | ||
526 | ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK; | ||
527 | |||
528 | return tegra_adma_request_alloc(tdc, direction); | ||
529 | } | ||
530 | |||
531 | static struct dma_async_tx_descriptor *tegra_adma_prep_dma_cyclic( | ||
532 | struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, | ||
533 | size_t period_len, enum dma_transfer_direction direction, | ||
534 | unsigned long flags) | ||
535 | { | ||
536 | struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc); | ||
537 | struct tegra_adma_desc *desc = NULL; | ||
538 | |||
539 | if (!buf_len || !period_len || period_len > ADMA_CH_TC_COUNT_MASK) { | ||
540 | dev_err(tdc2dev(tdc), "invalid buffer/period len\n"); | ||
541 | return NULL; | ||
542 | } | ||
543 | |||
544 | if (buf_len % period_len) { | ||
545 | dev_err(tdc2dev(tdc), "buf_len not a multiple of period_len\n"); | ||
546 | return NULL; | ||
547 | } | ||
548 | |||
549 | if (!IS_ALIGNED(buf_addr, 4)) { | ||
550 | dev_err(tdc2dev(tdc), "invalid buffer alignment\n"); | ||
551 | return NULL; | ||
552 | } | ||
553 | |||
554 | desc = kzalloc(sizeof(*desc), GFP_NOWAIT); | ||
555 | if (!desc) | ||
556 | return NULL; | ||
557 | |||
558 | desc->buf_len = buf_len; | ||
559 | desc->period_len = period_len; | ||
560 | desc->num_periods = buf_len / period_len; | ||
561 | |||
562 | if (tegra_adma_set_xfer_params(tdc, desc, buf_addr, direction)) { | ||
563 | kfree(desc); | ||
564 | return NULL; | ||
565 | } | ||
566 | |||
567 | return vchan_tx_prep(&tdc->vc, &desc->vd, flags); | ||
568 | } | ||
569 | |||
570 | static int tegra_adma_alloc_chan_resources(struct dma_chan *dc) | ||
571 | { | ||
572 | struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc); | ||
573 | int ret; | ||
574 | |||
575 | ret = request_irq(tdc->irq, tegra_adma_isr, 0, dma_chan_name(dc), tdc); | ||
576 | if (ret) { | ||
577 | dev_err(tdc2dev(tdc), "failed to get interrupt for %s\n", | ||
578 | dma_chan_name(dc)); | ||
579 | return ret; | ||
580 | } | ||
581 | |||
582 | ret = pm_runtime_get_sync(tdc2dev(tdc)); | ||
583 | if (ret < 0) { | ||
584 | free_irq(tdc->irq, tdc); | ||
585 | return ret; | ||
586 | } | ||
587 | |||
588 | dma_cookie_init(&tdc->vc.chan); | ||
589 | |||
590 | return 0; | ||
591 | } | ||
592 | |||
593 | static void tegra_adma_free_chan_resources(struct dma_chan *dc) | ||
594 | { | ||
595 | struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc); | ||
596 | |||
597 | tegra_adma_terminate_all(dc); | ||
598 | vchan_free_chan_resources(&tdc->vc); | ||
599 | tasklet_kill(&tdc->vc.task); | ||
600 | free_irq(tdc->irq, tdc); | ||
601 | pm_runtime_put(tdc2dev(tdc)); | ||
602 | |||
603 | tdc->sreq_index = 0; | ||
604 | tdc->sreq_dir = DMA_TRANS_NONE; | ||
605 | } | ||
606 | |||
607 | static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec, | ||
608 | struct of_dma *ofdma) | ||
609 | { | ||
610 | struct tegra_adma *tdma = ofdma->of_dma_data; | ||
611 | struct tegra_adma_chan *tdc; | ||
612 | struct dma_chan *chan; | ||
613 | unsigned int sreq_index; | ||
614 | |||
615 | if (dma_spec->args_count != 1) | ||
616 | return NULL; | ||
617 | |||
618 | sreq_index = dma_spec->args[0]; | ||
619 | |||
620 | if (sreq_index == 0) { | ||
621 | dev_err(tdma->dev, "DMA request must not be 0\n"); | ||
622 | return NULL; | ||
623 | } | ||
624 | |||
625 | chan = dma_get_any_slave_channel(&tdma->dma_dev); | ||
626 | if (!chan) | ||
627 | return NULL; | ||
628 | |||
629 | tdc = to_tegra_adma_chan(chan); | ||
630 | tdc->sreq_index = sreq_index; | ||
631 | |||
632 | return chan; | ||
633 | } | ||
634 | |||
635 | static int tegra_adma_runtime_suspend(struct device *dev) | ||
636 | { | ||
637 | struct tegra_adma *tdma = dev_get_drvdata(dev); | ||
638 | |||
639 | tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD); | ||
640 | |||
641 | return pm_clk_suspend(dev); | ||
642 | } | ||
643 | |||
644 | static int tegra_adma_runtime_resume(struct device *dev) | ||
645 | { | ||
646 | struct tegra_adma *tdma = dev_get_drvdata(dev); | ||
647 | int ret; | ||
648 | |||
649 | ret = pm_clk_resume(dev); | ||
650 | if (ret) | ||
651 | return ret; | ||
652 | |||
653 | tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd); | ||
654 | |||
655 | return 0; | ||
656 | } | ||
657 | |||
658 | static const struct tegra_adma_chip_data tegra210_chip_data = { | ||
659 | .nr_channels = 22, | ||
660 | }; | ||
661 | |||
662 | static const struct of_device_id tegra_adma_of_match[] = { | ||
663 | { .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data }, | ||
664 | { }, | ||
665 | }; | ||
666 | MODULE_DEVICE_TABLE(of, tegra_adma_of_match); | ||
667 | |||
668 | static int tegra_adma_probe(struct platform_device *pdev) | ||
669 | { | ||
670 | const struct tegra_adma_chip_data *cdata; | ||
671 | struct tegra_adma *tdma; | ||
672 | struct resource *res; | ||
673 | struct clk *clk; | ||
674 | int ret, i; | ||
675 | |||
676 | cdata = of_device_get_match_data(&pdev->dev); | ||
677 | if (!cdata) { | ||
678 | dev_err(&pdev->dev, "device match data not found\n"); | ||
679 | return -ENODEV; | ||
680 | } | ||
681 | |||
682 | tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * | ||
683 | sizeof(struct tegra_adma_chan), GFP_KERNEL); | ||
684 | if (!tdma) | ||
685 | return -ENOMEM; | ||
686 | |||
687 | tdma->dev = &pdev->dev; | ||
688 | tdma->nr_channels = cdata->nr_channels; | ||
689 | platform_set_drvdata(pdev, tdma); | ||
690 | |||
691 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
692 | tdma->base_addr = devm_ioremap_resource(&pdev->dev, res); | ||
693 | if (IS_ERR(tdma->base_addr)) | ||
694 | return PTR_ERR(tdma->base_addr); | ||
695 | |||
696 | ret = pm_clk_create(&pdev->dev); | ||
697 | if (ret) | ||
698 | return ret; | ||
699 | |||
700 | clk = clk_get(&pdev->dev, "d_audio"); | ||
701 | if (IS_ERR(clk)) { | ||
702 | dev_err(&pdev->dev, "ADMA clock not found\n"); | ||
703 | ret = PTR_ERR(clk); | ||
704 | goto clk_destroy; | ||
705 | } | ||
706 | |||
707 | ret = pm_clk_add_clk(&pdev->dev, clk); | ||
708 | if (ret) { | ||
709 | clk_put(clk); | ||
710 | goto clk_destroy; | ||
711 | } | ||
712 | |||
713 | pm_runtime_enable(&pdev->dev); | ||
714 | |||
715 | ret = pm_runtime_get_sync(&pdev->dev); | ||
716 | if (ret < 0) | ||
717 | goto rpm_disable; | ||
718 | |||
719 | ret = tegra_adma_init(tdma); | ||
720 | if (ret) | ||
721 | goto rpm_put; | ||
722 | |||
723 | INIT_LIST_HEAD(&tdma->dma_dev.channels); | ||
724 | for (i = 0; i < tdma->nr_channels; i++) { | ||
725 | struct tegra_adma_chan *tdc = &tdma->channels[i]; | ||
726 | |||
727 | tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i); | ||
728 | |||
729 | tdc->irq = of_irq_get(pdev->dev.of_node, i); | ||
730 | if (tdc->irq < 0) { | ||
731 | ret = tdc->irq; | ||
732 | goto irq_dispose; | ||
733 | } | ||
734 | |||
735 | vchan_init(&tdc->vc, &tdma->dma_dev); | ||
736 | tdc->vc.desc_free = tegra_adma_desc_free; | ||
737 | tdc->tdma = tdma; | ||
738 | } | ||
739 | |||
740 | dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask); | ||
741 | dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask); | ||
742 | dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask); | ||
743 | |||
744 | tdma->dma_dev.dev = &pdev->dev; | ||
745 | tdma->dma_dev.device_alloc_chan_resources = | ||
746 | tegra_adma_alloc_chan_resources; | ||
747 | tdma->dma_dev.device_free_chan_resources = | ||
748 | tegra_adma_free_chan_resources; | ||
749 | tdma->dma_dev.device_issue_pending = tegra_adma_issue_pending; | ||
750 | tdma->dma_dev.device_prep_dma_cyclic = tegra_adma_prep_dma_cyclic; | ||
751 | tdma->dma_dev.device_config = tegra_adma_slave_config; | ||
752 | tdma->dma_dev.device_tx_status = tegra_adma_tx_status; | ||
753 | tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all; | ||
754 | tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
755 | tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
756 | tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
757 | tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | ||
758 | |||
759 | ret = dma_async_device_register(&tdma->dma_dev); | ||
760 | if (ret < 0) { | ||
761 | dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret); | ||
762 | goto irq_dispose; | ||
763 | } | ||
764 | |||
765 | ret = of_dma_controller_register(pdev->dev.of_node, | ||
766 | tegra_dma_of_xlate, tdma); | ||
767 | if (ret < 0) { | ||
768 | dev_err(&pdev->dev, "ADMA OF registration failed %d\n", ret); | ||
769 | goto dma_remove; | ||
770 | } | ||
771 | |||
772 | pm_runtime_put(&pdev->dev); | ||
773 | |||
774 | dev_info(&pdev->dev, "Tegra210 ADMA driver registered %d channels\n", | ||
775 | tdma->nr_channels); | ||
776 | |||
777 | return 0; | ||
778 | |||
779 | dma_remove: | ||
780 | dma_async_device_unregister(&tdma->dma_dev); | ||
781 | irq_dispose: | ||
782 | while (--i >= 0) | ||
783 | irq_dispose_mapping(tdma->channels[i].irq); | ||
784 | rpm_put: | ||
785 | pm_runtime_put_sync(&pdev->dev); | ||
786 | rpm_disable: | ||
787 | pm_runtime_disable(&pdev->dev); | ||
788 | clk_destroy: | ||
789 | pm_clk_destroy(&pdev->dev); | ||
790 | |||
791 | return ret; | ||
792 | } | ||
793 | |||
794 | static int tegra_adma_remove(struct platform_device *pdev) | ||
795 | { | ||
796 | struct tegra_adma *tdma = platform_get_drvdata(pdev); | ||
797 | int i; | ||
798 | |||
799 | dma_async_device_unregister(&tdma->dma_dev); | ||
800 | |||
801 | for (i = 0; i < tdma->nr_channels; ++i) | ||
802 | irq_dispose_mapping(tdma->channels[i].irq); | ||
803 | |||
804 | pm_runtime_put_sync(&pdev->dev); | ||
805 | pm_runtime_disable(&pdev->dev); | ||
806 | pm_clk_destroy(&pdev->dev); | ||
807 | |||
808 | return 0; | ||
809 | } | ||
810 | |||
811 | #ifdef CONFIG_PM_SLEEP | ||
812 | static int tegra_adma_pm_suspend(struct device *dev) | ||
813 | { | ||
814 | return pm_runtime_suspended(dev) == false; | ||
815 | } | ||
816 | #endif | ||
817 | |||
818 | static const struct dev_pm_ops tegra_adma_dev_pm_ops = { | ||
819 | SET_RUNTIME_PM_OPS(tegra_adma_runtime_suspend, | ||
820 | tegra_adma_runtime_resume, NULL) | ||
821 | SET_SYSTEM_SLEEP_PM_OPS(tegra_adma_pm_suspend, NULL) | ||
822 | }; | ||
823 | |||
824 | static struct platform_driver tegra_admac_driver = { | ||
825 | .driver = { | ||
826 | .name = "tegra-adma", | ||
827 | .pm = &tegra_adma_dev_pm_ops, | ||
828 | .of_match_table = tegra_adma_of_match, | ||
829 | }, | ||
830 | .probe = tegra_adma_probe, | ||
831 | .remove = tegra_adma_remove, | ||
832 | }; | ||
833 | |||
834 | module_platform_driver(tegra_admac_driver); | ||
835 | |||
836 | MODULE_ALIAS("platform:tegra210-adma"); | ||
837 | MODULE_DESCRIPTION("NVIDIA Tegra ADMA driver"); | ||
838 | MODULE_AUTHOR("Dara Ramesh <dramesh@nvidia.com>"); | ||
839 | MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>"); | ||
840 | MODULE_LICENSE("GPL v2"); | ||
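The tegra210-adma.c driver above plugs into the generic dmaengine slave/cyclic API: device_config lands in tegra_adma_slave_config(), device_prep_dma_cyclic in tegra_adma_prep_dma_cyclic(), and device_issue_pending in tegra_adma_issue_pending(). As a hedged illustration only (not part of this merge), the sketch below shows how an audio client might drive a cyclic receive through those hooks; the channel name "rx", the burst size and the buffer geometry are hypothetical, and error handling is trimmed to the essentials.

#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_start_cyclic_rx(struct device *dev, dma_addr_t buf,
				   size_t buf_len, size_t period_len,
				   dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* only 4-byte widths are advertised */
		.src_maxburst	= 8,				/* clamped to the 16-word burst max by the driver */
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	/* Resolved through the single-cell specifier handled by tegra_dma_of_xlate() */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);	/* -> tegra_adma_slave_config() */
	if (ret)
		goto err_release;

	/* buf_len must be a whole number of periods, as checked by the driver */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EINVAL;
		goto err_release;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);		/* -> tegra_adma_issue_pending() */

	return 0;

err_release:
	dma_release_channel(chan);
	return ret;
}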
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index ef67f278e076..df9118540b91 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c | |||
@@ -16,6 +16,15 @@ | |||
16 | * video device (S2MM). Initialization, status, interrupt and management | 16 | * video device (S2MM). Initialization, status, interrupt and management |
17 | * registers are accessed through an AXI4-Lite slave interface. | 17 | * registers are accessed through an AXI4-Lite slave interface. |
18 | * | 18 | * |
19 | * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that | ||
20 | * provides high-bandwidth one-dimensional direct memory access between memory | ||
21 | * and AXI4-Stream target peripherals. It supports one receive and one | ||
22 | * transmit channel, both of them optional at synthesis time. | ||
23 | * | ||
24 | * The AXI CDMA is a soft IP which provides high-bandwidth Direct Memory | ||
25 | * Access (DMA) between a memory-mapped source address and a memory-mapped | ||
26 | * destination address. | ||
27 | * | ||
19 | * This program is free software: you can redistribute it and/or modify | 28 | * This program is free software: you can redistribute it and/or modify |
20 | * it under the terms of the GNU General Public License as published by | 29 | * it under the terms of the GNU General Public License as published by |
21 | * the Free Software Foundation, either version 2 of the License, or | 30 | * the Free Software Foundation, either version 2 of the License, or |
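The new header comment above introduces the two additional IPs handled by this file. For the AXI CDMA memory-to-memory path in particular, clients go through the standard dmaengine memcpy API rather than the slave interface. Purely as a hedged, illustrative sketch (not part of this patch), this is roughly what a consumer of device_prep_dma_memcpy looks like; the buffer handles are hypothetical and no driver-specific calls are assumed.

#include <linux/dmaengine.h>
#include <linux/completion.h>
#include <linux/err.h>

static void example_memcpy_done(void *arg)
{
	complete(arg);		/* signal the waiter below */
}

static int example_cdma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* any channel advertising DMA_MEMCPY will do */
	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	tx->callback = example_memcpy_done;
	tx->callback_param = &done;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	wait_for_completion(&done);

	dma_release_channel(chan);
	return 0;
}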
@@ -35,116 +44,138 @@ | |||
35 | #include <linux/of_platform.h> | 44 | #include <linux/of_platform.h> |
36 | #include <linux/of_irq.h> | 45 | #include <linux/of_irq.h> |
37 | #include <linux/slab.h> | 46 | #include <linux/slab.h> |
47 | #include <linux/clk.h> | ||
38 | 48 | ||
39 | #include "../dmaengine.h" | 49 | #include "../dmaengine.h" |
40 | 50 | ||
41 | /* Register/Descriptor Offsets */ | 51 | /* Register/Descriptor Offsets */ |
42 | #define XILINX_VDMA_MM2S_CTRL_OFFSET 0x0000 | 52 | #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000 |
43 | #define XILINX_VDMA_S2MM_CTRL_OFFSET 0x0030 | 53 | #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030 |
44 | #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 | 54 | #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 |
45 | #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 | 55 | #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 |
46 | 56 | ||
47 | /* Control Registers */ | 57 | /* Control Registers */ |
48 | #define XILINX_VDMA_REG_DMACR 0x0000 | 58 | #define XILINX_DMA_REG_DMACR 0x0000 |
49 | #define XILINX_VDMA_DMACR_DELAY_MAX 0xff | 59 | #define XILINX_DMA_DMACR_DELAY_MAX 0xff |
50 | #define XILINX_VDMA_DMACR_DELAY_SHIFT 24 | 60 | #define XILINX_DMA_DMACR_DELAY_SHIFT 24 |
51 | #define XILINX_VDMA_DMACR_FRAME_COUNT_MAX 0xff | 61 | #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff |
52 | #define XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT 16 | 62 | #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16 |
53 | #define XILINX_VDMA_DMACR_ERR_IRQ BIT(14) | 63 | #define XILINX_DMA_DMACR_ERR_IRQ BIT(14) |
54 | #define XILINX_VDMA_DMACR_DLY_CNT_IRQ BIT(13) | 64 | #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13) |
55 | #define XILINX_VDMA_DMACR_FRM_CNT_IRQ BIT(12) | 65 | #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12) |
56 | #define XILINX_VDMA_DMACR_MASTER_SHIFT 8 | 66 | #define XILINX_DMA_DMACR_MASTER_SHIFT 8 |
57 | #define XILINX_VDMA_DMACR_FSYNCSRC_SHIFT 5 | 67 | #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5 |
58 | #define XILINX_VDMA_DMACR_FRAMECNT_EN BIT(4) | 68 | #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4) |
59 | #define XILINX_VDMA_DMACR_GENLOCK_EN BIT(3) | 69 | #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3) |
60 | #define XILINX_VDMA_DMACR_RESET BIT(2) | 70 | #define XILINX_DMA_DMACR_RESET BIT(2) |
61 | #define XILINX_VDMA_DMACR_CIRC_EN BIT(1) | 71 | #define XILINX_DMA_DMACR_CIRC_EN BIT(1) |
62 | #define XILINX_VDMA_DMACR_RUNSTOP BIT(0) | 72 | #define XILINX_DMA_DMACR_RUNSTOP BIT(0) |
63 | #define XILINX_VDMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) | 73 | #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) |
64 | 74 | ||
65 | #define XILINX_VDMA_REG_DMASR 0x0004 | 75 | #define XILINX_DMA_REG_DMASR 0x0004 |
66 | #define XILINX_VDMA_DMASR_EOL_LATE_ERR BIT(15) | 76 | #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15) |
67 | #define XILINX_VDMA_DMASR_ERR_IRQ BIT(14) | 77 | #define XILINX_DMA_DMASR_ERR_IRQ BIT(14) |
68 | #define XILINX_VDMA_DMASR_DLY_CNT_IRQ BIT(13) | 78 | #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13) |
69 | #define XILINX_VDMA_DMASR_FRM_CNT_IRQ BIT(12) | 79 | #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12) |
70 | #define XILINX_VDMA_DMASR_SOF_LATE_ERR BIT(11) | 80 | #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11) |
71 | #define XILINX_VDMA_DMASR_SG_DEC_ERR BIT(10) | 81 | #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10) |
72 | #define XILINX_VDMA_DMASR_SG_SLV_ERR BIT(9) | 82 | #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9) |
73 | #define XILINX_VDMA_DMASR_EOF_EARLY_ERR BIT(8) | 83 | #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8) |
74 | #define XILINX_VDMA_DMASR_SOF_EARLY_ERR BIT(7) | 84 | #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7) |
75 | #define XILINX_VDMA_DMASR_DMA_DEC_ERR BIT(6) | 85 | #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) |
76 | #define XILINX_VDMA_DMASR_DMA_SLAVE_ERR BIT(5) | 86 | #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) |
77 | #define XILINX_VDMA_DMASR_DMA_INT_ERR BIT(4) | 87 | #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) |
78 | #define XILINX_VDMA_DMASR_IDLE BIT(1) | 88 | #define XILINX_DMA_DMASR_IDLE BIT(1) |
79 | #define XILINX_VDMA_DMASR_HALTED BIT(0) | 89 | #define XILINX_DMA_DMASR_HALTED BIT(0) |
80 | #define XILINX_VDMA_DMASR_DELAY_MASK GENMASK(31, 24) | 90 | #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) |
81 | #define XILINX_VDMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) | 91 | #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) |
82 | 92 | ||
83 | #define XILINX_VDMA_REG_CURDESC 0x0008 | 93 | #define XILINX_DMA_REG_CURDESC 0x0008 |
84 | #define XILINX_VDMA_REG_TAILDESC 0x0010 | 94 | #define XILINX_DMA_REG_TAILDESC 0x0010 |
85 | #define XILINX_VDMA_REG_REG_INDEX 0x0014 | 95 | #define XILINX_DMA_REG_REG_INDEX 0x0014 |
86 | #define XILINX_VDMA_REG_FRMSTORE 0x0018 | 96 | #define XILINX_DMA_REG_FRMSTORE 0x0018 |
87 | #define XILINX_VDMA_REG_THRESHOLD 0x001c | 97 | #define XILINX_DMA_REG_THRESHOLD 0x001c |
88 | #define XILINX_VDMA_REG_FRMPTR_STS 0x0024 | 98 | #define XILINX_DMA_REG_FRMPTR_STS 0x0024 |
89 | #define XILINX_VDMA_REG_PARK_PTR 0x0028 | 99 | #define XILINX_DMA_REG_PARK_PTR 0x0028 |
90 | #define XILINX_VDMA_PARK_PTR_WR_REF_SHIFT 8 | 100 | #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8 |
91 | #define XILINX_VDMA_PARK_PTR_RD_REF_SHIFT 0 | 101 | #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0 |
92 | #define XILINX_VDMA_REG_VDMA_VERSION 0x002c | 102 | #define XILINX_DMA_REG_VDMA_VERSION 0x002c |
93 | 103 | ||
94 | /* Register Direct Mode Registers */ | 104 | /* Register Direct Mode Registers */ |
95 | #define XILINX_VDMA_REG_VSIZE 0x0000 | 105 | #define XILINX_DMA_REG_VSIZE 0x0000 |
96 | #define XILINX_VDMA_REG_HSIZE 0x0004 | 106 | #define XILINX_DMA_REG_HSIZE 0x0004 |
97 | 107 | ||
98 | #define XILINX_VDMA_REG_FRMDLY_STRIDE 0x0008 | 108 | #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008 |
99 | #define XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 | 109 | #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 |
100 | #define XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 | 110 | #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 |
101 | 111 | ||
102 | #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) | 112 | #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) |
113 | #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) | ||
103 | 114 | ||
104 | /* HW specific definitions */ | 115 | /* HW specific definitions */ |
105 | #define XILINX_VDMA_MAX_CHANS_PER_DEVICE 0x2 | 116 | #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2 |
106 | 117 | ||
107 | #define XILINX_VDMA_DMAXR_ALL_IRQ_MASK \ | 118 | #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ |
108 | (XILINX_VDMA_DMASR_FRM_CNT_IRQ | \ | 119 | (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ |
109 | XILINX_VDMA_DMASR_DLY_CNT_IRQ | \ | 120 | XILINX_DMA_DMASR_DLY_CNT_IRQ | \ |
110 | XILINX_VDMA_DMASR_ERR_IRQ) | 121 | XILINX_DMA_DMASR_ERR_IRQ) |
111 | 122 | ||
112 | #define XILINX_VDMA_DMASR_ALL_ERR_MASK \ | 123 | #define XILINX_DMA_DMASR_ALL_ERR_MASK \ |
113 | (XILINX_VDMA_DMASR_EOL_LATE_ERR | \ | 124 | (XILINX_DMA_DMASR_EOL_LATE_ERR | \ |
114 | XILINX_VDMA_DMASR_SOF_LATE_ERR | \ | 125 | XILINX_DMA_DMASR_SOF_LATE_ERR | \ |
115 | XILINX_VDMA_DMASR_SG_DEC_ERR | \ | 126 | XILINX_DMA_DMASR_SG_DEC_ERR | \ |
116 | XILINX_VDMA_DMASR_SG_SLV_ERR | \ | 127 | XILINX_DMA_DMASR_SG_SLV_ERR | \ |
117 | XILINX_VDMA_DMASR_EOF_EARLY_ERR | \ | 128 | XILINX_DMA_DMASR_EOF_EARLY_ERR | \ |
118 | XILINX_VDMA_DMASR_SOF_EARLY_ERR | \ | 129 | XILINX_DMA_DMASR_SOF_EARLY_ERR | \ |
119 | XILINX_VDMA_DMASR_DMA_DEC_ERR | \ | 130 | XILINX_DMA_DMASR_DMA_DEC_ERR | \ |
120 | XILINX_VDMA_DMASR_DMA_SLAVE_ERR | \ | 131 | XILINX_DMA_DMASR_DMA_SLAVE_ERR | \ |
121 | XILINX_VDMA_DMASR_DMA_INT_ERR) | 132 | XILINX_DMA_DMASR_DMA_INT_ERR) |
122 | 133 | ||
123 | /* | 134 | /* |
124 | * Recoverable errors are DMA Internal error, SOF Early, EOF Early | 135 | * Recoverable errors are DMA Internal error, SOF Early, EOF Early |
125 | * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC | 136 | * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC |
126 | * is enabled in the h/w system. | 137 | * is enabled in the h/w system. |
127 | */ | 138 | */ |
128 | #define XILINX_VDMA_DMASR_ERR_RECOVER_MASK \ | 139 | #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \ |
129 | (XILINX_VDMA_DMASR_SOF_LATE_ERR | \ | 140 | (XILINX_DMA_DMASR_SOF_LATE_ERR | \ |
130 | XILINX_VDMA_DMASR_EOF_EARLY_ERR | \ | 141 | XILINX_DMA_DMASR_EOF_EARLY_ERR | \ |
131 | XILINX_VDMA_DMASR_SOF_EARLY_ERR | \ | 142 | XILINX_DMA_DMASR_SOF_EARLY_ERR | \ |
132 | XILINX_VDMA_DMASR_DMA_INT_ERR) | 143 | XILINX_DMA_DMASR_DMA_INT_ERR) |
133 | 144 | ||
134 | /* Axi VDMA Flush on Fsync bits */ | 145 | /* Axi VDMA Flush on Fsync bits */ |
135 | #define XILINX_VDMA_FLUSH_S2MM 3 | 146 | #define XILINX_DMA_FLUSH_S2MM 3 |
136 | #define XILINX_VDMA_FLUSH_MM2S 2 | 147 | #define XILINX_DMA_FLUSH_MM2S 2 |
137 | #define XILINX_VDMA_FLUSH_BOTH 1 | 148 | #define XILINX_DMA_FLUSH_BOTH 1 |
138 | 149 | ||
139 | /* Delay loop counter to prevent hardware failure */ | 150 | /* Delay loop counter to prevent hardware failure */ |
140 | #define XILINX_VDMA_LOOP_COUNT 1000000 | 151 | #define XILINX_DMA_LOOP_COUNT 1000000 |
152 | |||
153 | /* AXI DMA Specific Registers/Offsets */ | ||
154 | #define XILINX_DMA_REG_SRCDSTADDR 0x18 | ||
155 | #define XILINX_DMA_REG_BTT 0x28 | ||
156 | |||
157 | /* AXI DMA Specific Masks/Bit fields */ | ||
158 | #define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0) | ||
159 | #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) | ||
160 | #define XILINX_DMA_CR_COALESCE_SHIFT 16 | ||
161 | #define XILINX_DMA_BD_SOP BIT(27) | ||
162 | #define XILINX_DMA_BD_EOP BIT(26) | ||
163 | #define XILINX_DMA_COALESCE_MAX 255 | ||
164 | #define XILINX_DMA_NUM_APP_WORDS 5 | ||
165 | |||
166 | /* AXI CDMA Specific Registers/Offsets */ | ||
167 | #define XILINX_CDMA_REG_SRCADDR 0x18 | ||
168 | #define XILINX_CDMA_REG_DSTADDR 0x20 | ||
169 | |||
170 | /* AXI CDMA Specific Masks */ | ||
171 | #define XILINX_CDMA_CR_SGMODE BIT(3) | ||
141 | 172 | ||
142 | /** | 173 | /** |
143 | * struct xilinx_vdma_desc_hw - Hardware Descriptor | 174 | * struct xilinx_vdma_desc_hw - Hardware Descriptor |
144 | * @next_desc: Next Descriptor Pointer @0x00 | 175 | * @next_desc: Next Descriptor Pointer @0x00 |
145 | * @pad1: Reserved @0x04 | 176 | * @pad1: Reserved @0x04 |
146 | * @buf_addr: Buffer address @0x08 | 177 | * @buf_addr: Buffer address @0x08 |
147 | * @pad2: Reserved @0x0C | 178 | * @buf_addr_msb: MSB of Buffer address @0x0C |
148 | * @vsize: Vertical Size @0x10 | 179 | * @vsize: Vertical Size @0x10 |
149 | * @hsize: Horizontal Size @0x14 | 180 | * @hsize: Horizontal Size @0x14 |
150 | * @stride: Number of bytes between the first | 181 | * @stride: Number of bytes between the first |
@@ -154,13 +185,59 @@ struct xilinx_vdma_desc_hw { | |||
154 | u32 next_desc; | 185 | u32 next_desc; |
155 | u32 pad1; | 186 | u32 pad1; |
156 | u32 buf_addr; | 187 | u32 buf_addr; |
157 | u32 pad2; | 188 | u32 buf_addr_msb; |
158 | u32 vsize; | 189 | u32 vsize; |
159 | u32 hsize; | 190 | u32 hsize; |
160 | u32 stride; | 191 | u32 stride; |
161 | } __aligned(64); | 192 | } __aligned(64); |
162 | 193 | ||
163 | /** | 194 | /** |
195 | * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA | ||
196 | * @next_desc: Next Descriptor Pointer @0x00 | ||
197 | * @pad1: Reserved @0x04 | ||
198 | * @buf_addr: Buffer address @0x08 | ||
199 | * @pad2: Reserved @0x0C | ||
200 | * @pad3: Reserved @0x10 | ||
201 | * @pad4: Reserved @0x14 | ||
202 | * @control: Control field @0x18 | ||
203 | * @status: Status field @0x1C | ||
204 | * @app: APP Fields @0x20 - 0x30 | ||
205 | */ | ||
206 | struct xilinx_axidma_desc_hw { | ||
207 | u32 next_desc; | ||
208 | u32 pad1; | ||
209 | u32 buf_addr; | ||
210 | u32 pad2; | ||
211 | u32 pad3; | ||
212 | u32 pad4; | ||
213 | u32 control; | ||
214 | u32 status; | ||
215 | u32 app[XILINX_DMA_NUM_APP_WORDS]; | ||
216 | } __aligned(64); | ||
217 | |||
218 | /** | ||
219 | * struct xilinx_cdma_desc_hw - Hardware Descriptor | ||
220 | * @next_desc: Next Descriptor Pointer @0x00 | ||
221 | * @pad1: Reserved @0x04 | ||
222 | * @src_addr: Source address @0x08 | ||
223 | * @pad2: Reserved @0x0C | ||
224 | * @dest_addr: Destination address @0x10 | ||
225 | * @pad3: Reserved @0x14 | ||
226 | * @control: Control field @0x18 | ||
227 | * @status: Status field @0x1C | ||
228 | */ | ||
229 | struct xilinx_cdma_desc_hw { | ||
230 | u32 next_desc; | ||
231 | u32 pad1; | ||
232 | u32 src_addr; | ||
233 | u32 pad2; | ||
234 | u32 dest_addr; | ||
235 | u32 pad3; | ||
236 | u32 control; | ||
237 | u32 status; | ||
238 | } __aligned(64); | ||
239 | |||
240 | /** | ||
164 | * struct xilinx_vdma_tx_segment - Descriptor segment | 241 | * struct xilinx_vdma_tx_segment - Descriptor segment |
165 | * @hw: Hardware descriptor | 242 | * @hw: Hardware descriptor |
166 | * @node: Node in the descriptor segments list | 243 | * @node: Node in the descriptor segments list |
@@ -173,19 +250,43 @@ struct xilinx_vdma_tx_segment { | |||
173 | } __aligned(64); | 250 | } __aligned(64); |
174 | 251 | ||
175 | /** | 252 | /** |
176 | * struct xilinx_vdma_tx_descriptor - Per Transaction structure | 253 | * struct xilinx_axidma_tx_segment - Descriptor segment |
254 | * @hw: Hardware descriptor | ||
255 | * @node: Node in the descriptor segments list | ||
256 | * @phys: Physical address of segment | ||
257 | */ | ||
258 | struct xilinx_axidma_tx_segment { | ||
259 | struct xilinx_axidma_desc_hw hw; | ||
260 | struct list_head node; | ||
261 | dma_addr_t phys; | ||
262 | } __aligned(64); | ||
263 | |||
264 | /** | ||
265 | * struct xilinx_cdma_tx_segment - Descriptor segment | ||
266 | * @hw: Hardware descriptor | ||
267 | * @node: Node in the descriptor segments list | ||
268 | * @phys: Physical address of segment | ||
269 | */ | ||
270 | struct xilinx_cdma_tx_segment { | ||
271 | struct xilinx_cdma_desc_hw hw; | ||
272 | struct list_head node; | ||
273 | dma_addr_t phys; | ||
274 | } __aligned(64); | ||
275 | |||
276 | /** | ||
277 | * struct xilinx_dma_tx_descriptor - Per Transaction structure | ||
177 | * @async_tx: Async transaction descriptor | 278 | * @async_tx: Async transaction descriptor |
178 | * @segments: TX segments list | 279 | * @segments: TX segments list |
179 | * @node: Node in the channel descriptors list | 280 | * @node: Node in the channel descriptors list |
180 | */ | 281 | */ |
181 | struct xilinx_vdma_tx_descriptor { | 282 | struct xilinx_dma_tx_descriptor { |
182 | struct dma_async_tx_descriptor async_tx; | 283 | struct dma_async_tx_descriptor async_tx; |
183 | struct list_head segments; | 284 | struct list_head segments; |
184 | struct list_head node; | 285 | struct list_head node; |
185 | }; | 286 | }; |
186 | 287 | ||
187 | /** | 288 | /** |
188 | * struct xilinx_vdma_chan - Driver specific VDMA channel structure | 289 | * struct xilinx_dma_chan - Driver specific DMA channel structure |
189 | * @xdev: Driver specific device structure | 290 | * @xdev: Driver specific device structure |
190 | * @ctrl_offset: Control registers offset | 291 | * @ctrl_offset: Control registers offset |
191 | * @desc_offset: TX descriptor registers offset | 292 | * @desc_offset: TX descriptor registers offset |
@@ -207,9 +308,14 @@ struct xilinx_vdma_tx_descriptor { | |||
207 | * @config: Device configuration info | 308 | * @config: Device configuration info |
208 | * @flush_on_fsync: Flush on Frame sync | 309 | * @flush_on_fsync: Flush on Frame sync |
209 | * @desc_pendingcount: Descriptor pending count | 310 | * @desc_pendingcount: Descriptor pending count |
311 | * @ext_addr: Indicates 64 bit addressing is supported by dma channel | ||
312 | * @desc_submitcount: Descriptor h/w submitted count | ||
313 | * @residue: Residue for AXI DMA | ||
314 | * @seg_v: Statically allocated segments base | ||
315 | * @start_transfer: Per-IP callback used to start a transfer | ||
210 | */ | 316 | */ |
211 | struct xilinx_vdma_chan { | 317 | struct xilinx_dma_chan { |
212 | struct xilinx_vdma_device *xdev; | 318 | struct xilinx_dma_device *xdev; |
213 | u32 ctrl_offset; | 319 | u32 ctrl_offset; |
214 | u32 desc_offset; | 320 | u32 desc_offset; |
215 | spinlock_t lock; | 321 | spinlock_t lock; |
@@ -230,73 +336,122 @@ struct xilinx_vdma_chan { | |||
230 | struct xilinx_vdma_config config; | 336 | struct xilinx_vdma_config config; |
231 | bool flush_on_fsync; | 337 | bool flush_on_fsync; |
232 | u32 desc_pendingcount; | 338 | u32 desc_pendingcount; |
339 | bool ext_addr; | ||
340 | u32 desc_submitcount; | ||
341 | u32 residue; | ||
342 | struct xilinx_axidma_tx_segment *seg_v; | ||
343 | void (*start_transfer)(struct xilinx_dma_chan *chan); | ||
344 | }; | ||
345 | |||
346 | struct xilinx_dma_config { | ||
347 | enum xdma_ip_type dmatype; | ||
348 | int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk, | ||
349 | struct clk **tx_clk, struct clk **txs_clk, | ||
350 | struct clk **rx_clk, struct clk **rxs_clk); | ||
233 | }; | 351 | }; |
234 | 352 | ||
235 | /** | 353 | /** |
236 | * struct xilinx_vdma_device - VDMA device structure | 354 | * struct xilinx_dma_device - DMA device structure |
237 | * @regs: I/O mapped base address | 355 | * @regs: I/O mapped base address |
238 | * @dev: Device Structure | 356 | * @dev: Device Structure |
239 | * @common: DMA device structure | 357 | * @common: DMA device structure |
240 | * @chan: Driver specific VDMA channel | 358 | * @chan: Driver specific DMA channel |
241 | * @has_sg: Specifies whether Scatter-Gather is present or not | 359 | * @has_sg: Specifies whether Scatter-Gather is present or not |
242 | * @flush_on_fsync: Flush on frame sync | 360 | * @flush_on_fsync: Flush on frame sync |
361 | * @ext_addr: Indicates 64 bit addressing is supported by dma device | ||
362 | * @pdev: Platform device structure pointer | ||
363 | * @dma_config: DMA config structure | ||
364 | * @axi_clk: DMA AXI4-Lite interface clock | ||
365 | * @tx_clk: DMA mm2s clock | ||
366 | * @txs_clk: DMA mm2s stream clock | ||
367 | * @rx_clk: DMA s2mm clock | ||
368 | * @rxs_clk: DMA s2mm stream clock | ||
243 | */ | 369 | */ |
244 | struct xilinx_vdma_device { | 370 | struct xilinx_dma_device { |
245 | void __iomem *regs; | 371 | void __iomem *regs; |
246 | struct device *dev; | 372 | struct device *dev; |
247 | struct dma_device common; | 373 | struct dma_device common; |
248 | struct xilinx_vdma_chan *chan[XILINX_VDMA_MAX_CHANS_PER_DEVICE]; | 374 | struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; |
249 | bool has_sg; | 375 | bool has_sg; |
250 | u32 flush_on_fsync; | 376 | u32 flush_on_fsync; |
377 | bool ext_addr; | ||
378 | struct platform_device *pdev; | ||
379 | const struct xilinx_dma_config *dma_config; | ||
380 | struct clk *axi_clk; | ||
381 | struct clk *tx_clk; | ||
382 | struct clk *txs_clk; | ||
383 | struct clk *rx_clk; | ||
384 | struct clk *rxs_clk; | ||
251 | }; | 385 | }; |
252 | 386 | ||
253 | /* Macros */ | 387 | /* Macros */ |
254 | #define to_xilinx_chan(chan) \ | 388 | #define to_xilinx_chan(chan) \ |
255 | container_of(chan, struct xilinx_vdma_chan, common) | 389 | container_of(chan, struct xilinx_dma_chan, common) |
256 | #define to_vdma_tx_descriptor(tx) \ | 390 | #define to_dma_tx_descriptor(tx) \ |
257 | container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx) | 391 | container_of(tx, struct xilinx_dma_tx_descriptor, async_tx) |
258 | #define xilinx_vdma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ | 392 | #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ |
259 | readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ | 393 | readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ |
260 | cond, delay_us, timeout_us) | 394 | cond, delay_us, timeout_us) |
261 | 395 | ||
262 | /* IO accessors */ | 396 | /* IO accessors */ |
263 | static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg) | 397 | static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg) |
264 | { | 398 | { |
265 | return ioread32(chan->xdev->regs + reg); | 399 | return ioread32(chan->xdev->regs + reg); |
266 | } | 400 | } |
267 | 401 | ||
268 | static inline void vdma_write(struct xilinx_vdma_chan *chan, u32 reg, u32 value) | 402 | static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value) |
269 | { | 403 | { |
270 | iowrite32(value, chan->xdev->regs + reg); | 404 | iowrite32(value, chan->xdev->regs + reg); |
271 | } | 405 | } |
272 | 406 | ||
273 | static inline void vdma_desc_write(struct xilinx_vdma_chan *chan, u32 reg, | 407 | static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg, |
274 | u32 value) | 408 | u32 value) |
275 | { | 409 | { |
276 | vdma_write(chan, chan->desc_offset + reg, value); | 410 | dma_write(chan, chan->desc_offset + reg, value); |
277 | } | 411 | } |
278 | 412 | ||
279 | static inline u32 vdma_ctrl_read(struct xilinx_vdma_chan *chan, u32 reg) | 413 | static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg) |
280 | { | 414 | { |
281 | return vdma_read(chan, chan->ctrl_offset + reg); | 415 | return dma_read(chan, chan->ctrl_offset + reg); |
282 | } | 416 | } |
283 | 417 | ||
284 | static inline void vdma_ctrl_write(struct xilinx_vdma_chan *chan, u32 reg, | 418 | static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg, |
285 | u32 value) | 419 | u32 value) |
286 | { | 420 | { |
287 | vdma_write(chan, chan->ctrl_offset + reg, value); | 421 | dma_write(chan, chan->ctrl_offset + reg, value); |
288 | } | 422 | } |
289 | 423 | ||
290 | static inline void vdma_ctrl_clr(struct xilinx_vdma_chan *chan, u32 reg, | 424 | static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg, |
291 | u32 clr) | 425 | u32 clr) |
292 | { | 426 | { |
293 | vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) & ~clr); | 427 | dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr); |
294 | } | 428 | } |
295 | 429 | ||
296 | static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg, | 430 | static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg, |
297 | u32 set) | 431 | u32 set) |
298 | { | 432 | { |
299 | vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) | set); | 433 | dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set); |
434 | } | ||
435 | |||
436 | /** | ||
437 | * vdma_desc_write_64 - 64-bit descriptor write | ||
438 | * @chan: Driver specific VDMA channel | ||
439 | * @reg: Register to write | ||
440 | * @value_lsb: lower address of the descriptor. | ||
441 | * @value_msb: upper address of the descriptor. | ||
442 | * | ||
443 | * Since the vdma driver writes to register offsets that are not a | ||
444 | * multiple of 64 bits (e.g. 0x5c), the value is written as two separate | ||
445 | * 32-bit accesses instead of a single 64-bit register write. | ||
446 | */ | ||
447 | static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg, | ||
448 | u32 value_lsb, u32 value_msb) | ||
449 | { | ||
450 | /* Write the lsb 32 bits */ | ||
451 | writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg); | ||
452 | |||
453 | /* Write the msb 32 bits */ | ||
454 | writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4); | ||
300 | } | 455 | } |
301 | 456 | ||
302 | /* ----------------------------------------------------------------------------- | 457 | /* ----------------------------------------------------------------------------- |
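One usage note on the 64-bit helper introduced in the hunk above: the hedged sketch below (not in the patch) shows how a caller on an ext_addr-capable channel might split a dma_addr_t across the two 32-bit writes when programming frame-buffer start address n, using the XILINX_VDMA_REG_START_ADDRESS_64() offset added earlier; the wrapper function itself is hypothetical.

static void example_write_start_addr(struct xilinx_dma_chan *chan,
				     unsigned int n, dma_addr_t addr)
{
	if (chan->ext_addr) {
		/* two 32-bit halves, since the offset is not 64-bit aligned */
		vdma_desc_write_64(chan, XILINX_VDMA_REG_START_ADDRESS_64(n),
				   lower_32_bits(addr), upper_32_bits(addr));
	} else {
		vdma_desc_write(chan, XILINX_VDMA_REG_START_ADDRESS(n), addr);
	}
}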
@@ -305,16 +460,59 @@ static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg, | |||
305 | 460 | ||
306 | /** | 461 | /** |
307 | * xilinx_vdma_alloc_tx_segment - Allocate transaction segment | 462 | * xilinx_vdma_alloc_tx_segment - Allocate transaction segment |
308 | * @chan: Driver specific VDMA channel | 463 | * @chan: Driver specific DMA channel |
309 | * | 464 | * |
310 | * Return: The allocated segment on success and NULL on failure. | 465 | * Return: The allocated segment on success and NULL on failure. |
311 | */ | 466 | */ |
312 | static struct xilinx_vdma_tx_segment * | 467 | static struct xilinx_vdma_tx_segment * |
313 | xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan) | 468 | xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan) |
314 | { | 469 | { |
315 | struct xilinx_vdma_tx_segment *segment; | 470 | struct xilinx_vdma_tx_segment *segment; |
316 | dma_addr_t phys; | 471 | dma_addr_t phys; |
317 | 472 | ||
473 | segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); | ||
474 | if (!segment) | ||
475 | return NULL; | ||
476 | |||
477 | segment->phys = phys; | ||
478 | |||
479 | return segment; | ||
480 | } | ||
481 | |||
482 | /** | ||
483 | * xilinx_cdma_alloc_tx_segment - Allocate transaction segment | ||
484 | * @chan: Driver specific DMA channel | ||
485 | * | ||
486 | * Return: The allocated segment on success and NULL on failure. | ||
487 | */ | ||
488 | static struct xilinx_cdma_tx_segment * | ||
489 | xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan) | ||
490 | { | ||
491 | struct xilinx_cdma_tx_segment *segment; | ||
492 | dma_addr_t phys; | ||
493 | |||
494 | segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys); | ||
495 | if (!segment) | ||
496 | return NULL; | ||
497 | |||
498 | memset(segment, 0, sizeof(*segment)); | ||
499 | segment->phys = phys; | ||
500 | |||
501 | return segment; | ||
502 | } | ||
503 | |||
504 | /** | ||
505 | * xilinx_axidma_alloc_tx_segment - Allocate transaction segment | ||
506 | * @chan: Driver specific DMA channel | ||
507 | * | ||
508 | * Return: The allocated segment on success and NULL on failure. | ||
509 | */ | ||
510 | static struct xilinx_axidma_tx_segment * | ||
511 | xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) | ||
512 | { | ||
513 | struct xilinx_axidma_tx_segment *segment; | ||
514 | dma_addr_t phys; | ||
515 | |||
318 | segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys); | 516 | segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys); |
319 | if (!segment) | 517 | if (!segment) |
320 | return NULL; | 518 | return NULL; |
@@ -326,26 +524,48 @@ xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan) | |||
326 | } | 524 | } |
327 | 525 | ||
328 | /** | 526 | /** |
527 | * xilinx_dma_free_tx_segment - Free transaction segment | ||
528 | * @chan: Driver specific DMA channel | ||
529 | * @segment: DMA transaction segment | ||
530 | */ | ||
531 | static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan, | ||
532 | struct xilinx_axidma_tx_segment *segment) | ||
533 | { | ||
534 | dma_pool_free(chan->desc_pool, segment, segment->phys); | ||
535 | } | ||
536 | |||
537 | /** | ||
538 | * xilinx_cdma_free_tx_segment - Free transaction segment | ||
539 | * @chan: Driver specific DMA channel | ||
540 | * @segment: DMA transaction segment | ||
541 | */ | ||
542 | static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan, | ||
543 | struct xilinx_cdma_tx_segment *segment) | ||
544 | { | ||
545 | dma_pool_free(chan->desc_pool, segment, segment->phys); | ||
546 | } | ||
547 | |||
548 | /** | ||
329 | * xilinx_vdma_free_tx_segment - Free transaction segment | 549 | * xilinx_vdma_free_tx_segment - Free transaction segment |
330 | * @chan: Driver specific VDMA channel | 550 | * @chan: Driver specific DMA channel |
331 | * @segment: VDMA transaction segment | 551 | * @segment: DMA transaction segment |
332 | */ | 552 | */ |
333 | static void xilinx_vdma_free_tx_segment(struct xilinx_vdma_chan *chan, | 553 | static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan, |
334 | struct xilinx_vdma_tx_segment *segment) | 554 | struct xilinx_vdma_tx_segment *segment) |
335 | { | 555 | { |
336 | dma_pool_free(chan->desc_pool, segment, segment->phys); | 556 | dma_pool_free(chan->desc_pool, segment, segment->phys); |
337 | } | 557 | } |
338 | 558 | ||
339 | /** | 559 | /** |
340 | * xilinx_vdma_tx_descriptor - Allocate transaction descriptor | 560 | * xilinx_dma_tx_descriptor - Allocate transaction descriptor |
341 | * @chan: Driver specific VDMA channel | 561 | * @chan: Driver specific DMA channel |
342 | * | 562 | * |
343 | * Return: The allocated descriptor on success and NULL on failure. | 563 | * Return: The allocated descriptor on success and NULL on failure. |
344 | */ | 564 | */ |
345 | static struct xilinx_vdma_tx_descriptor * | 565 | static struct xilinx_dma_tx_descriptor * |
346 | xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan) | 566 | xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan) |
347 | { | 567 | { |
348 | struct xilinx_vdma_tx_descriptor *desc; | 568 | struct xilinx_dma_tx_descriptor *desc; |
349 | 569 | ||
350 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); | 570 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); |
351 | if (!desc) | 571 | if (!desc) |
@@ -357,22 +577,38 @@ xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan) | |||
357 | } | 577 | } |
358 | 578 | ||
359 | /** | 579 | /** |
360 | * xilinx_vdma_free_tx_descriptor - Free transaction descriptor | 580 | * xilinx_dma_free_tx_descriptor - Free transaction descriptor |
361 | * @chan: Driver specific VDMA channel | 581 | * @chan: Driver specific DMA channel |
362 | * @desc: VDMA transaction descriptor | 582 | * @desc: DMA transaction descriptor |
363 | */ | 583 | */ |
364 | static void | 584 | static void |
365 | xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan, | 585 | xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, |
366 | struct xilinx_vdma_tx_descriptor *desc) | 586 | struct xilinx_dma_tx_descriptor *desc) |
367 | { | 587 | { |
368 | struct xilinx_vdma_tx_segment *segment, *next; | 588 | struct xilinx_vdma_tx_segment *segment, *next; |
589 | struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next; | ||
590 | struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next; | ||
369 | 591 | ||
370 | if (!desc) | 592 | if (!desc) |
371 | return; | 593 | return; |
372 | 594 | ||
373 | list_for_each_entry_safe(segment, next, &desc->segments, node) { | 595 | if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
374 | list_del(&segment->node); | 596 | list_for_each_entry_safe(segment, next, &desc->segments, node) { |
375 | xilinx_vdma_free_tx_segment(chan, segment); | 597 | list_del(&segment->node); |
598 | xilinx_vdma_free_tx_segment(chan, segment); | ||
599 | } | ||
600 | } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { | ||
601 | list_for_each_entry_safe(cdma_segment, cdma_next, | ||
602 | &desc->segments, node) { | ||
603 | list_del(&cdma_segment->node); | ||
604 | xilinx_cdma_free_tx_segment(chan, cdma_segment); | ||
605 | } | ||
606 | } else { | ||
607 | list_for_each_entry_safe(axidma_segment, axidma_next, | ||
608 | &desc->segments, node) { | ||
609 | list_del(&axidma_segment->node); | ||
610 | xilinx_dma_free_tx_segment(chan, axidma_segment); | ||
611 | } | ||
376 | } | 612 | } |
377 | 613 | ||
378 | kfree(desc); | 614 | kfree(desc); |
@@ -381,60 +617,62 @@ xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan, | |||
381 | /* Required functions */ | 617 | /* Required functions */ |
382 | 618 | ||
383 | /** | 619 | /** |
384 | * xilinx_vdma_free_desc_list - Free descriptors list | 620 | * xilinx_dma_free_desc_list - Free descriptors list |
385 | * @chan: Driver specific VDMA channel | 621 | * @chan: Driver specific DMA channel |
386 | * @list: List to parse and delete the descriptor | 622 | * @list: List to parse and delete the descriptor |
387 | */ | 623 | */ |
388 | static void xilinx_vdma_free_desc_list(struct xilinx_vdma_chan *chan, | 624 | static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan, |
389 | struct list_head *list) | 625 | struct list_head *list) |
390 | { | 626 | { |
391 | struct xilinx_vdma_tx_descriptor *desc, *next; | 627 | struct xilinx_dma_tx_descriptor *desc, *next; |
392 | 628 | ||
393 | list_for_each_entry_safe(desc, next, list, node) { | 629 | list_for_each_entry_safe(desc, next, list, node) { |
394 | list_del(&desc->node); | 630 | list_del(&desc->node); |
395 | xilinx_vdma_free_tx_descriptor(chan, desc); | 631 | xilinx_dma_free_tx_descriptor(chan, desc); |
396 | } | 632 | } |
397 | } | 633 | } |
398 | 634 | ||
399 | /** | 635 | /** |
400 | * xilinx_vdma_free_descriptors - Free channel descriptors | 636 | * xilinx_dma_free_descriptors - Free channel descriptors |
401 | * @chan: Driver specific VDMA channel | 637 | * @chan: Driver specific DMA channel |
402 | */ | 638 | */ |
403 | static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan) | 639 | static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan) |
404 | { | 640 | { |
405 | unsigned long flags; | 641 | unsigned long flags; |
406 | 642 | ||
407 | spin_lock_irqsave(&chan->lock, flags); | 643 | spin_lock_irqsave(&chan->lock, flags); |
408 | 644 | ||
409 | xilinx_vdma_free_desc_list(chan, &chan->pending_list); | 645 | xilinx_dma_free_desc_list(chan, &chan->pending_list); |
410 | xilinx_vdma_free_desc_list(chan, &chan->done_list); | 646 | xilinx_dma_free_desc_list(chan, &chan->done_list); |
411 | xilinx_vdma_free_desc_list(chan, &chan->active_list); | 647 | xilinx_dma_free_desc_list(chan, &chan->active_list); |
412 | 648 | ||
413 | spin_unlock_irqrestore(&chan->lock, flags); | 649 | spin_unlock_irqrestore(&chan->lock, flags); |
414 | } | 650 | } |
415 | 651 | ||
416 | /** | 652 | /** |
417 | * xilinx_vdma_free_chan_resources - Free channel resources | 653 | * xilinx_dma_free_chan_resources - Free channel resources |
418 | * @dchan: DMA channel | 654 | * @dchan: DMA channel |
419 | */ | 655 | */ |
420 | static void xilinx_vdma_free_chan_resources(struct dma_chan *dchan) | 656 | static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) |
421 | { | 657 | { |
422 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | 658 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
423 | 659 | ||
424 | dev_dbg(chan->dev, "Free all channel resources.\n"); | 660 | dev_dbg(chan->dev, "Free all channel resources.\n"); |
425 | 661 | ||
426 | xilinx_vdma_free_descriptors(chan); | 662 | xilinx_dma_free_descriptors(chan); |
663 | if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) | ||
664 | xilinx_dma_free_tx_segment(chan, chan->seg_v); | ||
427 | dma_pool_destroy(chan->desc_pool); | 665 | dma_pool_destroy(chan->desc_pool); |
428 | chan->desc_pool = NULL; | 666 | chan->desc_pool = NULL; |
429 | } | 667 | } |
430 | 668 | ||
431 | /** | 669 | /** |
432 | * xilinx_vdma_chan_desc_cleanup - Clean channel descriptors | 670 | * xilinx_dma_chan_desc_cleanup - Clean channel descriptors |
433 | * @chan: Driver specific VDMA channel | 671 | * @chan: Driver specific DMA channel |
434 | */ | 672 | */ |
435 | static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan) | 673 | static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) |
436 | { | 674 | { |
437 | struct xilinx_vdma_tx_descriptor *desc, *next; | 675 | struct xilinx_dma_tx_descriptor *desc, *next; |
438 | unsigned long flags; | 676 | unsigned long flags; |
439 | 677 | ||
440 | spin_lock_irqsave(&chan->lock, flags); | 678 | spin_lock_irqsave(&chan->lock, flags); |
@@ -457,32 +695,32 @@ static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan) | |||
457 | 695 | ||
458 | /* Run any dependencies, then free the descriptor */ | 696 | /* Run any dependencies, then free the descriptor */ |
459 | dma_run_dependencies(&desc->async_tx); | 697 | dma_run_dependencies(&desc->async_tx); |
460 | xilinx_vdma_free_tx_descriptor(chan, desc); | 698 | xilinx_dma_free_tx_descriptor(chan, desc); |
461 | } | 699 | } |
462 | 700 | ||
463 | spin_unlock_irqrestore(&chan->lock, flags); | 701 | spin_unlock_irqrestore(&chan->lock, flags); |
464 | } | 702 | } |
465 | 703 | ||
466 | /** | 704 | /** |
467 | * xilinx_vdma_do_tasklet - Schedule completion tasklet | 705 | * xilinx_dma_do_tasklet - Schedule completion tasklet |
468 | * @data: Pointer to the Xilinx VDMA channel structure | 706 | * @data: Pointer to the Xilinx DMA channel structure |
469 | */ | 707 | */ |
470 | static void xilinx_vdma_do_tasklet(unsigned long data) | 708 | static void xilinx_dma_do_tasklet(unsigned long data) |
471 | { | 709 | { |
472 | struct xilinx_vdma_chan *chan = (struct xilinx_vdma_chan *)data; | 710 | struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data; |
473 | 711 | ||
474 | xilinx_vdma_chan_desc_cleanup(chan); | 712 | xilinx_dma_chan_desc_cleanup(chan); |
475 | } | 713 | } |
476 | 714 | ||
477 | /** | 715 | /** |
478 | * xilinx_vdma_alloc_chan_resources - Allocate channel resources | 716 | * xilinx_dma_alloc_chan_resources - Allocate channel resources |
479 | * @dchan: DMA channel | 717 | * @dchan: DMA channel |
480 | * | 718 | * |
481 | * Return: '0' on success and failure value on error | 719 | * Return: '0' on success and failure value on error |
482 | */ | 720 | */ |
483 | static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan) | 721 | static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) |
484 | { | 722 | { |
485 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | 723 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
486 | 724 | ||
487 | /* Has this channel already been allocated? */ | 725 | /* Has this channel already been allocated? */ |
488 | if (chan->desc_pool) | 726 | if (chan->desc_pool) |
@@ -492,10 +730,26 @@ static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan) | |||
492 | * We need the descriptor to be aligned to 64 bytes | 730 | * We need the descriptor to be aligned to 64 bytes |
493 | * to meet the Xilinx VDMA specification requirement. | 731 | * to meet the Xilinx VDMA specification requirement. |
494 | */ | 732 | */ |
495 | chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool", | 733 | if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { |
496 | chan->dev, | 734 | chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool", |
497 | sizeof(struct xilinx_vdma_tx_segment), | 735 | chan->dev, |
498 | __alignof__(struct xilinx_vdma_tx_segment), 0); | 736 | sizeof(struct xilinx_axidma_tx_segment), |
737 | __alignof__(struct xilinx_axidma_tx_segment), | ||
738 | 0); | ||
739 | } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { | ||
740 | chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool", | ||
741 | chan->dev, | ||
742 | sizeof(struct xilinx_cdma_tx_segment), | ||
743 | __alignof__(struct xilinx_cdma_tx_segment), | ||
744 | 0); | ||
745 | } else { | ||
746 | chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool", | ||
747 | chan->dev, | ||
748 | sizeof(struct xilinx_vdma_tx_segment), | ||
749 | __alignof__(struct xilinx_vdma_tx_segment), | ||
750 | 0); | ||
751 | } | ||
752 | |||
499 | if (!chan->desc_pool) { | 753 | if (!chan->desc_pool) { |
500 | dev_err(chan->dev, | 754 | dev_err(chan->dev, |
501 | "unable to allocate channel %d descriptor pool\n", | 755 | "unable to allocate channel %d descriptor pool\n", |
@@ -503,110 +757,160 @@ static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan) | |||
503 | return -ENOMEM; | 757 | return -ENOMEM; |
504 | } | 758 | } |
505 | 759 | ||
760 | if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) | ||
761 | /* | ||
762 | * For AXI DMA case after submitting a pending_list, keep | ||
763 | * an extra segment allocated so that the "next descriptor" | ||
764 | * pointer on the tail descriptor always points to a | ||
765 | * valid descriptor, even when paused after reaching taildesc. | ||
766 | * This way, it is possible to issue additional | ||
767 | * transfers without halting and restarting the channel. | ||
768 | */ | ||
769 | chan->seg_v = xilinx_axidma_alloc_tx_segment(chan); | ||
770 | |||
506 | dma_cookie_init(dchan); | 771 | dma_cookie_init(dchan); |
772 | |||
773 | if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { | ||
774 | /* For AXI DMA, resetting one channel will reset the | ||
775 | * other channel as well, so enable the interrupts here. | ||
776 | */ | ||
777 | dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, | ||
778 | XILINX_DMA_DMAXR_ALL_IRQ_MASK); | ||
779 | } | ||
780 | |||
781 | if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg) | ||
782 | dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, | ||
783 | XILINX_CDMA_CR_SGMODE); | ||
784 | |||
507 | return 0; | 785 | return 0; |
508 | } | 786 | } |
509 | 787 | ||
510 | /** | 788 | /** |
511 | * xilinx_vdma_tx_status - Get VDMA transaction status | 789 | * xilinx_dma_tx_status - Get DMA transaction status |
512 | * @dchan: DMA channel | 790 | * @dchan: DMA channel |
513 | * @cookie: Transaction identifier | 791 | * @cookie: Transaction identifier |
514 | * @txstate: Transaction state | 792 | * @txstate: Transaction state |
515 | * | 793 | * |
516 | * Return: DMA transaction status | 794 | * Return: DMA transaction status |
517 | */ | 795 | */ |
518 | static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan, | 796 | static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, |
519 | dma_cookie_t cookie, | 797 | dma_cookie_t cookie, |
520 | struct dma_tx_state *txstate) | 798 | struct dma_tx_state *txstate) |
521 | { | 799 | { |
522 | return dma_cookie_status(dchan, cookie, txstate); | 800 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
801 | struct xilinx_dma_tx_descriptor *desc; | ||
802 | struct xilinx_axidma_tx_segment *segment; | ||
803 | struct xilinx_axidma_desc_hw *hw; | ||
804 | enum dma_status ret; | ||
805 | unsigned long flags; | ||
806 | u32 residue = 0; | ||
807 | |||
808 | ret = dma_cookie_status(dchan, cookie, txstate); | ||
809 | if (ret == DMA_COMPLETE || !txstate) | ||
810 | return ret; | ||
811 | |||
812 | if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { | ||
813 | spin_lock_irqsave(&chan->lock, flags); | ||
814 | |||
815 | desc = list_last_entry(&chan->active_list, | ||
816 | struct xilinx_dma_tx_descriptor, node); | ||
817 | if (chan->has_sg) { | ||
818 | list_for_each_entry(segment, &desc->segments, node) { | ||
819 | hw = &segment->hw; | ||
820 | residue += (hw->control - hw->status) & | ||
821 | XILINX_DMA_MAX_TRANS_LEN; | ||
822 | } | ||
823 | } | ||
824 | spin_unlock_irqrestore(&chan->lock, flags); | ||
825 | |||
826 | chan->residue = residue; | ||
827 | dma_set_residue(txstate, chan->residue); | ||
828 | } | ||
829 | |||
830 | return ret; | ||
523 | } | 831 | } |
524 | 832 | ||
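The residue reported by xilinx_dma_tx_status() above is plain per-segment arithmetic: the programmed length (hw->control) minus the bytes the engine wrote back (hw->status), masked to the 23-bit length field. A self-contained sketch of that sum, using made-up segment values rather than the driver's real data structures:

#include <stdio.h>

#define MAX_TRANS_LEN ((1u << 23) - 1)	/* 23-bit BD length field */

struct seg { unsigned int control; unsigned int status; };

int main(void)
{
	/* hypothetical BDs: 64 KiB programmed / 48 KiB done, 32 KiB untouched */
	struct seg segs[] = { { 0x10000, 0xc000 }, { 0x8000, 0x0 } };
	unsigned int residue = 0;
	unsigned int i;

	for (i = 0; i < sizeof(segs) / sizeof(segs[0]); i++)
		residue += (segs[i].control - segs[i].status) & MAX_TRANS_LEN;

	printf("residue = %u bytes\n", residue);	/* 16384 + 32768 = 49152 */
	return 0;
}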
525 | /** | 833 | /** |
526 | * xilinx_vdma_is_running - Check if VDMA channel is running | 834 | * xilinx_dma_is_running - Check if DMA channel is running |
527 | * @chan: Driver specific VDMA channel | 835 | * @chan: Driver specific DMA channel |
528 | * | 836 | * |
529 | * Return: '1' if running, '0' if not. | 837 | * Return: '1' if running, '0' if not. |
530 | */ | 838 | */ |
531 | static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan) | 839 | static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan) |
532 | { | 840 | { |
533 | return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & | 841 | return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & |
534 | XILINX_VDMA_DMASR_HALTED) && | 842 | XILINX_DMA_DMASR_HALTED) && |
535 | (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) & | 843 | (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) & |
536 | XILINX_VDMA_DMACR_RUNSTOP); | 844 | XILINX_DMA_DMACR_RUNSTOP); |
537 | } | 845 | } |
538 | 846 | ||
539 | /** | 847 | /** |
540 | * xilinx_vdma_is_idle - Check if VDMA channel is idle | 848 | * xilinx_dma_is_idle - Check if DMA channel is idle |
541 | * @chan: Driver specific VDMA channel | 849 | * @chan: Driver specific DMA channel |
542 | * | 850 | * |
543 | * Return: '1' if idle, '0' if not. | 851 | * Return: '1' if idle, '0' if not. |
544 | */ | 852 | */ |
545 | static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan) | 853 | static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan) |
546 | { | 854 | { |
547 | return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & | 855 | return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & |
548 | XILINX_VDMA_DMASR_IDLE; | 856 | XILINX_DMA_DMASR_IDLE; |
549 | } | 857 | } |
550 | 858 | ||
551 | /** | 859 | /** |
552 | * xilinx_vdma_halt - Halt VDMA channel | 860 | * xilinx_dma_halt - Halt DMA channel |
553 | * @chan: Driver specific VDMA channel | 861 | * @chan: Driver specific DMA channel |
554 | */ | 862 | */ |
555 | static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan) | 863 | static void xilinx_dma_halt(struct xilinx_dma_chan *chan) |
556 | { | 864 | { |
557 | int err; | 865 | int err; |
558 | u32 val; | 866 | u32 val; |
559 | 867 | ||
560 | vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); | 868 | dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); |
561 | 869 | ||
562 | /* Wait for the hardware to halt */ | 870 | /* Wait for the hardware to halt */ |
563 | err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val, | 871 | err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, |
564 | (val & XILINX_VDMA_DMASR_HALTED), 0, | 872 | (val & XILINX_DMA_DMASR_HALTED), 0, |
565 | XILINX_VDMA_LOOP_COUNT); | 873 | XILINX_DMA_LOOP_COUNT); |
566 | 874 | ||
567 | if (err) { | 875 | if (err) { |
568 | dev_err(chan->dev, "Cannot stop channel %p: %x\n", | 876 | dev_err(chan->dev, "Cannot stop channel %p: %x\n", |
569 | chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | 877 | chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); |
570 | chan->err = true; | 878 | chan->err = true; |
571 | } | 879 | } |
572 | |||
573 | return; | ||
574 | } | 880 | } |
575 | 881 | ||
576 | /** | 882 | /** |
577 | * xilinx_vdma_start - Start VDMA channel | 883 | * xilinx_dma_start - Start DMA channel |
578 | * @chan: Driver specific VDMA channel | 884 | * @chan: Driver specific DMA channel |
579 | */ | 885 | */ |
580 | static void xilinx_vdma_start(struct xilinx_vdma_chan *chan) | 886 | static void xilinx_dma_start(struct xilinx_dma_chan *chan) |
581 | { | 887 | { |
582 | int err; | 888 | int err; |
583 | u32 val; | 889 | u32 val; |
584 | 890 | ||
585 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); | 891 | dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); |
586 | 892 | ||
587 | /* Wait for the hardware to start */ | 893 | /* Wait for the hardware to start */ |
588 | err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val, | 894 | err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, |
589 | !(val & XILINX_VDMA_DMASR_HALTED), 0, | 895 | !(val & XILINX_DMA_DMASR_HALTED), 0, |
590 | XILINX_VDMA_LOOP_COUNT); | 896 | XILINX_DMA_LOOP_COUNT); |
591 | 897 | ||
592 | if (err) { | 898 | if (err) { |
593 | dev_err(chan->dev, "Cannot start channel %p: %x\n", | 899 | dev_err(chan->dev, "Cannot start channel %p: %x\n", |
594 | chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | 900 | chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); |
595 | 901 | ||
596 | chan->err = true; | 902 | chan->err = true; |
597 | } | 903 | } |
598 | |||
599 | return; | ||
600 | } | 904 | } |
601 | 905 | ||
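xilinx_dma_poll_timeout() used by the halt/start paths above follows the readl_poll_timeout() pattern: re-read the status register until the condition holds or the loop budget runs out, then report a timeout. A minimal stand-alone sketch of that pattern, with a stubbed register read standing in for dma_ctrl_read() and an illustrative bit position:

#include <stdio.h>

#define DMASR_HALTED 0x1u		/* illustrative bit position */

static unsigned int fake_dmasr;		/* stand-in for the DMASR register */

static unsigned int read_dmasr(void)
{
	fake_dmasr |= DMASR_HALTED;	/* pretend the engine halts right away */
	return fake_dmasr;
}

/* returns 0 once the condition holds, -1 after 'loops' failed reads */
static int poll_halted(unsigned int *val, unsigned long loops)
{
	while (loops--) {
		*val = read_dmasr();
		if (*val & DMASR_HALTED)
			return 0;
	}
	return -1;
}

int main(void)
{
	unsigned int sr;

	printf("halt %s, sr=%#x\n",
	       poll_halted(&sr, 10) ? "timed out" : "done", sr);
	return 0;
}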
602 | /** | 906 | /** |
603 | * xilinx_vdma_start_transfer - Starts VDMA transfer | 907 | * xilinx_vdma_start_transfer - Starts VDMA transfer |
604 | * @chan: Driver specific channel struct pointer | 908 | * @chan: Driver specific channel struct pointer |
605 | */ | 909 | */ |
606 | static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) | 910 | static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) |
607 | { | 911 | { |
608 | struct xilinx_vdma_config *config = &chan->config; | 912 | struct xilinx_vdma_config *config = &chan->config; |
609 | struct xilinx_vdma_tx_descriptor *desc, *tail_desc; | 913 | struct xilinx_dma_tx_descriptor *desc, *tail_desc; |
610 | u32 reg; | 914 | u32 reg; |
611 | struct xilinx_vdma_tx_segment *tail_segment; | 915 | struct xilinx_vdma_tx_segment *tail_segment; |
612 | 916 | ||
@@ -618,16 +922,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) | |||
618 | return; | 922 | return; |
619 | 923 | ||
620 | desc = list_first_entry(&chan->pending_list, | 924 | desc = list_first_entry(&chan->pending_list, |
621 | struct xilinx_vdma_tx_descriptor, node); | 925 | struct xilinx_dma_tx_descriptor, node); |
622 | tail_desc = list_last_entry(&chan->pending_list, | 926 | tail_desc = list_last_entry(&chan->pending_list, |
623 | struct xilinx_vdma_tx_descriptor, node); | 927 | struct xilinx_dma_tx_descriptor, node); |
624 | 928 | ||
625 | tail_segment = list_last_entry(&tail_desc->segments, | 929 | tail_segment = list_last_entry(&tail_desc->segments, |
626 | struct xilinx_vdma_tx_segment, node); | 930 | struct xilinx_vdma_tx_segment, node); |
627 | 931 | ||
628 | /* If it is SG mode and hardware is busy, cannot submit */ | 932 | /* If it is SG mode and hardware is busy, cannot submit */ |
629 | if (chan->has_sg && xilinx_vdma_is_running(chan) && | 933 | if (chan->has_sg && xilinx_dma_is_running(chan) && |
630 | !xilinx_vdma_is_idle(chan)) { | 934 | !xilinx_dma_is_idle(chan)) { |
631 | dev_dbg(chan->dev, "DMA controller still busy\n"); | 935 | dev_dbg(chan->dev, "DMA controller still busy\n"); |
632 | return; | 936 | return; |
633 | } | 937 | } |
@@ -637,19 +941,19 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) | |||
637 | * done, start new transfers | 941 | * done, start new transfers |
638 | */ | 942 | */ |
639 | if (chan->has_sg) | 943 | if (chan->has_sg) |
640 | vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, | 944 | dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, |
641 | desc->async_tx.phys); | 945 | desc->async_tx.phys); |
642 | 946 | ||
643 | /* Configure the hardware using info in the config structure */ | 947 | /* Configure the hardware using info in the config structure */ |
644 | reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR); | 948 | reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); |
645 | 949 | ||
646 | if (config->frm_cnt_en) | 950 | if (config->frm_cnt_en) |
647 | reg |= XILINX_VDMA_DMACR_FRAMECNT_EN; | 951 | reg |= XILINX_DMA_DMACR_FRAMECNT_EN; |
648 | else | 952 | else |
649 | reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN; | 953 | reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; |
650 | 954 | ||
651 | /* Configure channel to allow the number of frame buffers */ | 955 | /* Configure channel to allow the number of frame buffers */ |
652 | vdma_ctrl_write(chan, XILINX_VDMA_REG_FRMSTORE, | 956 | dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE, |
653 | chan->desc_pendingcount); | 957 | chan->desc_pendingcount); |
654 | 958 | ||
655 | /* | 959 | /* |
@@ -657,45 +961,53 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) | |||
657 | * In direct register mode, if not parking, enable circular mode | 961 | * In direct register mode, if not parking, enable circular mode |
658 | */ | 962 | */ |
659 | if (chan->has_sg || !config->park) | 963 | if (chan->has_sg || !config->park) |
660 | reg |= XILINX_VDMA_DMACR_CIRC_EN; | 964 | reg |= XILINX_DMA_DMACR_CIRC_EN; |
661 | 965 | ||
662 | if (config->park) | 966 | if (config->park) |
663 | reg &= ~XILINX_VDMA_DMACR_CIRC_EN; | 967 | reg &= ~XILINX_DMA_DMACR_CIRC_EN; |
664 | 968 | ||
665 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, reg); | 969 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); |
666 | 970 | ||
667 | if (config->park && (config->park_frm >= 0) && | 971 | if (config->park && (config->park_frm >= 0) && |
668 | (config->park_frm < chan->num_frms)) { | 972 | (config->park_frm < chan->num_frms)) { |
669 | if (chan->direction == DMA_MEM_TO_DEV) | 973 | if (chan->direction == DMA_MEM_TO_DEV) |
670 | vdma_write(chan, XILINX_VDMA_REG_PARK_PTR, | 974 | dma_write(chan, XILINX_DMA_REG_PARK_PTR, |
671 | config->park_frm << | 975 | config->park_frm << |
672 | XILINX_VDMA_PARK_PTR_RD_REF_SHIFT); | 976 | XILINX_DMA_PARK_PTR_RD_REF_SHIFT); |
673 | else | 977 | else |
674 | vdma_write(chan, XILINX_VDMA_REG_PARK_PTR, | 978 | dma_write(chan, XILINX_DMA_REG_PARK_PTR, |
675 | config->park_frm << | 979 | config->park_frm << |
676 | XILINX_VDMA_PARK_PTR_WR_REF_SHIFT); | 980 | XILINX_DMA_PARK_PTR_WR_REF_SHIFT); |
677 | } | 981 | } |
678 | 982 | ||
679 | /* Start the hardware */ | 983 | /* Start the hardware */ |
680 | xilinx_vdma_start(chan); | 984 | xilinx_dma_start(chan); |
681 | 985 | ||
682 | if (chan->err) | 986 | if (chan->err) |
683 | return; | 987 | return; |
684 | 988 | ||
685 | /* Start the transfer */ | 989 | /* Start the transfer */ |
686 | if (chan->has_sg) { | 990 | if (chan->has_sg) { |
687 | vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, | 991 | dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, |
688 | tail_segment->phys); | 992 | tail_segment->phys); |
689 | } else { | 993 | } else { |
690 | struct xilinx_vdma_tx_segment *segment, *last = NULL; | 994 | struct xilinx_vdma_tx_segment *segment, *last = NULL; |
691 | int i = 0; | 995 | int i = 0; |
692 | 996 | ||
693 | list_for_each_entry(desc, &chan->pending_list, node) { | 997 | if (chan->desc_submitcount < chan->num_frms) |
694 | segment = list_first_entry(&desc->segments, | 998 | i = chan->desc_submitcount; |
695 | struct xilinx_vdma_tx_segment, node); | 999 | |
696 | vdma_desc_write(chan, | 1000 | list_for_each_entry(segment, &desc->segments, node) { |
1001 | if (chan->ext_addr) | ||
1002 | vdma_desc_write_64(chan, | ||
1003 | XILINX_VDMA_REG_START_ADDRESS_64(i++), | ||
1004 | segment->hw.buf_addr, | ||
1005 | segment->hw.buf_addr_msb); | ||
1006 | else | ||
1007 | vdma_desc_write(chan, | ||
697 | XILINX_VDMA_REG_START_ADDRESS(i++), | 1008 | XILINX_VDMA_REG_START_ADDRESS(i++), |
698 | segment->hw.buf_addr); | 1009 | segment->hw.buf_addr); |
1010 | |||
699 | last = segment; | 1011 | last = segment; |
700 | } | 1012 | } |
701 | 1013 | ||
@@ -703,10 +1015,164 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) | |||
703 | return; | 1015 | return; |
704 | 1016 | ||
705 | /* HW expects these parameters to be same for one transaction */ | 1017 | /* HW expects these parameters to be same for one transaction */ |
706 | vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize); | 1018 | vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); |
707 | vdma_desc_write(chan, XILINX_VDMA_REG_FRMDLY_STRIDE, | 1019 | vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, |
708 | last->hw.stride); | 1020 | last->hw.stride); |
709 | vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize); | 1021 | vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); |
1022 | } | ||
1023 | |||
1024 | if (!chan->has_sg) { | ||
1025 | list_del(&desc->node); | ||
1026 | list_add_tail(&desc->node, &chan->active_list); | ||
1027 | chan->desc_submitcount++; | ||
1028 | chan->desc_pendingcount--; | ||
1029 | if (chan->desc_submitcount == chan->num_frms) | ||
1030 | chan->desc_submitcount = 0; | ||
1031 | } else { | ||
1032 | list_splice_tail_init(&chan->pending_list, &chan->active_list); | ||
1033 | chan->desc_pendingcount = 0; | ||
1034 | } | ||
1035 | } | ||
1036 | |||
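The park handling above comes down to one DMACR bit plus a frame index in PARK_PTR: circular mode cycles through all frame buffers, park mode pins the engine to config->park_frm, and the index is shifted differently for the read (MM2S) and write (S2MM) sides. A sketch of that register math; the bit positions are illustrative placeholders, not taken from the datasheet:

#include <stdio.h>

#define DMACR_CIRC_EN	(1u << 1)	/* illustrative bit positions */
#define PARK_RD_SHIFT	0
#define PARK_WR_SHIFT	8

int main(void)
{
	int park = 1, park_frm = 2, mem_to_dev = 1;
	unsigned int dmacr = 0x00010002;	/* hypothetical current DMACR */
	unsigned int park_ptr = 0;

	if (!park)
		dmacr |= DMACR_CIRC_EN;		/* cycle all frame buffers */
	else
		dmacr &= ~DMACR_CIRC_EN;	/* stay parked on one frame */

	if (park)
		park_ptr = park_frm << (mem_to_dev ? PARK_RD_SHIFT
						   : PARK_WR_SHIFT);

	printf("DMACR=%#x PARK_PTR=%#x\n", dmacr, park_ptr);
	return 0;
}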
1037 | /** | ||
1038 | * xilinx_cdma_start_transfer - Starts cdma transfer | ||
1039 | * @chan: Driver specific channel struct pointer | ||
1040 | */ | ||
1041 | static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) | ||
1042 | { | ||
1043 | struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; | ||
1044 | struct xilinx_cdma_tx_segment *tail_segment; | ||
1045 | u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR); | ||
1046 | |||
1047 | if (chan->err) | ||
1048 | return; | ||
1049 | |||
1050 | if (list_empty(&chan->pending_list)) | ||
1051 | return; | ||
1052 | |||
1053 | head_desc = list_first_entry(&chan->pending_list, | ||
1054 | struct xilinx_dma_tx_descriptor, node); | ||
1055 | tail_desc = list_last_entry(&chan->pending_list, | ||
1056 | struct xilinx_dma_tx_descriptor, node); | ||
1057 | tail_segment = list_last_entry(&tail_desc->segments, | ||
1058 | struct xilinx_cdma_tx_segment, node); | ||
1059 | |||
1060 | if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { | ||
1061 | ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX; | ||
1062 | ctrl_reg |= chan->desc_pendingcount << | ||
1063 | XILINX_DMA_CR_COALESCE_SHIFT; | ||
1064 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg); | ||
1065 | } | ||
1066 | |||
1067 | if (chan->has_sg) { | ||
1068 | dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, | ||
1069 | head_desc->async_tx.phys); | ||
1070 | |||
1071 | /* Update tail ptr register which will start the transfer */ | ||
1072 | dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, | ||
1073 | tail_segment->phys); | ||
1074 | } else { | ||
1075 | /* In simple mode */ | ||
1076 | struct xilinx_cdma_tx_segment *segment; | ||
1077 | struct xilinx_cdma_desc_hw *hw; | ||
1078 | |||
1079 | segment = list_first_entry(&head_desc->segments, | ||
1080 | struct xilinx_cdma_tx_segment, | ||
1081 | node); | ||
1082 | |||
1083 | hw = &segment->hw; | ||
1084 | |||
1085 | dma_ctrl_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); | ||
1086 | dma_ctrl_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); | ||
1087 | |||
1088 | /* Start the transfer */ | ||
1089 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, | ||
1090 | hw->control & XILINX_DMA_MAX_TRANS_LEN); | ||
1091 | } | ||
1092 | |||
1093 | list_splice_tail_init(&chan->pending_list, &chan->active_list); | ||
1094 | chan->desc_pendingcount = 0; | ||
1095 | } | ||
1096 | |||
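The coalescing update in the start_transfer paths is a read-modify-write of the interrupt-threshold field in DMACR: clear the field, then program the number of pending descriptors so one completion interrupt covers that many BDs. The field position below is illustrative only:

#include <stdio.h>

#define CR_COALESCE_MASK  0x00ff0000u	/* IRQ threshold field, illustrative */
#define CR_COALESCE_SHIFT 16

static unsigned int set_coalesce(unsigned int dmacr, unsigned int pending)
{
	dmacr &= ~CR_COALESCE_MASK;		/* clear the old threshold */
	dmacr |= pending << CR_COALESCE_SHIFT;	/* one IRQ per 'pending' BDs */
	return dmacr;
}

int main(void)
{
	printf("%#x\n", set_coalesce(0x00011002, 8));	/* hypothetical DMACR */
	return 0;
}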
1097 | /** | ||
1098 | * xilinx_dma_start_transfer - Starts DMA transfer | ||
1099 | * @chan: Driver specific channel struct pointer | ||
1100 | */ | ||
1101 | static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) | ||
1102 | { | ||
1103 | struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; | ||
1104 | struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head; | ||
1105 | u32 reg; | ||
1106 | |||
1107 | if (chan->err) | ||
1108 | return; | ||
1109 | |||
1110 | if (list_empty(&chan->pending_list)) | ||
1111 | return; | ||
1112 | |||
1113 | /* If it is SG mode and hardware is busy, cannot submit */ | ||
1114 | if (chan->has_sg && xilinx_dma_is_running(chan) && | ||
1115 | !xilinx_dma_is_idle(chan)) { | ||
1116 | dev_dbg(chan->dev, "DMA controller still busy\n"); | ||
1117 | return; | ||
1118 | } | ||
1119 | |||
1120 | head_desc = list_first_entry(&chan->pending_list, | ||
1121 | struct xilinx_dma_tx_descriptor, node); | ||
1122 | tail_desc = list_last_entry(&chan->pending_list, | ||
1123 | struct xilinx_dma_tx_descriptor, node); | ||
1124 | tail_segment = list_last_entry(&tail_desc->segments, | ||
1125 | struct xilinx_axidma_tx_segment, node); | ||
1126 | |||
1127 | old_head = list_first_entry(&head_desc->segments, | ||
1128 | struct xilinx_axidma_tx_segment, node); | ||
1129 | new_head = chan->seg_v; | ||
1130 | /* Copy Buffer Descriptor fields. */ | ||
1131 | new_head->hw = old_head->hw; | ||
1132 | |||
1133 | /* Swap and save new reserve */ | ||
1134 | list_replace_init(&old_head->node, &new_head->node); | ||
1135 | chan->seg_v = old_head; | ||
1136 | |||
1137 | tail_segment->hw.next_desc = chan->seg_v->phys; | ||
1138 | head_desc->async_tx.phys = new_head->phys; | ||
1139 | |||
1140 | reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); | ||
1141 | |||
1142 | if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { | ||
1143 | reg &= ~XILINX_DMA_CR_COALESCE_MAX; | ||
1144 | reg |= chan->desc_pendingcount << | ||
1145 | XILINX_DMA_CR_COALESCE_SHIFT; | ||
1146 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); | ||
1147 | } | ||
1148 | |||
1149 | if (chan->has_sg) | ||
1150 | dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, | ||
1151 | head_desc->async_tx.phys); | ||
1152 | |||
1153 | xilinx_dma_start(chan); | ||
1154 | |||
1155 | if (chan->err) | ||
1156 | return; | ||
1157 | |||
1158 | /* Start the transfer */ | ||
1159 | if (chan->has_sg) { | ||
1160 | dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, | ||
1161 | tail_segment->phys); | ||
1162 | } else { | ||
1163 | struct xilinx_axidma_tx_segment *segment; | ||
1164 | struct xilinx_axidma_desc_hw *hw; | ||
1165 | |||
1166 | segment = list_first_entry(&head_desc->segments, | ||
1167 | struct xilinx_axidma_tx_segment, | ||
1168 | node); | ||
1169 | hw = &segment->hw; | ||
1170 | |||
1171 | dma_ctrl_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); | ||
1172 | |||
1173 | /* Start the transfer */ | ||
1174 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, | ||
1175 | hw->control & XILINX_DMA_MAX_TRANS_LEN); | ||
710 | } | 1176 | } |
711 | 1177 | ||
712 | list_splice_tail_init(&chan->pending_list, &chan->active_list); | 1178 | list_splice_tail_init(&chan->pending_list, &chan->active_list); |
@@ -714,28 +1180,28 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) | |||
714 | } | 1180 | } |
715 | 1181 | ||
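The head/tail juggling in xilinx_dma_start_transfer() exists so the tail BD's next_desc always points at allocated memory: the spare segment (chan->seg_v) takes over the head BD's contents and position, and the displaced head becomes the new spare. A simplified, pointer-only sketch of that swap, without the kernel list API or physical addresses:

#include <stdio.h>

struct bd { int payload; struct bd *next; };

/* give the spare BD the head's contents and place; the old head becomes spare */
static struct bd *swap_in_spare(struct bd **head, struct bd *spare)
{
	struct bd *old_head = *head;

	spare->payload = old_head->payload;	/* copy descriptor fields */
	spare->next = old_head->next;		/* take over the head's links */
	*head = spare;
	return old_head;			/* reuse as the next spare */
}

int main(void)
{
	struct bd b2 = { 2, NULL }, b1 = { 1, &b2 }, spare = { 0, NULL };
	struct bd *head = &b1;
	struct bd *new_spare = swap_in_spare(&head, &spare);

	printf("head moved from %p to %p, new spare is %p\n",
	       (void *)&b1, (void *)head, (void *)new_spare);
	return 0;
}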
716 | /** | 1182 | /** |
717 | * xilinx_vdma_issue_pending - Issue pending transactions | 1183 | * xilinx_dma_issue_pending - Issue pending transactions |
718 | * @dchan: DMA channel | 1184 | * @dchan: DMA channel |
719 | */ | 1185 | */ |
720 | static void xilinx_vdma_issue_pending(struct dma_chan *dchan) | 1186 | static void xilinx_dma_issue_pending(struct dma_chan *dchan) |
721 | { | 1187 | { |
722 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | 1188 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
723 | unsigned long flags; | 1189 | unsigned long flags; |
724 | 1190 | ||
725 | spin_lock_irqsave(&chan->lock, flags); | 1191 | spin_lock_irqsave(&chan->lock, flags); |
726 | xilinx_vdma_start_transfer(chan); | 1192 | chan->start_transfer(chan); |
727 | spin_unlock_irqrestore(&chan->lock, flags); | 1193 | spin_unlock_irqrestore(&chan->lock, flags); |
728 | } | 1194 | } |
729 | 1195 | ||
730 | /** | 1196 | /** |
731 | * xilinx_vdma_complete_descriptor - Mark the active descriptor as complete | 1197 | * xilinx_dma_complete_descriptor - Mark the active descriptor as complete |
732 | * @chan : xilinx DMA channel | 1198 | * @chan : xilinx DMA channel |
733 | * | 1199 | * |
734 | * CONTEXT: hardirq | 1200 | * CONTEXT: hardirq |
735 | */ | 1201 | */ |
736 | static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan) | 1202 | static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) |
737 | { | 1203 | { |
738 | struct xilinx_vdma_tx_descriptor *desc, *next; | 1204 | struct xilinx_dma_tx_descriptor *desc, *next; |
739 | 1205 | ||
740 | /* This function was invoked with lock held */ | 1206 | /* This function was invoked with lock held */ |
741 | if (list_empty(&chan->active_list)) | 1207 | if (list_empty(&chan->active_list)) |
@@ -749,27 +1215,27 @@ static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan) | |||
749 | } | 1215 | } |
750 | 1216 | ||
751 | /** | 1217 | /** |
752 | * xilinx_vdma_reset - Reset VDMA channel | 1218 | * xilinx_dma_reset - Reset DMA channel |
753 | * @chan: Driver specific VDMA channel | 1219 | * @chan: Driver specific DMA channel |
754 | * | 1220 | * |
755 | * Return: '0' on success and failure value on error | 1221 | * Return: '0' on success and failure value on error |
756 | */ | 1222 | */ |
757 | static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan) | 1223 | static int xilinx_dma_reset(struct xilinx_dma_chan *chan) |
758 | { | 1224 | { |
759 | int err; | 1225 | int err; |
760 | u32 tmp; | 1226 | u32 tmp; |
761 | 1227 | ||
762 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET); | 1228 | dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET); |
763 | 1229 | ||
764 | /* Wait for the hardware to finish reset */ | 1230 | /* Wait for the hardware to finish reset */ |
765 | err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp, | 1231 | err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp, |
766 | !(tmp & XILINX_VDMA_DMACR_RESET), 0, | 1232 | !(tmp & XILINX_DMA_DMACR_RESET), 0, |
767 | XILINX_VDMA_LOOP_COUNT); | 1233 | XILINX_DMA_LOOP_COUNT); |
768 | 1234 | ||
769 | if (err) { | 1235 | if (err) { |
770 | dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", | 1236 | dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", |
771 | vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR), | 1237 | dma_ctrl_read(chan, XILINX_DMA_REG_DMACR), |
772 | vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | 1238 | dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); |
773 | return -ETIMEDOUT; | 1239 | return -ETIMEDOUT; |
774 | } | 1240 | } |
775 | 1241 | ||
@@ -779,48 +1245,48 @@ static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan) | |||
779 | } | 1245 | } |
780 | 1246 | ||
781 | /** | 1247 | /** |
782 | * xilinx_vdma_chan_reset - Reset VDMA channel and enable interrupts | 1248 | * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts |
783 | * @chan: Driver specific VDMA channel | 1249 | * @chan: Driver specific DMA channel |
784 | * | 1250 | * |
785 | * Return: '0' on success and failure value on error | 1251 | * Return: '0' on success and failure value on error |
786 | */ | 1252 | */ |
787 | static int xilinx_vdma_chan_reset(struct xilinx_vdma_chan *chan) | 1253 | static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan) |
788 | { | 1254 | { |
789 | int err; | 1255 | int err; |
790 | 1256 | ||
791 | /* Reset VDMA */ | 1257 | /* Reset VDMA */ |
792 | err = xilinx_vdma_reset(chan); | 1258 | err = xilinx_dma_reset(chan); |
793 | if (err) | 1259 | if (err) |
794 | return err; | 1260 | return err; |
795 | 1261 | ||
796 | /* Enable interrupts */ | 1262 | /* Enable interrupts */ |
797 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, | 1263 | dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, |
798 | XILINX_VDMA_DMAXR_ALL_IRQ_MASK); | 1264 | XILINX_DMA_DMAXR_ALL_IRQ_MASK); |
799 | 1265 | ||
800 | return 0; | 1266 | return 0; |
801 | } | 1267 | } |
802 | 1268 | ||
803 | /** | 1269 | /** |
804 | * xilinx_vdma_irq_handler - VDMA Interrupt handler | 1270 | * xilinx_dma_irq_handler - DMA Interrupt handler |
805 | * @irq: IRQ number | 1271 | * @irq: IRQ number |
806 | * @data: Pointer to the Xilinx VDMA channel structure | 1272 | * @data: Pointer to the Xilinx DMA channel structure |
807 | * | 1273 | * |
808 | * Return: IRQ_HANDLED/IRQ_NONE | 1274 | * Return: IRQ_HANDLED/IRQ_NONE |
809 | */ | 1275 | */ |
810 | static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data) | 1276 | static irqreturn_t xilinx_dma_irq_handler(int irq, void *data) |
811 | { | 1277 | { |
812 | struct xilinx_vdma_chan *chan = data; | 1278 | struct xilinx_dma_chan *chan = data; |
813 | u32 status; | 1279 | u32 status; |
814 | 1280 | ||
815 | /* Read the status and ack the interrupts. */ | 1281 | /* Read the status and ack the interrupts. */ |
816 | status = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR); | 1282 | status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR); |
817 | if (!(status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK)) | 1283 | if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK)) |
818 | return IRQ_NONE; | 1284 | return IRQ_NONE; |
819 | 1285 | ||
820 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR, | 1286 | dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, |
821 | status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK); | 1287 | status & XILINX_DMA_DMAXR_ALL_IRQ_MASK); |
822 | 1288 | ||
823 | if (status & XILINX_VDMA_DMASR_ERR_IRQ) { | 1289 | if (status & XILINX_DMA_DMASR_ERR_IRQ) { |
824 | /* | 1290 | /* |
825 | * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the | 1291 | * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the |
826 | * error is recoverable, ignore it. Otherwise flag the error. | 1292 | * error is recoverable, ignore it. Otherwise flag the error. |
@@ -828,22 +1294,23 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data) | |||
828 | * Only recoverable errors can be cleared in the DMASR register, | 1294 | * Only recoverable errors can be cleared in the DMASR register, |
829 | * make sure not to write 1 to the other error bits. | 1295 | * make sure not to write 1 to the other error bits. |
830 | */ | 1296 | */ |
831 | u32 errors = status & XILINX_VDMA_DMASR_ALL_ERR_MASK; | 1297 | u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK; |
832 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR, | 1298 | |
833 | errors & XILINX_VDMA_DMASR_ERR_RECOVER_MASK); | 1299 | dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, |
1300 | errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK); | ||
834 | 1301 | ||
835 | if (!chan->flush_on_fsync || | 1302 | if (!chan->flush_on_fsync || |
836 | (errors & ~XILINX_VDMA_DMASR_ERR_RECOVER_MASK)) { | 1303 | (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) { |
837 | dev_err(chan->dev, | 1304 | dev_err(chan->dev, |
838 | "Channel %p has errors %x, cdr %x tdr %x\n", | 1305 | "Channel %p has errors %x, cdr %x tdr %x\n", |
839 | chan, errors, | 1306 | chan, errors, |
840 | vdma_ctrl_read(chan, XILINX_VDMA_REG_CURDESC), | 1307 | dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC), |
841 | vdma_ctrl_read(chan, XILINX_VDMA_REG_TAILDESC)); | 1308 | dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC)); |
842 | chan->err = true; | 1309 | chan->err = true; |
843 | } | 1310 | } |
844 | } | 1311 | } |
845 | 1312 | ||
846 | if (status & XILINX_VDMA_DMASR_DLY_CNT_IRQ) { | 1313 | if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) { |
847 | /* | 1314 | /* |
848 | * Device takes too long to do the transfer when user requires | 1315 | * Device takes too long to do the transfer when user requires |
849 | * responsiveness. | 1316 | * responsiveness. |
@@ -851,10 +1318,10 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data) | |||
851 | dev_dbg(chan->dev, "Inter-packet latency too long\n"); | 1318 | dev_dbg(chan->dev, "Inter-packet latency too long\n"); |
852 | } | 1319 | } |
853 | 1320 | ||
854 | if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) { | 1321 | if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) { |
855 | spin_lock(&chan->lock); | 1322 | spin_lock(&chan->lock); |
856 | xilinx_vdma_complete_descriptor(chan); | 1323 | xilinx_dma_complete_descriptor(chan); |
857 | xilinx_vdma_start_transfer(chan); | 1324 | chan->start_transfer(chan); |
858 | spin_unlock(&chan->lock); | 1325 | spin_unlock(&chan->lock); |
859 | } | 1326 | } |
860 | 1327 | ||
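DMASR is write-1-to-clear, so the handler acknowledges exactly the bits it saw and, on error, writes back only the recoverable subset; any error outside that subset (or any error at all when flush-on-fsync is off) marks the channel as failed. The bit values in this sketch are invented for illustration:

#include <stdio.h>

#define IRQ_MASK          0x00007000u	/* ERR | DLY_CNT | FRM_CNT, illustrative */
#define ALL_ERR_MASK      0x000007f0u
#define ERR_RECOVER_MASK  0x00000090u	/* subset flush-on-fsync can recover */

int main(void)
{
	unsigned int status = 0x00005090;	/* hypothetical DMASR snapshot */
	unsigned int ack = status & IRQ_MASK;	/* write-1-to-clear the IRQ bits */
	unsigned int errors = status & ALL_ERR_MASK;
	int flush_on_fsync = 1;
	int fatal = !flush_on_fsync || (errors & ~ERR_RECOVER_MASK);

	printf("ack=%#x clear-errs=%#x fatal=%d\n",
	       ack, errors & ERR_RECOVER_MASK, fatal);
	return 0;
}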
@@ -867,11 +1334,13 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data) | |||
867 | * @chan: Driver specific dma channel | 1334 | * @chan: Driver specific dma channel |
868 | * @desc: dma transaction descriptor | 1335 | * @desc: dma transaction descriptor |
869 | */ | 1336 | */ |
870 | static void append_desc_queue(struct xilinx_vdma_chan *chan, | 1337 | static void append_desc_queue(struct xilinx_dma_chan *chan, |
871 | struct xilinx_vdma_tx_descriptor *desc) | 1338 | struct xilinx_dma_tx_descriptor *desc) |
872 | { | 1339 | { |
873 | struct xilinx_vdma_tx_segment *tail_segment; | 1340 | struct xilinx_vdma_tx_segment *tail_segment; |
874 | struct xilinx_vdma_tx_descriptor *tail_desc; | 1341 | struct xilinx_dma_tx_descriptor *tail_desc; |
1342 | struct xilinx_axidma_tx_segment *axidma_tail_segment; | ||
1343 | struct xilinx_cdma_tx_segment *cdma_tail_segment; | ||
875 | 1344 | ||
876 | if (list_empty(&chan->pending_list)) | 1345 | if (list_empty(&chan->pending_list)) |
877 | goto append; | 1346 | goto append; |
@@ -881,10 +1350,23 @@ static void append_desc_queue(struct xilinx_vdma_chan *chan, | |||
881 | * that already exists in memory. | 1350 | * that already exists in memory. |
882 | */ | 1351 | */ |
883 | tail_desc = list_last_entry(&chan->pending_list, | 1352 | tail_desc = list_last_entry(&chan->pending_list, |
884 | struct xilinx_vdma_tx_descriptor, node); | 1353 | struct xilinx_dma_tx_descriptor, node); |
885 | tail_segment = list_last_entry(&tail_desc->segments, | 1354 | if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
886 | struct xilinx_vdma_tx_segment, node); | 1355 | tail_segment = list_last_entry(&tail_desc->segments, |
887 | tail_segment->hw.next_desc = (u32)desc->async_tx.phys; | 1356 | struct xilinx_vdma_tx_segment, |
1357 | node); | ||
1358 | tail_segment->hw.next_desc = (u32)desc->async_tx.phys; | ||
1359 | } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { | ||
1360 | cdma_tail_segment = list_last_entry(&tail_desc->segments, | ||
1361 | struct xilinx_cdma_tx_segment, | ||
1362 | node); | ||
1363 | cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; | ||
1364 | } else { | ||
1365 | axidma_tail_segment = list_last_entry(&tail_desc->segments, | ||
1366 | struct xilinx_axidma_tx_segment, | ||
1367 | node); | ||
1368 | axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; | ||
1369 | } | ||
888 | 1370 | ||
889 | /* | 1371 | /* |
890 | * Add the software descriptor and all children to the list | 1372 | * Add the software descriptor and all children to the list |
@@ -894,22 +1376,23 @@ append: | |||
894 | list_add_tail(&desc->node, &chan->pending_list); | 1376 | list_add_tail(&desc->node, &chan->pending_list); |
895 | chan->desc_pendingcount++; | 1377 | chan->desc_pendingcount++; |
896 | 1378 | ||
897 | if (unlikely(chan->desc_pendingcount > chan->num_frms)) { | 1379 | if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) |
1380 | && unlikely(chan->desc_pendingcount > chan->num_frms)) { | ||
898 | dev_dbg(chan->dev, "desc pendingcount is too high\n"); | 1381 | dev_dbg(chan->dev, "desc pendingcount is too high\n"); |
899 | chan->desc_pendingcount = chan->num_frms; | 1382 | chan->desc_pendingcount = chan->num_frms; |
900 | } | 1383 | } |
901 | } | 1384 | } |
902 | 1385 | ||
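Whichever segment type is in play, queueing reduces to one store: the last hardware BD of the descriptor currently at the tail of pending_list gets its next_desc set to the incoming descriptor's physical address, so the engine walks straight from one transaction into the next. Stripped of the per-type structs, the idea is just:

#include <stdio.h>

struct hw_seg { unsigned int next_desc; };	/* hardware-visible BD, simplified */
struct sw_desc { struct hw_seg last; unsigned int phys; };

/* link the queue tail to the new descriptor so hardware chains into it */
static void append(struct sw_desc *tail, struct sw_desc *incoming)
{
	tail->last.next_desc = incoming->phys;
}

int main(void)
{
	struct sw_desc tail = { { 0 }, 0x1000 }, incoming = { { 0 }, 0x2000 };

	append(&tail, &incoming);
	printf("tail.next_desc=%#x\n", tail.last.next_desc);
	return 0;
}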
903 | /** | 1386 | /** |
904 | * xilinx_vdma_tx_submit - Submit DMA transaction | 1387 | * xilinx_dma_tx_submit - Submit DMA transaction |
905 | * @tx: Async transaction descriptor | 1388 | * @tx: Async transaction descriptor |
906 | * | 1389 | * |
907 | * Return: cookie value on success and failure value on error | 1390 | * Return: cookie value on success and failure value on error |
908 | */ | 1391 | */ |
909 | static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx) | 1392 | static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) |
910 | { | 1393 | { |
911 | struct xilinx_vdma_tx_descriptor *desc = to_vdma_tx_descriptor(tx); | 1394 | struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx); |
912 | struct xilinx_vdma_chan *chan = to_xilinx_chan(tx->chan); | 1395 | struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan); |
913 | dma_cookie_t cookie; | 1396 | dma_cookie_t cookie; |
914 | unsigned long flags; | 1397 | unsigned long flags; |
915 | int err; | 1398 | int err; |
@@ -919,7 +1402,7 @@ static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
919 | * If reset fails, need to hard reset the system. | 1402 | * If reset fails, need to hard reset the system. |
920 | * Channel is no longer functional | 1403 | * Channel is no longer functional |
921 | */ | 1404 | */ |
922 | err = xilinx_vdma_chan_reset(chan); | 1405 | err = xilinx_dma_chan_reset(chan); |
923 | if (err < 0) | 1406 | if (err < 0) |
924 | return err; | 1407 | return err; |
925 | } | 1408 | } |
@@ -950,8 +1433,8 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, | |||
950 | struct dma_interleaved_template *xt, | 1433 | struct dma_interleaved_template *xt, |
951 | unsigned long flags) | 1434 | unsigned long flags) |
952 | { | 1435 | { |
953 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | 1436 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
954 | struct xilinx_vdma_tx_descriptor *desc; | 1437 | struct xilinx_dma_tx_descriptor *desc; |
955 | struct xilinx_vdma_tx_segment *segment, *prev = NULL; | 1438 | struct xilinx_vdma_tx_segment *segment, *prev = NULL; |
956 | struct xilinx_vdma_desc_hw *hw; | 1439 | struct xilinx_vdma_desc_hw *hw; |
957 | 1440 | ||
@@ -965,12 +1448,12 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, | |||
965 | return NULL; | 1448 | return NULL; |
966 | 1449 | ||
967 | /* Allocate a transaction descriptor. */ | 1450 | /* Allocate a transaction descriptor. */ |
968 | desc = xilinx_vdma_alloc_tx_descriptor(chan); | 1451 | desc = xilinx_dma_alloc_tx_descriptor(chan); |
969 | if (!desc) | 1452 | if (!desc) |
970 | return NULL; | 1453 | return NULL; |
971 | 1454 | ||
972 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); | 1455 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); |
973 | desc->async_tx.tx_submit = xilinx_vdma_tx_submit; | 1456 | desc->async_tx.tx_submit = xilinx_dma_tx_submit; |
974 | async_tx_ack(&desc->async_tx); | 1457 | async_tx_ack(&desc->async_tx); |
975 | 1458 | ||
976 | /* Allocate the link descriptor from DMA pool */ | 1459 | /* Allocate the link descriptor from DMA pool */ |
@@ -983,14 +1466,25 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, | |||
983 | hw->vsize = xt->numf; | 1466 | hw->vsize = xt->numf; |
984 | hw->hsize = xt->sgl[0].size; | 1467 | hw->hsize = xt->sgl[0].size; |
985 | hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << | 1468 | hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << |
986 | XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT; | 1469 | XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT; |
987 | hw->stride |= chan->config.frm_dly << | 1470 | hw->stride |= chan->config.frm_dly << |
988 | XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT; | 1471 | XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT; |
989 | 1472 | ||
990 | if (xt->dir != DMA_MEM_TO_DEV) | 1473 | if (xt->dir != DMA_MEM_TO_DEV) { |
991 | hw->buf_addr = xt->dst_start; | 1474 | if (chan->ext_addr) { |
992 | else | 1475 | hw->buf_addr = lower_32_bits(xt->dst_start); |
993 | hw->buf_addr = xt->src_start; | 1476 | hw->buf_addr_msb = upper_32_bits(xt->dst_start); |
1477 | } else { | ||
1478 | hw->buf_addr = xt->dst_start; | ||
1479 | } | ||
1480 | } else { | ||
1481 | if (chan->ext_addr) { | ||
1482 | hw->buf_addr = lower_32_bits(xt->src_start); | ||
1483 | hw->buf_addr_msb = upper_32_bits(xt->src_start); | ||
1484 | } else { | ||
1485 | hw->buf_addr = xt->src_start; | ||
1486 | } | ||
1487 | } | ||
994 | 1488 | ||
995 | /* Insert the segment into the descriptor segments list. */ | 1489 | /* Insert the segment into the descriptor segments list. */ |
996 | list_add_tail(&segment->node, &desc->segments); | 1490 | list_add_tail(&segment->node, &desc->segments); |
@@ -1005,29 +1499,194 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, | |||
1005 | return &desc->async_tx; | 1499 | return &desc->async_tx; |
1006 | 1500 | ||
1007 | error: | 1501 | error: |
1008 | xilinx_vdma_free_tx_descriptor(chan, desc); | 1502 | xilinx_dma_free_tx_descriptor(chan, desc); |
1503 | return NULL; | ||
1504 | } | ||
1505 | |||
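With ext_addr set, the frame-buffer address no longer fits the 32-bit start-address register, so it is split with lower_32_bits()/upper_32_bits() and the high word lands in the paired MSB register. The split itself, as a stand-alone sketch with the kernel helpers open-coded and a hypothetical 36-bit address:

#include <stdio.h>

#define lower_32_bits(n) ((unsigned int)(n))
#define upper_32_bits(n) ((unsigned int)((n) >> 32))

int main(void)
{
	unsigned long long buf = 0x000000081f200000ULL;	/* hypothetical address */

	printf("buf_addr=%#x buf_addr_msb=%#x\n",
	       lower_32_bits(buf), upper_32_bits(buf));
	return 0;
}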
1506 | /** | ||
1507 | * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction | ||
1508 | * @dchan: DMA channel | ||
1509 | * @dma_dst: destination address | ||
1510 | * @dma_src: source address | ||
1511 | * @len: transfer length | ||
1512 | * @flags: transfer ack flags | ||
1513 | * | ||
1514 | * Return: Async transaction descriptor on success and NULL on failure | ||
1515 | */ | ||
1516 | static struct dma_async_tx_descriptor * | ||
1517 | xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, | ||
1518 | dma_addr_t dma_src, size_t len, unsigned long flags) | ||
1519 | { | ||
1520 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); | ||
1521 | struct xilinx_dma_tx_descriptor *desc; | ||
1522 | struct xilinx_cdma_tx_segment *segment, *prev; | ||
1523 | struct xilinx_cdma_desc_hw *hw; | ||
1524 | |||
1525 | if (!len || len > XILINX_DMA_MAX_TRANS_LEN) | ||
1526 | return NULL; | ||
1527 | |||
1528 | desc = xilinx_dma_alloc_tx_descriptor(chan); | ||
1529 | if (!desc) | ||
1530 | return NULL; | ||
1531 | |||
1532 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); | ||
1533 | desc->async_tx.tx_submit = xilinx_dma_tx_submit; | ||
1534 | |||
1535 | /* Allocate the link descriptor from DMA pool */ | ||
1536 | segment = xilinx_cdma_alloc_tx_segment(chan); | ||
1537 | if (!segment) | ||
1538 | goto error; | ||
1539 | |||
1540 | hw = &segment->hw; | ||
1541 | hw->control = len; | ||
1542 | hw->src_addr = dma_src; | ||
1543 | hw->dest_addr = dma_dst; | ||
1544 | |||
1545 | /* Fill the previous next descriptor with current */ | ||
1546 | prev = list_last_entry(&desc->segments, | ||
1547 | struct xilinx_cdma_tx_segment, node); | ||
1548 | prev->hw.next_desc = segment->phys; | ||
1549 | |||
1550 | /* Insert the segment into the descriptor segments list. */ | ||
1551 | list_add_tail(&segment->node, &desc->segments); | ||
1552 | |||
1553 | prev = segment; | ||
1554 | |||
1555 | /* Link the last hardware descriptor with the first. */ | ||
1556 | segment = list_first_entry(&desc->segments, | ||
1557 | struct xilinx_cdma_tx_segment, node); | ||
1558 | desc->async_tx.phys = segment->phys; | ||
1559 | prev->hw.next_desc = segment->phys; | ||
1560 | |||
1561 | return &desc->async_tx; | ||
1562 | |||
1563 | error: | ||
1564 | xilinx_dma_free_tx_descriptor(chan, desc); | ||
1565 | return NULL; | ||
1566 | } | ||
1567 | |||
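A dmaengine client reaches xilinx_cdma_prep_memcpy() through the generic API rather than by calling it directly. The sketch below shows one plausible client sequence; it assumes dst/src are already DMA-mapped, uses a blocking wait purely for brevity, and wrapper names may differ slightly between kernel versions:

#include <linux/dmaengine.h>
#include <linux/errno.h>

/* dst and src are assumed to be DMA-mapped already */
static int cdma_copy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);	/* any memcpy channel */
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -EIO;
		goto out;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -ETIMEDOUT;
out:
	dma_release_channel(chan);
	return ret;
}

In real clients a completion callback on the descriptor is the usual choice; the synchronous wait here only keeps the sketch short.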
1568 | /** | ||
1569 | * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction | ||
1570 | * @dchan: DMA channel | ||
1571 | * @sgl: scatterlist to transfer to/from | ||
1572 | * @sg_len: number of entries in @scatterlist | ||
1573 | * @direction: DMA direction | ||
1574 | * @flags: transfer ack flags | ||
1575 | * @context: APP words of the descriptor | ||
1576 | * | ||
1577 | * Return: Async transaction descriptor on success and NULL on failure | ||
1578 | */ | ||
1579 | static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( | ||
1580 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | ||
1581 | enum dma_transfer_direction direction, unsigned long flags, | ||
1582 | void *context) | ||
1583 | { | ||
1584 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); | ||
1585 | struct xilinx_dma_tx_descriptor *desc; | ||
1586 | struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL; | ||
1587 | u32 *app_w = (u32 *)context; | ||
1588 | struct scatterlist *sg; | ||
1589 | size_t copy; | ||
1590 | size_t sg_used; | ||
1591 | unsigned int i; | ||
1592 | |||
1593 | if (!is_slave_direction(direction)) | ||
1594 | return NULL; | ||
1595 | |||
1596 | /* Allocate a transaction descriptor. */ | ||
1597 | desc = xilinx_dma_alloc_tx_descriptor(chan); | ||
1598 | if (!desc) | ||
1599 | return NULL; | ||
1600 | |||
1601 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); | ||
1602 | desc->async_tx.tx_submit = xilinx_dma_tx_submit; | ||
1603 | |||
1604 | /* Build transactions using information in the scatter gather list */ | ||
1605 | for_each_sg(sgl, sg, sg_len, i) { | ||
1606 | sg_used = 0; | ||
1607 | |||
1608 | /* Loop until the entire scatterlist entry is used */ | ||
1609 | while (sg_used < sg_dma_len(sg)) { | ||
1610 | struct xilinx_axidma_desc_hw *hw; | ||
1611 | |||
1612 | /* Get a free segment */ | ||
1613 | segment = xilinx_axidma_alloc_tx_segment(chan); | ||
1614 | if (!segment) | ||
1615 | goto error; | ||
1616 | |||
1617 | /* | ||
1618 | * Calculate the maximum number of bytes to transfer, | ||
1619 | * making sure it is less than the hw limit | ||
1620 | */ | ||
1621 | copy = min_t(size_t, sg_dma_len(sg) - sg_used, | ||
1622 | XILINX_DMA_MAX_TRANS_LEN); | ||
1623 | hw = &segment->hw; | ||
1624 | |||
1625 | /* Fill in the descriptor */ | ||
1626 | hw->buf_addr = sg_dma_address(sg) + sg_used; | ||
1627 | |||
1628 | hw->control = copy; | ||
1629 | |||
1630 | if (chan->direction == DMA_MEM_TO_DEV) { | ||
1631 | if (app_w) | ||
1632 | memcpy(hw->app, app_w, sizeof(u32) * | ||
1633 | XILINX_DMA_NUM_APP_WORDS); | ||
1634 | } | ||
1635 | |||
1636 | if (prev) | ||
1637 | prev->hw.next_desc = segment->phys; | ||
1638 | |||
1639 | prev = segment; | ||
1640 | sg_used += copy; | ||
1641 | |||
1642 | /* | ||
1643 | * Insert the segment into the descriptor segments | ||
1644 | * list. | ||
1645 | */ | ||
1646 | list_add_tail(&segment->node, &desc->segments); | ||
1647 | } | ||
1648 | } | ||
1649 | |||
1650 | segment = list_first_entry(&desc->segments, | ||
1651 | struct xilinx_axidma_tx_segment, node); | ||
1652 | desc->async_tx.phys = segment->phys; | ||
1653 | prev->hw.next_desc = segment->phys; | ||
1654 | |||
1655 | /* For the last DMA_MEM_TO_DEV transfer, set EOP */ | ||
1656 | if (chan->direction == DMA_MEM_TO_DEV) { | ||
1657 | segment->hw.control |= XILINX_DMA_BD_SOP; | ||
1658 | segment = list_last_entry(&desc->segments, | ||
1659 | struct xilinx_axidma_tx_segment, | ||
1660 | node); | ||
1661 | segment->hw.control |= XILINX_DMA_BD_EOP; | ||
1662 | } | ||
1663 | |||
1664 | return &desc->async_tx; | ||
1665 | |||
1666 | error: | ||
1667 | xilinx_dma_free_tx_descriptor(chan, desc); | ||
1009 | return NULL; | 1668 | return NULL; |
1010 | } | 1669 | } |
1011 | 1670 | ||
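Each scatterlist entry above is carved into BDs of at most XILINX_DMA_MAX_TRANS_LEN bytes, and for MEM_TO_DEV the first BD gets SOP and the last EOP. The chunking is plain arithmetic, shown stand-alone with invented entry sizes:

#include <stdio.h>
#include <stddef.h>

#define MAX_TRANS_LEN ((1u << 23) - 1)	/* 23-bit BD length field */

int main(void)
{
	/* hypothetical scatterlist entry lengths in bytes */
	size_t sg_len[] = { 10 * 1024 * 1024, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(sg_len) / sizeof(sg_len[0]); i++) {
		size_t used = 0;

		while (used < sg_len[i]) {
			size_t copy = sg_len[i] - used;

			if (copy > MAX_TRANS_LEN)
				copy = MAX_TRANS_LEN;	/* one BD per chunk */
			printf("sg[%u]: BD at offset %zu, %zu bytes\n",
			       i, used, copy);
			used += copy;
		}
	}
	return 0;
}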
1012 | /** | 1671 | /** |
1013 | * xilinx_vdma_terminate_all - Halt the channel and free descriptors | 1672 | * xilinx_dma_terminate_all - Halt the channel and free descriptors |
1014 | * @chan: Driver specific VDMA Channel pointer | 1673 | * @chan: Driver specific DMA Channel pointer |
1015 | */ | 1674 | */ |
1016 | static int xilinx_vdma_terminate_all(struct dma_chan *dchan) | 1675 | static int xilinx_dma_terminate_all(struct dma_chan *dchan) |
1017 | { | 1676 | { |
1018 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | 1677 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
1019 | 1678 | ||
1020 | /* Halt the DMA engine */ | 1679 | /* Halt the DMA engine */ |
1021 | xilinx_vdma_halt(chan); | 1680 | xilinx_dma_halt(chan); |
1022 | 1681 | ||
1023 | /* Remove and free all of the descriptors in the lists */ | 1682 | /* Remove and free all of the descriptors in the lists */ |
1024 | xilinx_vdma_free_descriptors(chan); | 1683 | xilinx_dma_free_descriptors(chan); |
1025 | 1684 | ||
1026 | return 0; | 1685 | return 0; |
1027 | } | 1686 | } |
1028 | 1687 | ||
1029 | /** | 1688 | /** |
1030 | * xilinx_vdma_channel_set_config - Configure VDMA channel | 1689 | * xilinx_vdma_channel_set_config - Configure VDMA channel |
1031 | * Run-time configuration for Axi VDMA, supports: | 1690 | * Run-time configuration for Axi VDMA, supports: |
1032 | * . halt the channel | 1691 | * . halt the channel |
1033 | * . configure interrupt coalescing and inter-packet delay threshold | 1692 | * . configure interrupt coalescing and inter-packet delay threshold |
@@ -1042,13 +1701,13 @@ static int xilinx_vdma_terminate_all(struct dma_chan *dchan) | |||
1042 | int xilinx_vdma_channel_set_config(struct dma_chan *dchan, | 1701 | int xilinx_vdma_channel_set_config(struct dma_chan *dchan, |
1043 | struct xilinx_vdma_config *cfg) | 1702 | struct xilinx_vdma_config *cfg) |
1044 | { | 1703 | { |
1045 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | 1704 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
1046 | u32 dmacr; | 1705 | u32 dmacr; |
1047 | 1706 | ||
1048 | if (cfg->reset) | 1707 | if (cfg->reset) |
1049 | return xilinx_vdma_chan_reset(chan); | 1708 | return xilinx_dma_chan_reset(chan); |
1050 | 1709 | ||
1051 | dmacr = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR); | 1710 | dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); |
1052 | 1711 | ||
1053 | chan->config.frm_dly = cfg->frm_dly; | 1712 | chan->config.frm_dly = cfg->frm_dly; |
1054 | chan->config.park = cfg->park; | 1713 | chan->config.park = cfg->park; |
@@ -1058,8 +1717,8 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan, | |||
1058 | chan->config.master = cfg->master; | 1717 | chan->config.master = cfg->master; |
1059 | 1718 | ||
1060 | if (cfg->gen_lock && chan->genlock) { | 1719 | if (cfg->gen_lock && chan->genlock) { |
1061 | dmacr |= XILINX_VDMA_DMACR_GENLOCK_EN; | 1720 | dmacr |= XILINX_DMA_DMACR_GENLOCK_EN; |
1062 | dmacr |= cfg->master << XILINX_VDMA_DMACR_MASTER_SHIFT; | 1721 | dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT; |
1063 | } | 1722 | } |
1064 | 1723 | ||
1065 | chan->config.frm_cnt_en = cfg->frm_cnt_en; | 1724 | chan->config.frm_cnt_en = cfg->frm_cnt_en; |
@@ -1071,21 +1730,21 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan, | |||
1071 | chan->config.coalesc = cfg->coalesc; | 1730 | chan->config.coalesc = cfg->coalesc; |
1072 | chan->config.delay = cfg->delay; | 1731 | chan->config.delay = cfg->delay; |
1073 | 1732 | ||
1074 | if (cfg->coalesc <= XILINX_VDMA_DMACR_FRAME_COUNT_MAX) { | 1733 | if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) { |
1075 | dmacr |= cfg->coalesc << XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT; | 1734 | dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT; |
1076 | chan->config.coalesc = cfg->coalesc; | 1735 | chan->config.coalesc = cfg->coalesc; |
1077 | } | 1736 | } |
1078 | 1737 | ||
1079 | if (cfg->delay <= XILINX_VDMA_DMACR_DELAY_MAX) { | 1738 | if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) { |
1080 | dmacr |= cfg->delay << XILINX_VDMA_DMACR_DELAY_SHIFT; | 1739 | dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT; |
1081 | chan->config.delay = cfg->delay; | 1740 | chan->config.delay = cfg->delay; |
1082 | } | 1741 | } |
1083 | 1742 | ||
1084 | /* FSync Source selection */ | 1743 | /* FSync Source selection */ |
1085 | dmacr &= ~XILINX_VDMA_DMACR_FSYNCSRC_MASK; | 1744 | dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK; |
1086 | dmacr |= cfg->ext_fsync << XILINX_VDMA_DMACR_FSYNCSRC_SHIFT; | 1745 | dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT; |
1087 | 1746 | ||
1088 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, dmacr); | 1747 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr); |
1089 | 1748 | ||
1090 | return 0; | 1749 | return 0; |
1091 | } | 1750 | } |
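xilinx_vdma_channel_set_config() stays exported for clients such as video pipelines. A hedged example of how a client might call it, assuming a channel already obtained through the usual dmaengine request path; the header location and the exact field set have shifted across kernel versions:

/* sketch only: header path and available fields may differ by kernel version */
#include <linux/dma/xilinx_dma.h>
#include <linux/dmaengine.h>

static int setup_vdma(struct dma_chan *chan)
{
	struct xilinx_vdma_config cfg = {
		.frm_cnt_en = 1,	/* interrupt after .coalesc frames */
		.coalesc    = 4,
		.park       = 0,	/* circular mode: cycle all frame buffers */
		.ext_fsync  = 0,
	};

	return xilinx_vdma_channel_set_config(chan, &cfg);
}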
@@ -1096,14 +1755,14 @@ EXPORT_SYMBOL(xilinx_vdma_channel_set_config); | |||
1096 | */ | 1755 | */ |
1097 | 1756 | ||
1098 | /** | 1757 | /** |
1099 | * xilinx_vdma_chan_remove - Per Channel remove function | 1758 | * xilinx_dma_chan_remove - Per Channel remove function |
1100 | * @chan: Driver specific VDMA channel | 1759 | * @chan: Driver specific DMA channel |
1101 | */ | 1760 | */ |
1102 | static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan) | 1761 | static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan) |
1103 | { | 1762 | { |
1104 | /* Disable all interrupts */ | 1763 | /* Disable all interrupts */ |
1105 | vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, | 1764 | dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, |
1106 | XILINX_VDMA_DMAXR_ALL_IRQ_MASK); | 1765 | XILINX_DMA_DMAXR_ALL_IRQ_MASK); |
1107 | 1766 | ||
1108 | if (chan->irq > 0) | 1767 | if (chan->irq > 0) |
1109 | free_irq(chan->irq, chan); | 1768 | free_irq(chan->irq, chan); |
@@ -1113,8 +1772,197 @@ static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan) | |||
1113 | list_del(&chan->common.device_node); | 1772 | list_del(&chan->common.device_node); |
1114 | } | 1773 | } |
1115 | 1774 | ||
1775 | static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, | ||
1776 | struct clk **tx_clk, struct clk **rx_clk, | ||
1777 | struct clk **sg_clk, struct clk **tmp_clk) | ||
1778 | { | ||
1779 | int err; | ||
1780 | |||
1781 | *tmp_clk = NULL; | ||
1782 | |||
1783 | *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); | ||
1784 | if (IS_ERR(*axi_clk)) { | ||
1785 | err = PTR_ERR(*axi_clk); | ||
1786 | dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err); | ||
1787 | return err; | ||
1788 | } | ||
1789 | |||
1790 | *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); | ||
1791 | if (IS_ERR(*tx_clk)) | ||
1792 | *tx_clk = NULL; | ||
1793 | |||
1794 | *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); | ||
1795 | if (IS_ERR(*rx_clk)) | ||
1796 | *rx_clk = NULL; | ||
1797 | |||
1798 | *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk"); | ||
1799 | if (IS_ERR(*sg_clk)) | ||
1800 | *sg_clk = NULL; | ||
1801 | |||
1802 | err = clk_prepare_enable(*axi_clk); | ||
1803 | if (err) { | ||
1804 | dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); | ||
1805 | return err; | ||
1806 | } | ||
1807 | |||
1808 | err = clk_prepare_enable(*tx_clk); | ||
1809 | if (err) { | ||
1810 | dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); | ||
1811 | goto err_disable_axiclk; | ||
1812 | } | ||
1813 | |||
1814 | err = clk_prepare_enable(*rx_clk); | ||
1815 | if (err) { | ||
1816 | dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); | ||
1817 | goto err_disable_txclk; | ||
1818 | } | ||
1819 | |||
1820 | err = clk_prepare_enable(*sg_clk); | ||
1821 | if (err) { | ||
1822 | dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err); | ||
1823 | goto err_disable_rxclk; | ||
1824 | } | ||
1825 | |||
1826 | return 0; | ||
1827 | |||
1828 | err_disable_rxclk: | ||
1829 | clk_disable_unprepare(*rx_clk); | ||
1830 | err_disable_txclk: | ||
1831 | clk_disable_unprepare(*tx_clk); | ||
1832 | err_disable_axiclk: | ||
1833 | clk_disable_unprepare(*axi_clk); | ||
1834 | |||
1835 | return err; | ||
1836 | } | ||
1837 | |||
1838 | static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, | ||
1839 | struct clk **dev_clk, struct clk **tmp_clk, | ||
1840 | struct clk **tmp1_clk, struct clk **tmp2_clk) | ||
1841 | { | ||
1842 | int err; | ||
1843 | |||
1844 | *tmp_clk = NULL; | ||
1845 | *tmp1_clk = NULL; | ||
1846 | *tmp2_clk = NULL; | ||
1847 | |||
1848 | *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); | ||
1849 | if (IS_ERR(*axi_clk)) { | ||
1850 | err = PTR_ERR(*axi_clk); | ||
1851 | dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err); | ||
1852 | return err; | ||
1853 | } | ||
1854 | |||
1855 | *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); | ||
1856 | if (IS_ERR(*dev_clk)) { | ||
1857 | err = PTR_ERR(*dev_clk); | ||
1858 | dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err); | ||
1859 | return err; | ||
1860 | } | ||
1861 | |||
1862 | err = clk_prepare_enable(*axi_clk); | ||
1863 | if (err) { | ||
1864 | dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); | ||
1865 | return err; | ||
1866 | } | ||
1867 | |||
1868 | err = clk_prepare_enable(*dev_clk); | ||
1869 | if (err) { | ||
1870 | dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err); | ||
1871 | goto err_disable_axiclk; | ||
1872 | } | ||
1873 | |||
1874 | return 0; | ||
1875 | |||
1876 | err_disable_axiclk: | ||
1877 | clk_disable_unprepare(*axi_clk); | ||
1878 | |||
1879 | return err; | ||
1880 | } | ||
1881 | |||
1882 | static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, | ||
1883 | struct clk **tx_clk, struct clk **txs_clk, | ||
1884 | struct clk **rx_clk, struct clk **rxs_clk) | ||
1885 | { | ||
1886 | int err; | ||
1887 | |||
1888 | *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); | ||
1889 | if (IS_ERR(*axi_clk)) { | ||
1890 | err = PTR_ERR(*axi_clk); | ||
1891 | dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err); | ||
1892 | return err; | ||
1893 | } | ||
1894 | |||
1895 | *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); | ||
1896 | if (IS_ERR(*tx_clk)) | ||
1897 | *tx_clk = NULL; | ||
1898 | |||
1899 | *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk"); | ||
1900 | if (IS_ERR(*txs_clk)) | ||
1901 | *txs_clk = NULL; | ||
1902 | |||
1903 | *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); | ||
1904 | if (IS_ERR(*rx_clk)) | ||
1905 | *rx_clk = NULL; | ||
1906 | |||
1907 | *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk"); | ||
1908 | if (IS_ERR(*rxs_clk)) | ||
1909 | *rxs_clk = NULL; | ||
1910 | |||
1911 | err = clk_prepare_enable(*axi_clk); | ||
1912 | if (err) { | ||
1913 | dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); | ||
1914 | return err; | ||
1915 | } | ||
1916 | |||
1917 | err = clk_prepare_enable(*tx_clk); | ||
1918 | if (err) { | ||
1919 | dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); | ||
1920 | goto err_disable_axiclk; | ||
1921 | } | ||
1922 | |||
1923 | err = clk_prepare_enable(*txs_clk); | ||
1924 | if (err) { | ||
1925 | dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err); | ||
1926 | goto err_disable_txclk; | ||
1927 | } | ||
1928 | |||
1929 | err = clk_prepare_enable(*rx_clk); | ||
1930 | if (err) { | ||
1931 | dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); | ||
1932 | goto err_disable_txsclk; | ||
1933 | } | ||
1934 | |||
1935 | err = clk_prepare_enable(*rxs_clk); | ||
1936 | if (err) { | ||
1937 | dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err); | ||
1938 | goto err_disable_rxclk; | ||
1939 | } | ||
1940 | |||
1941 | return 0; | ||
1942 | |||
1943 | err_disable_rxclk: | ||
1944 | clk_disable_unprepare(*rx_clk); | ||
1945 | err_disable_txsclk: | ||
1946 | clk_disable_unprepare(*txs_clk); | ||
1947 | err_disable_txclk: | ||
1948 | clk_disable_unprepare(*tx_clk); | ||
1949 | err_disable_axiclk: | ||
1950 | clk_disable_unprepare(*axi_clk); | ||
1951 | |||
1952 | return err; | ||
1953 | } | ||
1954 | |||
1955 | static void xdma_disable_allclks(struct xilinx_dma_device *xdev) | ||
1956 | { | ||
1957 | clk_disable_unprepare(xdev->rxs_clk); | ||
1958 | clk_disable_unprepare(xdev->rx_clk); | ||
1959 | clk_disable_unprepare(xdev->txs_clk); | ||
1960 | clk_disable_unprepare(xdev->tx_clk); | ||
1961 | clk_disable_unprepare(xdev->axi_clk); | ||
1962 | } | ||
1963 | |||
1116 | /** | 1964 | /** |
1117 | * xilinx_vdma_chan_probe - Per Channel Probing | 1965 | * xilinx_dma_chan_probe - Per Channel Probing |
1118 | * It gets channel features from the device tree entry and | 1966 | * It gets channel features from the device tree entry and |
1119 | * initializes special channel handling routines | 1967 | * initializes special channel handling routines |
1120 | * | 1968 | * |
@@ -1123,10 +1971,10 @@ static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan) | |||
1123 | * | 1971 | * |
1124 | * Return: '0' on success and failure value on error | 1972 | * Return: '0' on success and failure value on error |
1125 | */ | 1973 | */ |
1126 | static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, | 1974 | static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, |
1127 | struct device_node *node) | 1975 | struct device_node *node) |
1128 | { | 1976 | { |
1129 | struct xilinx_vdma_chan *chan; | 1977 | struct xilinx_dma_chan *chan; |
1130 | bool has_dre = false; | 1978 | bool has_dre = false; |
1131 | u32 value, width; | 1979 | u32 value, width; |
1132 | int err; | 1980 | int err; |
@@ -1140,6 +1988,7 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, | |||
1140 | chan->xdev = xdev; | 1988 | chan->xdev = xdev; |
1141 | chan->has_sg = xdev->has_sg; | 1989 | chan->has_sg = xdev->has_sg; |
1142 | chan->desc_pendingcount = 0x0; | 1990 | chan->desc_pendingcount = 0x0; |
1991 | chan->ext_addr = xdev->ext_addr; | ||
1143 | 1992 | ||
1144 | spin_lock_init(&chan->lock); | 1993 | spin_lock_init(&chan->lock); |
1145 | INIT_LIST_HEAD(&chan->pending_list); | 1994 | INIT_LIST_HEAD(&chan->pending_list); |
@@ -1169,23 +2018,27 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, | |||
1169 | chan->direction = DMA_MEM_TO_DEV; | 2018 | chan->direction = DMA_MEM_TO_DEV; |
1170 | chan->id = 0; | 2019 | chan->id = 0; |
1171 | 2020 | ||
1172 | chan->ctrl_offset = XILINX_VDMA_MM2S_CTRL_OFFSET; | 2021 | chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; |
1173 | chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; | 2022 | if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
2023 | chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; | ||
1174 | 2024 | ||
1175 | if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH || | 2025 | if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || |
1176 | xdev->flush_on_fsync == XILINX_VDMA_FLUSH_MM2S) | 2026 | xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S) |
1177 | chan->flush_on_fsync = true; | 2027 | chan->flush_on_fsync = true; |
2028 | } | ||
1178 | } else if (of_device_is_compatible(node, | 2029 | } else if (of_device_is_compatible(node, |
1179 | "xlnx,axi-vdma-s2mm-channel")) { | 2030 | "xlnx,axi-vdma-s2mm-channel")) { |
1180 | chan->direction = DMA_DEV_TO_MEM; | 2031 | chan->direction = DMA_DEV_TO_MEM; |
1181 | chan->id = 1; | 2032 | chan->id = 1; |
1182 | 2033 | ||
1183 | chan->ctrl_offset = XILINX_VDMA_S2MM_CTRL_OFFSET; | 2034 | chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; |
1184 | chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; | 2035 | if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
2036 | chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; | ||
1185 | 2037 | ||
1186 | if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH || | 2038 | if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || |
1187 | xdev->flush_on_fsync == XILINX_VDMA_FLUSH_S2MM) | 2039 | xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM) |
1188 | chan->flush_on_fsync = true; | 2040 | chan->flush_on_fsync = true; |
2041 | } | ||
1189 | } else { | 2042 | } else { |
1190 | dev_err(xdev->dev, "Invalid channel compatible node\n"); | 2043 | dev_err(xdev->dev, "Invalid channel compatible node\n"); |
1191 | return -EINVAL; | 2044 | return -EINVAL; |
@@ -1193,15 +2046,22 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, | |||
1193 | 2046 | ||
1194 | /* Request the interrupt */ | 2047 | /* Request the interrupt */ |
1195 | chan->irq = irq_of_parse_and_map(node, 0); | 2048 | chan->irq = irq_of_parse_and_map(node, 0); |
1196 | err = request_irq(chan->irq, xilinx_vdma_irq_handler, IRQF_SHARED, | 2049 | err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED, |
1197 | "xilinx-vdma-controller", chan); | 2050 | "xilinx-dma-controller", chan); |
1198 | if (err) { | 2051 | if (err) { |
1199 | dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); | 2052 | dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); |
1200 | return err; | 2053 | return err; |
1201 | } | 2054 | } |
1202 | 2055 | ||
2056 | if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) | ||
2057 | chan->start_transfer = xilinx_dma_start_transfer; | ||
2058 | else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) | ||
2059 | chan->start_transfer = xilinx_cdma_start_transfer; | ||
2060 | else | ||
2061 | chan->start_transfer = xilinx_vdma_start_transfer; | ||
2062 | |||
1203 | /* Initialize the tasklet */ | 2063 | /* Initialize the tasklet */ |
1204 | tasklet_init(&chan->tasklet, xilinx_vdma_do_tasklet, | 2064 | tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, |
1205 | (unsigned long)chan); | 2065 | (unsigned long)chan); |
1206 | 2066 | ||
1207 | /* | 2067 | /* |
@@ -1214,7 +2074,7 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, | |||
1214 | xdev->chan[chan->id] = chan; | 2074 | xdev->chan[chan->id] = chan; |
1215 | 2075 | ||
1216 | /* Reset the channel */ | 2076 | /* Reset the channel */ |
1217 | err = xilinx_vdma_chan_reset(chan); | 2077 | err = xilinx_dma_chan_reset(chan); |
1218 | if (err < 0) { | 2078 | if (err < 0) { |
1219 | dev_err(xdev->dev, "Reset channel failed\n"); | 2079 | dev_err(xdev->dev, "Reset channel failed\n"); |
1220 | return err; | 2080 | return err; |
@@ -1233,28 +2093,54 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, | |||
1233 | static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, | 2093 | static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, |
1234 | struct of_dma *ofdma) | 2094 | struct of_dma *ofdma) |
1235 | { | 2095 | { |
1236 | struct xilinx_vdma_device *xdev = ofdma->of_dma_data; | 2096 | struct xilinx_dma_device *xdev = ofdma->of_dma_data; |
1237 | int chan_id = dma_spec->args[0]; | 2097 | int chan_id = dma_spec->args[0]; |
1238 | 2098 | ||
1239 | if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id]) | 2099 | if (chan_id >= XILINX_DMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id]) |
1240 | return NULL; | 2100 | return NULL; |
1241 | 2101 | ||
1242 | return dma_get_slave_channel(&xdev->chan[chan_id]->common); | 2102 | return dma_get_slave_channel(&xdev->chan[chan_id]->common); |
1243 | } | 2103 | } |
1244 | 2104 | ||
2105 | static const struct xilinx_dma_config axidma_config = { | ||
2106 | .dmatype = XDMA_TYPE_AXIDMA, | ||
2107 | .clk_init = axidma_clk_init, | ||
2108 | }; | ||
2109 | |||
2110 | static const struct xilinx_dma_config axicdma_config = { | ||
2111 | .dmatype = XDMA_TYPE_CDMA, | ||
2112 | .clk_init = axicdma_clk_init, | ||
2113 | }; | ||
2114 | |||
2115 | static const struct xilinx_dma_config axivdma_config = { | ||
2116 | .dmatype = XDMA_TYPE_VDMA, | ||
2117 | .clk_init = axivdma_clk_init, | ||
2118 | }; | ||
2119 | |||
2120 | static const struct of_device_id xilinx_dma_of_ids[] = { | ||
2121 | { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, | ||
2122 | { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, | ||
2123 | { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, | ||
2124 | {} | ||
2125 | }; | ||
2126 | MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); | ||
2127 | |||
1245 | /** | 2128 | /** |
1246 | * xilinx_vdma_probe - Driver probe function | 2129 | * xilinx_dma_probe - Driver probe function |
1247 | * @pdev: Pointer to the platform_device structure | 2130 | * @pdev: Pointer to the platform_device structure |
1248 | * | 2131 | * |
1249 | * Return: '0' on success and failure value on error | 2132 | * Return: '0' on success and failure value on error |
1250 | */ | 2133 | */ |
1251 | static int xilinx_vdma_probe(struct platform_device *pdev) | 2134 | static int xilinx_dma_probe(struct platform_device *pdev) |
1252 | { | 2135 | { |
2136 | int (*clk_init)(struct platform_device *, struct clk **, struct clk **, | ||
2137 | struct clk **, struct clk **, struct clk **) | ||
2138 | = axivdma_clk_init; | ||
1253 | struct device_node *node = pdev->dev.of_node; | 2139 | struct device_node *node = pdev->dev.of_node; |
1254 | struct xilinx_vdma_device *xdev; | 2140 | struct xilinx_dma_device *xdev; |
1255 | struct device_node *child; | 2141 | struct device_node *child, *np = pdev->dev.of_node; |
1256 | struct resource *io; | 2142 | struct resource *io; |
1257 | u32 num_frames; | 2143 | u32 num_frames, addr_width; |
1258 | int i, err; | 2144 | int i, err; |
1259 | 2145 | ||
1260 | /* Allocate and initialize the DMA engine structure */ | 2146 | /* Allocate and initialize the DMA engine structure */ |
@@ -1263,6 +2149,20 @@ static int xilinx_vdma_probe(struct platform_device *pdev) | |||
1263 | return -ENOMEM; | 2149 | return -ENOMEM; |
1264 | 2150 | ||
1265 | xdev->dev = &pdev->dev; | 2151 | xdev->dev = &pdev->dev; |
2152 | if (np) { | ||
2153 | const struct of_device_id *match; | ||
2154 | |||
2155 | match = of_match_node(xilinx_dma_of_ids, np); | ||
2156 | if (match && match->data) { | ||
2157 | xdev->dma_config = match->data; | ||
2158 | clk_init = xdev->dma_config->clk_init; | ||
2159 | } | ||
2160 | } | ||
2161 | |||
2162 | err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk, | ||
2163 | &xdev->rx_clk, &xdev->rxs_clk); | ||
2164 | if (err) | ||
2165 | return err; | ||
1266 | 2166 | ||
1267 | /* Request and map I/O memory */ | 2167 | /* Request and map I/O memory */ |
1268 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2168 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -1273,46 +2173,77 @@ static int xilinx_vdma_probe(struct platform_device *pdev) | |||
1273 | /* Retrieve the DMA engine properties from the device tree */ | 2173 | /* Retrieve the DMA engine properties from the device tree */ |
1274 | xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); | 2174 | xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); |
1275 | 2175 | ||
1276 | err = of_property_read_u32(node, "xlnx,num-fstores", &num_frames); | 2176 | if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
1277 | if (err < 0) { | 2177 | err = of_property_read_u32(node, "xlnx,num-fstores", |
1278 | dev_err(xdev->dev, "missing xlnx,num-fstores property\n"); | 2178 | &num_frames); |
1279 | return err; | 2179 | if (err < 0) { |
2180 | dev_err(xdev->dev, | ||
2181 | "missing xlnx,num-fstores property\n"); | ||
2182 | return err; | ||
2183 | } | ||
2184 | |||
2185 | err = of_property_read_u32(node, "xlnx,flush-fsync", | ||
2186 | &xdev->flush_on_fsync); | ||
2187 | if (err < 0) | ||
2188 | dev_warn(xdev->dev, | ||
2189 | "missing xlnx,flush-fsync property\n"); | ||
1280 | } | 2190 | } |
1281 | 2191 | ||
1282 | err = of_property_read_u32(node, "xlnx,flush-fsync", | 2192 | err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width); |
1283 | &xdev->flush_on_fsync); | ||
1284 | if (err < 0) | 2193 | if (err < 0) |
1285 | dev_warn(xdev->dev, "missing xlnx,flush-fsync property\n"); | 2194 | dev_warn(xdev->dev, "missing xlnx,addrwidth property\n"); |
2195 | |||
2196 | if (addr_width > 32) | ||
2197 | xdev->ext_addr = true; | ||
2198 | else | ||
2199 | xdev->ext_addr = false; | ||
2200 | |||
2201 | /* Set the dma mask bits */ | ||
2202 | dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width)); | ||
1286 | 2203 | ||
1287 | /* Initialize the DMA engine */ | 2204 | /* Initialize the DMA engine */ |
1288 | xdev->common.dev = &pdev->dev; | 2205 | xdev->common.dev = &pdev->dev; |
1289 | 2206 | ||
1290 | INIT_LIST_HEAD(&xdev->common.channels); | 2207 | INIT_LIST_HEAD(&xdev->common.channels); |
1291 | dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); | 2208 | if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) { |
1292 | dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); | 2209 | dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); |
2210 | dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); | ||
2211 | } | ||
1293 | 2212 | ||
1294 | xdev->common.device_alloc_chan_resources = | 2213 | xdev->common.device_alloc_chan_resources = |
1295 | xilinx_vdma_alloc_chan_resources; | 2214 | xilinx_dma_alloc_chan_resources; |
1296 | xdev->common.device_free_chan_resources = | 2215 | xdev->common.device_free_chan_resources = |
1297 | xilinx_vdma_free_chan_resources; | 2216 | xilinx_dma_free_chan_resources; |
1298 | xdev->common.device_prep_interleaved_dma = | 2217 | xdev->common.device_terminate_all = xilinx_dma_terminate_all; |
2218 | xdev->common.device_tx_status = xilinx_dma_tx_status; | ||
2219 | xdev->common.device_issue_pending = xilinx_dma_issue_pending; | ||
2220 | if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { | ||
2221 | xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; | ||
2222 | /* Residue calculation is supported by only AXI DMA */ | ||
2223 | xdev->common.residue_granularity = | ||
2224 | DMA_RESIDUE_GRANULARITY_SEGMENT; | ||
2225 | } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { | ||
2226 | dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); | ||
2227 | xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; | ||
2228 | } else { | ||
2229 | xdev->common.device_prep_interleaved_dma = | ||
1299 | xilinx_vdma_dma_prep_interleaved; | 2230 | xilinx_vdma_dma_prep_interleaved; |
1300 | xdev->common.device_terminate_all = xilinx_vdma_terminate_all; | 2231 | } |
1301 | xdev->common.device_tx_status = xilinx_vdma_tx_status; | ||
1302 | xdev->common.device_issue_pending = xilinx_vdma_issue_pending; | ||
1303 | 2232 | ||
1304 | platform_set_drvdata(pdev, xdev); | 2233 | platform_set_drvdata(pdev, xdev); |
1305 | 2234 | ||
1306 | /* Initialize the channels */ | 2235 | /* Initialize the channels */ |
1307 | for_each_child_of_node(node, child) { | 2236 | for_each_child_of_node(node, child) { |
1308 | err = xilinx_vdma_chan_probe(xdev, child); | 2237 | err = xilinx_dma_chan_probe(xdev, child); |
1309 | if (err < 0) | 2238 | if (err < 0) |
1310 | goto error; | 2239 | goto disable_clks; |
1311 | } | 2240 | } |
1312 | 2241 | ||
1313 | for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) | 2242 | if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
1314 | if (xdev->chan[i]) | 2243 | for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) |
1315 | xdev->chan[i]->num_frms = num_frames; | 2244 | if (xdev->chan[i]) |
2245 | xdev->chan[i]->num_frms = num_frames; | ||
2246 | } | ||
1316 | 2247 | ||
1317 | /* Register the DMA engine with the core */ | 2248 | /* Register the DMA engine with the core */ |
1318 | dma_async_device_register(&xdev->common); | 2249 | dma_async_device_register(&xdev->common); |
@@ -1329,49 +2260,47 @@ static int xilinx_vdma_probe(struct platform_device *pdev) | |||
1329 | 2260 | ||
1330 | return 0; | 2261 | return 0; |
1331 | 2262 | ||
2263 | disable_clks: | ||
2264 | xdma_disable_allclks(xdev); | ||
1332 | error: | 2265 | error: |
1333 | for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) | 2266 | for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) |
1334 | if (xdev->chan[i]) | 2267 | if (xdev->chan[i]) |
1335 | xilinx_vdma_chan_remove(xdev->chan[i]); | 2268 | xilinx_dma_chan_remove(xdev->chan[i]); |
1336 | 2269 | ||
1337 | return err; | 2270 | return err; |
1338 | } | 2271 | } |
1339 | 2272 | ||
1340 | /** | 2273 | /** |
1341 | * xilinx_vdma_remove - Driver remove function | 2274 | * xilinx_dma_remove - Driver remove function |
1342 | * @pdev: Pointer to the platform_device structure | 2275 | * @pdev: Pointer to the platform_device structure |
1343 | * | 2276 | * |
1344 | * Return: Always '0' | 2277 | * Return: Always '0' |
1345 | */ | 2278 | */ |
1346 | static int xilinx_vdma_remove(struct platform_device *pdev) | 2279 | static int xilinx_dma_remove(struct platform_device *pdev) |
1347 | { | 2280 | { |
1348 | struct xilinx_vdma_device *xdev = platform_get_drvdata(pdev); | 2281 | struct xilinx_dma_device *xdev = platform_get_drvdata(pdev); |
1349 | int i; | 2282 | int i; |
1350 | 2283 | ||
1351 | of_dma_controller_free(pdev->dev.of_node); | 2284 | of_dma_controller_free(pdev->dev.of_node); |
1352 | 2285 | ||
1353 | dma_async_device_unregister(&xdev->common); | 2286 | dma_async_device_unregister(&xdev->common); |
1354 | 2287 | ||
1355 | for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) | 2288 | for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) |
1356 | if (xdev->chan[i]) | 2289 | if (xdev->chan[i]) |
1357 | xilinx_vdma_chan_remove(xdev->chan[i]); | 2290 | xilinx_dma_chan_remove(xdev->chan[i]); |
2291 | |||
2292 | xdma_disable_allclks(xdev); | ||
1358 | 2293 | ||
1359 | return 0; | 2294 | return 0; |
1360 | } | 2295 | } |
1361 | 2296 | ||
1362 | static const struct of_device_id xilinx_vdma_of_ids[] = { | ||
1363 | { .compatible = "xlnx,axi-vdma-1.00.a",}, | ||
1364 | {} | ||
1365 | }; | ||
1366 | MODULE_DEVICE_TABLE(of, xilinx_vdma_of_ids); | ||
1367 | |||
1368 | static struct platform_driver xilinx_vdma_driver = { | 2297 | static struct platform_driver xilinx_vdma_driver = { |
1369 | .driver = { | 2298 | .driver = { |
1370 | .name = "xilinx-vdma", | 2299 | .name = "xilinx-vdma", |
1371 | .of_match_table = xilinx_vdma_of_ids, | 2300 | .of_match_table = xilinx_dma_of_ids, |
1372 | }, | 2301 | }, |
1373 | .probe = xilinx_vdma_probe, | 2302 | .probe = xilinx_dma_probe, |
1374 | .remove = xilinx_vdma_remove, | 2303 | .remove = xilinx_dma_remove, |
1375 | }; | 2304 | }; |
1376 | 2305 | ||
1377 | module_platform_driver(xilinx_vdma_driver); | 2306 | module_platform_driver(xilinx_vdma_driver); |
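With the hunks above, a channel on an AXI CDMA instance now advertises DMA_MEMCPY through xilinx_cdma_prep_memcpy, so any generic dmaengine memcpy client can drive it. A minimal, illustrative client sketch (not part of this patch; the function name is invented, and it calls the prep hook directly, as contemporary in-tree memcpy users do):

    #include <linux/dmaengine.h>

    /* Request any memcpy-capable channel, queue one copy, wait for it. */
    static int example_cdma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
    {
    	struct dma_async_tx_descriptor *tx;
    	struct dma_chan *chan;
    	dma_cap_mask_t mask;
    	dma_cookie_t cookie;
    	int ret = 0;

    	dma_cap_zero(mask);
    	dma_cap_set(DMA_MEMCPY, mask);

    	chan = dma_request_channel(mask, NULL, NULL);
    	if (!chan)
    		return -ENODEV;

    	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
    						   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    	if (!tx) {
    		ret = -EIO;
    		goto out;
    	}

    	cookie = dmaengine_submit(tx);
    	dma_async_issue_pending(chan);
    	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
    		ret = -EIO;
    out:
    	dma_release_channel(chan);
    	return ret;
    }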
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index 520ed1dd5780..4fd7f9802f1b 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c | |||
@@ -144,16 +144,16 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, | |||
144 | struct dw_dma_slave *slave = c->tx_param; | 144 | struct dw_dma_slave *slave = c->tx_param; |
145 | 145 | ||
146 | slave->dma_dev = &dma_dev->dev; | 146 | slave->dma_dev = &dma_dev->dev; |
147 | slave->src_master = 1; | 147 | slave->m_master = 0; |
148 | slave->dst_master = 0; | 148 | slave->p_master = 1; |
149 | } | 149 | } |
150 | 150 | ||
151 | if (c->rx_param) { | 151 | if (c->rx_param) { |
152 | struct dw_dma_slave *slave = c->rx_param; | 152 | struct dw_dma_slave *slave = c->rx_param; |
153 | 153 | ||
154 | slave->dma_dev = &dma_dev->dev; | 154 | slave->dma_dev = &dma_dev->dev; |
155 | slave->src_master = 1; | 155 | slave->m_master = 0; |
156 | slave->dst_master = 0; | 156 | slave->p_master = 1; |
157 | } | 157 | } |
158 | 158 | ||
159 | spi_pdata.dma_filter = lpss_dma_filter; | 159 | spi_pdata.dma_filter = lpss_dma_filter; |
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 98862aa5bb58..5eea74d7f9f4 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
@@ -1454,13 +1454,13 @@ byt_serial_setup(struct serial_private *priv, | |||
1454 | return -EINVAL; | 1454 | return -EINVAL; |
1455 | } | 1455 | } |
1456 | 1456 | ||
1457 | rx_param->src_master = 1; | 1457 | rx_param->m_master = 0; |
1458 | rx_param->dst_master = 0; | 1458 | rx_param->p_master = 1; |
1459 | 1459 | ||
1460 | dma->rxconf.src_maxburst = 16; | 1460 | dma->rxconf.src_maxburst = 16; |
1461 | 1461 | ||
1462 | tx_param->src_master = 1; | 1462 | tx_param->m_master = 0; |
1463 | tx_param->dst_master = 0; | 1463 | tx_param->p_master = 1; |
1464 | 1464 | ||
1465 | dma->txconf.dst_maxburst = 16; | 1465 | dma->txconf.dst_maxburst = 16; |
1466 | 1466 | ||
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 10fe2a211c2e..27e9ec8778eb 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h | |||
@@ -86,7 +86,7 @@ struct pl08x_channel_data { | |||
86 | * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 | 86 | * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 |
87 | */ | 87 | */ |
88 | struct pl08x_platform_data { | 88 | struct pl08x_platform_data { |
89 | const struct pl08x_channel_data *slave_channels; | 89 | struct pl08x_channel_data *slave_channels; |
90 | unsigned int num_slave_channels; | 90 | unsigned int num_slave_channels; |
91 | struct pl08x_channel_data memcpy_channel; | 91 | struct pl08x_channel_data memcpy_channel; |
92 | int (*get_xfer_signal)(const struct pl08x_channel_data *); | 92 | int (*get_xfer_signal)(const struct pl08x_channel_data *); |
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h index 71456442ebe3..f2e538aaddad 100644 --- a/include/linux/dma/dw.h +++ b/include/linux/dma/dw.h | |||
@@ -27,6 +27,7 @@ struct dw_dma; | |||
27 | * @regs: memory mapped I/O space | 27 | * @regs: memory mapped I/O space |
28 | * @clk: hclk clock | 28 | * @clk: hclk clock |
29 | * @dw: struct dw_dma that is filed by dw_dma_probe() | 29 | * @dw: struct dw_dma that is filed by dw_dma_probe() |
30 | * @pdata: pointer to platform data | ||
30 | */ | 31 | */ |
31 | struct dw_dma_chip { | 32 | struct dw_dma_chip { |
32 | struct device *dev; | 33 | struct device *dev; |
@@ -34,10 +35,12 @@ struct dw_dma_chip { | |||
34 | void __iomem *regs; | 35 | void __iomem *regs; |
35 | struct clk *clk; | 36 | struct clk *clk; |
36 | struct dw_dma *dw; | 37 | struct dw_dma *dw; |
38 | |||
39 | const struct dw_dma_platform_data *pdata; | ||
37 | }; | 40 | }; |
38 | 41 | ||
39 | /* Export to the platform drivers */ | 42 | /* Export to the platform drivers */ |
40 | int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata); | 43 | int dw_dma_probe(struct dw_dma_chip *chip); |
41 | int dw_dma_remove(struct dw_dma_chip *chip); | 44 | int dw_dma_remove(struct dw_dma_chip *chip); |
42 | 45 | ||
43 | /* DMA API extensions */ | 46 | /* DMA API extensions */ |
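dw_dma_probe() loses its platform-data argument; a glue driver now stores the (possibly NULL) pdata pointer in struct dw_dma_chip before probing. A hedged sketch of the new call sequence, assuming the chip structure keeps its dev/irq/regs/clk members (only regs, clk and dw are visible in this hunk) and with every other name invented:

    #include <linux/dma/dw.h>

    /* Illustrative glue fragment: pdata rides inside the chip structure and
     * may be left NULL, in which case the core autodetects its parameters. */
    static int example_dw_glue_probe(struct device *dev, void __iomem *regs,
    				 int irq, struct clk *clk,
    				 const struct dw_dma_platform_data *pdata)
    {
    	struct dw_dma_chip *chip;

    	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
    	if (!chip)
    		return -ENOMEM;

    	chip->dev = dev;
    	chip->irq = irq;
    	chip->regs = regs;
    	chip->clk = clk;
    	chip->pdata = pdata;		/* optional now that it is per-chip */

    	return dw_dma_probe(chip);	/* was dw_dma_probe(chip, pdata) */
    }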
diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h index 34b98f276ed0..3ae300052553 100644 --- a/include/linux/dma/xilinx_dma.h +++ b/include/linux/dma/xilinx_dma.h | |||
@@ -41,6 +41,20 @@ struct xilinx_vdma_config { | |||
41 | int ext_fsync; | 41 | int ext_fsync; |
42 | }; | 42 | }; |
43 | 43 | ||
44 | /** | ||
45 | * enum xdma_ip_type: DMA IP type. | ||
46 | * | ||
47 | * XDMA_TYPE_AXIDMA: Axi dma ip. | ||
48 | * XDMA_TYPE_CDMA: Axi cdma ip. | ||
49 | * XDMA_TYPE_VDMA: Axi vdma ip. | ||
50 | * | ||
51 | */ | ||
52 | enum xdma_ip_type { | ||
53 | XDMA_TYPE_AXIDMA = 0, | ||
54 | XDMA_TYPE_CDMA, | ||
55 | XDMA_TYPE_VDMA, | ||
56 | }; | ||
57 | |||
44 | int xilinx_vdma_channel_set_config(struct dma_chan *dchan, | 58 | int xilinx_vdma_channel_set_config(struct dma_chan *dchan, |
45 | struct xilinx_vdma_config *cfg); | 59 | struct xilinx_vdma_config *cfg); |
46 | 60 | ||
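The IP-type enum becomes shared, but xilinx_vdma_channel_set_config() remains the VDMA-specific way to program per-channel parameters. A small illustrative client fragment; only ext_fsync is visible in this hunk, so the remaining members are left zero-initialized and its exact semantics are the driver's, not this header's:

    #include <linux/dma/xilinx_dma.h>

    /* Illustrative: VDMA-only knobs still go through the dedicated call,
     * not dmaengine_slave_config(). */
    static int example_vdma_set_config(struct dma_chan *chan)
    {
    	struct xilinx_vdma_config cfg = {
    		.ext_fsync = 1,	/* frame-sync selection; see the driver */
    	};

    	return xilinx_vdma_channel_set_config(chan, &cfg);
    }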
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 017433712833..30de0197263a 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -804,6 +804,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( | |||
804 | sg_dma_address(&sg) = buf; | 804 | sg_dma_address(&sg) = buf; |
805 | sg_dma_len(&sg) = len; | 805 | sg_dma_len(&sg) = len; |
806 | 806 | ||
807 | if (!chan || !chan->device || !chan->device->device_prep_slave_sg) | ||
808 | return NULL; | ||
809 | |||
807 | return chan->device->device_prep_slave_sg(chan, &sg, 1, | 810 | return chan->device->device_prep_slave_sg(chan, &sg, 1, |
808 | dir, flags, NULL); | 811 | dir, flags, NULL); |
809 | } | 812 | } |
@@ -812,6 +815,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg( | |||
812 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | 815 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, |
813 | enum dma_transfer_direction dir, unsigned long flags) | 816 | enum dma_transfer_direction dir, unsigned long flags) |
814 | { | 817 | { |
818 | if (!chan || !chan->device || !chan->device->device_prep_slave_sg) | ||
819 | return NULL; | ||
820 | |||
815 | return chan->device->device_prep_slave_sg(chan, sgl, sg_len, | 821 | return chan->device->device_prep_slave_sg(chan, sgl, sg_len, |
816 | dir, flags, NULL); | 822 | dir, flags, NULL); |
817 | } | 823 | } |
@@ -823,6 +829,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg( | |||
823 | enum dma_transfer_direction dir, unsigned long flags, | 829 | enum dma_transfer_direction dir, unsigned long flags, |
824 | struct rio_dma_ext *rio_ext) | 830 | struct rio_dma_ext *rio_ext) |
825 | { | 831 | { |
832 | if (!chan || !chan->device || !chan->device->device_prep_slave_sg) | ||
833 | return NULL; | ||
834 | |||
826 | return chan->device->device_prep_slave_sg(chan, sgl, sg_len, | 835 | return chan->device->device_prep_slave_sg(chan, sgl, sg_len, |
827 | dir, flags, rio_ext); | 836 | dir, flags, rio_ext); |
828 | } | 837 | } |
@@ -833,6 +842,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( | |||
833 | size_t period_len, enum dma_transfer_direction dir, | 842 | size_t period_len, enum dma_transfer_direction dir, |
834 | unsigned long flags) | 843 | unsigned long flags) |
835 | { | 844 | { |
845 | if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic) | ||
846 | return NULL; | ||
847 | |||
836 | return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, | 848 | return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, |
837 | period_len, dir, flags); | 849 | period_len, dir, flags); |
838 | } | 850 | } |
@@ -841,6 +853,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( | |||
841 | struct dma_chan *chan, struct dma_interleaved_template *xt, | 853 | struct dma_chan *chan, struct dma_interleaved_template *xt, |
842 | unsigned long flags) | 854 | unsigned long flags) |
843 | { | 855 | { |
856 | if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma) | ||
857 | return NULL; | ||
858 | |||
844 | return chan->device->device_prep_interleaved_dma(chan, xt, flags); | 859 | return chan->device->device_prep_interleaved_dma(chan, xt, flags); |
845 | } | 860 | } |
846 | 861 | ||
@@ -848,7 +863,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset( | |||
848 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, | 863 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, |
849 | unsigned long flags) | 864 | unsigned long flags) |
850 | { | 865 | { |
851 | if (!chan || !chan->device) | 866 | if (!chan || !chan->device || !chan->device->device_prep_dma_memset) |
852 | return NULL; | 867 | return NULL; |
853 | 868 | ||
854 | return chan->device->device_prep_dma_memset(chan, dest, value, | 869 | return chan->device->device_prep_dma_memset(chan, dest, value, |
@@ -861,6 +876,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( | |||
861 | struct scatterlist *src_sg, unsigned int src_nents, | 876 | struct scatterlist *src_sg, unsigned int src_nents, |
862 | unsigned long flags) | 877 | unsigned long flags) |
863 | { | 878 | { |
879 | if (!chan || !chan->device || !chan->device->device_prep_dma_sg) | ||
880 | return NULL; | ||
881 | |||
864 | return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents, | 882 | return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents, |
865 | src_sg, src_nents, flags); | 883 | src_sg, src_nents, flags); |
866 | } | 884 | } |
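Each wrapper above now returns NULL when the channel, its device, or the matching prep callback is missing, instead of jumping through a NULL function pointer. That gives callers one uniform failure path, as in this illustrative fragment (function name invented):

    #include <linux/dmaengine.h>

    /* Illustrative: a NULL return now also means "channel cannot do this". */
    static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
    				size_t buf_len, size_t period_len)
    {
    	struct dma_async_tx_descriptor *desc;

    	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
    					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
    	if (!desc)
    		return -ENOSYS;

    	dmaengine_submit(desc);
    	dma_async_issue_pending(chan);
    	return 0;
    }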
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 03b6095d3b18..d15d8ba8cc24 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h | |||
@@ -21,15 +21,15 @@ | |||
21 | * @dma_dev: required DMA master device | 21 | * @dma_dev: required DMA master device |
22 | * @src_id: src request line | 22 | * @src_id: src request line |
23 | * @dst_id: dst request line | 23 | * @dst_id: dst request line |
24 | * @src_master: src master for transfers on allocated channel. | 24 | * @m_master: memory master for transfers on allocated channel |
25 | * @dst_master: dest master for transfers on allocated channel. | 25 | * @p_master: peripheral master for transfers on allocated channel |
26 | */ | 26 | */ |
27 | struct dw_dma_slave { | 27 | struct dw_dma_slave { |
28 | struct device *dma_dev; | 28 | struct device *dma_dev; |
29 | u8 src_id; | 29 | u8 src_id; |
30 | u8 dst_id; | 30 | u8 dst_id; |
31 | u8 src_master; | 31 | u8 m_master; |
32 | u8 dst_master; | 32 | u8 p_master; |
33 | }; | 33 | }; |
34 | 34 | ||
35 | /** | 35 | /** |
@@ -43,7 +43,7 @@ struct dw_dma_slave { | |||
43 | * @block_size: Maximum block size supported by the controller | 43 | * @block_size: Maximum block size supported by the controller |
44 | * @nr_masters: Number of AHB masters supported by the controller | 44 | * @nr_masters: Number of AHB masters supported by the controller |
45 | * @data_width: Maximum data width supported by hardware per AHB master | 45 | * @data_width: Maximum data width supported by hardware per AHB master |
46 | * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) | 46 | * (in bytes, power of 2) |
47 | */ | 47 | */ |
48 | struct dw_dma_platform_data { | 48 | struct dw_dma_platform_data { |
49 | unsigned int nr_channels; | 49 | unsigned int nr_channels; |
@@ -55,7 +55,7 @@ struct dw_dma_platform_data { | |||
55 | #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ | 55 | #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ |
56 | #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ | 56 | #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ |
57 | unsigned char chan_priority; | 57 | unsigned char chan_priority; |
58 | unsigned short block_size; | 58 | unsigned int block_size; |
59 | unsigned char nr_masters; | 59 | unsigned char nr_masters; |
60 | unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; | 60 | unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; |
61 | }; | 61 | }; |
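Two semantic changes meet in this header: dw_dma_slave now names its AHB masters by role (memory side vs. peripheral side) rather than by transfer direction, and data_width is given in bytes instead of the old log2 encoding. A before/after sketch with invented request lines and sizes:

    #include <linux/platform_data/dma-dw.h>

    /* Illustrative platform data only; request lines and widths are invented. */
    static struct dw_dma_slave example_slave = {
    	.src_id   = 2,
    	.dst_id   = 3,
    	.m_master = 0,	/* memory-side master (was .src_master/.dst_master,  */
    	.p_master = 1,	/*   which named the masters by transfer direction)  */
    };

    static struct dw_dma_platform_data example_pdata = {
    	.nr_channels = 8,
    	.nr_masters  = 2,
    	/* old encoding: { 2, 2 } meant 32 bits; new meaning: bytes directly */
    	.data_width  = { 4, 4 },
    };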
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c index ef4881e7753a..25993527370b 100644 --- a/sound/soc/intel/common/sst-firmware.c +++ b/sound/soc/intel/common/sst-firmware.c | |||
@@ -203,7 +203,7 @@ static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem, | |||
203 | 203 | ||
204 | chip->dev = dev; | 204 | chip->dev = dev; |
205 | 205 | ||
206 | err = dw_dma_probe(chip, NULL); | 206 | err = dw_dma_probe(chip); |
207 | if (err) | 207 | if (err) |
208 | return ERR_PTR(err); | 208 | return ERR_PTR(err); |
209 | 209 | ||