44 files changed, 2516 insertions, 687 deletions
diff --git a/Documentation/ABI/testing/sysfs-platform-hidma b/Documentation/ABI/testing/sysfs-platform-hidma
new file mode 100644
index 000000000000..d36441538660
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-hidma
@@ -0,0 +1,9 @@ | |||
1 | What: /sys/devices/platform/hidma-*/chid | ||
2 | /sys/devices/platform/QCOM8061:*/chid | ||
3 | Date: Dec 2015 | ||
4 | KernelVersion: 4.4 | ||
5 | Contact: "Sinan Kaya <okaya@codeaurora.org>" | ||
6 | Description: | ||
7 | Contains the ID of the channel within the HIDMA instance. | ||
8 | It is used to associate a given HIDMA channel with the | ||
9 | priority and weight calls in the management interface. | ||
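As a point of reference only, a minimal userspace sketch of how this attribute could be consumed; it is not part of the patch, and the concrete "hidma-0" device path is an assumption for illustration (real tools would enumerate /sys/devices/platform/hidma-*/ or QCOM8061:*).

#include <stdio.h>

/*
 * Illustrative only (not from this patch): read the channel ID exposed by
 * the new chid attribute. "hidma-0" is an assumed instance name.
 */
int main(void)
{
	FILE *f = fopen("/sys/devices/platform/hidma-0/chid", "r");
	unsigned int chid;

	if (!f)
		return 1;
	if (fscanf(f, "%u", &chid) == 1)
		printf("HIDMA channel id: %u\n", chid);
	fclose(f);
	return 0;
}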
diff --git a/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt b/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt
index 1396078d15ac..baf9b34d20bf 100644
--- a/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt
+++ b/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt
@@ -12,6 +12,10 @@ Required properties: | |||
12 | - reg: Should contain DMA registers location and length. | 12 | - reg: Should contain DMA registers location and length. |
13 | - interrupts: Should contain the DMA interrupts associated | 13 | - interrupts: Should contain the DMA interrupts associated |
14 | to the DMA channels in ascending order. | 14 | to the DMA channels in ascending order. |
15 | - interrupt-names: Should contain the names of the interrupts | ||
16 | in the form "dmaXX". | ||
17 | Use "dma-shared-all" for the common interrupt line | ||
18 | that is shared by all dma channels. | ||
15 | - #dma-cells: Must be <1>, the cell in the dmas property of the | 19 | - #dma-cells: Must be <1>, the cell in the dmas property of the |
16 | client device represents the DREQ number. | 20 | client device represents the DREQ number. |
17 | - brcm,dma-channel-mask: Bit mask representing the channels | 21 | - brcm,dma-channel-mask: Bit mask representing the channels |
@@ -34,13 +38,35 @@ dma: dma@7e007000 { | |||
34 | <1 24>, | 38 | <1 24>, |
35 | <1 25>, | 39 | <1 25>, |
36 | <1 26>, | 40 | <1 26>, |
41 | /* dma channels 11-14 share one irq */ | ||
37 | <1 27>, | 42 | <1 27>, |
43 | <1 27>, | ||
44 | <1 27>, | ||
45 | <1 27>, | ||
46 | /* unused shared irq for all channels */ | ||
38 | <1 28>; | 47 | <1 28>; |
48 | interrupt-names = "dma0", | ||
49 | "dma1", | ||
50 | "dma2", | ||
51 | "dma3", | ||
52 | "dma4", | ||
53 | "dma5", | ||
54 | "dma6", | ||
55 | "dma7", | ||
56 | "dma8", | ||
57 | "dma9", | ||
58 | "dma10", | ||
59 | "dma11", | ||
60 | "dma12", | ||
61 | "dma13", | ||
62 | "dma14", | ||
63 | "dma-shared-all"; | ||
39 | 64 | ||
40 | #dma-cells = <1>; | 65 | #dma-cells = <1>; |
41 | brcm,dma-channel-mask = <0x7f35>; | 66 | brcm,dma-channel-mask = <0x7f35>; |
42 | }; | 67 | }; |
43 | 68 | ||
69 | |||
44 | DMA clients connected to the BCM2835 DMA controller must use the format | 70 | DMA clients connected to the BCM2835 DMA controller must use the format |
45 | described in the dma.txt file, using a two-cell specifier for each channel. | 71 | described in the dma.txt file, using a two-cell specifier for each channel. |
46 | 72 | ||
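For readers unfamiliar with the client side, a hedged sketch of how a kernel driver typically consumes one of these channels through the generic dmaengine API; the "tx" request name, the FIFO address parameter and the function name are illustrative assumptions, not part of this binding.

#include <linux/dmaengine.h>
#include <linux/err.h>

/*
 * Illustrative sketch, not part of the patch: acquire a channel described
 * by the client's dmas/dma-names properties and configure a 4-byte-wide
 * device transfer. "tx" and sketch_setup_tx_dma() are assumed names.
 */
static int sketch_setup_tx_dma(struct device *dev, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	return dmaengine_slave_config(chan, &cfg);
}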
diff --git a/Documentation/devicetree/bindings/dma/mv-xor.txt b/Documentation/devicetree/bindings/dma/mv-xor.txt
index 276ef815ef32..c075f5988135 100644
--- a/Documentation/devicetree/bindings/dma/mv-xor.txt
+++ b/Documentation/devicetree/bindings/dma/mv-xor.txt
@@ -1,7 +1,10 @@ | |||
1 | * Marvell XOR engines | 1 | * Marvell XOR engines |
2 | 2 | ||
3 | Required properties: | 3 | Required properties: |
4 | - compatible: Should be "marvell,orion-xor" or "marvell,armada-380-xor" | 4 | - compatible: Should be one of the following: |
5 | - "marvell,orion-xor" | ||
6 | - "marvell,armada-380-xor" | ||
7 | - "marvell,armada-3700-xor". | ||
5 | - reg: Should contain registers location and length (two sets) | 8 | - reg: Should contain registers location and length (two sets) |
6 | the first set is the low registers, the second set the high | 9 | the first set is the low registers, the second set the high |
7 | registers for the XOR engine. | 10 | registers for the XOR engine. |
diff --git a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
index 1c9d48ea4914..9cbf5d9df8fd 100644
--- a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
@@ -13,6 +13,8 @@ Required properties: | |||
13 | - clock-names: must contain "bam_clk" entry | 13 | - clock-names: must contain "bam_clk" entry |
14 | - qcom,ee : indicates the active Execution Environment identifier (0-7) used in | 14 | - qcom,ee : indicates the active Execution Environment identifier (0-7) used in |
15 | the secure world. | 15 | the secure world. |
16 | - qcom,controlled-remotely : optional, indicates that the bam is controlled by | ||
17 | remote processor, i.e. the execution environment. | ||
16 | 18 | ||
17 | Example: | 19 | Example: |
18 | 20 | ||
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index c261598164a7..0f5583293c9c 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -13,6 +13,11 @@ Required properties: | |||
13 | - chan_priority: priority of channels. 0 (default): increase from chan 0->n, 1: | 13 | - chan_priority: priority of channels. 0 (default): increase from chan 0->n, 1: |
14 | increase from chan n->0 | 14 | increase from chan n->0 |
15 | - block_size: Maximum block size supported by the controller | 15 | - block_size: Maximum block size supported by the controller |
16 | - data-width: Maximum data width supported by hardware per AHB master | ||
17 | (in bytes, power of 2) | ||
18 | |||
19 | |||
20 | Deprecated properties: | ||
16 | - data_width: Maximum data width supported by hardware per AHB master | 21 | - data_width: Maximum data width supported by hardware per AHB master |
17 | (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) | 22 | (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) |
18 | 23 | ||
@@ -38,7 +43,7 @@ Example: | |||
38 | chan_allocation_order = <1>; | 43 | chan_allocation_order = <1>; |
39 | chan_priority = <1>; | 44 | chan_priority = <1>; |
40 | block_size = <0xfff>; | 45 | block_size = <0xfff>; |
41 | data_width = <3 3>; | 46 | data-width = <8 8>; |
42 | }; | 47 | }; |
43 | 48 | ||
44 | DMA clients connected to the Designware DMA controller must use the format | 49 | DMA clients connected to the Designware DMA controller must use the format |
@@ -47,8 +52,8 @@ The four cells in order are: | |||
47 | 52 | ||
48 | 1. A phandle pointing to the DMA controller | 53 | 1. A phandle pointing to the DMA controller |
49 | 2. The DMA request line number | 54 | 2. The DMA request line number |
50 | 3. Source master for transfers on allocated channel | 55 | 3. Memory master for transfers on allocated channel |
51 | 4. Destination master for transfers on allocated channel | 56 | 4. Peripheral master for transfers on allocated channel |
52 | 57 | ||
53 | Example: | 58 | Example: |
54 | 59 | ||
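The relationship between the deprecated data_width encoding and the new data-width property is a simple power of two, as the converted examples in this series (<3 3> -> <8 8>, <2> -> <4>) show. A small helper, purely for illustration and not part of the patch:

/*
 * Illustrative helper, not from the patch: the deprecated data_width
 * value is an exponent (0 = 8 bits, 1 = 16 bits, ..., 5 = 256 bits),
 * while the new data-width property is expressed in bytes.
 */
static inline unsigned int dw_data_width_to_bytes(unsigned int encoded)
{
	return 1U << encoded;	/* 2 -> 4 bytes, 3 -> 8 bytes */
}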
diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi
index cfb5052239a1..2f53bedb0cde 100644
--- a/arch/arc/boot/dts/abilis_tb10x.dtsi
+++ b/arch/arc/boot/dts/abilis_tb10x.dtsi
@@ -112,7 +112,7 @@ | |||
112 | chan_allocation_order = <0>; | 112 | chan_allocation_order = <0>; |
113 | chan_priority = <1>; | 113 | chan_priority = <1>; |
114 | block_size = <0x7ff>; | 114 | block_size = <0x7ff>; |
115 | data_width = <2>; | 115 | data-width = <4>; |
116 | clocks = <&ahb_clk>; | 116 | clocks = <&ahb_clk>; |
117 | clock-names = "hclk"; | 117 | clock-names = "hclk"; |
118 | }; | 118 | }; |
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 8aaf193711bf..84dcf3e5c8d9 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -47,9 +47,29 @@ | |||
47 | <1 24>, | 47 | <1 24>, |
48 | <1 25>, | 48 | <1 25>, |
49 | <1 26>, | 49 | <1 26>, |
50 | /* dma channels 11-14 share one irq */ | ||
50 | <1 27>, | 51 | <1 27>, |
52 | <1 27>, | ||
53 | <1 27>, | ||
54 | <1 27>, | ||
55 | /* unused shared irq for all channels */ | ||
51 | <1 28>; | 56 | <1 28>; |
52 | 57 | interrupt-names = "dma0", | |
58 | "dma1", | ||
59 | "dma2", | ||
60 | "dma3", | ||
61 | "dma4", | ||
62 | "dma5", | ||
63 | "dma6", | ||
64 | "dma7", | ||
65 | "dma8", | ||
66 | "dma9", | ||
67 | "dma10", | ||
68 | "dma11", | ||
69 | "dma12", | ||
70 | "dma13", | ||
71 | "dma14", | ||
72 | "dma-shared-all"; | ||
53 | #dma-cells = <1>; | 73 | #dma-cells = <1>; |
54 | brcm,dma-channel-mask = <0x7f35>; | 74 | brcm,dma-channel-mask = <0x7f35>; |
55 | }; | 75 | }; |
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 14594ce8c18a..449acf0d8272 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -117,7 +117,7 @@ | |||
117 | chan_priority = <1>; | 117 | chan_priority = <1>; |
118 | block_size = <0xfff>; | 118 | block_size = <0xfff>; |
119 | dma-masters = <2>; | 119 | dma-masters = <2>; |
120 | data_width = <3 3>; | 120 | data-width = <8 8>; |
121 | }; | 121 | }; |
122 | 122 | ||
123 | dma@eb000000 { | 123 | dma@eb000000 { |
@@ -133,7 +133,7 @@ | |||
133 | chan_allocation_order = <1>; | 133 | chan_allocation_order = <1>; |
134 | chan_priority = <1>; | 134 | chan_priority = <1>; |
135 | block_size = <0xfff>; | 135 | block_size = <0xfff>; |
136 | data_width = <3 3>; | 136 | data-width = <8 8>; |
137 | }; | 137 | }; |
138 | 138 | ||
139 | fsmc: flash@b0000000 { | 139 | fsmc: flash@b0000000 { |
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index bf445aa48282..00d6dcc1d9b6 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1365,8 +1365,8 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) | |||
1365 | slave->dma_dev = &dw_dmac0_device.dev; | 1365 | slave->dma_dev = &dw_dmac0_device.dev; |
1366 | slave->src_id = 0; | 1366 | slave->src_id = 0; |
1367 | slave->dst_id = 1; | 1367 | slave->dst_id = 1; |
1368 | slave->src_master = 1; | 1368 | slave->m_master = 1; |
1369 | slave->dst_master = 0; | 1369 | slave->p_master = 0; |
1370 | 1370 | ||
1371 | data->dma_slave = slave; | 1371 | data->dma_slave = slave; |
1372 | data->dma_filter = at32_mci_dma_filter; | 1372 | data->dma_filter = at32_mci_dma_filter; |
@@ -2061,16 +2061,16 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data, | |||
2061 | if (flags & AC97C_CAPTURE) { | 2061 | if (flags & AC97C_CAPTURE) { |
2062 | rx_dws->dma_dev = &dw_dmac0_device.dev; | 2062 | rx_dws->dma_dev = &dw_dmac0_device.dev; |
2063 | rx_dws->src_id = 3; | 2063 | rx_dws->src_id = 3; |
2064 | rx_dws->src_master = 0; | 2064 | rx_dws->m_master = 0; |
2065 | rx_dws->dst_master = 1; | 2065 | rx_dws->p_master = 1; |
2066 | } | 2066 | } |
2067 | 2067 | ||
2068 | /* Check if DMA slave interface for playback should be configured. */ | 2068 | /* Check if DMA slave interface for playback should be configured. */ |
2069 | if (flags & AC97C_PLAYBACK) { | 2069 | if (flags & AC97C_PLAYBACK) { |
2070 | tx_dws->dma_dev = &dw_dmac0_device.dev; | 2070 | tx_dws->dma_dev = &dw_dmac0_device.dev; |
2071 | tx_dws->dst_id = 4; | 2071 | tx_dws->dst_id = 4; |
2072 | tx_dws->src_master = 0; | 2072 | tx_dws->m_master = 0; |
2073 | tx_dws->dst_master = 1; | 2073 | tx_dws->p_master = 1; |
2074 | } | 2074 | } |
2075 | 2075 | ||
2076 | if (platform_device_add_data(pdev, data, | 2076 | if (platform_device_add_data(pdev, data, |
@@ -2141,8 +2141,8 @@ at32_add_device_abdac(unsigned int id, struct atmel_abdac_pdata *data) | |||
2141 | 2141 | ||
2142 | dws->dma_dev = &dw_dmac0_device.dev; | 2142 | dws->dma_dev = &dw_dmac0_device.dev; |
2143 | dws->dst_id = 2; | 2143 | dws->dst_id = 2; |
2144 | dws->src_master = 0; | 2144 | dws->m_master = 0; |
2145 | dws->dst_master = 1; | 2145 | dws->p_master = 1; |
2146 | 2146 | ||
2147 | if (platform_device_add_data(pdev, data, | 2147 | if (platform_device_add_data(pdev, data, |
2148 | sizeof(struct atmel_abdac_pdata))) | 2148 | sizeof(struct atmel_abdac_pdata))) |
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 902034991517..2cb6f7e04b5c 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -201,8 +201,8 @@ static struct sata_dwc_host_priv host_pvt; | |||
201 | static struct dw_dma_slave sata_dwc_dma_dws = { | 201 | static struct dw_dma_slave sata_dwc_dma_dws = { |
202 | .src_id = 0, | 202 | .src_id = 0, |
203 | .dst_id = 0, | 203 | .dst_id = 0, |
204 | .src_master = 0, | 204 | .m_master = 1, |
205 | .dst_master = 1, | 205 | .p_master = 0, |
206 | }; | 206 | }; |
207 | 207 | ||
208 | /* | 208 | /* |
@@ -1248,7 +1248,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
1248 | hsdev->dma->dev = &ofdev->dev; | 1248 | hsdev->dma->dev = &ofdev->dev; |
1249 | 1249 | ||
1250 | /* Initialize AHB DMAC */ | 1250 | /* Initialize AHB DMAC */ |
1251 | err = dw_dma_probe(hsdev->dma, NULL); | 1251 | err = dw_dma_probe(hsdev->dma); |
1252 | if (err) | 1252 | if (err) |
1253 | goto error_dma_iomap; | 1253 | goto error_dma_iomap; |
1254 | 1254 | ||
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d96d87c56f2e..67b37ce94143 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -332,7 +332,7 @@ config MPC512X_DMA | |||
332 | 332 | ||
333 | config MV_XOR | 333 | config MV_XOR |
334 | bool "Marvell XOR engine support" | 334 | bool "Marvell XOR engine support" |
335 | depends on PLAT_ORION | 335 | depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST |
336 | select DMA_ENGINE | 336 | select DMA_ENGINE |
337 | select DMA_ENGINE_RAID | 337 | select DMA_ENGINE_RAID |
338 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH | 338 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 9b42c0588550..81db1c4811ce 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -107,16 +107,20 @@ struct pl08x_driver_data; | |||
107 | /** | 107 | /** |
108 | * struct vendor_data - vendor-specific config parameters for PL08x derivatives | 108 | * struct vendor_data - vendor-specific config parameters for PL08x derivatives |
109 | * @channels: the number of channels available in this variant | 109 | * @channels: the number of channels available in this variant |
110 | * @signals: the number of request signals available from the hardware | ||
110 | * @dualmaster: whether this version supports dual AHB masters or not. | 111 | * @dualmaster: whether this version supports dual AHB masters or not. |
111 | * @nomadik: whether the channels have Nomadik security extension bits | 112 | * @nomadik: whether the channels have Nomadik security extension bits |
112 | * that need to be checked for permission before use and some registers are | 113 | * that need to be checked for permission before use and some registers are |
113 | * missing | 114 | * missing |
114 | * @pl080s: whether this version is a PL080S, which has separate register and | 115 | * @pl080s: whether this version is a PL080S, which has separate register and |
115 | * LLI word for transfer size. | 116 | * LLI word for transfer size. |
117 | * @max_transfer_size: the maximum single element transfer size for this | ||
118 | * PL08x variant. | ||
116 | */ | 119 | */ |
117 | struct vendor_data { | 120 | struct vendor_data { |
118 | u8 config_offset; | 121 | u8 config_offset; |
119 | u8 channels; | 122 | u8 channels; |
123 | u8 signals; | ||
120 | bool dualmaster; | 124 | bool dualmaster; |
121 | bool nomadik; | 125 | bool nomadik; |
122 | bool pl080s; | 126 | bool pl080s; |
@@ -235,7 +239,7 @@ struct pl08x_dma_chan { | |||
235 | struct virt_dma_chan vc; | 239 | struct virt_dma_chan vc; |
236 | struct pl08x_phy_chan *phychan; | 240 | struct pl08x_phy_chan *phychan; |
237 | const char *name; | 241 | const char *name; |
238 | const struct pl08x_channel_data *cd; | 242 | struct pl08x_channel_data *cd; |
239 | struct dma_slave_config cfg; | 243 | struct dma_slave_config cfg; |
240 | struct pl08x_txd *at; | 244 | struct pl08x_txd *at; |
241 | struct pl08x_driver_data *host; | 245 | struct pl08x_driver_data *host; |
@@ -1909,6 +1913,12 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1909 | 1913 | ||
1910 | if (slave) { | 1914 | if (slave) { |
1911 | chan->cd = &pl08x->pd->slave_channels[i]; | 1915 | chan->cd = &pl08x->pd->slave_channels[i]; |
1916 | /* | ||
1917 | * Some implementations have muxed signals, whereas some | ||
1918 | * use a mux in front of the signals and need dynamic | ||
1919 | * assignment of signals. | ||
1920 | */ | ||
1921 | chan->signal = i; | ||
1912 | pl08x_dma_slave_init(chan); | 1922 | pl08x_dma_slave_init(chan); |
1913 | } else { | 1923 | } else { |
1914 | chan->cd = &pl08x->pd->memcpy_channel; | 1924 | chan->cd = &pl08x->pd->memcpy_channel; |
@@ -2050,40 +2060,33 @@ static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec, | |||
2050 | struct of_dma *ofdma) | 2060 | struct of_dma *ofdma) |
2051 | { | 2061 | { |
2052 | struct pl08x_driver_data *pl08x = ofdma->of_dma_data; | 2062 | struct pl08x_driver_data *pl08x = ofdma->of_dma_data; |
2053 | struct pl08x_channel_data *data; | ||
2054 | struct pl08x_dma_chan *chan; | ||
2055 | struct dma_chan *dma_chan; | 2063 | struct dma_chan *dma_chan; |
2064 | struct pl08x_dma_chan *plchan; | ||
2056 | 2065 | ||
2057 | if (!pl08x) | 2066 | if (!pl08x) |
2058 | return NULL; | 2067 | return NULL; |
2059 | 2068 | ||
2060 | if (dma_spec->args_count != 2) | 2069 | if (dma_spec->args_count != 2) { |
2070 | dev_err(&pl08x->adev->dev, | ||
2071 | "DMA channel translation requires two cells\n"); | ||
2061 | return NULL; | 2072 | return NULL; |
2073 | } | ||
2062 | 2074 | ||
2063 | dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]); | 2075 | dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]); |
2064 | if (dma_chan) | 2076 | if (!dma_chan) { |
2065 | return dma_get_slave_channel(dma_chan); | 2077 | dev_err(&pl08x->adev->dev, |
2066 | 2078 | "DMA slave channel not found\n"); | |
2067 | chan = devm_kzalloc(pl08x->slave.dev, sizeof(*chan) + sizeof(*data), | ||
2068 | GFP_KERNEL); | ||
2069 | if (!chan) | ||
2070 | return NULL; | 2079 | return NULL; |
2080 | } | ||
2071 | 2081 | ||
2072 | data = (void *)&chan[1]; | 2082 | plchan = to_pl08x_chan(dma_chan); |
2073 | data->bus_id = "(none)"; | 2083 | dev_dbg(&pl08x->adev->dev, |
2074 | data->periph_buses = dma_spec->args[1]; | 2084 | "translated channel for signal %d\n", |
2075 | 2085 | dma_spec->args[0]); | |
2076 | chan->cd = data; | ||
2077 | chan->host = pl08x; | ||
2078 | chan->slave = true; | ||
2079 | chan->name = data->bus_id; | ||
2080 | chan->state = PL08X_CHAN_IDLE; | ||
2081 | chan->signal = dma_spec->args[0]; | ||
2082 | chan->vc.desc_free = pl08x_desc_free; | ||
2083 | |||
2084 | vchan_init(&chan->vc, &pl08x->slave); | ||
2085 | 2086 | ||
2086 | return dma_get_slave_channel(&chan->vc.chan); | 2087 | /* Augment channel data for applicable AHB buses */ |
2088 | plchan->cd->periph_buses = dma_spec->args[1]; | ||
2089 | return dma_get_slave_channel(dma_chan); | ||
2087 | } | 2090 | } |
2088 | 2091 | ||
2089 | static int pl08x_of_probe(struct amba_device *adev, | 2092 | static int pl08x_of_probe(struct amba_device *adev, |
@@ -2091,9 +2094,11 @@ static int pl08x_of_probe(struct amba_device *adev, | |||
2091 | struct device_node *np) | 2094 | struct device_node *np) |
2092 | { | 2095 | { |
2093 | struct pl08x_platform_data *pd; | 2096 | struct pl08x_platform_data *pd; |
2097 | struct pl08x_channel_data *chanp = NULL; | ||
2094 | u32 cctl_memcpy = 0; | 2098 | u32 cctl_memcpy = 0; |
2095 | u32 val; | 2099 | u32 val; |
2096 | int ret; | 2100 | int ret; |
2101 | int i; | ||
2097 | 2102 | ||
2098 | pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL); | 2103 | pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL); |
2099 | if (!pd) | 2104 | if (!pd) |
@@ -2195,6 +2200,27 @@ static int pl08x_of_probe(struct amba_device *adev, | |||
2195 | /* Use the buses that can access memory, obviously */ | 2200 | /* Use the buses that can access memory, obviously */ |
2196 | pd->memcpy_channel.periph_buses = pd->mem_buses; | 2201 | pd->memcpy_channel.periph_buses = pd->mem_buses; |
2197 | 2202 | ||
2203 | /* | ||
2204 | * Allocate channel data for all possible slave channels (one | ||
2205 | * for each possible signal), channels will then be allocated | ||
2206 | * for a device and have its AHB interfaces set up at | ||
2207 | * translation time. | ||
2208 | */ | ||
2209 | chanp = devm_kcalloc(&adev->dev, | ||
2210 | pl08x->vd->signals, | ||
2211 | sizeof(struct pl08x_channel_data), | ||
2212 | GFP_KERNEL); | ||
2213 | if (!chanp) | ||
2214 | return -ENOMEM; | ||
2215 | |||
2216 | pd->slave_channels = chanp; | ||
2217 | for (i = 0; i < pl08x->vd->signals; i++) { | ||
2218 | /* chanp->periph_buses will be assigned at translation */ | ||
2219 | chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i); | ||
2220 | chanp++; | ||
2221 | } | ||
2222 | pd->num_slave_channels = pl08x->vd->signals; | ||
2223 | |||
2198 | pl08x->pd = pd; | 2224 | pl08x->pd = pd; |
2199 | 2225 | ||
2200 | return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate, | 2226 | return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate, |
@@ -2234,6 +2260,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
2234 | goto out_no_pl08x; | 2260 | goto out_no_pl08x; |
2235 | } | 2261 | } |
2236 | 2262 | ||
2263 | /* Assign useful pointers to the driver state */ | ||
2264 | pl08x->adev = adev; | ||
2265 | pl08x->vd = vd; | ||
2266 | |||
2237 | /* Initialize memcpy engine */ | 2267 | /* Initialize memcpy engine */ |
2238 | dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); | 2268 | dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); |
2239 | pl08x->memcpy.dev = &adev->dev; | 2269 | pl08x->memcpy.dev = &adev->dev; |
@@ -2284,10 +2314,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
2284 | } | 2314 | } |
2285 | } | 2315 | } |
2286 | 2316 | ||
2287 | /* Assign useful pointers to the driver state */ | ||
2288 | pl08x->adev = adev; | ||
2289 | pl08x->vd = vd; | ||
2290 | |||
2291 | /* By default, AHB1 only. If dualmaster, from platform */ | 2317 | /* By default, AHB1 only. If dualmaster, from platform */ |
2292 | pl08x->lli_buses = PL08X_AHB1; | 2318 | pl08x->lli_buses = PL08X_AHB1; |
2293 | pl08x->mem_buses = PL08X_AHB1; | 2319 | pl08x->mem_buses = PL08X_AHB1; |
@@ -2438,6 +2464,7 @@ out_no_pl08x: | |||
2438 | static struct vendor_data vendor_pl080 = { | 2464 | static struct vendor_data vendor_pl080 = { |
2439 | .config_offset = PL080_CH_CONFIG, | 2465 | .config_offset = PL080_CH_CONFIG, |
2440 | .channels = 8, | 2466 | .channels = 8, |
2467 | .signals = 16, | ||
2441 | .dualmaster = true, | 2468 | .dualmaster = true, |
2442 | .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, | 2469 | .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, |
2443 | }; | 2470 | }; |
@@ -2445,6 +2472,7 @@ static struct vendor_data vendor_pl080 = { | |||
2445 | static struct vendor_data vendor_nomadik = { | 2472 | static struct vendor_data vendor_nomadik = { |
2446 | .config_offset = PL080_CH_CONFIG, | 2473 | .config_offset = PL080_CH_CONFIG, |
2447 | .channels = 8, | 2474 | .channels = 8, |
2475 | .signals = 32, | ||
2448 | .dualmaster = true, | 2476 | .dualmaster = true, |
2449 | .nomadik = true, | 2477 | .nomadik = true, |
2450 | .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, | 2478 | .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, |
@@ -2453,6 +2481,7 @@ static struct vendor_data vendor_nomadik = { | |||
2453 | static struct vendor_data vendor_pl080s = { | 2481 | static struct vendor_data vendor_pl080s = { |
2454 | .config_offset = PL080S_CH_CONFIG, | 2482 | .config_offset = PL080S_CH_CONFIG, |
2455 | .channels = 8, | 2483 | .channels = 8, |
2484 | .signals = 32, | ||
2456 | .pl080s = true, | 2485 | .pl080s = true, |
2457 | .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK, | 2486 | .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK, |
2458 | }; | 2487 | }; |
@@ -2460,6 +2489,7 @@ static struct vendor_data vendor_pl080s = { | |||
2460 | static struct vendor_data vendor_pl081 = { | 2489 | static struct vendor_data vendor_pl081 = { |
2461 | .config_offset = PL080_CH_CONFIG, | 2490 | .config_offset = PL080_CH_CONFIG, |
2462 | .channels = 2, | 2491 | .channels = 2, |
2492 | .signals = 16, | ||
2463 | .dualmaster = false, | 2493 | .dualmaster = false, |
2464 | .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, | 2494 | .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, |
2465 | }; | 2495 | }; |
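To make the new allocation scheme concrete, here is a hedged sketch of the platform data that pl08x_of_probe() now builds at runtime, expressed as the equivalent static board-file table; the array contents and the "sketch_" names are illustrative only and not part of the patch.

#include <linux/amba/pl08x.h>

/*
 * Illustrative equivalent (not from the patch) of what pl08x_of_probe()
 * now constructs dynamically: one channel_data entry per request signal,
 * with periph_buses left to be filled in by pl08x_of_xlate().
 */
static struct pl08x_channel_data sketch_slave_channels[] = {
	{ .bus_id = "slave0" },
	{ .bus_id = "slave1" },
	/* ... one entry per signal, up to vd->signals entries ... */
};

static struct pl08x_platform_data sketch_pd = {
	.slave_channels		= sketch_slave_channels,
	.num_slave_channels	= ARRAY_SIZE(sketch_slave_channels),
};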
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 996c4b00d323..6149b27c33ad 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -46,6 +46,9 @@ | |||
46 | 46 | ||
47 | #include "virt-dma.h" | 47 | #include "virt-dma.h" |
48 | 48 | ||
49 | #define BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED 14 | ||
50 | #define BCM2835_DMA_CHAN_NAME_SIZE 8 | ||
51 | |||
49 | struct bcm2835_dmadev { | 52 | struct bcm2835_dmadev { |
50 | struct dma_device ddev; | 53 | struct dma_device ddev; |
51 | spinlock_t lock; | 54 | spinlock_t lock; |
@@ -73,7 +76,6 @@ struct bcm2835_chan { | |||
73 | struct list_head node; | 76 | struct list_head node; |
74 | 77 | ||
75 | struct dma_slave_config cfg; | 78 | struct dma_slave_config cfg; |
76 | bool cyclic; | ||
77 | unsigned int dreq; | 79 | unsigned int dreq; |
78 | 80 | ||
79 | int ch; | 81 | int ch; |
@@ -82,6 +84,9 @@ struct bcm2835_chan { | |||
82 | 84 | ||
83 | void __iomem *chan_base; | 85 | void __iomem *chan_base; |
84 | int irq_number; | 86 | int irq_number; |
87 | unsigned int irq_flags; | ||
88 | |||
89 | bool is_lite_channel; | ||
85 | }; | 90 | }; |
86 | 91 | ||
87 | struct bcm2835_desc { | 92 | struct bcm2835_desc { |
@@ -89,47 +94,104 @@ struct bcm2835_desc { | |||
89 | struct virt_dma_desc vd; | 94 | struct virt_dma_desc vd; |
90 | enum dma_transfer_direction dir; | 95 | enum dma_transfer_direction dir; |
91 | 96 | ||
92 | struct bcm2835_cb_entry *cb_list; | ||
93 | |||
94 | unsigned int frames; | 97 | unsigned int frames; |
95 | size_t size; | 98 | size_t size; |
99 | |||
100 | bool cyclic; | ||
101 | |||
102 | struct bcm2835_cb_entry cb_list[]; | ||
96 | }; | 103 | }; |
97 | 104 | ||
98 | #define BCM2835_DMA_CS 0x00 | 105 | #define BCM2835_DMA_CS 0x00 |
99 | #define BCM2835_DMA_ADDR 0x04 | 106 | #define BCM2835_DMA_ADDR 0x04 |
107 | #define BCM2835_DMA_TI 0x08 | ||
100 | #define BCM2835_DMA_SOURCE_AD 0x0c | 108 | #define BCM2835_DMA_SOURCE_AD 0x0c |
101 | #define BCM2835_DMA_DEST_AD 0x10 | 109 | #define BCM2835_DMA_DEST_AD 0x10 |
102 | #define BCM2835_DMA_NEXTCB 0x1C | 110 | #define BCM2835_DMA_LEN 0x14 |
111 | #define BCM2835_DMA_STRIDE 0x18 | ||
112 | #define BCM2835_DMA_NEXTCB 0x1c | ||
113 | #define BCM2835_DMA_DEBUG 0x20 | ||
103 | 114 | ||
104 | /* DMA CS Control and Status bits */ | 115 | /* DMA CS Control and Status bits */ |
105 | #define BCM2835_DMA_ACTIVE BIT(0) | 116 | #define BCM2835_DMA_ACTIVE BIT(0) /* activate the DMA */ |
106 | #define BCM2835_DMA_INT BIT(2) | 117 | #define BCM2835_DMA_END BIT(1) /* current CB has ended */ |
118 | #define BCM2835_DMA_INT BIT(2) /* interrupt status */ | ||
119 | #define BCM2835_DMA_DREQ BIT(3) /* DREQ state */ | ||
107 | #define BCM2835_DMA_ISPAUSED BIT(4) /* Pause requested or not active */ | 120 | #define BCM2835_DMA_ISPAUSED BIT(4) /* Pause requested or not active */ |
108 | #define BCM2835_DMA_ISHELD BIT(5) /* Is held by DREQ flow control */ | 121 | #define BCM2835_DMA_ISHELD BIT(5) /* Is held by DREQ flow control */ |
109 | #define BCM2835_DMA_ERR BIT(8) | 122 | #define BCM2835_DMA_WAITING_FOR_WRITES BIT(6) /* waiting for last |
123 | * AXI-write to ack | ||
124 | */ | ||
125 | #define BCM2835_DMA_ERR BIT(8) | ||
126 | #define BCM2835_DMA_PRIORITY(x) ((x & 15) << 16) /* AXI priority */ | ||
127 | #define BCM2835_DMA_PANIC_PRIORITY(x) ((x & 15) << 20) /* panic priority */ | ||
128 | /* current value of TI.BCM2835_DMA_WAIT_RESP */ | ||
129 | #define BCM2835_DMA_WAIT_FOR_WRITES BIT(28) | ||
130 | #define BCM2835_DMA_DIS_DEBUG BIT(29) /* disable debug pause signal */ | ||
110 | #define BCM2835_DMA_ABORT BIT(30) /* Stop current CB, go to next, WO */ | 131 | #define BCM2835_DMA_ABORT BIT(30) /* Stop current CB, go to next, WO */ |
111 | #define BCM2835_DMA_RESET BIT(31) /* WO, self clearing */ | 132 | #define BCM2835_DMA_RESET BIT(31) /* WO, self clearing */ |
112 | 133 | ||
134 | /* Transfer information bits - also bcm2835_cb.info field */ | ||
113 | #define BCM2835_DMA_INT_EN BIT(0) | 135 | #define BCM2835_DMA_INT_EN BIT(0) |
136 | #define BCM2835_DMA_TDMODE BIT(1) /* 2D-Mode */ | ||
137 | #define BCM2835_DMA_WAIT_RESP BIT(3) /* wait for AXI-write to be acked */ | ||
114 | #define BCM2835_DMA_D_INC BIT(4) | 138 | #define BCM2835_DMA_D_INC BIT(4) |
115 | #define BCM2835_DMA_D_DREQ BIT(6) | 139 | #define BCM2835_DMA_D_WIDTH BIT(5) /* 128bit writes if set */ |
140 | #define BCM2835_DMA_D_DREQ BIT(6) /* enable DREQ for destination */ | ||
141 | #define BCM2835_DMA_D_IGNORE BIT(7) /* ignore destination writes */ | ||
116 | #define BCM2835_DMA_S_INC BIT(8) | 142 | #define BCM2835_DMA_S_INC BIT(8) |
117 | #define BCM2835_DMA_S_DREQ BIT(10) | 143 | #define BCM2835_DMA_S_WIDTH BIT(9) /* 128bit writes if set */ |
118 | 144 | #define BCM2835_DMA_S_DREQ BIT(10) /* enable SREQ for source */ | |
119 | #define BCM2835_DMA_PER_MAP(x) ((x) << 16) | 145 | #define BCM2835_DMA_S_IGNORE BIT(11) /* ignore source reads - read 0 */ |
146 | #define BCM2835_DMA_BURST_LENGTH(x) ((x & 15) << 12) | ||
147 | #define BCM2835_DMA_PER_MAP(x) ((x & 31) << 16) /* REQ source */ | ||
148 | #define BCM2835_DMA_WAIT(x) ((x & 31) << 21) /* add DMA-wait cycles */ | ||
149 | #define BCM2835_DMA_NO_WIDE_BURSTS BIT(26) /* no 2 beat write bursts */ | ||
150 | |||
151 | /* debug register bits */ | ||
152 | #define BCM2835_DMA_DEBUG_LAST_NOT_SET_ERR BIT(0) | ||
153 | #define BCM2835_DMA_DEBUG_FIFO_ERR BIT(1) | ||
154 | #define BCM2835_DMA_DEBUG_READ_ERR BIT(2) | ||
155 | #define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_SHIFT 4 | ||
156 | #define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_BITS 4 | ||
157 | #define BCM2835_DMA_DEBUG_ID_SHIFT 16 | ||
158 | #define BCM2835_DMA_DEBUG_ID_BITS 9 | ||
159 | #define BCM2835_DMA_DEBUG_STATE_SHIFT 16 | ||
160 | #define BCM2835_DMA_DEBUG_STATE_BITS 9 | ||
161 | #define BCM2835_DMA_DEBUG_VERSION_SHIFT 25 | ||
162 | #define BCM2835_DMA_DEBUG_VERSION_BITS 3 | ||
163 | #define BCM2835_DMA_DEBUG_LITE BIT(28) | ||
164 | |||
165 | /* shared registers for all dma channels */ | ||
166 | #define BCM2835_DMA_INT_STATUS 0xfe0 | ||
167 | #define BCM2835_DMA_ENABLE 0xff0 | ||
120 | 168 | ||
121 | #define BCM2835_DMA_DATA_TYPE_S8 1 | 169 | #define BCM2835_DMA_DATA_TYPE_S8 1 |
122 | #define BCM2835_DMA_DATA_TYPE_S16 2 | 170 | #define BCM2835_DMA_DATA_TYPE_S16 2 |
123 | #define BCM2835_DMA_DATA_TYPE_S32 4 | 171 | #define BCM2835_DMA_DATA_TYPE_S32 4 |
124 | #define BCM2835_DMA_DATA_TYPE_S128 16 | 172 | #define BCM2835_DMA_DATA_TYPE_S128 16 |
125 | 173 | ||
126 | #define BCM2835_DMA_BULK_MASK BIT(0) | ||
127 | #define BCM2835_DMA_FIQ_MASK (BIT(2) | BIT(3)) | ||
128 | |||
129 | /* Valid only for channels 0 - 14, 15 has its own base address */ | 174 | /* Valid only for channels 0 - 14, 15 has its own base address */ |
130 | #define BCM2835_DMA_CHAN(n) ((n) << 8) /* Base address */ | 175 | #define BCM2835_DMA_CHAN(n) ((n) << 8) /* Base address */ |
131 | #define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n)) | 176 | #define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n)) |
132 | 177 | ||
178 | /* the max dma length for different channels */ | ||
179 | #define MAX_DMA_LEN SZ_1G | ||
180 | #define MAX_LITE_DMA_LEN (SZ_64K - 4) | ||
181 | |||
182 | static inline size_t bcm2835_dma_max_frame_length(struct bcm2835_chan *c) | ||
183 | { | ||
184 | /* lite and normal channels have different max frame length */ | ||
185 | return c->is_lite_channel ? MAX_LITE_DMA_LEN : MAX_DMA_LEN; | ||
186 | } | ||
187 | |||
188 | /* how many frames of max_len size do we need to transfer len bytes */ | ||
189 | static inline size_t bcm2835_dma_frames_for_length(size_t len, | ||
190 | size_t max_len) | ||
191 | { | ||
192 | return DIV_ROUND_UP(len, max_len); | ||
193 | } | ||
194 | |||
133 | static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d) | 195 | static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d) |
134 | { | 196 | { |
135 | return container_of(d, struct bcm2835_dmadev, ddev); | 197 | return container_of(d, struct bcm2835_dmadev, ddev); |
@@ -146,19 +208,209 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc( | |||
146 | return container_of(t, struct bcm2835_desc, vd.tx); | 208 | return container_of(t, struct bcm2835_desc, vd.tx); |
147 | } | 209 | } |
148 | 210 | ||
149 | static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) | 211 | static void bcm2835_dma_free_cb_chain(struct bcm2835_desc *desc) |
150 | { | 212 | { |
151 | struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd); | 213 | size_t i; |
152 | int i; | ||
153 | 214 | ||
154 | for (i = 0; i < desc->frames; i++) | 215 | for (i = 0; i < desc->frames; i++) |
155 | dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb, | 216 | dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb, |
156 | desc->cb_list[i].paddr); | 217 | desc->cb_list[i].paddr); |
157 | 218 | ||
158 | kfree(desc->cb_list); | ||
159 | kfree(desc); | 219 | kfree(desc); |
160 | } | 220 | } |
161 | 221 | ||
222 | static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) | ||
223 | { | ||
224 | bcm2835_dma_free_cb_chain( | ||
225 | container_of(vd, struct bcm2835_desc, vd)); | ||
226 | } | ||
227 | |||
228 | static void bcm2835_dma_create_cb_set_length( | ||
229 | struct bcm2835_chan *chan, | ||
230 | struct bcm2835_dma_cb *control_block, | ||
231 | size_t len, | ||
232 | size_t period_len, | ||
233 | size_t *total_len, | ||
234 | u32 finalextrainfo) | ||
235 | { | ||
236 | size_t max_len = bcm2835_dma_max_frame_length(chan); | ||
237 | |||
238 | /* set the length taking lite-channel limitations into account */ | ||
239 | control_block->length = min_t(u32, len, max_len); | ||
240 | |||
241 | /* finished if we have no period_length */ | ||
242 | if (!period_len) | ||
243 | return; | ||
244 | |||
245 | /* | ||
246 | * period_len means that we need to generate | ||
247 | * transfers that are terminating at every | ||
248 | * multiple of period_len - this is typically | ||
249 | * used to set the interrupt flag in info | ||
250 | * which is required during cyclic transfers | ||
251 | */ | ||
252 | |||
253 | /* have we filled in period_length yet? */ | ||
254 | if (*total_len + control_block->length < period_len) | ||
255 | return; | ||
256 | |||
257 | /* calculate the length that remains to reach period_length */ | ||
258 | control_block->length = period_len - *total_len; | ||
259 | |||
260 | /* reset total_length for next period */ | ||
261 | *total_len = 0; | ||
262 | |||
263 | /* add extrainfo bits in info */ | ||
264 | control_block->info |= finalextrainfo; | ||
265 | } | ||
266 | |||
267 | static inline size_t bcm2835_dma_count_frames_for_sg( | ||
268 | struct bcm2835_chan *c, | ||
269 | struct scatterlist *sgl, | ||
270 | unsigned int sg_len) | ||
271 | { | ||
272 | size_t frames = 0; | ||
273 | struct scatterlist *sgent; | ||
274 | unsigned int i; | ||
275 | size_t plength = bcm2835_dma_max_frame_length(c); | ||
276 | |||
277 | for_each_sg(sgl, sgent, sg_len, i) | ||
278 | frames += bcm2835_dma_frames_for_length( | ||
279 | sg_dma_len(sgent), plength); | ||
280 | |||
281 | return frames; | ||
282 | } | ||
283 | |||
284 | /** | ||
285 | * bcm2835_dma_create_cb_chain - create a control block and fills data in | ||
286 | * | ||
287 | * @chan: the @dma_chan for which we run this | ||
288 | * @direction: the direction in which we transfer | ||
289 | * @cyclic: it is a cyclic transfer | ||
290 | * @info: the default info bits to apply per controlblock | ||
291 | * @frames: number of controlblocks to allocate | ||
292 | * @src: the src address to assign (if the S_INC bit is set | ||
293 | * in @info, then it gets incremented) | ||
294 | * @dst: the dst address to assign (if the D_INC bit is set | ||
295 | * in @info, then it gets incremented) | ||
296 | * @buf_len: the full buffer length (may also be 0) | ||
297 | * @period_len: the period length when to apply @finalextrainfo | ||
298 | * in addition to the last transfer | ||
299 | * this will also break some control-blocks early | ||
300 | * @finalextrainfo: additional bits in last controlblock | ||
301 | * (or when period_len is reached in case of cyclic) | ||
302 | * @gfp: the GFP flag to use for allocation | ||
303 | */ | ||
304 | static struct bcm2835_desc *bcm2835_dma_create_cb_chain( | ||
305 | struct dma_chan *chan, enum dma_transfer_direction direction, | ||
306 | bool cyclic, u32 info, u32 finalextrainfo, size_t frames, | ||
307 | dma_addr_t src, dma_addr_t dst, size_t buf_len, | ||
308 | size_t period_len, gfp_t gfp) | ||
309 | { | ||
310 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | ||
311 | size_t len = buf_len, total_len; | ||
312 | size_t frame; | ||
313 | struct bcm2835_desc *d; | ||
314 | struct bcm2835_cb_entry *cb_entry; | ||
315 | struct bcm2835_dma_cb *control_block; | ||
316 | |||
317 | if (!frames) | ||
318 | return NULL; | ||
319 | |||
320 | /* allocate and setup the descriptor. */ | ||
321 | d = kzalloc(sizeof(*d) + frames * sizeof(struct bcm2835_cb_entry), | ||
322 | gfp); | ||
323 | if (!d) | ||
324 | return NULL; | ||
325 | |||
326 | d->c = c; | ||
327 | d->dir = direction; | ||
328 | d->cyclic = cyclic; | ||
329 | |||
330 | /* | ||
331 | * Iterate over all frames, create a control block | ||
332 | * for each frame and link them together. | ||
333 | */ | ||
334 | for (frame = 0, total_len = 0; frame < frames; d->frames++, frame++) { | ||
335 | cb_entry = &d->cb_list[frame]; | ||
336 | cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp, | ||
337 | &cb_entry->paddr); | ||
338 | if (!cb_entry->cb) | ||
339 | goto error_cb; | ||
340 | |||
341 | /* fill in the control block */ | ||
342 | control_block = cb_entry->cb; | ||
343 | control_block->info = info; | ||
344 | control_block->src = src; | ||
345 | control_block->dst = dst; | ||
346 | control_block->stride = 0; | ||
347 | control_block->next = 0; | ||
348 | /* set up length in control_block if requested */ | ||
349 | if (buf_len) { | ||
350 | /* calculate length honoring period_length */ | ||
351 | bcm2835_dma_create_cb_set_length( | ||
352 | c, control_block, | ||
353 | len, period_len, &total_len, | ||
354 | cyclic ? finalextrainfo : 0); | ||
355 | |||
356 | /* calculate new remaining length */ | ||
357 | len -= control_block->length; | ||
358 | } | ||
359 | |||
360 | /* link this to the last controlblock */ | ||
361 | if (frame) | ||
362 | d->cb_list[frame - 1].cb->next = cb_entry->paddr; | ||
363 | |||
364 | /* update src and dst and length */ | ||
365 | if (src && (info & BCM2835_DMA_S_INC)) | ||
366 | src += control_block->length; | ||
367 | if (dst && (info & BCM2835_DMA_D_INC)) | ||
368 | dst += control_block->length; | ||
369 | |||
370 | /* Length of total transfer */ | ||
371 | d->size += control_block->length; | ||
372 | } | ||
373 | |||
374 | /* the last frame requires extra flags */ | ||
375 | d->cb_list[d->frames - 1].cb->info |= finalextrainfo; | ||
376 | |||
377 | /* detect a size mismatch */ | ||
378 | if (buf_len && (d->size != buf_len)) | ||
379 | goto error_cb; | ||
380 | |||
381 | return d; | ||
382 | error_cb: | ||
383 | bcm2835_dma_free_cb_chain(d); | ||
384 | |||
385 | return NULL; | ||
386 | } | ||
387 | |||
388 | static void bcm2835_dma_fill_cb_chain_with_sg( | ||
389 | struct dma_chan *chan, | ||
390 | enum dma_transfer_direction direction, | ||
391 | struct bcm2835_cb_entry *cb, | ||
392 | struct scatterlist *sgl, | ||
393 | unsigned int sg_len) | ||
394 | { | ||
395 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | ||
396 | size_t max_len = bcm2835_dma_max_frame_length(c); | ||
397 | unsigned int i, len; | ||
398 | dma_addr_t addr; | ||
399 | struct scatterlist *sgent; | ||
400 | |||
401 | for_each_sg(sgl, sgent, sg_len, i) { | ||
402 | for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent); | ||
403 | len > 0; | ||
404 | addr += cb->cb->length, len -= cb->cb->length, cb++) { | ||
405 | if (direction == DMA_DEV_TO_MEM) | ||
406 | cb->cb->dst = addr; | ||
407 | else | ||
408 | cb->cb->src = addr; | ||
409 | cb->cb->length = min(len, max_len); | ||
410 | } | ||
411 | } | ||
412 | } | ||
413 | |||
162 | static int bcm2835_dma_abort(void __iomem *chan_base) | 414 | static int bcm2835_dma_abort(void __iomem *chan_base) |
163 | { | 415 | { |
164 | unsigned long cs; | 416 | unsigned long cs; |
@@ -218,6 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) | |||
218 | struct bcm2835_desc *d; | 470 | struct bcm2835_desc *d; |
219 | unsigned long flags; | 471 | unsigned long flags; |
220 | 472 | ||
473 | /* check the shared interrupt */ | ||
474 | if (c->irq_flags & IRQF_SHARED) { | ||
475 | /* check if the interrupt is enabled */ | ||
476 | flags = readl(c->chan_base + BCM2835_DMA_CS); | ||
477 | /* if not set then we are not the reason for the irq */ | ||
478 | if (!(flags & BCM2835_DMA_INT)) | ||
479 | return IRQ_NONE; | ||
480 | } | ||
481 | |||
221 | spin_lock_irqsave(&c->vc.lock, flags); | 482 | spin_lock_irqsave(&c->vc.lock, flags); |
222 | 483 | ||
223 | /* Acknowledge interrupt */ | 484 | /* Acknowledge interrupt */ |
@@ -226,12 +487,18 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) | |||
226 | d = c->desc; | 487 | d = c->desc; |
227 | 488 | ||
228 | if (d) { | 489 | if (d) { |
229 | /* TODO Only works for cyclic DMA */ | 490 | if (d->cyclic) { |
230 | vchan_cyclic_callback(&d->vd); | 491 | /* call the cyclic callback */ |
231 | } | 492 | vchan_cyclic_callback(&d->vd); |
232 | 493 | ||
233 | /* Keep the DMA engine running */ | 494 | /* Keep the DMA engine running */ |
234 | writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); | 495 | writel(BCM2835_DMA_ACTIVE, |
496 | c->chan_base + BCM2835_DMA_CS); | ||
497 | } else { | ||
498 | vchan_cookie_complete(&c->desc->vd); | ||
499 | bcm2835_dma_start_desc(c); | ||
500 | } | ||
501 | } | ||
235 | 502 | ||
236 | spin_unlock_irqrestore(&c->vc.lock, flags); | 503 | spin_unlock_irqrestore(&c->vc.lock, flags); |
237 | 504 | ||
@@ -252,8 +519,8 @@ static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan) | |||
252 | return -ENOMEM; | 519 | return -ENOMEM; |
253 | } | 520 | } |
254 | 521 | ||
255 | return request_irq(c->irq_number, | 522 | return request_irq(c->irq_number, bcm2835_dma_callback, |
256 | bcm2835_dma_callback, 0, "DMA IRQ", c); | 523 | c->irq_flags, "DMA IRQ", c); |
257 | } | 524 | } |
258 | 525 | ||
259 | static void bcm2835_dma_free_chan_resources(struct dma_chan *chan) | 526 | static void bcm2835_dma_free_chan_resources(struct dma_chan *chan) |
@@ -339,8 +606,6 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan) | |||
339 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | 606 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); |
340 | unsigned long flags; | 607 | unsigned long flags; |
341 | 608 | ||
342 | c->cyclic = true; /* Nothing else is implemented */ | ||
343 | |||
344 | spin_lock_irqsave(&c->vc.lock, flags); | 609 | spin_lock_irqsave(&c->vc.lock, flags); |
345 | if (vchan_issue_pending(&c->vc) && !c->desc) | 610 | if (vchan_issue_pending(&c->vc) && !c->desc) |
346 | bcm2835_dma_start_desc(c); | 611 | bcm2835_dma_start_desc(c); |
@@ -348,122 +613,160 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan) | |||
348 | spin_unlock_irqrestore(&c->vc.lock, flags); | 613 | spin_unlock_irqrestore(&c->vc.lock, flags); |
349 | } | 614 | } |
350 | 615 | ||
351 | static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( | 616 | struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy( |
352 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 617 | struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, |
353 | size_t period_len, enum dma_transfer_direction direction, | 618 | size_t len, unsigned long flags) |
354 | unsigned long flags) | ||
355 | { | 619 | { |
356 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | 620 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); |
357 | enum dma_slave_buswidth dev_width; | ||
358 | struct bcm2835_desc *d; | 621 | struct bcm2835_desc *d; |
359 | dma_addr_t dev_addr; | 622 | u32 info = BCM2835_DMA_D_INC | BCM2835_DMA_S_INC; |
360 | unsigned int es, sync_type; | 623 | u32 extra = BCM2835_DMA_INT_EN | BCM2835_DMA_WAIT_RESP; |
361 | unsigned int frame; | 624 | size_t max_len = bcm2835_dma_max_frame_length(c); |
362 | int i; | 625 | size_t frames; |
626 | |||
627 | /* if src, dst or len is not given return with an error */ | ||
628 | if (!src || !dst || !len) | ||
629 | return NULL; | ||
630 | |||
631 | /* calculate number of frames */ | ||
632 | frames = bcm2835_dma_frames_for_length(len, max_len); | ||
633 | |||
634 | /* allocate the CB chain - this also fills in the pointers */ | ||
635 | d = bcm2835_dma_create_cb_chain(chan, DMA_MEM_TO_MEM, false, | ||
636 | info, extra, frames, | ||
637 | src, dst, len, 0, GFP_KERNEL); | ||
638 | if (!d) | ||
639 | return NULL; | ||
640 | |||
641 | return vchan_tx_prep(&c->vc, &d->vd, flags); | ||
642 | } | ||
643 | |||
644 | static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg( | ||
645 | struct dma_chan *chan, | ||
646 | struct scatterlist *sgl, unsigned int sg_len, | ||
647 | enum dma_transfer_direction direction, | ||
648 | unsigned long flags, void *context) | ||
649 | { | ||
650 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | ||
651 | struct bcm2835_desc *d; | ||
652 | dma_addr_t src = 0, dst = 0; | ||
653 | u32 info = BCM2835_DMA_WAIT_RESP; | ||
654 | u32 extra = BCM2835_DMA_INT_EN; | ||
655 | size_t frames; | ||
363 | 656 | ||
364 | /* Grab configuration */ | ||
365 | if (!is_slave_direction(direction)) { | 657 | if (!is_slave_direction(direction)) { |
366 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); | 658 | dev_err(chan->device->dev, |
659 | "%s: bad direction?\n", __func__); | ||
367 | return NULL; | 660 | return NULL; |
368 | } | 661 | } |
369 | 662 | ||
663 | if (c->dreq != 0) | ||
664 | info |= BCM2835_DMA_PER_MAP(c->dreq); | ||
665 | |||
370 | if (direction == DMA_DEV_TO_MEM) { | 666 | if (direction == DMA_DEV_TO_MEM) { |
371 | dev_addr = c->cfg.src_addr; | 667 | if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) |
372 | dev_width = c->cfg.src_addr_width; | 668 | return NULL; |
373 | sync_type = BCM2835_DMA_S_DREQ; | 669 | src = c->cfg.src_addr; |
670 | info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC; | ||
374 | } else { | 671 | } else { |
375 | dev_addr = c->cfg.dst_addr; | 672 | if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) |
376 | dev_width = c->cfg.dst_addr_width; | 673 | return NULL; |
377 | sync_type = BCM2835_DMA_D_DREQ; | 674 | dst = c->cfg.dst_addr; |
675 | info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC; | ||
378 | } | 676 | } |
379 | 677 | ||
380 | /* Bus width translates to the element size (ES) */ | 678 | /* count frames in sg list */ |
381 | switch (dev_width) { | 679 | frames = bcm2835_dma_count_frames_for_sg(c, sgl, sg_len); |
382 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
383 | es = BCM2835_DMA_DATA_TYPE_S32; | ||
384 | break; | ||
385 | default: | ||
386 | return NULL; | ||
387 | } | ||
388 | 680 | ||
389 | /* Now allocate and setup the descriptor. */ | 681 | /* allocate the CB chain */ |
390 | d = kzalloc(sizeof(*d), GFP_NOWAIT); | 682 | d = bcm2835_dma_create_cb_chain(chan, direction, false, |
683 | info, extra, | ||
684 | frames, src, dst, 0, 0, | ||
685 | GFP_KERNEL); | ||
391 | if (!d) | 686 | if (!d) |
392 | return NULL; | 687 | return NULL; |
393 | 688 | ||
394 | d->c = c; | 689 | /* fill in frames with scatterlist pointers */ |
395 | d->dir = direction; | 690 | bcm2835_dma_fill_cb_chain_with_sg(chan, direction, d->cb_list, |
396 | d->frames = buf_len / period_len; | 691 | sgl, sg_len); |
397 | 692 | ||
398 | d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL); | 693 | return vchan_tx_prep(&c->vc, &d->vd, flags); |
399 | if (!d->cb_list) { | 694 | } |
400 | kfree(d); | 695 | |
696 | static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( | ||
697 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | ||
698 | size_t period_len, enum dma_transfer_direction direction, | ||
699 | unsigned long flags) | ||
700 | { | ||
701 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | ||
702 | struct bcm2835_desc *d; | ||
703 | dma_addr_t src, dst; | ||
704 | u32 info = BCM2835_DMA_WAIT_RESP; | ||
705 | u32 extra = BCM2835_DMA_INT_EN; | ||
706 | size_t max_len = bcm2835_dma_max_frame_length(c); | ||
707 | size_t frames; | ||
708 | |||
709 | /* Grab configuration */ | ||
710 | if (!is_slave_direction(direction)) { | ||
711 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); | ||
401 | return NULL; | 712 | return NULL; |
402 | } | 713 | } |
403 | /* Allocate memory for control blocks */ | ||
404 | for (i = 0; i < d->frames; i++) { | ||
405 | struct bcm2835_cb_entry *cb_entry = &d->cb_list[i]; | ||
406 | 714 | ||
407 | cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC, | 715 | if (!buf_len) { |
408 | &cb_entry->paddr); | 716 | dev_err(chan->device->dev, |
409 | if (!cb_entry->cb) | 717 | "%s: bad buffer length (= 0)\n", __func__); |
410 | goto error_cb; | 718 | return NULL; |
411 | } | 719 | } |
412 | 720 | ||
413 | /* | 721 | /* |
414 | * Iterate over all frames, create a control block | 722 | * warn if buf_len is not a multiple of period_len - this may lead |
415 | * for each frame and link them together. | 723 | * to unexpected latencies for interrupts and thus audible clicks |
416 | */ | 724 | */ |
417 | for (frame = 0; frame < d->frames; frame++) { | 725 | if (buf_len % period_len) |
418 | struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb; | 726 | dev_warn_once(chan->device->dev, |
419 | 727 | "%s: buffer_length (%zd) is not a multiple of period_len (%zd)\n", | |
420 | /* Setup adresses */ | 728 | __func__, buf_len, period_len); |
421 | if (d->dir == DMA_DEV_TO_MEM) { | ||
422 | control_block->info = BCM2835_DMA_D_INC; | ||
423 | control_block->src = dev_addr; | ||
424 | control_block->dst = buf_addr + frame * period_len; | ||
425 | } else { | ||
426 | control_block->info = BCM2835_DMA_S_INC; | ||
427 | control_block->src = buf_addr + frame * period_len; | ||
428 | control_block->dst = dev_addr; | ||
429 | } | ||
430 | 729 | ||
431 | /* Enable interrupt */ | 730 | /* Setup DREQ channel */ |
432 | control_block->info |= BCM2835_DMA_INT_EN; | 731 | if (c->dreq != 0) |
732 | info |= BCM2835_DMA_PER_MAP(c->dreq); | ||
433 | 733 | ||
434 | /* Setup synchronization */ | 734 | if (direction == DMA_DEV_TO_MEM) { |
435 | if (sync_type != 0) | 735 | if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) |
436 | control_block->info |= sync_type; | 736 | return NULL; |
737 | src = c->cfg.src_addr; | ||
738 | dst = buf_addr; | ||
739 | info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC; | ||
740 | } else { | ||
741 | if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
742 | return NULL; | ||
743 | dst = c->cfg.dst_addr; | ||
744 | src = buf_addr; | ||
745 | info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC; | ||
746 | } | ||
437 | 747 | ||
438 | /* Setup DREQ channel */ | 748 | /* calculate number of frames */ |
439 | if (c->dreq != 0) | 749 | frames = /* number of periods */ |
440 | control_block->info |= | 750 | DIV_ROUND_UP(buf_len, period_len) * |
441 | BCM2835_DMA_PER_MAP(c->dreq); | 751 | /* number of frames per period */ |
752 | bcm2835_dma_frames_for_length(period_len, max_len); | ||
442 | 753 | ||
443 | /* Length of a frame */ | 754 | /* |
444 | control_block->length = period_len; | 755 | * allocate the CB chain |
445 | d->size += control_block->length; | 756 | * note that we need to use GFP_NOWAIT, as the ALSA i2s dmaengine |
757 | * implementation calls prep_dma_cyclic with interrupts disabled. | ||
758 | */ | ||
759 | d = bcm2835_dma_create_cb_chain(chan, direction, true, | ||
760 | info, extra, | ||
761 | frames, src, dst, buf_len, | ||
762 | period_len, GFP_NOWAIT); | ||
763 | if (!d) | ||
764 | return NULL; | ||
446 | 765 | ||
447 | /* | 766 | /* wrap around into a loop */ |
448 | * Next block is the next frame. | 767 | d->cb_list[d->frames - 1].cb->next = d->cb_list[0].paddr; |
449 | * This DMA engine driver currently only supports cyclic DMA. | ||
450 | * Therefore, wrap around at number of frames. | ||
451 | */ | ||
452 | control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr; | ||
453 | } | ||
454 | 768 | ||
455 | return vchan_tx_prep(&c->vc, &d->vd, flags); | 769 | return vchan_tx_prep(&c->vc, &d->vd, flags); |
456 | error_cb: | ||
457 | i--; | ||
458 | for (; i >= 0; i--) { | ||
459 | struct bcm2835_cb_entry *cb_entry = &d->cb_list[i]; | ||
460 | |||
461 | dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr); | ||
462 | } | ||
463 | |||
464 | kfree(d->cb_list); | ||
465 | kfree(d); | ||
466 | return NULL; | ||
467 | } | 770 | } |
468 | 771 | ||
469 | static int bcm2835_dma_slave_config(struct dma_chan *chan, | 772 | static int bcm2835_dma_slave_config(struct dma_chan *chan, |
@@ -529,7 +832,8 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) | |||
529 | return 0; | 832 | return 0; |
530 | } | 833 | } |
531 | 834 | ||
532 | static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq) | 835 | static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, |
836 | int irq, unsigned int irq_flags) | ||
533 | { | 837 | { |
534 | struct bcm2835_chan *c; | 838 | struct bcm2835_chan *c; |
535 | 839 | ||
@@ -544,6 +848,12 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq) | |||
544 | c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); | 848 | c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); |
545 | c->ch = chan_id; | 849 | c->ch = chan_id; |
546 | c->irq_number = irq; | 850 | c->irq_number = irq; |
851 | c->irq_flags = irq_flags; | ||
852 | |||
853 | /* check in DEBUG register if this is a LITE channel */ | ||
854 | if (readl(c->chan_base + BCM2835_DMA_DEBUG) & | ||
855 | BCM2835_DMA_DEBUG_LITE) | ||
856 | c->is_lite_channel = true; | ||
547 | 857 | ||
548 | return 0; | 858 | return 0; |
549 | } | 859 | } |
@@ -587,9 +897,11 @@ static int bcm2835_dma_probe(struct platform_device *pdev) | |||
587 | struct resource *res; | 897 | struct resource *res; |
588 | void __iomem *base; | 898 | void __iomem *base; |
589 | int rc; | 899 | int rc; |
590 | int i; | 900 | int i, j; |
591 | int irq; | 901 | int irq[BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED + 1]; |
902 | int irq_flags; | ||
592 | uint32_t chans_available; | 903 | uint32_t chans_available; |
904 | char chan_name[BCM2835_DMA_CHAN_NAME_SIZE]; | ||
593 | 905 | ||
594 | if (!pdev->dev.dma_mask) | 906 | if (!pdev->dev.dma_mask) |
595 | pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; | 907 | pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; |
@@ -615,16 +927,22 @@ static int bcm2835_dma_probe(struct platform_device *pdev) | |||
615 | dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); | 927 | dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); |
616 | dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask); | 928 | dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask); |
617 | dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); | 929 | dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); |
930 | dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); | ||
931 | dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask); | ||
618 | od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources; | 932 | od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources; |
619 | od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources; | 933 | od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources; |
620 | od->ddev.device_tx_status = bcm2835_dma_tx_status; | 934 | od->ddev.device_tx_status = bcm2835_dma_tx_status; |
621 | od->ddev.device_issue_pending = bcm2835_dma_issue_pending; | 935 | od->ddev.device_issue_pending = bcm2835_dma_issue_pending; |
622 | od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic; | 936 | od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic; |
937 | od->ddev.device_prep_slave_sg = bcm2835_dma_prep_slave_sg; | ||
938 | od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy; | ||
623 | od->ddev.device_config = bcm2835_dma_slave_config; | 939 | od->ddev.device_config = bcm2835_dma_slave_config; |
624 | od->ddev.device_terminate_all = bcm2835_dma_terminate_all; | 940 | od->ddev.device_terminate_all = bcm2835_dma_terminate_all; |
625 | od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | 941 | od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); |
626 | od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | 942 | od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); |
627 | od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | 943 | od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | |
944 | BIT(DMA_MEM_TO_MEM); | ||
945 | od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
628 | od->ddev.dev = &pdev->dev; | 946 | od->ddev.dev = &pdev->dev; |
629 | INIT_LIST_HEAD(&od->ddev.channels); | 947 | INIT_LIST_HEAD(&od->ddev.channels); |
630 | spin_lock_init(&od->lock); | 948 | spin_lock_init(&od->lock); |
@@ -640,22 +958,48 @@ static int bcm2835_dma_probe(struct platform_device *pdev) | |||
640 | goto err_no_dma; | 958 | goto err_no_dma; |
641 | } | 959 | } |
642 | 960 | ||
643 | /* | 961 | /* get irqs for each channel that we support */ |
644 | * Do not use the FIQ and BULK channels, | 962 | for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) { |
645 | * because they are used by the GPU. | 963 | /* skip masked out channels */ |
646 | */ | 964 | if (!(chans_available & (1 << i))) { |
647 | chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK); | 965 | irq[i] = -1; |
966 | continue; | ||
967 | } | ||
648 | 968 | ||
649 | for (i = 0; i < pdev->num_resources; i++) { | 969 | /* get the named irq */ |
650 | irq = platform_get_irq(pdev, i); | 970 | snprintf(chan_name, sizeof(chan_name), "dma%i", i); |
651 | if (irq < 0) | 971 | irq[i] = platform_get_irq_byname(pdev, chan_name); |
652 | break; | 972 | if (irq[i] >= 0) |
973 | continue; | ||
653 | 974 | ||
654 | if (chans_available & (1 << i)) { | 975 | /* legacy device tree case handling */ |
655 | rc = bcm2835_dma_chan_init(od, i, irq); | 976 | dev_warn_once(&pdev->dev, |
656 | if (rc) | 977 | "missing interrupt-names property in device tree - legacy interpretation is used\n"); |
657 | goto err_no_dma; | 978 | /* |
658 | } | 979 | * in case of channel >= 11 |
980 | * use the 11th interrupt and that is shared | ||
981 | */ | ||
982 | irq[i] = platform_get_irq(pdev, i < 11 ? i : 11); | ||
983 | } | ||
984 | |||
985 | /* get irqs for each channel */ | ||
986 | for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) { | ||
987 | /* skip channels without irq */ | ||
988 | if (irq[i] < 0) | ||
989 | continue; | ||
990 | |||
991 | /* check if there are other channels that also use this irq */ | ||
992 | irq_flags = 0; | ||
993 | for (j = 0; j <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; j++) | ||
994 | if ((i != j) && (irq[j] == irq[i])) { | ||
995 | irq_flags = IRQF_SHARED; | ||
996 | break; | ||
997 | } | ||
998 | |||
999 | /* initialize the channel */ | ||
1000 | rc = bcm2835_dma_chan_init(od, i, irq[i], irq_flags); | ||
1001 | if (rc) | ||
1002 | goto err_no_dma; | ||
659 | } | 1003 | } |
660 | 1004 | ||
661 | dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i); | 1005 | dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i); |
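With DMA_MEMCPY and the slave_sg/memcpy prep callbacks registered above, the controller can now be driven through the generic dmaengine API. The fragment below is a minimal, hypothetical consumer sketch, not part of this patch; buffer addresses are assumed to be DMA-mapped already, it calls the device_prep_dma_memcpy op directly, and it polls with dma_sync_wait() purely for brevity.

#include <linux/dmaengine.h>

/* Hypothetical helper: copy len bytes between two already-mapped buffers. */
static int example_dma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	dma_cap_mask_t mask;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	desc = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						     DMA_PREP_INTERRUPT |
						     DMA_CTRL_ACK);
	if (!desc) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* Real users would take a completion callback instead of polling. */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;
out:
	dma_release_channel(chan);
	return ret;
}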
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 0cb259c59916..8c9f45fd55fc 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -289,7 +289,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) | |||
289 | do { | 289 | do { |
290 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); | 290 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
291 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { | 291 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { |
292 | pr_err("%s: timeout!\n", __func__); | 292 | dev_err(chan->device->dev, "%s: timeout!\n", __func__); |
293 | return DMA_ERROR; | 293 | return DMA_ERROR; |
294 | } | 294 | } |
295 | if (status != DMA_IN_PROGRESS) | 295 | if (status != DMA_IN_PROGRESS) |
@@ -482,7 +482,8 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) | |||
482 | device = chan->device; | 482 | device = chan->device; |
483 | 483 | ||
484 | /* check if the channel supports slave transactions */ | 484 | /* check if the channel supports slave transactions */ |
485 | if (!test_bit(DMA_SLAVE, device->cap_mask.bits)) | 485 | if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) || |
486 | test_bit(DMA_CYCLIC, device->cap_mask.bits))) | ||
486 | return -ENXIO; | 487 | return -ENXIO; |
487 | 488 | ||
488 | /* | 489 | /* |
@@ -518,7 +519,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, | |||
518 | struct dma_chan *chan; | 519 | struct dma_chan *chan; |
519 | 520 | ||
520 | if (mask && !__dma_device_satisfies_mask(dev, mask)) { | 521 | if (mask && !__dma_device_satisfies_mask(dev, mask)) { |
521 | pr_debug("%s: wrong capabilities\n", __func__); | 522 | dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__); |
522 | return NULL; | 523 | return NULL; |
523 | } | 524 | } |
524 | /* devices with multiple channels need special handling as we need to | 525 | /* devices with multiple channels need special handling as we need to |
@@ -533,12 +534,12 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, | |||
533 | 534 | ||
534 | list_for_each_entry(chan, &dev->channels, device_node) { | 535 | list_for_each_entry(chan, &dev->channels, device_node) { |
535 | if (chan->client_count) { | 536 | if (chan->client_count) { |
536 | pr_debug("%s: %s busy\n", | 537 | dev_dbg(dev->dev, "%s: %s busy\n", |
537 | __func__, dma_chan_name(chan)); | 538 | __func__, dma_chan_name(chan)); |
538 | continue; | 539 | continue; |
539 | } | 540 | } |
540 | if (fn && !fn(chan, fn_param)) { | 541 | if (fn && !fn(chan, fn_param)) { |
541 | pr_debug("%s: %s filter said false\n", | 542 | dev_dbg(dev->dev, "%s: %s filter said false\n", |
542 | __func__, dma_chan_name(chan)); | 543 | __func__, dma_chan_name(chan)); |
543 | continue; | 544 | continue; |
544 | } | 545 | } |
@@ -567,11 +568,12 @@ static struct dma_chan *find_candidate(struct dma_device *device, | |||
567 | 568 | ||
568 | if (err) { | 569 | if (err) { |
569 | if (err == -ENODEV) { | 570 | if (err == -ENODEV) { |
570 | pr_debug("%s: %s module removed\n", __func__, | 571 | dev_dbg(device->dev, "%s: %s module removed\n", |
571 | dma_chan_name(chan)); | 572 | __func__, dma_chan_name(chan)); |
572 | list_del_rcu(&device->global_node); | 573 | list_del_rcu(&device->global_node); |
573 | } else | 574 | } else |
574 | pr_debug("%s: failed to get %s: (%d)\n", | 575 | dev_dbg(device->dev, |
576 | "%s: failed to get %s: (%d)\n", | ||
575 | __func__, dma_chan_name(chan), err); | 577 | __func__, dma_chan_name(chan), err); |
576 | 578 | ||
577 | if (--device->privatecnt == 0) | 579 | if (--device->privatecnt == 0) |
@@ -602,7 +604,8 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) | |||
602 | device->privatecnt++; | 604 | device->privatecnt++; |
603 | err = dma_chan_get(chan); | 605 | err = dma_chan_get(chan); |
604 | if (err) { | 606 | if (err) { |
605 | pr_debug("%s: failed to get %s: (%d)\n", | 607 | dev_dbg(chan->device->dev, |
608 | "%s: failed to get %s: (%d)\n", | ||
606 | __func__, dma_chan_name(chan), err); | 609 | __func__, dma_chan_name(chan), err); |
607 | chan = NULL; | 610 | chan = NULL; |
608 | if (--device->privatecnt == 0) | 611 | if (--device->privatecnt == 0) |
@@ -814,8 +817,9 @@ void dmaengine_get(void) | |||
814 | list_del_rcu(&device->global_node); | 817 | list_del_rcu(&device->global_node); |
815 | break; | 818 | break; |
816 | } else if (err) | 819 | } else if (err) |
817 | pr_debug("%s: failed to get %s: (%d)\n", | 820 | dev_dbg(chan->device->dev, |
818 | __func__, dma_chan_name(chan), err); | 821 | "%s: failed to get %s: (%d)\n", |
822 | __func__, dma_chan_name(chan), err); | ||
819 | } | 823 | } |
820 | } | 824 | } |
821 | 825 | ||
@@ -862,12 +866,12 @@ static bool device_has_all_tx_types(struct dma_device *device) | |||
862 | return false; | 866 | return false; |
863 | #endif | 867 | #endif |
864 | 868 | ||
865 | #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE) | 869 | #if IS_ENABLED(CONFIG_ASYNC_MEMCPY) |
866 | if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) | 870 | if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) |
867 | return false; | 871 | return false; |
868 | #endif | 872 | #endif |
869 | 873 | ||
870 | #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE) | 874 | #if IS_ENABLED(CONFIG_ASYNC_XOR) |
871 | if (!dma_has_cap(DMA_XOR, device->cap_mask)) | 875 | if (!dma_has_cap(DMA_XOR, device->cap_mask)) |
872 | return false; | 876 | return false; |
873 | 877 | ||
@@ -877,7 +881,7 @@ static bool device_has_all_tx_types(struct dma_device *device) | |||
877 | #endif | 881 | #endif |
878 | #endif | 882 | #endif |
879 | 883 | ||
880 | #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE) | 884 | #if IS_ENABLED(CONFIG_ASYNC_PQ) |
881 | if (!dma_has_cap(DMA_PQ, device->cap_mask)) | 885 | if (!dma_has_cap(DMA_PQ, device->cap_mask)) |
882 | return false; | 886 | return false; |
883 | 887 | ||
@@ -1222,8 +1226,9 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | |||
1222 | 1226 | ||
1223 | while (tx->cookie == -EBUSY) { | 1227 | while (tx->cookie == -EBUSY) { |
1224 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { | 1228 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { |
1225 | pr_err("%s timeout waiting for descriptor submission\n", | 1229 | dev_err(tx->chan->device->dev, |
1226 | __func__); | 1230 | "%s timeout waiting for descriptor submission\n", |
1231 | __func__); | ||
1227 | return DMA_ERROR; | 1232 | return DMA_ERROR; |
1228 | } | 1233 | } |
1229 | cpu_relax(); | 1234 | cpu_relax(); |
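The dmaengine core hunks above make two independent cleanups: log messages now go through dev_dbg()/dev_err() so they carry the DMA device name, and the open-coded defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) tests become IS_ENABLED(CONFIG_FOO), which is true for both built-in and modular configurations. A small illustrative fragment (not from the patch) of the equivalent pattern:

#include <linux/kconfig.h>

/*
 * IS_ENABLED(CONFIG_FOO) evaluates to 1 when CONFIG_FOO=y or CONFIG_FOO=m,
 * matching the old two-macro preprocessor test in one readable check.
 */
#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
static inline bool have_async_memcpy(void) { return true; }
#else
static inline bool have_async_memcpy(void) { return false; }
#endif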
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 5ad0ec1f0e29..edf053f73a49 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c | |||
@@ -45,22 +45,19 @@ | |||
45 | DW_DMA_MSIZE_16; \ | 45 | DW_DMA_MSIZE_16; \ |
46 | u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ | 46 | u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ |
47 | DW_DMA_MSIZE_16; \ | 47 | DW_DMA_MSIZE_16; \ |
48 | u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ? \ | ||
49 | _dwc->p_master : _dwc->m_master; \ | ||
50 | u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ? \ | ||
51 | _dwc->p_master : _dwc->m_master; \ | ||
48 | \ | 52 | \ |
49 | (DWC_CTLL_DST_MSIZE(_dmsize) \ | 53 | (DWC_CTLL_DST_MSIZE(_dmsize) \ |
50 | | DWC_CTLL_SRC_MSIZE(_smsize) \ | 54 | | DWC_CTLL_SRC_MSIZE(_smsize) \ |
51 | | DWC_CTLL_LLP_D_EN \ | 55 | | DWC_CTLL_LLP_D_EN \ |
52 | | DWC_CTLL_LLP_S_EN \ | 56 | | DWC_CTLL_LLP_S_EN \ |
53 | | DWC_CTLL_DMS(_dwc->dst_master) \ | 57 | | DWC_CTLL_DMS(_dms) \ |
54 | | DWC_CTLL_SMS(_dwc->src_master)); \ | 58 | | DWC_CTLL_SMS(_sms)); \ |
55 | }) | 59 | }) |
56 | 60 | ||
57 | /* | ||
58 | * Number of descriptors to allocate for each channel. This should be | ||
59 | * made configurable somehow; preferably, the clients (at least the | ||
60 | * ones using slave transfers) should be able to give us a hint. | ||
61 | */ | ||
62 | #define NR_DESCS_PER_CHANNEL 64 | ||
63 | |||
64 | /* The set of bus widths supported by the DMA controller */ | 61 | /* The set of bus widths supported by the DMA controller */ |
65 | #define DW_DMA_BUSWIDTHS \ | 62 | #define DW_DMA_BUSWIDTHS \ |
66 | BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ | 63 | BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ |
@@ -80,76 +77,78 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) | |||
80 | return to_dw_desc(dwc->active_list.next); | 77 | return to_dw_desc(dwc->active_list.next); |
81 | } | 78 | } |
82 | 79 | ||
83 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) | 80 | static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) |
84 | { | 81 | { |
85 | struct dw_desc *desc, *_desc; | 82 | struct dw_desc *desc = txd_to_dw_desc(tx); |
86 | struct dw_desc *ret = NULL; | 83 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); |
87 | unsigned int i = 0; | 84 | dma_cookie_t cookie; |
88 | unsigned long flags; | 85 | unsigned long flags; |
89 | 86 | ||
90 | spin_lock_irqsave(&dwc->lock, flags); | 87 | spin_lock_irqsave(&dwc->lock, flags); |
91 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { | 88 | cookie = dma_cookie_assign(tx); |
92 | i++; | 89 | |
93 | if (async_tx_test_ack(&desc->txd)) { | 90 | /* |
94 | list_del(&desc->desc_node); | 91 | * REVISIT: We should attempt to chain as many descriptors as |
95 | ret = desc; | 92 | * possible, perhaps even appending to those already submitted |
96 | break; | 93 | * for DMA. But this is hard to do in a race-free manner. |
97 | } | 94 | */ |
98 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); | 95 | |
99 | } | 96 | list_add_tail(&desc->desc_node, &dwc->queue); |
100 | spin_unlock_irqrestore(&dwc->lock, flags); | 97 | spin_unlock_irqrestore(&dwc->lock, flags); |
98 | dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", | ||
99 | __func__, desc->txd.cookie); | ||
101 | 100 | ||
102 | dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); | 101 | return cookie; |
102 | } | ||
103 | 103 | ||
104 | return ret; | 104 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) |
105 | { | ||
106 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
107 | struct dw_desc *desc; | ||
108 | dma_addr_t phys; | ||
109 | |||
110 | desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys); | ||
111 | if (!desc) | ||
112 | return NULL; | ||
113 | |||
114 | dwc->descs_allocated++; | ||
115 | INIT_LIST_HEAD(&desc->tx_list); | ||
116 | dma_async_tx_descriptor_init(&desc->txd, &dwc->chan); | ||
117 | desc->txd.tx_submit = dwc_tx_submit; | ||
118 | desc->txd.flags = DMA_CTRL_ACK; | ||
119 | desc->txd.phys = phys; | ||
120 | return desc; | ||
105 | } | 121 | } |
106 | 122 | ||
107 | /* | ||
108 | * Move a descriptor, including any children, to the free list. | ||
109 | * `desc' must not be on any lists. | ||
110 | */ | ||
111 | static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | 123 | static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) |
112 | { | 124 | { |
113 | unsigned long flags; | 125 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
126 | struct dw_desc *child, *_next; | ||
114 | 127 | ||
115 | if (desc) { | 128 | if (unlikely(!desc)) |
116 | struct dw_desc *child; | 129 | return; |
117 | 130 | ||
118 | spin_lock_irqsave(&dwc->lock, flags); | 131 | list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) { |
119 | list_for_each_entry(child, &desc->tx_list, desc_node) | 132 | list_del(&child->desc_node); |
120 | dev_vdbg(chan2dev(&dwc->chan), | 133 | dma_pool_free(dw->desc_pool, child, child->txd.phys); |
121 | "moving child desc %p to freelist\n", | 134 | dwc->descs_allocated--; |
122 | child); | ||
123 | list_splice_init(&desc->tx_list, &dwc->free_list); | ||
124 | dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); | ||
125 | list_add(&desc->desc_node, &dwc->free_list); | ||
126 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
127 | } | 135 | } |
136 | |||
137 | dma_pool_free(dw->desc_pool, desc, desc->txd.phys); | ||
138 | dwc->descs_allocated--; | ||
128 | } | 139 | } |
129 | 140 | ||
130 | static void dwc_initialize(struct dw_dma_chan *dwc) | 141 | static void dwc_initialize(struct dw_dma_chan *dwc) |
131 | { | 142 | { |
132 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 143 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
133 | struct dw_dma_slave *dws = dwc->chan.private; | ||
134 | u32 cfghi = DWC_CFGH_FIFO_MODE; | 144 | u32 cfghi = DWC_CFGH_FIFO_MODE; |
135 | u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); | 145 | u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); |
136 | 146 | ||
137 | if (dwc->initialized == true) | 147 | if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags)) |
138 | return; | 148 | return; |
139 | 149 | ||
140 | if (dws) { | 150 | cfghi |= DWC_CFGH_DST_PER(dwc->dst_id); |
141 | /* | 151 | cfghi |= DWC_CFGH_SRC_PER(dwc->src_id); |
142 | * We need controller-specific data to set up slave | ||
143 | * transfers. | ||
144 | */ | ||
145 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); | ||
146 | |||
147 | cfghi |= DWC_CFGH_DST_PER(dws->dst_id); | ||
148 | cfghi |= DWC_CFGH_SRC_PER(dws->src_id); | ||
149 | } else { | ||
150 | cfghi |= DWC_CFGH_DST_PER(dwc->dst_id); | ||
151 | cfghi |= DWC_CFGH_SRC_PER(dwc->src_id); | ||
152 | } | ||
153 | 152 | ||
154 | channel_writel(dwc, CFG_LO, cfglo); | 153 | channel_writel(dwc, CFG_LO, cfglo); |
155 | channel_writel(dwc, CFG_HI, cfghi); | 154 | channel_writel(dwc, CFG_HI, cfghi); |
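The hunk above removes the per-channel free list: dwc_tx_submit() only assigns the cookie and queues the descriptor, while dwc_desc_get()/dwc_desc_put() allocate and free descriptors directly from the device's dma_pool. A reduced sketch of that allocate/free pattern with invented names, assuming the pool was created with dma_pool_create() at probe time:

#include <linux/types.h>
#include <linux/dmapool.h>

struct example_hw_desc {
	u32 ctl;
	u32 len;
	dma_addr_t next;		/* bus address of the next descriptor */
};

/* Zero-allocate one descriptor; GFP_ATOMIC since prep may run atomically. */
static struct example_hw_desc *example_desc_get(struct dma_pool *pool,
						dma_addr_t *phys)
{
	return dma_pool_zalloc(pool, GFP_ATOMIC, phys);
}

/* Return a descriptor to the pool once the transfer has completed. */
static void example_desc_put(struct dma_pool *pool,
			     struct example_hw_desc *desc, dma_addr_t phys)
{
	dma_pool_free(pool, desc, phys);
}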
@@ -158,26 +157,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc) | |||
158 | channel_set_bit(dw, MASK.XFER, dwc->mask); | 157 | channel_set_bit(dw, MASK.XFER, dwc->mask); |
159 | channel_set_bit(dw, MASK.ERROR, dwc->mask); | 158 | channel_set_bit(dw, MASK.ERROR, dwc->mask); |
160 | 159 | ||
161 | dwc->initialized = true; | 160 | set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); |
162 | } | 161 | } |
163 | 162 | ||
164 | /*----------------------------------------------------------------------*/ | 163 | /*----------------------------------------------------------------------*/ |
165 | 164 | ||
166 | static inline unsigned int dwc_fast_ffs(unsigned long long v) | ||
167 | { | ||
168 | /* | ||
169 | * We can be a lot more clever here, but this should take care | ||
170 | * of the most common optimization. | ||
171 | */ | ||
172 | if (!(v & 7)) | ||
173 | return 3; | ||
174 | else if (!(v & 3)) | ||
175 | return 2; | ||
176 | else if (!(v & 1)) | ||
177 | return 1; | ||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) | 165 | static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) |
182 | { | 166 | { |
183 | dev_err(chan2dev(&dwc->chan), | 167 | dev_err(chan2dev(&dwc->chan), |
@@ -209,12 +193,12 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc, | |||
209 | * Software emulation of LLP mode relies on interrupts to continue | 193 | * Software emulation of LLP mode relies on interrupts to continue |
210 | * multi block transfer. | 194 | * multi block transfer. |
211 | */ | 195 | */ |
212 | ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; | 196 | ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN; |
213 | 197 | ||
214 | channel_writel(dwc, SAR, desc->lli.sar); | 198 | channel_writel(dwc, SAR, lli_read(desc, sar)); |
215 | channel_writel(dwc, DAR, desc->lli.dar); | 199 | channel_writel(dwc, DAR, lli_read(desc, dar)); |
216 | channel_writel(dwc, CTL_LO, ctllo); | 200 | channel_writel(dwc, CTL_LO, ctllo); |
217 | channel_writel(dwc, CTL_HI, desc->lli.ctlhi); | 201 | channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi)); |
218 | channel_set_bit(dw, CH_EN, dwc->mask); | 202 | channel_set_bit(dw, CH_EN, dwc->mask); |
219 | 203 | ||
220 | /* Move pointer to next descriptor */ | 204 | /* Move pointer to next descriptor */ |
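From here on the driver stops touching desc->lli.<field> directly and goes through lli_read()/lli_write()/lli_set()/lli_clear() helpers, which this series defines alongside the driver's register definitions so that the same prep code can serve controllers whose link-list items need different accessors. A hypothetical approximation of the plain little-endian variant, for orientation only:

/*
 * Illustration only; the real helpers live with the driver's register
 * definitions and also cover other hardware layouts.
 */
#define lli_read(d, reg)	((d)->lli.reg)
#define lli_write(d, reg, v)	((d)->lli.reg = (v))
#define lli_set(d, reg, v)	((d)->lli.reg |= (v))
#define lli_clear(d, reg, v)	((d)->lli.reg &= ~(v))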
@@ -225,6 +209,7 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc, | |||
225 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | 209 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) |
226 | { | 210 | { |
227 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 211 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
212 | u8 lms = DWC_LLP_LMS(dwc->m_master); | ||
228 | unsigned long was_soft_llp; | 213 | unsigned long was_soft_llp; |
229 | 214 | ||
230 | /* ASSERT: channel is idle */ | 215 | /* ASSERT: channel is idle */ |
@@ -249,7 +234,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |||
249 | 234 | ||
250 | dwc_initialize(dwc); | 235 | dwc_initialize(dwc); |
251 | 236 | ||
252 | dwc->residue = first->total_len; | 237 | first->residue = first->total_len; |
253 | dwc->tx_node_active = &first->tx_list; | 238 | dwc->tx_node_active = &first->tx_list; |
254 | 239 | ||
255 | /* Submit first block */ | 240 | /* Submit first block */ |
@@ -260,9 +245,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |||
260 | 245 | ||
261 | dwc_initialize(dwc); | 246 | dwc_initialize(dwc); |
262 | 247 | ||
263 | channel_writel(dwc, LLP, first->txd.phys); | 248 | channel_writel(dwc, LLP, first->txd.phys | lms); |
264 | channel_writel(dwc, CTL_LO, | 249 | channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); |
265 | DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | ||
266 | channel_writel(dwc, CTL_HI, 0); | 250 | channel_writel(dwc, CTL_HI, 0); |
267 | channel_set_bit(dw, CH_EN, dwc->mask); | 251 | channel_set_bit(dw, CH_EN, dwc->mask); |
268 | } | 252 | } |
@@ -305,11 +289,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, | |||
305 | list_for_each_entry(child, &desc->tx_list, desc_node) | 289 | list_for_each_entry(child, &desc->tx_list, desc_node) |
306 | async_tx_ack(&child->txd); | 290 | async_tx_ack(&child->txd); |
307 | async_tx_ack(&desc->txd); | 291 | async_tx_ack(&desc->txd); |
308 | 292 | dwc_desc_put(dwc, desc); | |
309 | list_splice_init(&desc->tx_list, &dwc->free_list); | ||
310 | list_move(&desc->desc_node, &dwc->free_list); | ||
311 | |||
312 | dma_descriptor_unmap(txd); | ||
313 | spin_unlock_irqrestore(&dwc->lock, flags); | 293 | spin_unlock_irqrestore(&dwc->lock, flags); |
314 | 294 | ||
315 | if (callback) | 295 | if (callback) |
@@ -380,11 +360,11 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
380 | 360 | ||
381 | head = &desc->tx_list; | 361 | head = &desc->tx_list; |
382 | if (active != head) { | 362 | if (active != head) { |
383 | /* Update desc to reflect last sent one */ | 363 | /* Update residue to reflect last sent descriptor */ |
384 | if (active != head->next) | 364 | if (active == head->next) |
385 | desc = to_dw_desc(active->prev); | 365 | desc->residue -= desc->len; |
386 | 366 | else | |
387 | dwc->residue -= desc->len; | 367 | desc->residue -= to_dw_desc(active->prev)->len; |
388 | 368 | ||
389 | child = to_dw_desc(active); | 369 | child = to_dw_desc(active); |
390 | 370 | ||
@@ -399,8 +379,6 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
399 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); | 379 | clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); |
400 | } | 380 | } |
401 | 381 | ||
402 | dwc->residue = 0; | ||
403 | |||
404 | spin_unlock_irqrestore(&dwc->lock, flags); | 382 | spin_unlock_irqrestore(&dwc->lock, flags); |
405 | 383 | ||
406 | dwc_complete_all(dw, dwc); | 384 | dwc_complete_all(dw, dwc); |
@@ -408,7 +386,6 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
408 | } | 386 | } |
409 | 387 | ||
410 | if (list_empty(&dwc->active_list)) { | 388 | if (list_empty(&dwc->active_list)) { |
411 | dwc->residue = 0; | ||
412 | spin_unlock_irqrestore(&dwc->lock, flags); | 389 | spin_unlock_irqrestore(&dwc->lock, flags); |
413 | return; | 390 | return; |
414 | } | 391 | } |
@@ -423,31 +400,31 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
423 | 400 | ||
424 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | 401 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { |
425 | /* Initial residue value */ | 402 | /* Initial residue value */ |
426 | dwc->residue = desc->total_len; | 403 | desc->residue = desc->total_len; |
427 | 404 | ||
428 | /* Check first descriptors addr */ | 405 | /* Check first descriptors addr */ |
429 | if (desc->txd.phys == llp) { | 406 | if (desc->txd.phys == DWC_LLP_LOC(llp)) { |
430 | spin_unlock_irqrestore(&dwc->lock, flags); | 407 | spin_unlock_irqrestore(&dwc->lock, flags); |
431 | return; | 408 | return; |
432 | } | 409 | } |
433 | 410 | ||
434 | /* Check first descriptors llp */ | 411 | /* Check first descriptors llp */ |
435 | if (desc->lli.llp == llp) { | 412 | if (lli_read(desc, llp) == llp) { |
436 | /* This one is currently in progress */ | 413 | /* This one is currently in progress */ |
437 | dwc->residue -= dwc_get_sent(dwc); | 414 | desc->residue -= dwc_get_sent(dwc); |
438 | spin_unlock_irqrestore(&dwc->lock, flags); | 415 | spin_unlock_irqrestore(&dwc->lock, flags); |
439 | return; | 416 | return; |
440 | } | 417 | } |
441 | 418 | ||
442 | dwc->residue -= desc->len; | 419 | desc->residue -= desc->len; |
443 | list_for_each_entry(child, &desc->tx_list, desc_node) { | 420 | list_for_each_entry(child, &desc->tx_list, desc_node) { |
444 | if (child->lli.llp == llp) { | 421 | if (lli_read(child, llp) == llp) { |
445 | /* Currently in progress */ | 422 | /* Currently in progress */ |
446 | dwc->residue -= dwc_get_sent(dwc); | 423 | desc->residue -= dwc_get_sent(dwc); |
447 | spin_unlock_irqrestore(&dwc->lock, flags); | 424 | spin_unlock_irqrestore(&dwc->lock, flags); |
448 | return; | 425 | return; |
449 | } | 426 | } |
450 | dwc->residue -= child->len; | 427 | desc->residue -= child->len; |
451 | } | 428 | } |
452 | 429 | ||
453 | /* | 430 | /* |
@@ -469,10 +446,14 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
469 | spin_unlock_irqrestore(&dwc->lock, flags); | 446 | spin_unlock_irqrestore(&dwc->lock, flags); |
470 | } | 447 | } |
471 | 448 | ||
472 | static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) | 449 | static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc) |
473 | { | 450 | { |
474 | dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", | 451 | dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", |
475 | lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); | 452 | lli_read(desc, sar), |
453 | lli_read(desc, dar), | ||
454 | lli_read(desc, llp), | ||
455 | lli_read(desc, ctlhi), | ||
456 | lli_read(desc, ctllo)); | ||
476 | } | 457 | } |
477 | 458 | ||
478 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | 459 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) |
@@ -508,9 +489,9 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
508 | */ | 489 | */ |
509 | dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" | 490 | dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" |
510 | " cookie: %d\n", bad_desc->txd.cookie); | 491 | " cookie: %d\n", bad_desc->txd.cookie); |
511 | dwc_dump_lli(dwc, &bad_desc->lli); | 492 | dwc_dump_lli(dwc, bad_desc); |
512 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) | 493 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
513 | dwc_dump_lli(dwc, &child->lli); | 494 | dwc_dump_lli(dwc, child); |
514 | 495 | ||
515 | spin_unlock_irqrestore(&dwc->lock, flags); | 496 | spin_unlock_irqrestore(&dwc->lock, flags); |
516 | 497 | ||
@@ -561,7 +542,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |||
561 | */ | 542 | */ |
562 | if (unlikely(status_err & dwc->mask) || | 543 | if (unlikely(status_err & dwc->mask) || |
563 | unlikely(status_xfer & dwc->mask)) { | 544 | unlikely(status_xfer & dwc->mask)) { |
564 | int i; | 545 | unsigned int i; |
565 | 546 | ||
566 | dev_err(chan2dev(&dwc->chan), | 547 | dev_err(chan2dev(&dwc->chan), |
567 | "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n", | 548 | "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n", |
@@ -583,7 +564,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |||
583 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 564 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
584 | 565 | ||
585 | for (i = 0; i < dwc->cdesc->periods; i++) | 566 | for (i = 0; i < dwc->cdesc->periods; i++) |
586 | dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); | 567 | dwc_dump_lli(dwc, dwc->cdesc->desc[i]); |
587 | 568 | ||
588 | spin_unlock_irqrestore(&dwc->lock, flags); | 569 | spin_unlock_irqrestore(&dwc->lock, flags); |
589 | } | 570 | } |
@@ -601,7 +582,7 @@ static void dw_dma_tasklet(unsigned long data) | |||
601 | u32 status_block; | 582 | u32 status_block; |
602 | u32 status_xfer; | 583 | u32 status_xfer; |
603 | u32 status_err; | 584 | u32 status_err; |
604 | int i; | 585 | unsigned int i; |
605 | 586 | ||
606 | status_block = dma_readl(dw, RAW.BLOCK); | 587 | status_block = dma_readl(dw, RAW.BLOCK); |
607 | status_xfer = dma_readl(dw, RAW.XFER); | 588 | status_xfer = dma_readl(dw, RAW.XFER); |
@@ -670,30 +651,6 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) | |||
670 | 651 | ||
671 | /*----------------------------------------------------------------------*/ | 652 | /*----------------------------------------------------------------------*/ |
672 | 653 | ||
673 | static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | ||
674 | { | ||
675 | struct dw_desc *desc = txd_to_dw_desc(tx); | ||
676 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); | ||
677 | dma_cookie_t cookie; | ||
678 | unsigned long flags; | ||
679 | |||
680 | spin_lock_irqsave(&dwc->lock, flags); | ||
681 | cookie = dma_cookie_assign(tx); | ||
682 | |||
683 | /* | ||
684 | * REVISIT: We should attempt to chain as many descriptors as | ||
685 | * possible, perhaps even appending to those already submitted | ||
686 | * for DMA. But this is hard to do in a race-free manner. | ||
687 | */ | ||
688 | |||
689 | dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie); | ||
690 | list_add_tail(&desc->desc_node, &dwc->queue); | ||
691 | |||
692 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
693 | |||
694 | return cookie; | ||
695 | } | ||
696 | |||
697 | static struct dma_async_tx_descriptor * | 654 | static struct dma_async_tx_descriptor * |
698 | dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | 655 | dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
699 | size_t len, unsigned long flags) | 656 | size_t len, unsigned long flags) |
@@ -705,10 +662,12 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
705 | struct dw_desc *prev; | 662 | struct dw_desc *prev; |
706 | size_t xfer_count; | 663 | size_t xfer_count; |
707 | size_t offset; | 664 | size_t offset; |
665 | u8 m_master = dwc->m_master; | ||
708 | unsigned int src_width; | 666 | unsigned int src_width; |
709 | unsigned int dst_width; | 667 | unsigned int dst_width; |
710 | unsigned int data_width; | 668 | unsigned int data_width = dw->pdata->data_width[m_master]; |
711 | u32 ctllo; | 669 | u32 ctllo; |
670 | u8 lms = DWC_LLP_LMS(m_master); | ||
712 | 671 | ||
713 | dev_vdbg(chan2dev(chan), | 672 | dev_vdbg(chan2dev(chan), |
714 | "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__, | 673 | "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__, |
@@ -721,11 +680,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
721 | 680 | ||
722 | dwc->direction = DMA_MEM_TO_MEM; | 681 | dwc->direction = DMA_MEM_TO_MEM; |
723 | 682 | ||
724 | data_width = min_t(unsigned int, dw->data_width[dwc->src_master], | 683 | src_width = dst_width = __ffs(data_width | src | dest | len); |
725 | dw->data_width[dwc->dst_master]); | ||
726 | |||
727 | src_width = dst_width = min_t(unsigned int, data_width, | ||
728 | dwc_fast_ffs(src | dest | len)); | ||
729 | 684 | ||
730 | ctllo = DWC_DEFAULT_CTLLO(chan) | 685 | ctllo = DWC_DEFAULT_CTLLO(chan) |
731 | | DWC_CTLL_DST_WIDTH(dst_width) | 686 | | DWC_CTLL_DST_WIDTH(dst_width) |
@@ -743,27 +698,27 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
743 | if (!desc) | 698 | if (!desc) |
744 | goto err_desc_get; | 699 | goto err_desc_get; |
745 | 700 | ||
746 | desc->lli.sar = src + offset; | 701 | lli_write(desc, sar, src + offset); |
747 | desc->lli.dar = dest + offset; | 702 | lli_write(desc, dar, dest + offset); |
748 | desc->lli.ctllo = ctllo; | 703 | lli_write(desc, ctllo, ctllo); |
749 | desc->lli.ctlhi = xfer_count; | 704 | lli_write(desc, ctlhi, xfer_count); |
750 | desc->len = xfer_count << src_width; | 705 | desc->len = xfer_count << src_width; |
751 | 706 | ||
752 | if (!first) { | 707 | if (!first) { |
753 | first = desc; | 708 | first = desc; |
754 | } else { | 709 | } else { |
755 | prev->lli.llp = desc->txd.phys; | 710 | lli_write(prev, llp, desc->txd.phys | lms); |
756 | list_add_tail(&desc->desc_node, | 711 | list_add_tail(&desc->desc_node, &first->tx_list); |
757 | &first->tx_list); | ||
758 | } | 712 | } |
759 | prev = desc; | 713 | prev = desc; |
760 | } | 714 | } |
761 | 715 | ||
762 | if (flags & DMA_PREP_INTERRUPT) | 716 | if (flags & DMA_PREP_INTERRUPT) |
763 | /* Trigger interrupt after last block */ | 717 | /* Trigger interrupt after last block */ |
764 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | 718 | lli_set(prev, ctllo, DWC_CTLL_INT_EN); |
765 | 719 | ||
766 | prev->lli.llp = 0; | 720 | prev->lli.llp = 0; |
721 | lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | ||
767 | first->txd.flags = flags; | 722 | first->txd.flags = flags; |
768 | first->total_len = len; | 723 | first->total_len = len; |
769 | 724 | ||
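The memcpy prep above also retires dwc_fast_ffs(): because data_width is now kept in bytes (a power of two) rather than as a log2 value, a single __ffs() over data_width | src | dest | len yields the largest transfer width that both the bus and the buffer alignment allow. A short worked example with made-up values:

#include <linux/bitops.h>

/*
 * Example: data_width = 4 (a 32-bit master), src = 0x10000000,
 * dest = 0x20000004, len = 0x1000.
 *
 *   4 | 0x10000000 | 0x20000004 | 0x1000 = 0x30001004
 *   __ffs(0x30001004) = 2                -> 4-byte beats
 *
 * If dest were 0x20000002 the OR would end in ...06 and __ffs() would
 * return 1, i.e. the width drops to 2-byte beats because of alignment.
 */
static unsigned int example_xfer_width(unsigned int data_width_bytes,
				       dma_addr_t src, dma_addr_t dest,
				       size_t len)
{
	return __ffs(data_width_bytes | src | dest | len);
}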
@@ -785,10 +740,12 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
785 | struct dw_desc *prev; | 740 | struct dw_desc *prev; |
786 | struct dw_desc *first; | 741 | struct dw_desc *first; |
787 | u32 ctllo; | 742 | u32 ctllo; |
743 | u8 m_master = dwc->m_master; | ||
744 | u8 lms = DWC_LLP_LMS(m_master); | ||
788 | dma_addr_t reg; | 745 | dma_addr_t reg; |
789 | unsigned int reg_width; | 746 | unsigned int reg_width; |
790 | unsigned int mem_width; | 747 | unsigned int mem_width; |
791 | unsigned int data_width; | 748 | unsigned int data_width = dw->pdata->data_width[m_master]; |
792 | unsigned int i; | 749 | unsigned int i; |
793 | struct scatterlist *sg; | 750 | struct scatterlist *sg; |
794 | size_t total_len = 0; | 751 | size_t total_len = 0; |
@@ -814,8 +771,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
814 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | 771 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : |
815 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | 772 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); |
816 | 773 | ||
817 | data_width = dw->data_width[dwc->src_master]; | ||
818 | |||
819 | for_each_sg(sgl, sg, sg_len, i) { | 774 | for_each_sg(sgl, sg, sg_len, i) { |
820 | struct dw_desc *desc; | 775 | struct dw_desc *desc; |
821 | u32 len, dlen, mem; | 776 | u32 len, dlen, mem; |
@@ -823,17 +778,16 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
823 | mem = sg_dma_address(sg); | 778 | mem = sg_dma_address(sg); |
824 | len = sg_dma_len(sg); | 779 | len = sg_dma_len(sg); |
825 | 780 | ||
826 | mem_width = min_t(unsigned int, | 781 | mem_width = __ffs(data_width | mem | len); |
827 | data_width, dwc_fast_ffs(mem | len)); | ||
828 | 782 | ||
829 | slave_sg_todev_fill_desc: | 783 | slave_sg_todev_fill_desc: |
830 | desc = dwc_desc_get(dwc); | 784 | desc = dwc_desc_get(dwc); |
831 | if (!desc) | 785 | if (!desc) |
832 | goto err_desc_get; | 786 | goto err_desc_get; |
833 | 787 | ||
834 | desc->lli.sar = mem; | 788 | lli_write(desc, sar, mem); |
835 | desc->lli.dar = reg; | 789 | lli_write(desc, dar, reg); |
836 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); | 790 | lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width)); |
837 | if ((len >> mem_width) > dwc->block_size) { | 791 | if ((len >> mem_width) > dwc->block_size) { |
838 | dlen = dwc->block_size << mem_width; | 792 | dlen = dwc->block_size << mem_width; |
839 | mem += dlen; | 793 | mem += dlen; |
@@ -843,15 +797,14 @@ slave_sg_todev_fill_desc: | |||
843 | len = 0; | 797 | len = 0; |
844 | } | 798 | } |
845 | 799 | ||
846 | desc->lli.ctlhi = dlen >> mem_width; | 800 | lli_write(desc, ctlhi, dlen >> mem_width); |
847 | desc->len = dlen; | 801 | desc->len = dlen; |
848 | 802 | ||
849 | if (!first) { | 803 | if (!first) { |
850 | first = desc; | 804 | first = desc; |
851 | } else { | 805 | } else { |
852 | prev->lli.llp = desc->txd.phys; | 806 | lli_write(prev, llp, desc->txd.phys | lms); |
853 | list_add_tail(&desc->desc_node, | 807 | list_add_tail(&desc->desc_node, &first->tx_list); |
854 | &first->tx_list); | ||
855 | } | 808 | } |
856 | prev = desc; | 809 | prev = desc; |
857 | total_len += dlen; | 810 | total_len += dlen; |
@@ -871,8 +824,6 @@ slave_sg_todev_fill_desc: | |||
871 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | 824 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : |
872 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | 825 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); |
873 | 826 | ||
874 | data_width = dw->data_width[dwc->dst_master]; | ||
875 | |||
876 | for_each_sg(sgl, sg, sg_len, i) { | 827 | for_each_sg(sgl, sg, sg_len, i) { |
877 | struct dw_desc *desc; | 828 | struct dw_desc *desc; |
878 | u32 len, dlen, mem; | 829 | u32 len, dlen, mem; |
@@ -880,17 +831,16 @@ slave_sg_todev_fill_desc: | |||
880 | mem = sg_dma_address(sg); | 831 | mem = sg_dma_address(sg); |
881 | len = sg_dma_len(sg); | 832 | len = sg_dma_len(sg); |
882 | 833 | ||
883 | mem_width = min_t(unsigned int, | 834 | mem_width = __ffs(data_width | mem | len); |
884 | data_width, dwc_fast_ffs(mem | len)); | ||
885 | 835 | ||
886 | slave_sg_fromdev_fill_desc: | 836 | slave_sg_fromdev_fill_desc: |
887 | desc = dwc_desc_get(dwc); | 837 | desc = dwc_desc_get(dwc); |
888 | if (!desc) | 838 | if (!desc) |
889 | goto err_desc_get; | 839 | goto err_desc_get; |
890 | 840 | ||
891 | desc->lli.sar = reg; | 841 | lli_write(desc, sar, reg); |
892 | desc->lli.dar = mem; | 842 | lli_write(desc, dar, mem); |
893 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); | 843 | lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width)); |
894 | if ((len >> reg_width) > dwc->block_size) { | 844 | if ((len >> reg_width) > dwc->block_size) { |
895 | dlen = dwc->block_size << reg_width; | 845 | dlen = dwc->block_size << reg_width; |
896 | mem += dlen; | 846 | mem += dlen; |
@@ -899,15 +849,14 @@ slave_sg_fromdev_fill_desc: | |||
899 | dlen = len; | 849 | dlen = len; |
900 | len = 0; | 850 | len = 0; |
901 | } | 851 | } |
902 | desc->lli.ctlhi = dlen >> reg_width; | 852 | lli_write(desc, ctlhi, dlen >> reg_width); |
903 | desc->len = dlen; | 853 | desc->len = dlen; |
904 | 854 | ||
905 | if (!first) { | 855 | if (!first) { |
906 | first = desc; | 856 | first = desc; |
907 | } else { | 857 | } else { |
908 | prev->lli.llp = desc->txd.phys; | 858 | lli_write(prev, llp, desc->txd.phys | lms); |
909 | list_add_tail(&desc->desc_node, | 859 | list_add_tail(&desc->desc_node, &first->tx_list); |
910 | &first->tx_list); | ||
911 | } | 860 | } |
912 | prev = desc; | 861 | prev = desc; |
913 | total_len += dlen; | 862 | total_len += dlen; |
@@ -922,9 +871,10 @@ slave_sg_fromdev_fill_desc: | |||
922 | 871 | ||
923 | if (flags & DMA_PREP_INTERRUPT) | 872 | if (flags & DMA_PREP_INTERRUPT) |
924 | /* Trigger interrupt after last block */ | 873 | /* Trigger interrupt after last block */ |
925 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | 874 | lli_set(prev, ctllo, DWC_CTLL_INT_EN); |
926 | 875 | ||
927 | prev->lli.llp = 0; | 876 | prev->lli.llp = 0; |
877 | lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | ||
928 | first->total_len = total_len; | 878 | first->total_len = total_len; |
929 | 879 | ||
930 | return &first->txd; | 880 | return &first->txd; |
@@ -941,7 +891,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param) | |||
941 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 891 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
942 | struct dw_dma_slave *dws = param; | 892 | struct dw_dma_slave *dws = param; |
943 | 893 | ||
944 | if (!dws || dws->dma_dev != chan->device->dev) | 894 | if (dws->dma_dev != chan->device->dev) |
945 | return false; | 895 | return false; |
946 | 896 | ||
947 | /* We have to copy data since dws can be temporary storage */ | 897 | /* We have to copy data since dws can be temporary storage */ |
@@ -949,8 +899,8 @@ bool dw_dma_filter(struct dma_chan *chan, void *param) | |||
949 | dwc->src_id = dws->src_id; | 899 | dwc->src_id = dws->src_id; |
950 | dwc->dst_id = dws->dst_id; | 900 | dwc->dst_id = dws->dst_id; |
951 | 901 | ||
952 | dwc->src_master = dws->src_master; | 902 | dwc->m_master = dws->m_master; |
953 | dwc->dst_master = dws->dst_master; | 903 | dwc->p_master = dws->p_master; |
954 | 904 | ||
955 | return true; | 905 | return true; |
956 | } | 906 | } |
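dw_dma_filter() now assumes it is always handed a struct dw_dma_slave and copies it into the channel, including the renamed m_master/p_master fields. A hypothetical caller going through the generic filter mechanism might look like this; the handshake IDs and master numbers are invented, and the filter's prototype is written out explicitly rather than taken from a specific header:

#include <linux/dmaengine.h>
#include <linux/platform_data/dma-dw.h>

extern bool dw_dma_filter(struct dma_chan *chan, void *param);

static struct dma_chan *example_request_dw_channel(struct device *dma_dev)
{
	/* Can live on the stack: dw_dma_filter() copies what it needs. */
	struct dw_dma_slave dws = {
		.dma_dev  = dma_dev,	/* must be the DMA controller's device */
		.src_id   = 3,		/* hypothetical handshake interface IDs */
		.dst_id   = 4,
		.m_master = 0,
		.p_master = 1,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, dw_dma_filter, &dws);
}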
@@ -1003,7 +953,7 @@ static int dwc_pause(struct dma_chan *chan) | |||
1003 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) | 953 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) |
1004 | udelay(2); | 954 | udelay(2); |
1005 | 955 | ||
1006 | dwc->paused = true; | 956 | set_bit(DW_DMA_IS_PAUSED, &dwc->flags); |
1007 | 957 | ||
1008 | spin_unlock_irqrestore(&dwc->lock, flags); | 958 | spin_unlock_irqrestore(&dwc->lock, flags); |
1009 | 959 | ||
@@ -1016,7 +966,7 @@ static inline void dwc_chan_resume(struct dw_dma_chan *dwc) | |||
1016 | 966 | ||
1017 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); | 967 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); |
1018 | 968 | ||
1019 | dwc->paused = false; | 969 | clear_bit(DW_DMA_IS_PAUSED, &dwc->flags); |
1020 | } | 970 | } |
1021 | 971 | ||
1022 | static int dwc_resume(struct dma_chan *chan) | 972 | static int dwc_resume(struct dma_chan *chan) |
@@ -1024,12 +974,10 @@ static int dwc_resume(struct dma_chan *chan) | |||
1024 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 974 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1025 | unsigned long flags; | 975 | unsigned long flags; |
1026 | 976 | ||
1027 | if (!dwc->paused) | ||
1028 | return 0; | ||
1029 | |||
1030 | spin_lock_irqsave(&dwc->lock, flags); | 977 | spin_lock_irqsave(&dwc->lock, flags); |
1031 | 978 | ||
1032 | dwc_chan_resume(dwc); | 979 | if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags)) |
980 | dwc_chan_resume(dwc); | ||
1033 | 981 | ||
1034 | spin_unlock_irqrestore(&dwc->lock, flags); | 982 | spin_unlock_irqrestore(&dwc->lock, flags); |
1035 | 983 | ||
@@ -1065,16 +1013,37 @@ static int dwc_terminate_all(struct dma_chan *chan) | |||
1065 | return 0; | 1013 | return 0; |
1066 | } | 1014 | } |
1067 | 1015 | ||
1068 | static inline u32 dwc_get_residue(struct dw_dma_chan *dwc) | 1016 | static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c) |
1017 | { | ||
1018 | struct dw_desc *desc; | ||
1019 | |||
1020 | list_for_each_entry(desc, &dwc->active_list, desc_node) | ||
1021 | if (desc->txd.cookie == c) | ||
1022 | return desc; | ||
1023 | |||
1024 | return NULL; | ||
1025 | } | ||
1026 | |||
1027 | static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie) | ||
1069 | { | 1028 | { |
1029 | struct dw_desc *desc; | ||
1070 | unsigned long flags; | 1030 | unsigned long flags; |
1071 | u32 residue; | 1031 | u32 residue; |
1072 | 1032 | ||
1073 | spin_lock_irqsave(&dwc->lock, flags); | 1033 | spin_lock_irqsave(&dwc->lock, flags); |
1074 | 1034 | ||
1075 | residue = dwc->residue; | 1035 | desc = dwc_find_desc(dwc, cookie); |
1076 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) | 1036 | if (desc) { |
1077 | residue -= dwc_get_sent(dwc); | 1037 | if (desc == dwc_first_active(dwc)) { |
1038 | residue = desc->residue; | ||
1039 | if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) | ||
1040 | residue -= dwc_get_sent(dwc); | ||
1041 | } else { | ||
1042 | residue = desc->total_len; | ||
1043 | } | ||
1044 | } else { | ||
1045 | residue = 0; | ||
1046 | } | ||
1078 | 1047 | ||
1079 | spin_unlock_irqrestore(&dwc->lock, flags); | 1048 | spin_unlock_irqrestore(&dwc->lock, flags); |
1080 | return residue; | 1049 | return residue; |
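Residue is now a per-descriptor property: dwc_get_residue() looks the cookie up in the active list, reporting the partially consumed residue for the descriptor currently in flight and the full total_len for ones still queued behind it. Clients are unaffected and keep reading it through the standard status call, roughly:

#include <linux/dmaengine.h>

/* Sketch: how many bytes of the transfer identified by cookie remain. */
static u32 example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	if (dmaengine_tx_status(chan, cookie, &state) == DMA_COMPLETE)
		return 0;

	return state.residue;
}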
@@ -1095,10 +1064,12 @@ dwc_tx_status(struct dma_chan *chan, | |||
1095 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | 1064 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
1096 | 1065 | ||
1097 | ret = dma_cookie_status(chan, cookie, txstate); | 1066 | ret = dma_cookie_status(chan, cookie, txstate); |
1098 | if (ret != DMA_COMPLETE) | 1067 | if (ret == DMA_COMPLETE) |
1099 | dma_set_residue(txstate, dwc_get_residue(dwc)); | 1068 | return ret; |
1100 | 1069 | ||
1101 | if (dwc->paused && ret == DMA_IN_PROGRESS) | 1070 | dma_set_residue(txstate, dwc_get_residue(dwc, cookie)); |
1071 | |||
1072 | if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS) | ||
1102 | return DMA_PAUSED; | 1073 | return DMA_PAUSED; |
1103 | 1074 | ||
1104 | return ret; | 1075 | return ret; |
@@ -1119,7 +1090,7 @@ static void dwc_issue_pending(struct dma_chan *chan) | |||
1119 | 1090 | ||
1120 | static void dw_dma_off(struct dw_dma *dw) | 1091 | static void dw_dma_off(struct dw_dma *dw) |
1121 | { | 1092 | { |
1122 | int i; | 1093 | unsigned int i; |
1123 | 1094 | ||
1124 | dma_writel(dw, CFG, 0); | 1095 | dma_writel(dw, CFG, 0); |
1125 | 1096 | ||
@@ -1133,7 +1104,7 @@ static void dw_dma_off(struct dw_dma *dw) | |||
1133 | cpu_relax(); | 1104 | cpu_relax(); |
1134 | 1105 | ||
1135 | for (i = 0; i < dw->dma.chancnt; i++) | 1106 | for (i = 0; i < dw->dma.chancnt; i++) |
1136 | dw->chan[i].initialized = false; | 1107 | clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags); |
1137 | } | 1108 | } |
1138 | 1109 | ||
1139 | static void dw_dma_on(struct dw_dma *dw) | 1110 | static void dw_dma_on(struct dw_dma *dw) |
@@ -1145,9 +1116,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
1145 | { | 1116 | { |
1146 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1117 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1147 | struct dw_dma *dw = to_dw_dma(chan->device); | 1118 | struct dw_dma *dw = to_dw_dma(chan->device); |
1148 | struct dw_desc *desc; | ||
1149 | int i; | ||
1150 | unsigned long flags; | ||
1151 | 1119 | ||
1152 | dev_vdbg(chan2dev(chan), "%s\n", __func__); | 1120 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
1153 | 1121 | ||
@@ -1165,53 +1133,26 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
1165 | * doesn't mean what you think it means), and status writeback. | 1133 | * doesn't mean what you think it means), and status writeback. |
1166 | */ | 1134 | */ |
1167 | 1135 | ||
1136 | /* | ||
1137 | * We need controller-specific data to set up slave transfers. | ||
1138 | */ | ||
1139 | if (chan->private && !dw_dma_filter(chan, chan->private)) { | ||
1140 | dev_warn(chan2dev(chan), "Wrong controller-specific data\n"); | ||
1141 | return -EINVAL; | ||
1142 | } | ||
1143 | |||
1168 | /* Enable controller here if needed */ | 1144 | /* Enable controller here if needed */ |
1169 | if (!dw->in_use) | 1145 | if (!dw->in_use) |
1170 | dw_dma_on(dw); | 1146 | dw_dma_on(dw); |
1171 | dw->in_use |= dwc->mask; | 1147 | dw->in_use |= dwc->mask; |
1172 | 1148 | ||
1173 | spin_lock_irqsave(&dwc->lock, flags); | 1149 | return 0; |
1174 | i = dwc->descs_allocated; | ||
1175 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { | ||
1176 | dma_addr_t phys; | ||
1177 | |||
1178 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
1179 | |||
1180 | desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys); | ||
1181 | if (!desc) | ||
1182 | goto err_desc_alloc; | ||
1183 | |||
1184 | memset(desc, 0, sizeof(struct dw_desc)); | ||
1185 | |||
1186 | INIT_LIST_HEAD(&desc->tx_list); | ||
1187 | dma_async_tx_descriptor_init(&desc->txd, chan); | ||
1188 | desc->txd.tx_submit = dwc_tx_submit; | ||
1189 | desc->txd.flags = DMA_CTRL_ACK; | ||
1190 | desc->txd.phys = phys; | ||
1191 | |||
1192 | dwc_desc_put(dwc, desc); | ||
1193 | |||
1194 | spin_lock_irqsave(&dwc->lock, flags); | ||
1195 | i = ++dwc->descs_allocated; | ||
1196 | } | ||
1197 | |||
1198 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
1199 | |||
1200 | dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); | ||
1201 | |||
1202 | return i; | ||
1203 | |||
1204 | err_desc_alloc: | ||
1205 | dev_info(chan2dev(chan), "only allocated %d descriptors\n", i); | ||
1206 | |||
1207 | return i; | ||
1208 | } | 1150 | } |
1209 | 1151 | ||
1210 | static void dwc_free_chan_resources(struct dma_chan *chan) | 1152 | static void dwc_free_chan_resources(struct dma_chan *chan) |
1211 | { | 1153 | { |
1212 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1154 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1213 | struct dw_dma *dw = to_dw_dma(chan->device); | 1155 | struct dw_dma *dw = to_dw_dma(chan->device); |
1214 | struct dw_desc *desc, *_desc; | ||
1215 | unsigned long flags; | 1156 | unsigned long flags; |
1216 | LIST_HEAD(list); | 1157 | LIST_HEAD(list); |
1217 | 1158 | ||
@@ -1224,9 +1165,15 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1224 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); | 1165 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); |
1225 | 1166 | ||
1226 | spin_lock_irqsave(&dwc->lock, flags); | 1167 | spin_lock_irqsave(&dwc->lock, flags); |
1227 | list_splice_init(&dwc->free_list, &list); | 1168 | |
1228 | dwc->descs_allocated = 0; | 1169 | /* Clear custom channel configuration */ |
1229 | dwc->initialized = false; | 1170 | dwc->src_id = 0; |
1171 | dwc->dst_id = 0; | ||
1172 | |||
1173 | dwc->m_master = 0; | ||
1174 | dwc->p_master = 0; | ||
1175 | |||
1176 | clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); | ||
1230 | 1177 | ||
1231 | /* Disable interrupts */ | 1178 | /* Disable interrupts */ |
1232 | channel_clear_bit(dw, MASK.XFER, dwc->mask); | 1179 | channel_clear_bit(dw, MASK.XFER, dwc->mask); |
@@ -1240,11 +1187,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1240 | if (!dw->in_use) | 1187 | if (!dw->in_use) |
1241 | dw_dma_off(dw); | 1188 | dw_dma_off(dw); |
1242 | 1189 | ||
1243 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { | ||
1244 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); | ||
1245 | dma_pool_free(dw->desc_pool, desc, desc->txd.phys); | ||
1246 | } | ||
1247 | |||
1248 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); | 1190 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); |
1249 | } | 1191 | } |
1250 | 1192 | ||
@@ -1322,6 +1264,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1322 | struct dw_cyclic_desc *retval = NULL; | 1264 | struct dw_cyclic_desc *retval = NULL; |
1323 | struct dw_desc *desc; | 1265 | struct dw_desc *desc; |
1324 | struct dw_desc *last = NULL; | 1266 | struct dw_desc *last = NULL; |
1267 | u8 lms = DWC_LLP_LMS(dwc->m_master); | ||
1325 | unsigned long was_cyclic; | 1268 | unsigned long was_cyclic; |
1326 | unsigned int reg_width; | 1269 | unsigned int reg_width; |
1327 | unsigned int periods; | 1270 | unsigned int periods; |
@@ -1375,9 +1318,6 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1375 | 1318 | ||
1376 | retval = ERR_PTR(-ENOMEM); | 1319 | retval = ERR_PTR(-ENOMEM); |
1377 | 1320 | ||
1378 | if (periods > NR_DESCS_PER_CHANNEL) | ||
1379 | goto out_err; | ||
1380 | |||
1381 | cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); | 1321 | cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); |
1382 | if (!cdesc) | 1322 | if (!cdesc) |
1383 | goto out_err; | 1323 | goto out_err; |
@@ -1393,50 +1333,50 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1393 | 1333 | ||
1394 | switch (direction) { | 1334 | switch (direction) { |
1395 | case DMA_MEM_TO_DEV: | 1335 | case DMA_MEM_TO_DEV: |
1396 | desc->lli.dar = sconfig->dst_addr; | 1336 | lli_write(desc, dar, sconfig->dst_addr); |
1397 | desc->lli.sar = buf_addr + (period_len * i); | 1337 | lli_write(desc, sar, buf_addr + period_len * i); |
1398 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) | 1338 | lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan) |
1399 | | DWC_CTLL_DST_WIDTH(reg_width) | 1339 | | DWC_CTLL_DST_WIDTH(reg_width) |
1400 | | DWC_CTLL_SRC_WIDTH(reg_width) | 1340 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1401 | | DWC_CTLL_DST_FIX | 1341 | | DWC_CTLL_DST_FIX |
1402 | | DWC_CTLL_SRC_INC | 1342 | | DWC_CTLL_SRC_INC |
1403 | | DWC_CTLL_INT_EN); | 1343 | | DWC_CTLL_INT_EN)); |
1404 | 1344 | ||
1405 | desc->lli.ctllo |= sconfig->device_fc ? | 1345 | lli_set(desc, ctllo, sconfig->device_fc ? |
1406 | DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | 1346 | DWC_CTLL_FC(DW_DMA_FC_P_M2P) : |
1407 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | 1347 | DWC_CTLL_FC(DW_DMA_FC_D_M2P)); |
1408 | 1348 | ||
1409 | break; | 1349 | break; |
1410 | case DMA_DEV_TO_MEM: | 1350 | case DMA_DEV_TO_MEM: |
1411 | desc->lli.dar = buf_addr + (period_len * i); | 1351 | lli_write(desc, dar, buf_addr + period_len * i); |
1412 | desc->lli.sar = sconfig->src_addr; | 1352 | lli_write(desc, sar, sconfig->src_addr); |
1413 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) | 1353 | lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan) |
1414 | | DWC_CTLL_SRC_WIDTH(reg_width) | 1354 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1415 | | DWC_CTLL_DST_WIDTH(reg_width) | 1355 | | DWC_CTLL_DST_WIDTH(reg_width) |
1416 | | DWC_CTLL_DST_INC | 1356 | | DWC_CTLL_DST_INC |
1417 | | DWC_CTLL_SRC_FIX | 1357 | | DWC_CTLL_SRC_FIX |
1418 | | DWC_CTLL_INT_EN); | 1358 | | DWC_CTLL_INT_EN)); |
1419 | 1359 | ||
1420 | desc->lli.ctllo |= sconfig->device_fc ? | 1360 | lli_set(desc, ctllo, sconfig->device_fc ? |
1421 | DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | 1361 | DWC_CTLL_FC(DW_DMA_FC_P_P2M) : |
1422 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | 1362 | DWC_CTLL_FC(DW_DMA_FC_D_P2M)); |
1423 | 1363 | ||
1424 | break; | 1364 | break; |
1425 | default: | 1365 | default: |
1426 | break; | 1366 | break; |
1427 | } | 1367 | } |
1428 | 1368 | ||
1429 | desc->lli.ctlhi = (period_len >> reg_width); | 1369 | lli_write(desc, ctlhi, period_len >> reg_width); |
1430 | cdesc->desc[i] = desc; | 1370 | cdesc->desc[i] = desc; |
1431 | 1371 | ||
1432 | if (last) | 1372 | if (last) |
1433 | last->lli.llp = desc->txd.phys; | 1373 | lli_write(last, llp, desc->txd.phys | lms); |
1434 | 1374 | ||
1435 | last = desc; | 1375 | last = desc; |
1436 | } | 1376 | } |
1437 | 1377 | ||
1438 | /* Let's make a cyclic list */ | 1378 | /* Let's make a cyclic list */ |
1439 | last->lli.llp = cdesc->desc[0]->txd.phys; | 1379 | lli_write(last, llp, cdesc->desc[0]->txd.phys | lms); |
1440 | 1380 | ||
1441 | dev_dbg(chan2dev(&dwc->chan), | 1381 | dev_dbg(chan2dev(&dwc->chan), |
1442 | "cyclic prepared buf %pad len %zu period %zu periods %d\n", | 1382 | "cyclic prepared buf %pad len %zu period %zu periods %d\n", |
@@ -1467,7 +1407,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan) | |||
1467 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1407 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1468 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 1408 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
1469 | struct dw_cyclic_desc *cdesc = dwc->cdesc; | 1409 | struct dw_cyclic_desc *cdesc = dwc->cdesc; |
1470 | int i; | 1410 | unsigned int i; |
1471 | unsigned long flags; | 1411 | unsigned long flags; |
1472 | 1412 | ||
1473 | dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); | 1413 | dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); |
@@ -1491,32 +1431,38 @@ void dw_dma_cyclic_free(struct dma_chan *chan) | |||
1491 | kfree(cdesc->desc); | 1431 | kfree(cdesc->desc); |
1492 | kfree(cdesc); | 1432 | kfree(cdesc); |
1493 | 1433 | ||
1434 | dwc->cdesc = NULL; | ||
1435 | |||
1494 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | 1436 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); |
1495 | } | 1437 | } |
1496 | EXPORT_SYMBOL(dw_dma_cyclic_free); | 1438 | EXPORT_SYMBOL(dw_dma_cyclic_free); |
1497 | 1439 | ||
1498 | /*----------------------------------------------------------------------*/ | 1440 | /*----------------------------------------------------------------------*/ |
1499 | 1441 | ||
1500 | int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | 1442 | int dw_dma_probe(struct dw_dma_chip *chip) |
1501 | { | 1443 | { |
1444 | struct dw_dma_platform_data *pdata; | ||
1502 | struct dw_dma *dw; | 1445 | struct dw_dma *dw; |
1503 | bool autocfg = false; | 1446 | bool autocfg = false; |
1504 | unsigned int dw_params; | 1447 | unsigned int dw_params; |
1505 | unsigned int max_blk_size = 0; | 1448 | unsigned int i; |
1506 | int err; | 1449 | int err; |
1507 | int i; | ||
1508 | 1450 | ||
1509 | dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); | 1451 | dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); |
1510 | if (!dw) | 1452 | if (!dw) |
1511 | return -ENOMEM; | 1453 | return -ENOMEM; |
1512 | 1454 | ||
1455 | dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL); | ||
1456 | if (!dw->pdata) | ||
1457 | return -ENOMEM; | ||
1458 | |||
1513 | dw->regs = chip->regs; | 1459 | dw->regs = chip->regs; |
1514 | chip->dw = dw; | 1460 | chip->dw = dw; |
1515 | 1461 | ||
1516 | pm_runtime_get_sync(chip->dev); | 1462 | pm_runtime_get_sync(chip->dev); |
1517 | 1463 | ||
1518 | if (!pdata) { | 1464 | if (!chip->pdata) { |
1519 | dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); | 1465 | dw_params = dma_readl(dw, DW_PARAMS); |
1520 | dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); | 1466 | dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); |
1521 | 1467 | ||
1522 | autocfg = dw_params >> DW_PARAMS_EN & 1; | 1468 | autocfg = dw_params >> DW_PARAMS_EN & 1; |
@@ -1525,29 +1471,31 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1525 | goto err_pdata; | 1471 | goto err_pdata; |
1526 | } | 1472 | } |
1527 | 1473 | ||
1528 | pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); | 1474 | /* Reassign the platform data pointer */ |
1529 | if (!pdata) { | 1475 | pdata = dw->pdata; |
1530 | err = -ENOMEM; | ||
1531 | goto err_pdata; | ||
1532 | } | ||
1533 | 1476 | ||
1534 | /* Get hardware configuration parameters */ | 1477 | /* Get hardware configuration parameters */ |
1535 | pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1; | 1478 | pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1; |
1536 | pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; | 1479 | pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; |
1537 | for (i = 0; i < pdata->nr_masters; i++) { | 1480 | for (i = 0; i < pdata->nr_masters; i++) { |
1538 | pdata->data_width[i] = | 1481 | pdata->data_width[i] = |
1539 | (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; | 1482 | 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3); |
1540 | } | 1483 | } |
1541 | max_blk_size = dma_readl(dw, MAX_BLK_SIZE); | 1484 | pdata->block_size = dma_readl(dw, MAX_BLK_SIZE); |
1542 | 1485 | ||
1543 | /* Fill platform data with the default values */ | 1486 | /* Fill platform data with the default values */ |
1544 | pdata->is_private = true; | 1487 | pdata->is_private = true; |
1545 | pdata->is_memcpy = true; | 1488 | pdata->is_memcpy = true; |
1546 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; | 1489 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; |
1547 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; | 1490 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; |
1548 | } else if (pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { | 1491 | } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { |
1549 | err = -EINVAL; | 1492 | err = -EINVAL; |
1550 | goto err_pdata; | 1493 | goto err_pdata; |
1494 | } else { | ||
1495 | memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata)); | ||
1496 | |||
1497 | /* Reassign the platform data pointer */ | ||
1498 | pdata = dw->pdata; | ||
1551 | } | 1499 | } |
1552 | 1500 | ||
1553 | dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan), | 1501 | dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan), |
@@ -1557,11 +1505,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1557 | goto err_pdata; | 1505 | goto err_pdata; |
1558 | } | 1506 | } |
1559 | 1507 | ||
1560 | /* Get hardware configuration parameters */ | ||
1561 | dw->nr_masters = pdata->nr_masters; | ||
1562 | for (i = 0; i < dw->nr_masters; i++) | ||
1563 | dw->data_width[i] = pdata->data_width[i]; | ||
1564 | |||
1565 | /* Calculate all channel mask before DMA setup */ | 1508 | /* Calculate all channel mask before DMA setup */ |
1566 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; | 1509 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; |
1567 | 1510 | ||
@@ -1608,7 +1551,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1608 | 1551 | ||
1609 | INIT_LIST_HEAD(&dwc->active_list); | 1552 | INIT_LIST_HEAD(&dwc->active_list); |
1610 | INIT_LIST_HEAD(&dwc->queue); | 1553 | INIT_LIST_HEAD(&dwc->queue); |
1611 | INIT_LIST_HEAD(&dwc->free_list); | ||
1612 | 1554 | ||
1613 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1555 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1614 | 1556 | ||
@@ -1616,11 +1558,9 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1616 | 1558 | ||
1617 | /* Hardware configuration */ | 1559 | /* Hardware configuration */ |
1618 | if (autocfg) { | 1560 | if (autocfg) { |
1619 | unsigned int dwc_params; | ||
1620 | unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; | 1561 | unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; |
1621 | void __iomem *addr = chip->regs + r * sizeof(u32); | 1562 | void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r]; |
1622 | 1563 | unsigned int dwc_params = dma_readl_native(addr); | |
1623 | dwc_params = dma_read_byaddr(addr, DWC_PARAMS); | ||
1624 | 1564 | ||
1625 | dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, | 1565 | dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, |
1626 | dwc_params); | 1566 | dwc_params); |
@@ -1631,16 +1571,15 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1631 | * up to 0x0a for 4095. | 1571 | * up to 0x0a for 4095. |
1632 | */ | 1572 | */ |
1633 | dwc->block_size = | 1573 | dwc->block_size = |
1634 | (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; | 1574 | (4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1; |
1635 | dwc->nollp = | 1575 | dwc->nollp = |
1636 | (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; | 1576 | (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; |
1637 | } else { | 1577 | } else { |
1638 | dwc->block_size = pdata->block_size; | 1578 | dwc->block_size = pdata->block_size; |
1639 | 1579 | ||
1640 | /* Check if channel supports multi block transfer */ | 1580 | /* Check if channel supports multi block transfer */ |
1641 | channel_writel(dwc, LLP, 0xfffffffc); | 1581 | channel_writel(dwc, LLP, DWC_LLP_LOC(0xffffffff)); |
1642 | dwc->nollp = | 1582 | dwc->nollp = DWC_LLP_LOC(channel_readl(dwc, LLP)) == 0; |
1643 | (channel_readl(dwc, LLP) & 0xfffffffc) == 0; | ||
1644 | channel_writel(dwc, LLP, 0); | 1583 | channel_writel(dwc, LLP, 0); |
1645 | } | 1584 | } |
1646 | } | 1585 | } |
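
Note on the dw/core.c hunks above: the autoconfiguration path now stores the per-master data width as a byte count (4 shifted left by the 2-bit DW_PARAMS field) instead of the old exponent form (field plus 2), and the per-channel maximum block size is still decoded from one nibble of MAX_BLK_SIZE. A minimal stand-alone sketch of both decodings follows; the register values are invented for illustration and only the arithmetic mirrors the hunks above.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int max_blk_size = 0x0a0a0a0a;	/* hypothetical MAX_BLK_SIZE readout */
	unsigned int field, i;

	/* old encoding (power-of-two exponent) and new encoding (bytes) agree */
	for (field = 0; field < 4; field++) {
		unsigned int old_exponent = field + 2;
		unsigned int new_bytes = 4u << field;

		assert(new_bytes == 1u << old_exponent);
		printf("data width field %u -> %u bytes\n", field, new_bytes);
	}

	/* one nibble per channel; 0x0a decodes to the 4095-byte limit
	 * mentioned in the comment above */
	for (i = 0; i < 4; i++) {
		unsigned int block_size = (4u << ((max_blk_size >> 4 * i) & 0xf)) - 1;

		printf("channel %u: max block size %u\n", i, block_size);
	}
	return 0;
}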
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index 358f9689a3f5..0ae6c3b1d34e 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c | |||
@@ -17,8 +17,8 @@ | |||
17 | 17 | ||
18 | static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) | 18 | static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) |
19 | { | 19 | { |
20 | const struct dw_dma_platform_data *pdata = (void *)pid->driver_data; | ||
20 | struct dw_dma_chip *chip; | 21 | struct dw_dma_chip *chip; |
21 | struct dw_dma_platform_data *pdata = (void *)pid->driver_data; | ||
22 | int ret; | 22 | int ret; |
23 | 23 | ||
24 | ret = pcim_enable_device(pdev); | 24 | ret = pcim_enable_device(pdev); |
@@ -49,8 +49,9 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
49 | chip->dev = &pdev->dev; | 49 | chip->dev = &pdev->dev; |
50 | chip->regs = pcim_iomap_table(pdev)[0]; | 50 | chip->regs = pcim_iomap_table(pdev)[0]; |
51 | chip->irq = pdev->irq; | 51 | chip->irq = pdev->irq; |
52 | chip->pdata = pdata; | ||
52 | 53 | ||
53 | ret = dw_dma_probe(chip, pdata); | 54 | ret = dw_dma_probe(chip); |
54 | if (ret) | 55 | if (ret) |
55 | return ret; | 56 | return ret; |
56 | 57 | ||
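
Note on the dw/pci.c hunks above: the platform data no longer travels as a second argument to dw_dma_probe(); it hangs off the chip descriptor as a const pointer, and a NULL pointer means "autoconfigure from the hardware parameters". A small stand-alone sketch of that calling convention; the structure and field names here are simplified stand-ins, not the driver's types.

#include <stdio.h>

struct pdata {
	unsigned int nr_channels;
};

struct chip {
	const struct pdata *pdata;	/* NULL: read configuration from hardware */
};

static int probe(const struct chip *chip)
{
	if (!chip->pdata) {
		printf("no pdata, autoconfiguring from hardware parameters\n");
		return 0;
	}
	printf("pdata supplied: %u channels\n", chip->pdata->nr_channels);
	return 0;
}

int main(void)
{
	static const struct pdata pd = { .nr_channels = 8 };
	struct chip with_pdata = { .pdata = &pd };
	struct chip autocfg = { .pdata = NULL };

	probe(&with_pdata);
	probe(&autocfg);
	return 0;
}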
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index 26edbe3a27ac..5bda0eb9f393 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c | |||
@@ -42,13 +42,13 @@ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, | |||
42 | 42 | ||
43 | slave.src_id = dma_spec->args[0]; | 43 | slave.src_id = dma_spec->args[0]; |
44 | slave.dst_id = dma_spec->args[0]; | 44 | slave.dst_id = dma_spec->args[0]; |
45 | slave.src_master = dma_spec->args[1]; | 45 | slave.m_master = dma_spec->args[1]; |
46 | slave.dst_master = dma_spec->args[2]; | 46 | slave.p_master = dma_spec->args[2]; |
47 | 47 | ||
48 | if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS || | 48 | if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS || |
49 | slave.dst_id >= DW_DMA_MAX_NR_REQUESTS || | 49 | slave.dst_id >= DW_DMA_MAX_NR_REQUESTS || |
50 | slave.src_master >= dw->nr_masters || | 50 | slave.m_master >= dw->pdata->nr_masters || |
51 | slave.dst_master >= dw->nr_masters)) | 51 | slave.p_master >= dw->pdata->nr_masters)) |
52 | return NULL; | 52 | return NULL; |
53 | 53 | ||
54 | dma_cap_zero(cap); | 54 | dma_cap_zero(cap); |
@@ -66,8 +66,8 @@ static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) | |||
66 | .dma_dev = dma_spec->dev, | 66 | .dma_dev = dma_spec->dev, |
67 | .src_id = dma_spec->slave_id, | 67 | .src_id = dma_spec->slave_id, |
68 | .dst_id = dma_spec->slave_id, | 68 | .dst_id = dma_spec->slave_id, |
69 | .src_master = 1, | 69 | .m_master = 0, |
70 | .dst_master = 0, | 70 | .p_master = 1, |
71 | }; | 71 | }; |
72 | 72 | ||
73 | return dw_dma_filter(chan, &slave); | 73 | return dw_dma_filter(chan, &slave); |
@@ -103,6 +103,7 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
103 | struct device_node *np = pdev->dev.of_node; | 103 | struct device_node *np = pdev->dev.of_node; |
104 | struct dw_dma_platform_data *pdata; | 104 | struct dw_dma_platform_data *pdata; |
105 | u32 tmp, arr[DW_DMA_MAX_NR_MASTERS]; | 105 | u32 tmp, arr[DW_DMA_MAX_NR_MASTERS]; |
106 | u32 nr_masters; | ||
106 | u32 nr_channels; | 107 | u32 nr_channels; |
107 | 108 | ||
108 | if (!np) { | 109 | if (!np) { |
@@ -110,6 +111,11 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
110 | return NULL; | 111 | return NULL; |
111 | } | 112 | } |
112 | 113 | ||
114 | if (of_property_read_u32(np, "dma-masters", &nr_masters)) | ||
115 | return NULL; | ||
116 | if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS) | ||
117 | return NULL; | ||
118 | |||
113 | if (of_property_read_u32(np, "dma-channels", &nr_channels)) | 119 | if (of_property_read_u32(np, "dma-channels", &nr_channels)) |
114 | return NULL; | 120 | return NULL; |
115 | 121 | ||
@@ -117,6 +123,7 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
117 | if (!pdata) | 123 | if (!pdata) |
118 | return NULL; | 124 | return NULL; |
119 | 125 | ||
126 | pdata->nr_masters = nr_masters; | ||
120 | pdata->nr_channels = nr_channels; | 127 | pdata->nr_channels = nr_channels; |
121 | 128 | ||
122 | if (of_property_read_bool(np, "is_private")) | 129 | if (of_property_read_bool(np, "is_private")) |
@@ -131,17 +138,13 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
131 | if (!of_property_read_u32(np, "block_size", &tmp)) | 138 | if (!of_property_read_u32(np, "block_size", &tmp)) |
132 | pdata->block_size = tmp; | 139 | pdata->block_size = tmp; |
133 | 140 | ||
134 | if (!of_property_read_u32(np, "dma-masters", &tmp)) { | 141 | if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) { |
135 | if (tmp > DW_DMA_MAX_NR_MASTERS) | 142 | for (tmp = 0; tmp < nr_masters; tmp++) |
136 | return NULL; | ||
137 | |||
138 | pdata->nr_masters = tmp; | ||
139 | } | ||
140 | |||
141 | if (!of_property_read_u32_array(np, "data_width", arr, | ||
142 | pdata->nr_masters)) | ||
143 | for (tmp = 0; tmp < pdata->nr_masters; tmp++) | ||
144 | pdata->data_width[tmp] = arr[tmp]; | 143 | pdata->data_width[tmp] = arr[tmp]; |
144 | } else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) { | ||
145 | for (tmp = 0; tmp < nr_masters; tmp++) | ||
146 | pdata->data_width[tmp] = BIT(arr[tmp] & 0x07); | ||
147 | } | ||
145 | 148 | ||
146 | return pdata; | 149 | return pdata; |
147 | } | 150 | } |
@@ -158,7 +161,7 @@ static int dw_probe(struct platform_device *pdev) | |||
158 | struct dw_dma_chip *chip; | 161 | struct dw_dma_chip *chip; |
159 | struct device *dev = &pdev->dev; | 162 | struct device *dev = &pdev->dev; |
160 | struct resource *mem; | 163 | struct resource *mem; |
161 | struct dw_dma_platform_data *pdata; | 164 | const struct dw_dma_platform_data *pdata; |
162 | int err; | 165 | int err; |
163 | 166 | ||
164 | chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); | 167 | chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); |
@@ -183,6 +186,7 @@ static int dw_probe(struct platform_device *pdev) | |||
183 | pdata = dw_dma_parse_dt(pdev); | 186 | pdata = dw_dma_parse_dt(pdev); |
184 | 187 | ||
185 | chip->dev = dev; | 188 | chip->dev = dev; |
189 | chip->pdata = pdata; | ||
186 | 190 | ||
187 | chip->clk = devm_clk_get(chip->dev, "hclk"); | 191 | chip->clk = devm_clk_get(chip->dev, "hclk"); |
188 | if (IS_ERR(chip->clk)) | 192 | if (IS_ERR(chip->clk)) |
@@ -193,7 +197,7 @@ static int dw_probe(struct platform_device *pdev) | |||
193 | 197 | ||
194 | pm_runtime_enable(&pdev->dev); | 198 | pm_runtime_enable(&pdev->dev); |
195 | 199 | ||
196 | err = dw_dma_probe(chip, pdata); | 200 | err = dw_dma_probe(chip); |
197 | if (err) | 201 | if (err) |
198 | goto err_dw_dma_probe; | 202 | goto err_dw_dma_probe; |
199 | 203 | ||
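
Note on the dw/platform.c hunks above: the DT parser now requires "dma-masters" up front and accepts two width properties, the preferred "data-width" given directly in bytes and the legacy "data_width" given as an encoded exponent expanded with BIT(arr[tmp] & 0x07). A stand-alone sketch showing that the two forms describe the same width; the sample property values are made up.

#include <stdio.h>

#define BIT(n)	(1u << (n))

int main(void)
{
	/* hypothetical values for two AHB masters */
	unsigned int new_prop[2] = { 4, 8 };		/* "data-width", in bytes */
	unsigned int legacy_prop[2] = { 2, 3 };		/* "data_width", log2 encoding */
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("master %u: data-width=%u bytes, data_width=%u bytes\n",
		       i, new_prop[i], BIT(legacy_prop[i] & 0x07));
	return 0;
}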
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index 0a50c18d85b8..4b7bd7834046 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h | |||
@@ -114,10 +114,6 @@ struct dw_dma_regs { | |||
114 | #define dma_writel_native writel | 114 | #define dma_writel_native writel |
115 | #endif | 115 | #endif |
116 | 116 | ||
117 | /* To access the registers in early stage of probe */ | ||
118 | #define dma_read_byaddr(addr, name) \ | ||
119 | dma_readl_native((addr) + offsetof(struct dw_dma_regs, name)) | ||
120 | |||
121 | /* Bitfields in DW_PARAMS */ | 117 | /* Bitfields in DW_PARAMS */ |
122 | #define DW_PARAMS_NR_CHAN 8 /* number of channels */ | 118 | #define DW_PARAMS_NR_CHAN 8 /* number of channels */ |
123 | #define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */ | 119 | #define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */ |
@@ -143,6 +139,10 @@ enum dw_dma_msize { | |||
143 | DW_DMA_MSIZE_256, | 139 | DW_DMA_MSIZE_256, |
144 | }; | 140 | }; |
145 | 141 | ||
142 | /* Bitfields in LLP */ | ||
143 | #define DWC_LLP_LMS(x) ((x) & 3) /* list master select */ | ||
144 | #define DWC_LLP_LOC(x) ((x) & ~3) /* next lli */ | ||
145 | |||
146 | /* Bitfields in CTL_LO */ | 146 | /* Bitfields in CTL_LO */ |
147 | #define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */ | 147 | #define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */ |
148 | #define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ | 148 | #define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ |
@@ -216,6 +216,8 @@ enum dw_dma_msize { | |||
216 | enum dw_dmac_flags { | 216 | enum dw_dmac_flags { |
217 | DW_DMA_IS_CYCLIC = 0, | 217 | DW_DMA_IS_CYCLIC = 0, |
218 | DW_DMA_IS_SOFT_LLP = 1, | 218 | DW_DMA_IS_SOFT_LLP = 1, |
219 | DW_DMA_IS_PAUSED = 2, | ||
220 | DW_DMA_IS_INITIALIZED = 3, | ||
219 | }; | 221 | }; |
220 | 222 | ||
221 | struct dw_dma_chan { | 223 | struct dw_dma_chan { |
@@ -224,8 +226,6 @@ struct dw_dma_chan { | |||
224 | u8 mask; | 226 | u8 mask; |
225 | u8 priority; | 227 | u8 priority; |
226 | enum dma_transfer_direction direction; | 228 | enum dma_transfer_direction direction; |
227 | bool paused; | ||
228 | bool initialized; | ||
229 | 229 | ||
230 | /* software emulation of the LLP transfers */ | 230 | /* software emulation of the LLP transfers */ |
231 | struct list_head *tx_node_active; | 231 | struct list_head *tx_node_active; |
@@ -236,8 +236,6 @@ struct dw_dma_chan { | |||
236 | unsigned long flags; | 236 | unsigned long flags; |
237 | struct list_head active_list; | 237 | struct list_head active_list; |
238 | struct list_head queue; | 238 | struct list_head queue; |
239 | struct list_head free_list; | ||
240 | u32 residue; | ||
241 | struct dw_cyclic_desc *cdesc; | 239 | struct dw_cyclic_desc *cdesc; |
242 | 240 | ||
243 | unsigned int descs_allocated; | 241 | unsigned int descs_allocated; |
@@ -249,8 +247,8 @@ struct dw_dma_chan { | |||
249 | /* custom slave configuration */ | 247 | /* custom slave configuration */ |
250 | u8 src_id; | 248 | u8 src_id; |
251 | u8 dst_id; | 249 | u8 dst_id; |
252 | u8 src_master; | 250 | u8 m_master; |
253 | u8 dst_master; | 251 | u8 p_master; |
254 | 252 | ||
255 | /* configuration passed via .device_config */ | 253 | /* configuration passed via .device_config */ |
256 | struct dma_slave_config dma_sconfig; | 254 | struct dma_slave_config dma_sconfig; |
@@ -283,9 +281,8 @@ struct dw_dma { | |||
283 | u8 all_chan_mask; | 281 | u8 all_chan_mask; |
284 | u8 in_use; | 282 | u8 in_use; |
285 | 283 | ||
286 | /* hardware configuration */ | 284 | /* platform data */ |
287 | unsigned char nr_masters; | 285 | struct dw_dma_platform_data *pdata; |
288 | unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; | ||
289 | }; | 286 | }; |
290 | 287 | ||
291 | static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) | 288 | static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) |
@@ -308,32 +305,51 @@ static inline struct dw_dma *to_dw_dma(struct dma_device *ddev) | |||
308 | return container_of(ddev, struct dw_dma, dma); | 305 | return container_of(ddev, struct dw_dma, dma); |
309 | } | 306 | } |
310 | 307 | ||
308 | #ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO | ||
309 | typedef __be32 __dw32; | ||
310 | #else | ||
311 | typedef __le32 __dw32; | ||
312 | #endif | ||
313 | |||
311 | /* LLI == Linked List Item; a.k.a. DMA block descriptor */ | 314 | /* LLI == Linked List Item; a.k.a. DMA block descriptor */ |
312 | struct dw_lli { | 315 | struct dw_lli { |
313 | /* values that are not changed by hardware */ | 316 | /* values that are not changed by hardware */ |
314 | u32 sar; | 317 | __dw32 sar; |
315 | u32 dar; | 318 | __dw32 dar; |
316 | u32 llp; /* chain to next lli */ | 319 | __dw32 llp; /* chain to next lli */ |
317 | u32 ctllo; | 320 | __dw32 ctllo; |
318 | /* values that may get written back: */ | 321 | /* values that may get written back: */ |
319 | u32 ctlhi; | 322 | __dw32 ctlhi; |
320 | /* sstat and dstat can snapshot peripheral register state. | 323 | /* sstat and dstat can snapshot peripheral register state. |
321 | * silicon config may discard either or both... | 324 | * silicon config may discard either or both... |
322 | */ | 325 | */ |
323 | u32 sstat; | 326 | __dw32 sstat; |
324 | u32 dstat; | 327 | __dw32 dstat; |
325 | }; | 328 | }; |
326 | 329 | ||
327 | struct dw_desc { | 330 | struct dw_desc { |
328 | /* FIRST values the hardware uses */ | 331 | /* FIRST values the hardware uses */ |
329 | struct dw_lli lli; | 332 | struct dw_lli lli; |
330 | 333 | ||
334 | #ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO | ||
335 | #define lli_set(d, reg, v) ((d)->lli.reg |= cpu_to_be32(v)) | ||
336 | #define lli_clear(d, reg, v) ((d)->lli.reg &= ~cpu_to_be32(v)) | ||
337 | #define lli_read(d, reg) be32_to_cpu((d)->lli.reg) | ||
338 | #define lli_write(d, reg, v) ((d)->lli.reg = cpu_to_be32(v)) | ||
339 | #else | ||
340 | #define lli_set(d, reg, v) ((d)->lli.reg |= cpu_to_le32(v)) | ||
341 | #define lli_clear(d, reg, v) ((d)->lli.reg &= ~cpu_to_le32(v)) | ||
342 | #define lli_read(d, reg) le32_to_cpu((d)->lli.reg) | ||
343 | #define lli_write(d, reg, v) ((d)->lli.reg = cpu_to_le32(v)) | ||
344 | #endif | ||
345 | |||
331 | /* THEN values for driver housekeeping */ | 346 | /* THEN values for driver housekeeping */ |
332 | struct list_head desc_node; | 347 | struct list_head desc_node; |
333 | struct list_head tx_list; | 348 | struct list_head tx_list; |
334 | struct dma_async_tx_descriptor txd; | 349 | struct dma_async_tx_descriptor txd; |
335 | size_t len; | 350 | size_t len; |
336 | size_t total_len; | 351 | size_t total_len; |
352 | u32 residue; | ||
337 | }; | 353 | }; |
338 | 354 | ||
339 | #define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node) | 355 | #define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node) |
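
Note on the dw/regs.h hunks above: the in-memory LLI fields are now typed __le32 or __be32 and every access goes through lli_read()/lli_write() style macros, so descriptor memory always holds the byte order the controller expects regardless of CPU endianness. A stand-alone sketch of the same idea using explicit conversion helpers; the struct layout and the DMA_DESC_BIG_ENDIAN flag are simplified stand-ins, not the driver's names.

#include <stdint.h>
#include <stdio.h>

/* stand-in for CONFIG_DW_DMAC_BIG_ENDIAN_IO: 1 if the engine reads its
 * descriptors big-endian, 0 if little-endian */
#define DMA_DESC_BIG_ENDIAN 0

struct lli {
	uint32_t sar;
	uint32_t dar;
	uint32_t llp;
};

/* store a CPU-order value in the byte order the hardware expects */
static uint32_t cpu_to_desc(uint32_t v)
{
	const union { uint16_t u16; uint8_t u8[2]; } probe = { .u16 = 1 };
	int cpu_is_little = probe.u8[0] == 1;

	if (cpu_is_little == !DMA_DESC_BIG_ENDIAN)
		return v;			/* orders already match */
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

static uint32_t desc_to_cpu(uint32_t v)
{
	return cpu_to_desc(v);			/* a byte swap is its own inverse */
}

int main(void)
{
	struct lli d = { 0 };

	d.llp = cpu_to_desc(0x12345678u);	/* the lli_write(desc, llp, ...) shape */
	printf("llp readback: 0x%08x\n", desc_to_cpu(d.llp));
	return 0;
}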
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index ee3463e774f8..694c44e487ed 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -1518,8 +1518,17 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data) | |||
1518 | 1518 | ||
1519 | dev_vdbg(ecc->dev, "dma_ccerr_handler\n"); | 1519 | dev_vdbg(ecc->dev, "dma_ccerr_handler\n"); |
1520 | 1520 | ||
1521 | if (!edma_error_pending(ecc)) | 1521 | if (!edma_error_pending(ecc)) { |
1522 | /* | ||
1523 | * The registers indicate no pending error event but the irq | ||
1524 | * handler has been called. | ||
1525 | * Ask eDMA to re-evaluate the error registers. | ||
1526 | */ | ||
1527 | dev_err(ecc->dev, "%s: Error interrupt without error event!\n", | ||
1528 | __func__); | ||
1529 | edma_write(ecc, EDMA_EEVAL, 1); | ||
1522 | return IRQ_NONE; | 1530 | return IRQ_NONE; |
1531 | } | ||
1523 | 1532 | ||
1524 | while (1) { | 1533 | while (1) { |
1525 | /* Event missed register(s) */ | 1534 | /* Event missed register(s) */ |
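
Note on the edma.c hunk above: when the error interrupt fires but the error status registers read back clean, the handler now logs the condition and writes EDMA_EEVAL so the controller re-evaluates its error registers instead of leaving a level interrupt stuck. A generic stand-alone sketch of that defensive pattern; the fake device struct and its fields are illustrative, not the driver's API.

#include <stdio.h>

/* toy device model standing in for memory-mapped eDMA registers */
struct fake_edma {
	unsigned int error_pending;	/* would be read from hardware */
	unsigned int eeval;		/* write 1 to request re-evaluation */
};

enum irq_result { IRQ_NONE, IRQ_HANDLED };

static enum irq_result error_irq_handler(struct fake_edma *ecc)
{
	if (!ecc->error_pending) {
		/*
		 * Interrupt fired but no error is flagged: ask the
		 * controller to re-evaluate its error registers and
		 * report the interrupt as not ours.
		 */
		fprintf(stderr, "error interrupt without error event\n");
		ecc->eeval = 1;
		return IRQ_NONE;
	}

	/* a real handler would walk the event-missed registers here */
	ecc->error_pending = 0;
	return IRQ_HANDLED;
}

int main(void)
{
	struct fake_edma ecc = { .error_pending = 0 };

	printf("handler returned %d, eeval=%u\n",
	       error_irq_handler(&ecc), ecc.eeval);
	return 0;
}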
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index aac85c30c2cf..a8828ed639b3 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -462,13 +462,12 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) | |||
462 | struct fsl_desc_sw *desc; | 462 | struct fsl_desc_sw *desc; |
463 | dma_addr_t pdesc; | 463 | dma_addr_t pdesc; |
464 | 464 | ||
465 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); | 465 | desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc); |
466 | if (!desc) { | 466 | if (!desc) { |
467 | chan_dbg(chan, "out of memory for link descriptor\n"); | 467 | chan_dbg(chan, "out of memory for link descriptor\n"); |
468 | return NULL; | 468 | return NULL; |
469 | } | 469 | } |
470 | 470 | ||
471 | memset(desc, 0, sizeof(*desc)); | ||
472 | INIT_LIST_HEAD(&desc->tx_list); | 471 | INIT_LIST_HEAD(&desc->tx_list); |
473 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); | 472 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); |
474 | desc->async_tx.tx_submit = fsl_dma_tx_submit; | 473 | desc->async_tx.tx_submit = fsl_dma_tx_submit; |
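
Note on the fsldma.c hunk above (and the matching ioat and mmp_pdma hunks below): the change only folds the allocate-then-memset pair into the zeroing allocator, so the descriptor comes back already cleared. A stand-alone sketch of the before/after shape of such a refactor, with plain calloc standing in for dma_pool_zalloc().

#include <stdlib.h>
#include <string.h>

struct desc {
	int cookie;
	void *next;
};

/* before: allocate, then clear by hand */
static struct desc *alloc_desc_old(void)
{
	struct desc *d = malloc(sizeof(*d));

	if (!d)
		return NULL;
	memset(d, 0, sizeof(*d));
	return d;
}

/* after: a single call that hands back zeroed memory
 * (calloc() here, dma_pool_zalloc() in the driver) */
static struct desc *alloc_desc_new(void)
{
	return calloc(1, sizeof(struct desc));
}

int main(void)
{
	struct desc *a = alloc_desc_old();
	struct desc *b = alloc_desc_new();

	free(a);
	free(b);
	return 0;
}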
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c index eef145edb936..59d1e7c6fd0f 100644 --- a/drivers/dma/hsu/hsu.c +++ b/drivers/dma/hsu/hsu.c | |||
@@ -77,8 +77,8 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc) | |||
77 | hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr); | 77 | hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr); |
78 | 78 | ||
79 | /* Set descriptors */ | 79 | /* Set descriptors */ |
80 | count = (desc->nents - desc->active) % HSU_DMA_CHAN_NR_DESC; | 80 | count = desc->nents - desc->active; |
81 | for (i = 0; i < count; i++) { | 81 | for (i = 0; i < count && i < HSU_DMA_CHAN_NR_DESC; i++) { |
82 | hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr); | 82 | hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr); |
83 | hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len); | 83 | hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len); |
84 | 84 | ||
@@ -160,7 +160,7 @@ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr) | |||
160 | return IRQ_NONE; | 160 | return IRQ_NONE; |
161 | 161 | ||
162 | /* Timeout IRQ, need wait some time, see Errata 2 */ | 162 | /* Timeout IRQ, need wait some time, see Errata 2 */ |
163 | if (hsuc->direction == DMA_DEV_TO_MEM && (sr & HSU_CH_SR_DESCTO_ANY)) | 163 | if (sr & HSU_CH_SR_DESCTO_ANY) |
164 | udelay(2); | 164 | udelay(2); |
165 | 165 | ||
166 | sr &= ~HSU_CH_SR_DESCTO_ANY; | 166 | sr &= ~HSU_CH_SR_DESCTO_ANY; |
@@ -417,6 +417,8 @@ int hsu_dma_probe(struct hsu_dma_chip *chip) | |||
417 | 417 | ||
418 | hsu->dma.dev = chip->dev; | 418 | hsu->dma.dev = chip->dev; |
419 | 419 | ||
420 | dma_set_max_seg_size(hsu->dma.dev, HSU_CH_DxTSR_MASK); | ||
421 | |||
420 | ret = dma_async_device_register(&hsu->dma); | 422 | ret = dma_async_device_register(&hsu->dma); |
421 | if (ret) | 423 | if (ret) |
422 | return ret; | 424 | return ret; |
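
Note on the hsu.c start hunk above: the number of descriptors programmed per burst used to be computed with a modulo, which drops to zero whenever the remaining count is an exact multiple of HSU_DMA_CHAN_NR_DESC; the fix walks the remaining descriptors and lets the loop bound clamp at the hardware limit. A stand-alone sketch of the difference; NR_DESC is taken as 4 purely for the demonstration.

#include <stdio.h>

#define NR_DESC 4	/* stands in for HSU_DMA_CHAN_NR_DESC */

static unsigned int programmed_old(unsigned int remaining)
{
	return remaining % NR_DESC;		/* 8 % 4 == 0: nothing queued */
}

static unsigned int programmed_new(unsigned int remaining)
{
	unsigned int i, n = 0;

	for (i = 0; i < remaining && i < NR_DESC; i++)
		n++;				/* clamps at the hardware limit */
	return n;
}

int main(void)
{
	unsigned int remaining;

	for (remaining = 1; remaining <= 9; remaining++)
		printf("remaining=%u old=%u new=%u\n", remaining,
		       programmed_old(remaining), programmed_new(remaining));
	return 0;
}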
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h index 578a8ee8cd05..50a9d1bda253 100644 --- a/drivers/dma/hsu/hsu.h +++ b/drivers/dma/hsu/hsu.h | |||
@@ -55,6 +55,10 @@ | |||
55 | #define HSU_CH_DCR_CHEI BIT(23) | 55 | #define HSU_CH_DCR_CHEI BIT(23) |
56 | #define HSU_CH_DCR_CHTOI(x) BIT(24 + (x)) | 56 | #define HSU_CH_DCR_CHTOI(x) BIT(24 + (x)) |
57 | 57 | ||
58 | /* Bits in HSU_CH_DxTSR */ | ||
59 | #define HSU_CH_DxTSR_MASK GENMASK(15, 0) | ||
60 | #define HSU_CH_DxTSR_TSR(x) ((x) & HSU_CH_DxTSR_MASK) | ||
61 | |||
58 | struct hsu_dma_sg { | 62 | struct hsu_dma_sg { |
59 | dma_addr_t addr; | 63 | dma_addr_t addr; |
60 | unsigned int len; | 64 | unsigned int len; |
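
Note on the hsu.h hunk above: HSU_CH_DxTSR_MASK is GENMASK(15, 0), i.e. the transfer-size field is 16 bits wide, which is also why the hsu.c hunk above caps the DMA segment size at that value. A quick stand-alone check of the mask arithmetic; GENMASK is re-derived locally for the sketch.

#include <assert.h>
#include <stdio.h>

/* same formula as the kernel's GENMASK() specialised for 32-bit values */
#define GENMASK32(h, l) \
	(((~0u) - (1u << (l)) + 1) & (~0u >> (31 - (h))))

int main(void)
{
	unsigned int mask = GENMASK32(15, 0);

	assert(mask == 0xffffu);
	/* the largest length that fits in DxTSR, i.e. the max segment size */
	printf("max transfer per descriptor: %u bytes\n", mask);
	return 0;
}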
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index efdee1a69fc4..d406056e8892 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c | |||
@@ -690,12 +690,11 @@ static int ioat_alloc_chan_resources(struct dma_chan *c) | |||
690 | /* allocate a completion writeback area */ | 690 | /* allocate a completion writeback area */ |
691 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ | 691 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ |
692 | ioat_chan->completion = | 692 | ioat_chan->completion = |
693 | dma_pool_alloc(ioat_chan->ioat_dma->completion_pool, | 693 | dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool, |
694 | GFP_KERNEL, &ioat_chan->completion_dma); | 694 | GFP_KERNEL, &ioat_chan->completion_dma); |
695 | if (!ioat_chan->completion) | 695 | if (!ioat_chan->completion) |
696 | return -ENOMEM; | 696 | return -ENOMEM; |
697 | 697 | ||
698 | memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion)); | ||
699 | writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF, | 698 | writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF, |
700 | ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); | 699 | ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); |
701 | writel(((u64)ioat_chan->completion_dma) >> 32, | 700 | writel(((u64)ioat_chan->completion_dma) >> 32, |
@@ -1074,6 +1073,7 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) | |||
1074 | struct ioatdma_chan *ioat_chan; | 1073 | struct ioatdma_chan *ioat_chan; |
1075 | bool is_raid_device = false; | 1074 | bool is_raid_device = false; |
1076 | int err; | 1075 | int err; |
1076 | u16 val16; | ||
1077 | 1077 | ||
1078 | dma = &ioat_dma->dma_dev; | 1078 | dma = &ioat_dma->dma_dev; |
1079 | dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock; | 1079 | dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock; |
@@ -1173,6 +1173,17 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) | |||
1173 | if (dca) | 1173 | if (dca) |
1174 | ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base); | 1174 | ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base); |
1175 | 1175 | ||
1176 | /* disable relaxed ordering */ | ||
1177 | err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16); | ||
1178 | if (err) | ||
1179 | return err; | ||
1180 | |||
1181 | /* clear relaxed ordering enable */ | ||
1182 | val16 &= ~IOAT_DEVCTRL_ROE; | ||
1183 | err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16); | ||
1184 | if (err) | ||
1185 | return err; | ||
1186 | |||
1176 | return 0; | 1187 | return 0; |
1177 | } | 1188 | } |
1178 | 1189 | ||
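
Note on the ioat/init.c hunk above: the relaxed-ordering enable bit is cleared with a read-modify-write of the PCIe Device Control register through the pcie_capability_*() helpers. The same read, clear-bit, write-back shape in a stand-alone sketch; the accessor functions and the backing value are dummies standing in for the PCIe config-space accessors.

#include <stdint.h>
#include <stdio.h>

#define DEVCTRL_ROE 0x10	/* relaxed ordering enable, as in the hunk above */

/* dummy config-space backing store standing in for a real device */
static uint16_t fake_devctrl = 0x2810;

static int cfg_read16(uint16_t *val)
{
	*val = fake_devctrl;
	return 0;
}

static int cfg_write16(uint16_t val)
{
	fake_devctrl = val;
	return 0;
}

static int disable_relaxed_ordering(void)
{
	uint16_t val;
	int err;

	err = cfg_read16(&val);		/* pcie_capability_read_word() role */
	if (err)
		return err;

	val &= ~DEVCTRL_ROE;		/* clear relaxed ordering enable */

	return cfg_write16(val);	/* pcie_capability_write_word() role */
}

int main(void)
{
	disable_relaxed_ordering();
	printf("devctrl now 0x%04x\n", fake_devctrl);
	return 0;
}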
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 4994a3623aee..70534981a49b 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h | |||
@@ -26,6 +26,13 @@ | |||
26 | #define IOAT_PCI_CHANERR_INT_OFFSET 0x180 | 26 | #define IOAT_PCI_CHANERR_INT_OFFSET 0x180 |
27 | #define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184 | 27 | #define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184 |
28 | 28 | ||
29 | /* PCIe config registers */ | ||
30 | |||
31 | /* EXPCAPID + N */ | ||
32 | #define IOAT_DEVCTRL_OFFSET 0x8 | ||
33 | /* relaxed ordering enable */ | ||
34 | #define IOAT_DEVCTRL_ROE 0x10 | ||
35 | |||
29 | /* MMIO Device Registers */ | 36 | /* MMIO Device Registers */ |
30 | #define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */ | 37 | #define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */ |
31 | 38 | ||
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index e39457f13d4d..56f1fd68b620 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c | |||
@@ -364,13 +364,12 @@ mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan) | |||
364 | struct mmp_pdma_desc_sw *desc; | 364 | struct mmp_pdma_desc_sw *desc; |
365 | dma_addr_t pdesc; | 365 | dma_addr_t pdesc; |
366 | 366 | ||
367 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); | 367 | desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc); |
368 | if (!desc) { | 368 | if (!desc) { |
369 | dev_err(chan->dev, "out of memory for link descriptor\n"); | 369 | dev_err(chan->dev, "out of memory for link descriptor\n"); |
370 | return NULL; | 370 | return NULL; |
371 | } | 371 | } |
372 | 372 | ||
373 | memset(desc, 0, sizeof(*desc)); | ||
374 | INIT_LIST_HEAD(&desc->tx_list); | 373 | INIT_LIST_HEAD(&desc->tx_list); |
375 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); | 374 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); |
376 | /* each desc has submit */ | 375 | /* each desc has submit */ |
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index aae76fb39adc..ccadafa51d5e 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * Copyright (C) Semihalf 2009 | 3 | * Copyright (C) Semihalf 2009 |
4 | * Copyright (C) Ilya Yanok, Emcraft Systems 2010 | 4 | * Copyright (C) Ilya Yanok, Emcraft Systems 2010 |
5 | * Copyright (C) Alexander Popov, Promcontroller 2014 | 5 | * Copyright (C) Alexander Popov, Promcontroller 2014 |
6 | * Copyright (C) Mario Six, Guntermann & Drunck GmbH, 2016 | ||
6 | * | 7 | * |
7 | * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description | 8 | * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description |
8 | * (defines, structures and comments) was taken from MPC5121 DMA driver | 9 | * (defines, structures and comments) was taken from MPC5121 DMA driver |
@@ -26,18 +27,19 @@ | |||
26 | */ | 27 | */ |
27 | 28 | ||
28 | /* | 29 | /* |
29 | * MPC512x and MPC8308 DMA driver. It supports | 30 | * MPC512x and MPC8308 DMA driver. It supports memory to memory data transfers |
30 | * memory to memory data transfers (tested using dmatest module) and | 31 | * (tested using dmatest module) and data transfers between memory and |
31 | * data transfers between memory and peripheral I/O memory | 32 | * peripheral I/O memory by means of slave scatter/gather with these |
32 | * by means of slave scatter/gather with these limitations: | 33 | * limitations: |
33 | * - chunked transfers (described by s/g lists with more than one item) | 34 | * - chunked transfers (described by s/g lists with more than one item) are |
34 | * are refused as long as proper support for scatter/gather is missing; | 35 | * refused as long as proper support for scatter/gather is missing |
35 | * - transfers on MPC8308 always start from software as this SoC appears | 36 | * - transfers on MPC8308 always start from software as this SoC does not have |
36 | * not to have external request lines for peripheral flow control; | 37 | * external request lines for peripheral flow control |
37 | * - only peripheral devices with 4-byte FIFO access register are supported; | 38 | * - memory <-> I/O memory transfer chunks of sizes of 1, 2, 4, 16 (for |
38 | * - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently | 39 | * MPC512x), and 32 bytes are supported, and, consequently, source |
39 | * source and destination addresses must be 4-byte aligned | 40 | * addresses and destination addresses must be aligned accordingly; |
40 | * and transfer size must be aligned on (4 * maxburst) boundary; | 41 | * furthermore, for MPC512x SoCs, the transfer size must be aligned on |
42 | * (chunk size * maxburst) | ||
41 | */ | 43 | */ |
42 | 44 | ||
43 | #include <linux/module.h> | 45 | #include <linux/module.h> |
@@ -213,8 +215,10 @@ struct mpc_dma_chan { | |||
213 | /* Settings for access to peripheral FIFO */ | 215 | /* Settings for access to peripheral FIFO */ |
214 | dma_addr_t src_per_paddr; | 216 | dma_addr_t src_per_paddr; |
215 | u32 src_tcd_nunits; | 217 | u32 src_tcd_nunits; |
218 | u8 swidth; | ||
216 | dma_addr_t dst_per_paddr; | 219 | dma_addr_t dst_per_paddr; |
217 | u32 dst_tcd_nunits; | 220 | u32 dst_tcd_nunits; |
221 | u8 dwidth; | ||
218 | 222 | ||
219 | /* Lock for this structure */ | 223 | /* Lock for this structure */ |
220 | spinlock_t lock; | 224 | spinlock_t lock; |
@@ -247,6 +251,7 @@ static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c) | |||
247 | static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c) | 251 | static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c) |
248 | { | 252 | { |
249 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c); | 253 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c); |
254 | |||
250 | return container_of(mchan, struct mpc_dma, channels[c->chan_id]); | 255 | return container_of(mchan, struct mpc_dma, channels[c->chan_id]); |
251 | } | 256 | } |
252 | 257 | ||
@@ -254,9 +259,9 @@ static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c) | |||
254 | * Execute all queued DMA descriptors. | 259 | * Execute all queued DMA descriptors. |
255 | * | 260 | * |
256 | * Following requirements must be met while calling mpc_dma_execute(): | 261 | * Following requirements must be met while calling mpc_dma_execute(): |
257 | * a) mchan->lock is acquired, | 262 | * a) mchan->lock is acquired, |
258 | * b) mchan->active list is empty, | 263 | * b) mchan->active list is empty, |
259 | * c) mchan->queued list contains at least one entry. | 264 | * c) mchan->queued list contains at least one entry. |
260 | */ | 265 | */ |
261 | static void mpc_dma_execute(struct mpc_dma_chan *mchan) | 266 | static void mpc_dma_execute(struct mpc_dma_chan *mchan) |
262 | { | 267 | { |
@@ -446,20 +451,15 @@ static void mpc_dma_tasklet(unsigned long data) | |||
446 | if (es & MPC_DMA_DMAES_SAE) | 451 | if (es & MPC_DMA_DMAES_SAE) |
447 | dev_err(mdma->dma.dev, "- Source Address Error\n"); | 452 | dev_err(mdma->dma.dev, "- Source Address Error\n"); |
448 | if (es & MPC_DMA_DMAES_SOE) | 453 | if (es & MPC_DMA_DMAES_SOE) |
449 | dev_err(mdma->dma.dev, "- Source Offset" | 454 | dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n"); |
450 | " Configuration Error\n"); | ||
451 | if (es & MPC_DMA_DMAES_DAE) | 455 | if (es & MPC_DMA_DMAES_DAE) |
452 | dev_err(mdma->dma.dev, "- Destination Address" | 456 | dev_err(mdma->dma.dev, "- Destination Address Error\n"); |
453 | " Error\n"); | ||
454 | if (es & MPC_DMA_DMAES_DOE) | 457 | if (es & MPC_DMA_DMAES_DOE) |
455 | dev_err(mdma->dma.dev, "- Destination Offset" | 458 | dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n"); |
456 | " Configuration Error\n"); | ||
457 | if (es & MPC_DMA_DMAES_NCE) | 459 | if (es & MPC_DMA_DMAES_NCE) |
458 | dev_err(mdma->dma.dev, "- NBytes/Citter" | 460 | dev_err(mdma->dma.dev, "- NBytes/Citter Configuration Error\n"); |
459 | " Configuration Error\n"); | ||
460 | if (es & MPC_DMA_DMAES_SGE) | 461 | if (es & MPC_DMA_DMAES_SGE) |
461 | dev_err(mdma->dma.dev, "- Scatter/Gather" | 462 | dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n"); |
462 | " Configuration Error\n"); | ||
463 | if (es & MPC_DMA_DMAES_SBE) | 463 | if (es & MPC_DMA_DMAES_SBE) |
464 | dev_err(mdma->dma.dev, "- Source Bus Error\n"); | 464 | dev_err(mdma->dma.dev, "- Source Bus Error\n"); |
465 | if (es & MPC_DMA_DMAES_DBE) | 465 | if (es & MPC_DMA_DMAES_DBE) |
@@ -518,8 +518,8 @@ static int mpc_dma_alloc_chan_resources(struct dma_chan *chan) | |||
518 | for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) { | 518 | for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) { |
519 | mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL); | 519 | mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL); |
520 | if (!mdesc) { | 520 | if (!mdesc) { |
521 | dev_notice(mdma->dma.dev, "Memory allocation error. " | 521 | dev_notice(mdma->dma.dev, |
522 | "Allocated only %u descriptors\n", i); | 522 | "Memory allocation error. Allocated only %u descriptors\n", i); |
523 | break; | 523 | break; |
524 | } | 524 | } |
525 | 525 | ||
@@ -684,6 +684,15 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | |||
684 | return &mdesc->desc; | 684 | return &mdesc->desc; |
685 | } | 685 | } |
686 | 686 | ||
687 | inline u8 buswidth_to_dmatsize(u8 buswidth) | ||
688 | { | ||
689 | u8 res; | ||
690 | |||
691 | for (res = 0; buswidth > 1; buswidth /= 2) | ||
692 | res++; | ||
693 | return res; | ||
694 | } | ||
695 | |||
687 | static struct dma_async_tx_descriptor * | 696 | static struct dma_async_tx_descriptor * |
688 | mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 697 | mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
689 | unsigned int sg_len, enum dma_transfer_direction direction, | 698 | unsigned int sg_len, enum dma_transfer_direction direction, |
@@ -742,39 +751,54 @@ mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
742 | 751 | ||
743 | memset(tcd, 0, sizeof(struct mpc_dma_tcd)); | 752 | memset(tcd, 0, sizeof(struct mpc_dma_tcd)); |
744 | 753 | ||
745 | if (!IS_ALIGNED(sg_dma_address(sg), 4)) | ||
746 | goto err_prep; | ||
747 | |||
748 | if (direction == DMA_DEV_TO_MEM) { | 754 | if (direction == DMA_DEV_TO_MEM) { |
749 | tcd->saddr = per_paddr; | 755 | tcd->saddr = per_paddr; |
750 | tcd->daddr = sg_dma_address(sg); | 756 | tcd->daddr = sg_dma_address(sg); |
757 | |||
758 | if (!IS_ALIGNED(sg_dma_address(sg), mchan->dwidth)) | ||
759 | goto err_prep; | ||
760 | |||
751 | tcd->soff = 0; | 761 | tcd->soff = 0; |
752 | tcd->doff = 4; | 762 | tcd->doff = mchan->dwidth; |
753 | } else { | 763 | } else { |
754 | tcd->saddr = sg_dma_address(sg); | 764 | tcd->saddr = sg_dma_address(sg); |
755 | tcd->daddr = per_paddr; | 765 | tcd->daddr = per_paddr; |
756 | tcd->soff = 4; | 766 | |
767 | if (!IS_ALIGNED(sg_dma_address(sg), mchan->swidth)) | ||
768 | goto err_prep; | ||
769 | |||
770 | tcd->soff = mchan->swidth; | ||
757 | tcd->doff = 0; | 771 | tcd->doff = 0; |
758 | } | 772 | } |
759 | 773 | ||
760 | tcd->ssize = MPC_DMA_TSIZE_4; | 774 | tcd->ssize = buswidth_to_dmatsize(mchan->swidth); |
761 | tcd->dsize = MPC_DMA_TSIZE_4; | 775 | tcd->dsize = buswidth_to_dmatsize(mchan->dwidth); |
762 | 776 | ||
763 | len = sg_dma_len(sg); | 777 | if (mdma->is_mpc8308) { |
764 | tcd->nbytes = tcd_nunits * 4; | 778 | tcd->nbytes = sg_dma_len(sg); |
765 | if (!IS_ALIGNED(len, tcd->nbytes)) | 779 | if (!IS_ALIGNED(tcd->nbytes, mchan->swidth)) |
766 | goto err_prep; | 780 | goto err_prep; |
767 | 781 | ||
768 | iter = len / tcd->nbytes; | 782 | /* No major loops for MPC8308 */ |
769 | if (iter >= 1 << 15) { | 783 | tcd->biter = 1; |
770 | /* len is too big */ | 784 | tcd->citer = 1; |
771 | goto err_prep; | 785 | } else { |
786 | len = sg_dma_len(sg); | ||
787 | tcd->nbytes = tcd_nunits * tcd->ssize; | ||
788 | if (!IS_ALIGNED(len, tcd->nbytes)) | ||
789 | goto err_prep; | ||
790 | |||
791 | iter = len / tcd->nbytes; | ||
792 | if (iter >= 1 << 15) { | ||
793 | /* len is too big */ | ||
794 | goto err_prep; | ||
795 | } | ||
796 | /* citer_linkch contains the high bits of iter */ | ||
797 | tcd->biter = iter & 0x1ff; | ||
798 | tcd->biter_linkch = iter >> 9; | ||
799 | tcd->citer = tcd->biter; | ||
800 | tcd->citer_linkch = tcd->biter_linkch; | ||
772 | } | 801 | } |
773 | /* citer_linkch contains the high bits of iter */ | ||
774 | tcd->biter = iter & 0x1ff; | ||
775 | tcd->biter_linkch = iter >> 9; | ||
776 | tcd->citer = tcd->biter; | ||
777 | tcd->citer_linkch = tcd->biter_linkch; | ||
778 | 802 | ||
779 | tcd->e_sg = 0; | 803 | tcd->e_sg = 0; |
780 | tcd->d_req = 1; | 804 | tcd->d_req = 1; |
@@ -796,40 +820,62 @@ err_prep: | |||
796 | return NULL; | 820 | return NULL; |
797 | } | 821 | } |
798 | 822 | ||
823 | inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308) | ||
824 | { | ||
825 | switch (buswidth) { | ||
826 | case 16: | ||
827 | if (is_mpc8308) | ||
828 | return false; | ||
829 | case 1: | ||
830 | case 2: | ||
831 | case 4: | ||
832 | case 32: | ||
833 | break; | ||
834 | default: | ||
835 | return false; | ||
836 | } | ||
837 | |||
838 | return true; | ||
839 | } | ||
840 | |||
799 | static int mpc_dma_device_config(struct dma_chan *chan, | 841 | static int mpc_dma_device_config(struct dma_chan *chan, |
800 | struct dma_slave_config *cfg) | 842 | struct dma_slave_config *cfg) |
801 | { | 843 | { |
802 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); | 844 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); |
845 | struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan); | ||
803 | unsigned long flags; | 846 | unsigned long flags; |
804 | 847 | ||
805 | /* | 848 | /* |
806 | * Software constraints: | 849 | * Software constraints: |
807 | * - only transfers between a peripheral device and | 850 | * - only transfers between a peripheral device and memory are |
808 | * memory are supported; | 851 | * supported |
809 | * - only peripheral devices with 4-byte FIFO access register | 852 | * - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes |
810 | * are supported; | 853 | * are supported, and, consequently, source addresses and |
811 | * - minimal transfer chunk is 4 bytes and consequently | 854 | * destination addresses must be aligned accordingly; furthermore, |
812 | * source and destination addresses must be 4-byte aligned | 855 | * for MPC512x SoCs, the transfer size must be aligned on (chunk |
813 | * and transfer size must be aligned on (4 * maxburst) | 856 | * size * maxburst) |
814 | * boundary; | 857 | * - during the transfer, the RAM address is incremented by the size |
815 | * - during the transfer RAM address is being incremented by | 858 | * of transfer chunk |
816 | * the size of minimal transfer chunk; | 859 | * - the peripheral port's address is constant during the transfer. |
817 | * - peripheral port's address is constant during the transfer. | ||
818 | */ | 860 | */ |
819 | 861 | ||
820 | if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || | 862 | if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) || |
821 | cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || | 863 | !IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width)) { |
822 | !IS_ALIGNED(cfg->src_addr, 4) || | ||
823 | !IS_ALIGNED(cfg->dst_addr, 4)) { | ||
824 | return -EINVAL; | 864 | return -EINVAL; |
825 | } | 865 | } |
826 | 866 | ||
867 | if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) || | ||
868 | !is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308)) | ||
869 | return -EINVAL; | ||
870 | |||
827 | spin_lock_irqsave(&mchan->lock, flags); | 871 | spin_lock_irqsave(&mchan->lock, flags); |
828 | 872 | ||
829 | mchan->src_per_paddr = cfg->src_addr; | 873 | mchan->src_per_paddr = cfg->src_addr; |
830 | mchan->src_tcd_nunits = cfg->src_maxburst; | 874 | mchan->src_tcd_nunits = cfg->src_maxburst; |
875 | mchan->swidth = cfg->src_addr_width; | ||
831 | mchan->dst_per_paddr = cfg->dst_addr; | 876 | mchan->dst_per_paddr = cfg->dst_addr; |
832 | mchan->dst_tcd_nunits = cfg->dst_maxburst; | 877 | mchan->dst_tcd_nunits = cfg->dst_maxburst; |
878 | mchan->dwidth = cfg->dst_addr_width; | ||
833 | 879 | ||
834 | /* Apply defaults */ | 880 | /* Apply defaults */ |
835 | if (mchan->src_tcd_nunits == 0) | 881 | if (mchan->src_tcd_nunits == 0) |
@@ -875,7 +921,6 @@ static int mpc_dma_probe(struct platform_device *op) | |||
875 | 921 | ||
876 | mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL); | 922 | mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL); |
877 | if (!mdma) { | 923 | if (!mdma) { |
878 | dev_err(dev, "Memory exhausted!\n"); | ||
879 | retval = -ENOMEM; | 924 | retval = -ENOMEM; |
880 | goto err; | 925 | goto err; |
881 | } | 926 | } |
@@ -999,7 +1044,8 @@ static int mpc_dma_probe(struct platform_device *op) | |||
999 | out_be32(&mdma->regs->dmaerrl, 0xFFFF); | 1044 | out_be32(&mdma->regs->dmaerrl, 0xFFFF); |
1000 | } else { | 1045 | } else { |
1001 | out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | | 1046 | out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | |
1002 | MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); | 1047 | MPC_DMA_DMACR_ERGA | |
1048 | MPC_DMA_DMACR_ERCA); | ||
1003 | 1049 | ||
1004 | /* Disable hardware DMA requests */ | 1050 | /* Disable hardware DMA requests */ |
1005 | out_be32(&mdma->regs->dmaerqh, 0); | 1051 | out_be32(&mdma->regs->dmaerqh, 0); |
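
Note on the mpc512x_dma.c hunks above: the two new helpers convert a bus width in bytes to the TCD transfer-size code (its log2) and reject widths the hardware cannot do, 16-byte chunks being MPC512x-only. A stand-alone sketch of the same mapping and validity check, printing the values the driver accepts.

#include <stdbool.h>
#include <stdio.h>

/* log2 of the bus width, i.e. the eDMA "transfer size" code */
static unsigned int buswidth_to_tsize(unsigned int buswidth)
{
	unsigned int res = 0;

	while (buswidth > 1) {
		buswidth /= 2;
		res++;
	}
	return res;
}

/* 1, 2, 4 and 32 bytes everywhere; 16 bytes only on MPC512x */
static bool buswidth_is_valid(unsigned int buswidth, bool is_mpc8308)
{
	switch (buswidth) {
	case 16:
		return !is_mpc8308;
	case 1:
	case 2:
	case 4:
	case 32:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	unsigned int widths[] = { 1, 2, 4, 8, 16, 32 };
	unsigned int i;

	for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
		printf("width %2u: tsize=%u mpc512x=%d mpc8308=%d\n",
		       widths[i], buswidth_to_tsize(widths[i]),
		       buswidth_is_valid(widths[i], false),
		       buswidth_is_valid(widths[i], true));
	return 0;
}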
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 3922a5d56806..25d1dadcddd1 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -31,6 +31,12 @@ | |||
31 | #include "dmaengine.h" | 31 | #include "dmaengine.h" |
32 | #include "mv_xor.h" | 32 | #include "mv_xor.h" |
33 | 33 | ||
34 | enum mv_xor_type { | ||
35 | XOR_ORION, | ||
36 | XOR_ARMADA_38X, | ||
37 | XOR_ARMADA_37XX, | ||
38 | }; | ||
39 | |||
34 | enum mv_xor_mode { | 40 | enum mv_xor_mode { |
35 | XOR_MODE_IN_REG, | 41 | XOR_MODE_IN_REG, |
36 | XOR_MODE_IN_DESC, | 42 | XOR_MODE_IN_DESC, |
@@ -477,7 +483,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | |||
477 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); | 483 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); |
478 | 484 | ||
479 | dev_dbg(mv_chan_to_devp(mv_chan), | 485 | dev_dbg(mv_chan_to_devp(mv_chan), |
480 | "%s src_cnt: %d len: %u dest %pad flags: %ld\n", | 486 | "%s src_cnt: %d len: %zu dest %pad flags: %ld\n", |
481 | __func__, src_cnt, len, &dest, flags); | 487 | __func__, src_cnt, len, &dest, flags); |
482 | 488 | ||
483 | sw_desc = mv_chan_alloc_slot(mv_chan); | 489 | sw_desc = mv_chan_alloc_slot(mv_chan); |
@@ -933,7 +939,7 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) | |||
933 | static struct mv_xor_chan * | 939 | static struct mv_xor_chan * |
934 | mv_xor_channel_add(struct mv_xor_device *xordev, | 940 | mv_xor_channel_add(struct mv_xor_device *xordev, |
935 | struct platform_device *pdev, | 941 | struct platform_device *pdev, |
936 | int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc) | 942 | int idx, dma_cap_mask_t cap_mask, int irq) |
937 | { | 943 | { |
938 | int ret = 0; | 944 | int ret = 0; |
939 | struct mv_xor_chan *mv_chan; | 945 | struct mv_xor_chan *mv_chan; |
@@ -945,7 +951,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
945 | 951 | ||
946 | mv_chan->idx = idx; | 952 | mv_chan->idx = idx; |
947 | mv_chan->irq = irq; | 953 | mv_chan->irq = irq; |
948 | mv_chan->op_in_desc = op_in_desc; | 954 | if (xordev->xor_type == XOR_ORION) |
955 | mv_chan->op_in_desc = XOR_MODE_IN_REG; | ||
956 | else | ||
957 | mv_chan->op_in_desc = XOR_MODE_IN_DESC; | ||
949 | 958 | ||
950 | dma_dev = &mv_chan->dmadev; | 959 | dma_dev = &mv_chan->dmadev; |
951 | 960 | ||
@@ -1085,6 +1094,33 @@ mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, | |||
1085 | writel(0, base + WINDOW_OVERRIDE_CTRL(1)); | 1094 | writel(0, base + WINDOW_OVERRIDE_CTRL(1)); |
1086 | } | 1095 | } |
1087 | 1096 | ||
1097 | static void | ||
1098 | mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev) | ||
1099 | { | ||
1100 | void __iomem *base = xordev->xor_high_base; | ||
1101 | u32 win_enable = 0; | ||
1102 | int i; | ||
1103 | |||
1104 | for (i = 0; i < 8; i++) { | ||
1105 | writel(0, base + WINDOW_BASE(i)); | ||
1106 | writel(0, base + WINDOW_SIZE(i)); | ||
1107 | if (i < 4) | ||
1108 | writel(0, base + WINDOW_REMAP_HIGH(i)); | ||
1109 | } | ||
1110 | /* | ||
1111 | * For Armada3700, open the default 4GB Mbus window. The DRAM- | ||
1112 | * related configuration is done at the AXIS level. | ||
1113 | */ | ||
1114 | writel(0xffff0000, base + WINDOW_SIZE(0)); | ||
1115 | win_enable |= 1; | ||
1116 | win_enable |= 3 << 16; | ||
1117 | |||
1118 | writel(win_enable, base + WINDOW_BAR_ENABLE(0)); | ||
1119 | writel(win_enable, base + WINDOW_BAR_ENABLE(1)); | ||
1120 | writel(0, base + WINDOW_OVERRIDE_CTRL(0)); | ||
1121 | writel(0, base + WINDOW_OVERRIDE_CTRL(1)); | ||
1122 | } | ||
1123 | |||
1088 | /* | 1124 | /* |
1089 | * Since this XOR driver is basically used only for RAID5, we don't | 1125 | * Since this XOR driver is basically used only for RAID5, we don't |
1090 | * need to care about synchronizing ->suspend with DMA activity, | 1126 | * need to care about synchronizing ->suspend with DMA activity, |
@@ -1129,6 +1165,11 @@ static int mv_xor_resume(struct platform_device *dev) | |||
1129 | XOR_INTR_MASK(mv_chan)); | 1165 | XOR_INTR_MASK(mv_chan)); |
1130 | } | 1166 | } |
1131 | 1167 | ||
1168 | if (xordev->xor_type == XOR_ARMADA_37XX) { | ||
1169 | mv_xor_conf_mbus_windows_a3700(xordev); | ||
1170 | return 0; | ||
1171 | } | ||
1172 | |||
1132 | dram = mv_mbus_dram_info(); | 1173 | dram = mv_mbus_dram_info(); |
1133 | if (dram) | 1174 | if (dram) |
1134 | mv_xor_conf_mbus_windows(xordev, dram); | 1175 | mv_xor_conf_mbus_windows(xordev, dram); |
@@ -1137,8 +1178,9 @@ static int mv_xor_resume(struct platform_device *dev) | |||
1137 | } | 1178 | } |
1138 | 1179 | ||
1139 | static const struct of_device_id mv_xor_dt_ids[] = { | 1180 | static const struct of_device_id mv_xor_dt_ids[] = { |
1140 | { .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG }, | 1181 | { .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION }, |
1141 | { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC }, | 1182 | { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X }, |
1183 | { .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX }, | ||
1142 | {}, | 1184 | {}, |
1143 | }; | 1185 | }; |
1144 | 1186 | ||
@@ -1152,7 +1194,6 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1152 | struct resource *res; | 1194 | struct resource *res; |
1153 | unsigned int max_engines, max_channels; | 1195 | unsigned int max_engines, max_channels; |
1154 | int i, ret; | 1196 | int i, ret; |
1155 | int op_in_desc; | ||
1156 | 1197 | ||
1157 | dev_notice(&pdev->dev, "Marvell shared XOR driver\n"); | 1198 | dev_notice(&pdev->dev, "Marvell shared XOR driver\n"); |
1158 | 1199 | ||
@@ -1180,12 +1221,30 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1180 | 1221 | ||
1181 | platform_set_drvdata(pdev, xordev); | 1222 | platform_set_drvdata(pdev, xordev); |
1182 | 1223 | ||
1224 | |||
1225 | /* | ||
1226 | * We need to know which type of XOR device we use before | ||
1227 | * setting up. In non-dt case it can only be the legacy one. | ||
1228 | */ | ||
1229 | xordev->xor_type = XOR_ORION; | ||
1230 | if (pdev->dev.of_node) { | ||
1231 | const struct of_device_id *of_id = | ||
1232 | of_match_device(mv_xor_dt_ids, | ||
1233 | &pdev->dev); | ||
1234 | |||
1235 | xordev->xor_type = (uintptr_t)of_id->data; | ||
1236 | } | ||
1237 | |||
1183 | /* | 1238 | /* |
1184 | * (Re-)program MBUS remapping windows if we are asked to. | 1239 | * (Re-)program MBUS remapping windows if we are asked to. |
1185 | */ | 1240 | */ |
1186 | dram = mv_mbus_dram_info(); | 1241 | if (xordev->xor_type == XOR_ARMADA_37XX) { |
1187 | if (dram) | 1242 | mv_xor_conf_mbus_windows_a3700(xordev); |
1188 | mv_xor_conf_mbus_windows(xordev, dram); | 1243 | } else { |
1244 | dram = mv_mbus_dram_info(); | ||
1245 | if (dram) | ||
1246 | mv_xor_conf_mbus_windows(xordev, dram); | ||
1247 | } | ||
1189 | 1248 | ||
1190 | /* Not all platforms can gate the clock, so it is not | 1249 | /* Not all platforms can gate the clock, so it is not |
1191 | * an error if the clock does not exist. | 1250 | * an error if the clock does not exist. |
@@ -1199,12 +1258,16 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1199 | * order for async_tx to perform well. So we limit the number | 1258 | * order for async_tx to perform well. So we limit the number |
1200 | * of engines and channels so that we take into account this | 1259 | * of engines and channels so that we take into account this |
1201 | * constraint. Note that we also want to use channels from | 1260 | * constraint. Note that we also want to use channels from |
1202 | * separate engines when possible. | 1261 | * separate engines when possible. For dual-CPU Armada 3700 |
1262 | * SoC with single XOR engine allow using its both channels. | ||
1203 | */ | 1263 | */ |
1204 | max_engines = num_present_cpus(); | 1264 | max_engines = num_present_cpus(); |
1205 | max_channels = min_t(unsigned int, | 1265 | if (xordev->xor_type == XOR_ARMADA_37XX) |
1206 | MV_XOR_MAX_CHANNELS, | 1266 | max_channels = num_present_cpus(); |
1207 | DIV_ROUND_UP(num_present_cpus(), 2)); | 1267 | else |
1268 | max_channels = min_t(unsigned int, | ||
1269 | MV_XOR_MAX_CHANNELS, | ||
1270 | DIV_ROUND_UP(num_present_cpus(), 2)); | ||
1208 | 1271 | ||
1209 | if (mv_xor_engine_count >= max_engines) | 1272 | if (mv_xor_engine_count >= max_engines) |
1210 | return 0; | 1273 | return 0; |
@@ -1212,15 +1275,11 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1212 | if (pdev->dev.of_node) { | 1275 | if (pdev->dev.of_node) { |
1213 | struct device_node *np; | 1276 | struct device_node *np; |
1214 | int i = 0; | 1277 | int i = 0; |
1215 | const struct of_device_id *of_id = | ||
1216 | of_match_device(mv_xor_dt_ids, | ||
1217 | &pdev->dev); | ||
1218 | 1278 | ||
1219 | for_each_child_of_node(pdev->dev.of_node, np) { | 1279 | for_each_child_of_node(pdev->dev.of_node, np) { |
1220 | struct mv_xor_chan *chan; | 1280 | struct mv_xor_chan *chan; |
1221 | dma_cap_mask_t cap_mask; | 1281 | dma_cap_mask_t cap_mask; |
1222 | int irq; | 1282 | int irq; |
1223 | op_in_desc = (int)of_id->data; | ||
1224 | 1283 | ||
1225 | if (i >= max_channels) | 1284 | if (i >= max_channels) |
1226 | continue; | 1285 | continue; |
@@ -1237,7 +1296,7 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1237 | } | 1296 | } |
1238 | 1297 | ||
1239 | chan = mv_xor_channel_add(xordev, pdev, i, | 1298 | chan = mv_xor_channel_add(xordev, pdev, i, |
1240 | cap_mask, irq, op_in_desc); | 1299 | cap_mask, irq); |
1241 | if (IS_ERR(chan)) { | 1300 | if (IS_ERR(chan)) { |
1242 | ret = PTR_ERR(chan); | 1301 | ret = PTR_ERR(chan); |
1243 | irq_dispose_mapping(irq); | 1302 | irq_dispose_mapping(irq); |
@@ -1266,8 +1325,7 @@ static int mv_xor_probe(struct platform_device *pdev) | |||
1266 | } | 1325 | } |
1267 | 1326 | ||
1268 | chan = mv_xor_channel_add(xordev, pdev, i, | 1327 | chan = mv_xor_channel_add(xordev, pdev, i, |
1269 | cd->cap_mask, irq, | 1328 | cd->cap_mask, irq); |
1270 | XOR_MODE_IN_REG); | ||
1271 | if (IS_ERR(chan)) { | 1329 | if (IS_ERR(chan)) { |
1272 | ret = PTR_ERR(chan); | 1330 | ret = PTR_ERR(chan); |
1273 | goto err_channel_add; | 1331 | goto err_channel_add; |
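
Note on the mv_xor.c hunks above: the engine variant now comes from the of_device_id data, and the single-engine Armada 3700 is allowed one channel per CPU instead of the usual one channel per two CPUs capped at MV_XOR_MAX_CHANNELS. A stand-alone sketch of that channel-count policy; the cap of 2 and the enum are stand-ins chosen for the demonstration, not taken from the header.

#include <stdio.h>

enum xor_type { XOR_ORION, XOR_ARMADA_38X, XOR_ARMADA_37XX };

#define MAX_CHANNELS 2	/* stands in for MV_XOR_MAX_CHANNELS */

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int max_channels(enum xor_type type, unsigned int nr_cpus)
{
	if (type == XOR_ARMADA_37XX)
		return nr_cpus;		/* single engine, use both of its channels */
	/* DIV_ROUND_UP(nr_cpus, 2), capped at the driver limit */
	return min_uint(MAX_CHANNELS, (nr_cpus + 1) / 2);
}

int main(void)
{
	printf("37xx, 2 CPUs: %u channels\n", max_channels(XOR_ARMADA_37XX, 2));
	printf("38x,  2 CPUs: %u channels\n", max_channels(XOR_ARMADA_38X, 2));
	printf("38x,  4 CPUs: %u channels\n", max_channels(XOR_ARMADA_38X, 4));
	return 0;
}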
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index c19fe30e5ae9..bf56e082e7cd 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h | |||
@@ -85,6 +85,7 @@ struct mv_xor_device { | |||
85 | void __iomem *xor_high_base; | 85 | void __iomem *xor_high_base; |
86 | struct clk *clk; | 86 | struct clk *clk; |
87 | struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS]; | 87 | struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS]; |
88 | int xor_type; | ||
88 | }; | 89 | }; |
89 | 90 | ||
90 | /** | 91 | /** |
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 1e1f2986eba8..faae0bfe1109 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c | |||
@@ -240,8 +240,9 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | |||
240 | struct of_phandle_args dma_spec; | 240 | struct of_phandle_args dma_spec; |
241 | struct of_dma *ofdma; | 241 | struct of_dma *ofdma; |
242 | struct dma_chan *chan; | 242 | struct dma_chan *chan; |
243 | int count, i; | 243 | int count, i, start; |
244 | int ret_no_channel = -ENODEV; | 244 | int ret_no_channel = -ENODEV; |
245 | static atomic_t last_index; | ||
245 | 246 | ||
246 | if (!np || !name) { | 247 | if (!np || !name) { |
247 | pr_err("%s: not enough information provided\n", __func__); | 248 | pr_err("%s: not enough information provided\n", __func__); |
@@ -259,8 +260,15 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | |||
259 | return ERR_PTR(-ENODEV); | 260 | return ERR_PTR(-ENODEV); |
260 | } | 261 | } |
261 | 262 | ||
263 | /* | ||
264 | * approximate an average distribution across multiple | ||
265 | * entries with the same name | ||
266 | */ | ||
267 | start = atomic_inc_return(&last_index); | ||
262 | for (i = 0; i < count; i++) { | 268 | for (i = 0; i < count; i++) { |
263 | if (of_dma_match_channel(np, name, i, &dma_spec)) | 269 | if (of_dma_match_channel(np, name, |
270 | (i + start) % count, | ||
271 | &dma_spec)) | ||
264 | continue; | 272 | continue; |
265 | 273 | ||
266 | mutex_lock(&of_dma_lock); | 274 | mutex_lock(&of_dma_lock); |
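
Note on the of-dma.c hunk above: of_dma_request_slave_channel() now starts its search at a position taken from a global atomic counter, so repeated requests for the same channel name are spread across all matching DT entries instead of always hitting the first one. A stand-alone sketch of that rotation over a list of candidates; the candidate array and the plain counter stand in for the DT entries and the atomic_t.

#include <stdbool.h>
#include <stdio.h>

#define NR_ENTRIES 3

static unsigned int last_index;		/* the driver uses an atomic_t here */
static bool busy[NR_ENTRIES];		/* stand-in for "this entry failed" */

/* walk all entries once, starting at a rotating offset */
static int pick_channel(void)
{
	unsigned int start = ++last_index;
	unsigned int i;

	for (i = 0; i < NR_ENTRIES; i++) {
		unsigned int idx = (i + start) % NR_ENTRIES;

		if (!busy[idx])
			return (int)idx;
	}
	return -1;
}

int main(void)
{
	int request;

	for (request = 0; request < 6; request++)
		printf("request %d -> entry %d\n", request, pick_channel());
	return 0;
}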
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 77c1c44009d8..e756a30ccba2 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c | |||
@@ -117,6 +117,7 @@ struct pxad_chan { | |||
117 | /* protected by vc->lock */ | 117 | /* protected by vc->lock */ |
118 | struct pxad_phy *phy; | 118 | struct pxad_phy *phy; |
119 | struct dma_pool *desc_pool; /* Descriptors pool */ | 119 | struct dma_pool *desc_pool; /* Descriptors pool */ |
120 | dma_cookie_t bus_error; | ||
120 | }; | 121 | }; |
121 | 122 | ||
122 | struct pxad_device { | 123 | struct pxad_device { |
@@ -563,6 +564,7 @@ static void pxad_launch_chan(struct pxad_chan *chan, | |||
563 | return; | 564 | return; |
564 | } | 565 | } |
565 | } | 566 | } |
567 | chan->bus_error = 0; | ||
566 | 568 | ||
567 | /* | 569 | /* |
568 | * Program the descriptor's address into the DMA controller, | 570 | * Program the descriptor's address into the DMA controller, |
@@ -666,6 +668,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id) | |||
666 | struct virt_dma_desc *vd, *tmp; | 668 | struct virt_dma_desc *vd, *tmp; |
667 | unsigned int dcsr; | 669 | unsigned int dcsr; |
668 | unsigned long flags; | 670 | unsigned long flags; |
671 | dma_cookie_t last_started = 0; | ||
669 | 672 | ||
670 | BUG_ON(!chan); | 673 | BUG_ON(!chan); |
671 | 674 | ||
@@ -678,6 +681,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id) | |||
678 | dev_dbg(&chan->vc.chan.dev->device, | 681 | dev_dbg(&chan->vc.chan.dev->device, |
679 | "%s(): checking txd %p[%x]: completed=%d\n", | 682 | "%s(): checking txd %p[%x]: completed=%d\n", |
680 | __func__, vd, vd->tx.cookie, is_desc_completed(vd)); | 683 | __func__, vd, vd->tx.cookie, is_desc_completed(vd)); |
684 | last_started = vd->tx.cookie; | ||
681 | if (to_pxad_sw_desc(vd)->cyclic) { | 685 | if (to_pxad_sw_desc(vd)->cyclic) { |
682 | vchan_cyclic_callback(vd); | 686 | vchan_cyclic_callback(vd); |
683 | break; | 687 | break; |
@@ -690,7 +694,12 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id) | |||
690 | } | 694 | } |
691 | } | 695 | } |
692 | 696 | ||
693 | if (dcsr & PXA_DCSR_STOPSTATE) { | 697 | if (dcsr & PXA_DCSR_BUSERR) { |
698 | chan->bus_error = last_started; | ||
699 | phy_disable(phy); | ||
700 | } | ||
701 | |||
702 | if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) { | ||
694 | dev_dbg(&chan->vc.chan.dev->device, | 703 | dev_dbg(&chan->vc.chan.dev->device, |
695 | "%s(): channel stopped, submitted_empty=%d issued_empty=%d", | 704 | "%s(): channel stopped, submitted_empty=%d issued_empty=%d", |
696 | __func__, | 705 | __func__, |
@@ -1249,6 +1258,9 @@ static enum dma_status pxad_tx_status(struct dma_chan *dchan, | |||
1249 | struct pxad_chan *chan = to_pxad_chan(dchan); | 1258 | struct pxad_chan *chan = to_pxad_chan(dchan); |
1250 | enum dma_status ret; | 1259 | enum dma_status ret; |
1251 | 1260 | ||
1261 | if (cookie == chan->bus_error) | ||
1262 | return DMA_ERROR; | ||
1263 | |||
1252 | ret = dma_cookie_status(dchan, cookie, txstate); | 1264 | ret = dma_cookie_status(dchan, cookie, txstate); |
1253 | if (likely(txstate && (ret != DMA_ERROR))) | 1265 | if (likely(txstate && (ret != DMA_ERROR))) |
1254 | dma_set_residue(txstate, pxad_residue(chan, cookie)); | 1266 | dma_set_residue(txstate, pxad_residue(chan, cookie)); |
@@ -1321,7 +1333,7 @@ static int pxad_init_phys(struct platform_device *op, | |||
1321 | return 0; | 1333 | return 0; |
1322 | } | 1334 | } |
1323 | 1335 | ||
1324 | static const struct of_device_id const pxad_dt_ids[] = { | 1336 | static const struct of_device_id pxad_dt_ids[] = { |
1325 | { .compatible = "marvell,pdma-1.0", }, | 1337 | { .compatible = "marvell,pdma-1.0", }, |
1326 | {} | 1338 | {} |
1327 | }; | 1339 | }; |
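With the bus-error cookie recorded above, pxad_tx_status() reports DMA_ERROR for the transfer that hit PXA_DCSR_BUSERR instead of letting it appear merely in progress. A hedged client-side sketch of how that status would be observed (chan and cookie are assumed to come from an earlier prep/submit sequence; this is not code from the patch):

    #include <linux/dmaengine.h>

    /* Returns true if the descriptor identified by "cookie" failed on the bus. */
    static bool pxad_client_transfer_failed(struct dma_chan *chan,
                                            dma_cookie_t cookie)
    {
            struct dma_tx_state state;

            /* The driver returns DMA_ERROR when cookie == chan->bus_error. */
            return dmaengine_tx_status(chan, cookie, &state) == DMA_ERROR;
    }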
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile index bfea6990229f..4bfc38b45220 100644 --- a/drivers/dma/qcom/Makefile +++ b/drivers/dma/qcom/Makefile | |||
@@ -1,3 +1,5 @@ | |||
1 | obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o | 1 | obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o |
2 | obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o | 2 | obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o |
3 | hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o | 3 | hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o |
4 | obj-$(CONFIG_QCOM_HIDMA) += hdma.o | ||
5 | hdma-objs := hidma_ll.o hidma.o hidma_dbg.o | ||
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index d5e0a9c3ad5d..969b48176745 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c | |||
@@ -342,7 +342,7 @@ static const struct reg_offset_data bam_v1_7_reg_info[] = { | |||
342 | 342 | ||
343 | #define BAM_DESC_FIFO_SIZE SZ_32K | 343 | #define BAM_DESC_FIFO_SIZE SZ_32K |
344 | #define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1) | 344 | #define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1) |
345 | #define BAM_MAX_DATA_SIZE (SZ_32K - 8) | 345 | #define BAM_FIFO_SIZE (SZ_32K - 8) |
346 | 346 | ||
347 | struct bam_chan { | 347 | struct bam_chan { |
348 | struct virt_dma_chan vc; | 348 | struct virt_dma_chan vc; |
@@ -387,6 +387,7 @@ struct bam_device { | |||
387 | 387 | ||
388 | /* execution environment ID, from DT */ | 388 | /* execution environment ID, from DT */ |
389 | u32 ee; | 389 | u32 ee; |
390 | bool controlled_remotely; | ||
390 | 391 | ||
391 | const struct reg_offset_data *layout; | 392 | const struct reg_offset_data *layout; |
392 | 393 | ||
@@ -458,7 +459,7 @@ static void bam_chan_init_hw(struct bam_chan *bchan, | |||
458 | */ | 459 | */ |
459 | writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)), | 460 | writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)), |
460 | bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR)); | 461 | bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR)); |
461 | writel_relaxed(BAM_DESC_FIFO_SIZE, | 462 | writel_relaxed(BAM_FIFO_SIZE, |
462 | bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES)); | 463 | bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES)); |
463 | 464 | ||
464 | /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */ | 465 | /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */ |
@@ -604,7 +605,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, | |||
604 | 605 | ||
605 | /* calculate number of required entries */ | 606 | /* calculate number of required entries */ |
606 | for_each_sg(sgl, sg, sg_len, i) | 607 | for_each_sg(sgl, sg, sg_len, i) |
607 | num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE); | 608 | num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE); |
608 | 609 | ||
609 | /* allocate enough room to accomodate the number of entries */ | 610 | /* allocate enough room to accomodate the number of entries */ |
610 | async_desc = kzalloc(sizeof(*async_desc) + | 611 | async_desc = kzalloc(sizeof(*async_desc) + |
@@ -635,10 +636,10 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, | |||
635 | desc->addr = cpu_to_le32(sg_dma_address(sg) + | 636 | desc->addr = cpu_to_le32(sg_dma_address(sg) + |
636 | curr_offset); | 637 | curr_offset); |
637 | 638 | ||
638 | if (remainder > BAM_MAX_DATA_SIZE) { | 639 | if (remainder > BAM_FIFO_SIZE) { |
639 | desc->size = cpu_to_le16(BAM_MAX_DATA_SIZE); | 640 | desc->size = cpu_to_le16(BAM_FIFO_SIZE); |
640 | remainder -= BAM_MAX_DATA_SIZE; | 641 | remainder -= BAM_FIFO_SIZE; |
641 | curr_offset += BAM_MAX_DATA_SIZE; | 642 | curr_offset += BAM_FIFO_SIZE; |
642 | } else { | 643 | } else { |
643 | desc->size = cpu_to_le16(remainder); | 644 | desc->size = cpu_to_le16(remainder); |
644 | remainder = 0; | 645 | remainder = 0; |
@@ -801,13 +802,17 @@ static irqreturn_t bam_dma_irq(int irq, void *data) | |||
801 | if (srcs & P_IRQ) | 802 | if (srcs & P_IRQ) |
802 | tasklet_schedule(&bdev->task); | 803 | tasklet_schedule(&bdev->task); |
803 | 804 | ||
804 | if (srcs & BAM_IRQ) | 805 | if (srcs & BAM_IRQ) { |
805 | clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS)); | 806 | clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS)); |
806 | 807 | ||
807 | /* don't allow reorder of the various accesses to the BAM registers */ | 808 | /* |
808 | mb(); | 809 | * don't allow reorder of the various accesses to the BAM |
810 | * registers | ||
811 | */ | ||
812 | mb(); | ||
809 | 813 | ||
810 | writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR)); | 814 | writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR)); |
815 | } | ||
811 | 816 | ||
812 | return IRQ_HANDLED; | 817 | return IRQ_HANDLED; |
813 | } | 818 | } |
@@ -1038,6 +1043,9 @@ static int bam_init(struct bam_device *bdev) | |||
1038 | val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); | 1043 | val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); |
1039 | bdev->num_channels = val & BAM_NUM_PIPES_MASK; | 1044 | bdev->num_channels = val & BAM_NUM_PIPES_MASK; |
1040 | 1045 | ||
1046 | if (bdev->controlled_remotely) | ||
1047 | return 0; | ||
1048 | |||
1041 | /* s/w reset bam */ | 1049 | /* s/w reset bam */ |
1042 | /* after reset all pipes are disabled and idle */ | 1050 | /* after reset all pipes are disabled and idle */ |
1043 | val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL)); | 1051 | val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL)); |
@@ -1125,6 +1133,9 @@ static int bam_dma_probe(struct platform_device *pdev) | |||
1125 | return ret; | 1133 | return ret; |
1126 | } | 1134 | } |
1127 | 1135 | ||
1136 | bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node, | ||
1137 | "qcom,controlled-remotely"); | ||
1138 | |||
1128 | bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk"); | 1139 | bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk"); |
1129 | if (IS_ERR(bdev->bamclk)) | 1140 | if (IS_ERR(bdev->bamclk)) |
1130 | return PTR_ERR(bdev->bamclk); | 1141 | return PTR_ERR(bdev->bamclk); |
@@ -1163,7 +1174,7 @@ static int bam_dma_probe(struct platform_device *pdev) | |||
1163 | /* set max dma segment size */ | 1174 | /* set max dma segment size */ |
1164 | bdev->common.dev = bdev->dev; | 1175 | bdev->common.dev = bdev->dev; |
1165 | bdev->common.dev->dma_parms = &bdev->dma_parms; | 1176 | bdev->common.dev->dma_parms = &bdev->dma_parms; |
1166 | ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE); | 1177 | ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE); |
1167 | if (ret) { | 1178 | if (ret) { |
1168 | dev_err(bdev->dev, "cannot set maximum segment size\n"); | 1179 | dev_err(bdev->dev, "cannot set maximum segment size\n"); |
1169 | goto err_bam_channel_exit; | 1180 | goto err_bam_channel_exit; |
@@ -1234,6 +1245,9 @@ static int bam_dma_remove(struct platform_device *pdev) | |||
1234 | bam_dma_terminate_all(&bdev->channels[i].vc.chan); | 1245 | bam_dma_terminate_all(&bdev->channels[i].vc.chan); |
1235 | tasklet_kill(&bdev->channels[i].vc.task); | 1246 | tasklet_kill(&bdev->channels[i].vc.task); |
1236 | 1247 | ||
1248 | if (!bdev->channels[i].fifo_virt) | ||
1249 | continue; | ||
1250 | |||
1237 | dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, | 1251 | dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, |
1238 | bdev->channels[i].fifo_virt, | 1252 | bdev->channels[i].fifo_virt, |
1239 | bdev->channels[i].fifo_phys); | 1253 | bdev->channels[i].fifo_phys); |
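For reference, BAM_FIFO_SIZE (SZ_32K - 8 = 32760 bytes) is now used both as the per-pipe FIFO size and as the maximum segment size, so a scatterlist entry larger than that is split into several hardware descriptors. A small stand-alone sketch of the DIV_ROUND_UP() math used in bam_prep_slave_sg() (illustration only):

    #include <stdio.h>

    #define SZ_32K          (32 * 1024)
    #define BAM_FIFO_SIZE   (SZ_32K - 8)                    /* 32760 bytes */
    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int sg_len = 100 * 1024;       /* one 100 KiB sg entry */

            /* 102400 bytes need 4 descriptors of at most 32760 bytes each. */
            printf("descriptors needed: %u\n",
                   DIV_ROUND_UP(sg_len, BAM_FIFO_SIZE));
            return 0;
    }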
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index cccc78efbca9..41b5c6dee713 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Qualcomm Technologies HIDMA DMA engine interface | 2 | * Qualcomm Technologies HIDMA DMA engine interface |
3 | * | 3 | * |
4 | * Copyright (c) 2015, The Linux Foundation. All rights reserved. | 4 | * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 and | 7 | * it under the terms of the GNU General Public License version 2 and |
@@ -404,7 +404,7 @@ static int hidma_terminate_channel(struct dma_chan *chan) | |||
404 | spin_unlock_irqrestore(&mchan->lock, irqflags); | 404 | spin_unlock_irqrestore(&mchan->lock, irqflags); |
405 | 405 | ||
406 | /* this suspends the existing transfer */ | 406 | /* this suspends the existing transfer */ |
407 | rc = hidma_ll_pause(dmadev->lldev); | 407 | rc = hidma_ll_disable(dmadev->lldev); |
408 | if (rc) { | 408 | if (rc) { |
409 | dev_err(dmadev->ddev.dev, "channel did not pause\n"); | 409 | dev_err(dmadev->ddev.dev, "channel did not pause\n"); |
410 | goto out; | 410 | goto out; |
@@ -427,7 +427,7 @@ static int hidma_terminate_channel(struct dma_chan *chan) | |||
427 | list_move(&mdesc->node, &mchan->free); | 427 | list_move(&mdesc->node, &mchan->free); |
428 | } | 428 | } |
429 | 429 | ||
430 | rc = hidma_ll_resume(dmadev->lldev); | 430 | rc = hidma_ll_enable(dmadev->lldev); |
431 | out: | 431 | out: |
432 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | 432 | pm_runtime_mark_last_busy(dmadev->ddev.dev); |
433 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | 433 | pm_runtime_put_autosuspend(dmadev->ddev.dev); |
@@ -488,7 +488,7 @@ static int hidma_pause(struct dma_chan *chan) | |||
488 | dmadev = to_hidma_dev(mchan->chan.device); | 488 | dmadev = to_hidma_dev(mchan->chan.device); |
489 | if (!mchan->paused) { | 489 | if (!mchan->paused) { |
490 | pm_runtime_get_sync(dmadev->ddev.dev); | 490 | pm_runtime_get_sync(dmadev->ddev.dev); |
491 | if (hidma_ll_pause(dmadev->lldev)) | 491 | if (hidma_ll_disable(dmadev->lldev)) |
492 | dev_warn(dmadev->ddev.dev, "channel did not stop\n"); | 492 | dev_warn(dmadev->ddev.dev, "channel did not stop\n"); |
493 | mchan->paused = true; | 493 | mchan->paused = true; |
494 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | 494 | pm_runtime_mark_last_busy(dmadev->ddev.dev); |
@@ -507,7 +507,7 @@ static int hidma_resume(struct dma_chan *chan) | |||
507 | dmadev = to_hidma_dev(mchan->chan.device); | 507 | dmadev = to_hidma_dev(mchan->chan.device); |
508 | if (mchan->paused) { | 508 | if (mchan->paused) { |
509 | pm_runtime_get_sync(dmadev->ddev.dev); | 509 | pm_runtime_get_sync(dmadev->ddev.dev); |
510 | rc = hidma_ll_resume(dmadev->lldev); | 510 | rc = hidma_ll_enable(dmadev->lldev); |
511 | if (!rc) | 511 | if (!rc) |
512 | mchan->paused = false; | 512 | mchan->paused = false; |
513 | else | 513 | else |
@@ -530,6 +530,43 @@ static irqreturn_t hidma_chirq_handler(int chirq, void *arg) | |||
530 | return hidma_ll_inthandler(chirq, lldev); | 530 | return hidma_ll_inthandler(chirq, lldev); |
531 | } | 531 | } |
532 | 532 | ||
533 | static ssize_t hidma_show_values(struct device *dev, | ||
534 | struct device_attribute *attr, char *buf) | ||
535 | { | ||
536 | struct platform_device *pdev = to_platform_device(dev); | ||
537 | struct hidma_dev *mdev = platform_get_drvdata(pdev); | ||
538 | |||
539 | buf[0] = 0; | ||
540 | |||
541 | if (strcmp(attr->attr.name, "chid") == 0) | ||
542 | sprintf(buf, "%d\n", mdev->chidx); | ||
543 | |||
544 | return strlen(buf); | ||
545 | } | ||
546 | |||
547 | static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, | ||
548 | int mode) | ||
549 | { | ||
550 | struct device_attribute *attrs; | ||
551 | char *name_copy; | ||
552 | |||
553 | attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute), | ||
554 | GFP_KERNEL); | ||
555 | if (!attrs) | ||
556 | return -ENOMEM; | ||
557 | |||
558 | name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL); | ||
559 | if (!name_copy) | ||
560 | return -ENOMEM; | ||
561 | |||
562 | attrs->attr.name = name_copy; | ||
563 | attrs->attr.mode = mode; | ||
564 | attrs->show = hidma_show_values; | ||
565 | sysfs_attr_init(&attrs->attr); | ||
566 | |||
567 | return device_create_file(dev->ddev.dev, attrs); | ||
568 | } | ||
569 | |||
533 | static int hidma_probe(struct platform_device *pdev) | 570 | static int hidma_probe(struct platform_device *pdev) |
534 | { | 571 | { |
535 | struct hidma_dev *dmadev; | 572 | struct hidma_dev *dmadev; |
@@ -644,6 +681,8 @@ static int hidma_probe(struct platform_device *pdev) | |||
644 | 681 | ||
645 | dmadev->irq = chirq; | 682 | dmadev->irq = chirq; |
646 | tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev); | 683 | tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev); |
684 | hidma_debug_init(dmadev); | ||
685 | hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO); | ||
647 | dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n"); | 686 | dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n"); |
648 | platform_set_drvdata(pdev, dmadev); | 687 | platform_set_drvdata(pdev, dmadev); |
649 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | 688 | pm_runtime_mark_last_busy(dmadev->ddev.dev); |
@@ -651,6 +690,7 @@ static int hidma_probe(struct platform_device *pdev) | |||
651 | return 0; | 690 | return 0; |
652 | 691 | ||
653 | uninit: | 692 | uninit: |
693 | hidma_debug_uninit(dmadev); | ||
654 | hidma_ll_uninit(dmadev->lldev); | 694 | hidma_ll_uninit(dmadev->lldev); |
655 | dmafree: | 695 | dmafree: |
656 | if (dmadev) | 696 | if (dmadev) |
@@ -668,6 +708,7 @@ static int hidma_remove(struct platform_device *pdev) | |||
668 | pm_runtime_get_sync(dmadev->ddev.dev); | 708 | pm_runtime_get_sync(dmadev->ddev.dev); |
669 | dma_async_device_unregister(&dmadev->ddev); | 709 | dma_async_device_unregister(&dmadev->ddev); |
670 | devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev); | 710 | devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev); |
711 | hidma_debug_uninit(dmadev); | ||
671 | hidma_ll_uninit(dmadev->lldev); | 712 | hidma_ll_uninit(dmadev->lldev); |
672 | hidma_free(dmadev); | 713 | hidma_free(dmadev); |
673 | 714 | ||
@@ -689,7 +730,6 @@ static const struct of_device_id hidma_match[] = { | |||
689 | {.compatible = "qcom,hidma-1.0",}, | 730 | {.compatible = "qcom,hidma-1.0",}, |
690 | {}, | 731 | {}, |
691 | }; | 732 | }; |
692 | |||
693 | MODULE_DEVICE_TABLE(of, hidma_match); | 733 | MODULE_DEVICE_TABLE(of, hidma_match); |
694 | 734 | ||
695 | static struct platform_driver hidma_driver = { | 735 | static struct platform_driver hidma_driver = { |
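The new hidma_show_values()/hidma_create_sysfs_entry() pair exports a read-only "chid" attribute on each HIDMA channel device. A user-space sketch of reading it (the exact device path is an assumption based on the platform device name; adjust it to the actual sysfs location on the target):

    #include <stdio.h>

    int main(void)
    {
            /* Assumed path; the real one depends on the platform device name. */
            FILE *f = fopen("/sys/devices/platform/hidma-0/chid", "r");
            int chid;

            if (!f)
                    return 1;
            if (fscanf(f, "%d", &chid) == 1)
                    printf("HIDMA channel id: %d\n", chid);
            fclose(f);
            return 0;
    }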
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h index 231e306f6d87..db413a5efc4e 100644 --- a/drivers/dma/qcom/hidma.h +++ b/drivers/dma/qcom/hidma.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Qualcomm Technologies HIDMA data structures | 2 | * Qualcomm Technologies HIDMA data structures |
3 | * | 3 | * |
4 | * Copyright (c) 2014, The Linux Foundation. All rights reserved. | 4 | * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 and | 7 | * it under the terms of the GNU General Public License version 2 and |
@@ -20,32 +20,29 @@ | |||
20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
21 | #include <linux/dmaengine.h> | 21 | #include <linux/dmaengine.h> |
22 | 22 | ||
23 | #define TRE_SIZE 32 /* each TRE is 32 bytes */ | 23 | #define HIDMA_TRE_SIZE 32 /* each TRE is 32 bytes */ |
24 | #define TRE_CFG_IDX 0 | 24 | #define HIDMA_TRE_CFG_IDX 0 |
25 | #define TRE_LEN_IDX 1 | 25 | #define HIDMA_TRE_LEN_IDX 1 |
26 | #define TRE_SRC_LOW_IDX 2 | 26 | #define HIDMA_TRE_SRC_LOW_IDX 2 |
27 | #define TRE_SRC_HI_IDX 3 | 27 | #define HIDMA_TRE_SRC_HI_IDX 3 |
28 | #define TRE_DEST_LOW_IDX 4 | 28 | #define HIDMA_TRE_DEST_LOW_IDX 4 |
29 | #define TRE_DEST_HI_IDX 5 | 29 | #define HIDMA_TRE_DEST_HI_IDX 5 |
30 | |||
31 | struct hidma_tx_status { | ||
32 | u8 err_info; /* error record in this transfer */ | ||
33 | u8 err_code; /* completion code */ | ||
34 | }; | ||
35 | 30 | ||
36 | struct hidma_tre { | 31 | struct hidma_tre { |
37 | atomic_t allocated; /* if this channel is allocated */ | 32 | atomic_t allocated; /* if this channel is allocated */ |
38 | bool queued; /* flag whether this is pending */ | 33 | bool queued; /* flag whether this is pending */ |
39 | u16 status; /* status */ | 34 | u16 status; /* status */ |
40 | u32 chidx; /* index of the tre */ | 35 | u32 idx; /* index of the tre */ |
41 | u32 dma_sig; /* signature of the tre */ | 36 | u32 dma_sig; /* signature of the tre */ |
42 | const char *dev_name; /* name of the device */ | 37 | const char *dev_name; /* name of the device */ |
43 | void (*callback)(void *data); /* requester callback */ | 38 | void (*callback)(void *data); /* requester callback */ |
44 | void *data; /* Data associated with this channel*/ | 39 | void *data; /* Data associated with this channel*/ |
45 | struct hidma_lldev *lldev; /* lldma device pointer */ | 40 | struct hidma_lldev *lldev; /* lldma device pointer */ |
46 | u32 tre_local[TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */ | 41 | u32 tre_local[HIDMA_TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */ |
47 | u32 tre_index; /* the offset where this was written*/ | 42 | u32 tre_index; /* the offset where this was written*/ |
48 | u32 int_flags; /* interrupt flags */ | 43 | u32 int_flags; /* interrupt flags */ |
44 | u8 err_info; /* error record in this transfer */ | ||
45 | u8 err_code; /* completion code */ | ||
49 | }; | 46 | }; |
50 | 47 | ||
51 | struct hidma_lldev { | 48 | struct hidma_lldev { |
@@ -61,22 +58,21 @@ struct hidma_lldev { | |||
61 | void __iomem *evca; /* Event Channel address */ | 58 | void __iomem *evca; /* Event Channel address */ |
62 | struct hidma_tre | 59 | struct hidma_tre |
63 | **pending_tre_list; /* Pointers to pending TREs */ | 60 | **pending_tre_list; /* Pointers to pending TREs */ |
64 | struct hidma_tx_status | ||
65 | *tx_status_list; /* Pointers to pending TREs status*/ | ||
66 | s32 pending_tre_count; /* Number of TREs pending */ | 61 | s32 pending_tre_count; /* Number of TREs pending */ |
67 | 62 | ||
68 | void *tre_ring; /* TRE ring */ | 63 | void *tre_ring; /* TRE ring */ |
69 | dma_addr_t tre_ring_handle; /* TRE ring to be shared with HW */ | 64 | dma_addr_t tre_dma; /* TRE ring to be shared with HW */ |
70 | u32 tre_ring_size; /* Byte size of the ring */ | 65 | u32 tre_ring_size; /* Byte size of the ring */ |
71 | u32 tre_processed_off; /* last processed TRE */ | 66 | u32 tre_processed_off; /* last processed TRE */ |
72 | 67 | ||
73 | void *evre_ring; /* EVRE ring */ | 68 | void *evre_ring; /* EVRE ring */ |
74 | dma_addr_t evre_ring_handle; /* EVRE ring to be shared with HW */ | 69 | dma_addr_t evre_dma; /* EVRE ring to be shared with HW */ |
75 | u32 evre_ring_size; /* Byte size of the ring */ | 70 | u32 evre_ring_size; /* Byte size of the ring */ |
76 | u32 evre_processed_off; /* last processed EVRE */ | 71 | u32 evre_processed_off; /* last processed EVRE */ |
77 | 72 | ||
78 | u32 tre_write_offset; /* TRE write location */ | 73 | u32 tre_write_offset; /* TRE write location */ |
79 | struct tasklet_struct task; /* task delivering notifications */ | 74 | struct tasklet_struct task; /* task delivering notifications */ |
75 | struct tasklet_struct rst_task; /* task to reset HW */ | ||
80 | DECLARE_KFIFO_PTR(handoff_fifo, | 76 | DECLARE_KFIFO_PTR(handoff_fifo, |
81 | struct hidma_tre *); /* pending TREs FIFO */ | 77 | struct hidma_tre *); /* pending TREs FIFO */ |
82 | }; | 78 | }; |
@@ -145,8 +141,8 @@ enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch); | |||
145 | bool hidma_ll_isenabled(struct hidma_lldev *llhndl); | 141 | bool hidma_ll_isenabled(struct hidma_lldev *llhndl); |
146 | void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch); | 142 | void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch); |
147 | void hidma_ll_start(struct hidma_lldev *llhndl); | 143 | void hidma_ll_start(struct hidma_lldev *llhndl); |
148 | int hidma_ll_pause(struct hidma_lldev *llhndl); | 144 | int hidma_ll_disable(struct hidma_lldev *lldev); |
149 | int hidma_ll_resume(struct hidma_lldev *llhndl); | 145 | int hidma_ll_enable(struct hidma_lldev *llhndl); |
150 | void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch, | 146 | void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch, |
151 | dma_addr_t src, dma_addr_t dest, u32 len, u32 flags); | 147 | dma_addr_t src, dma_addr_t dest, u32 len, u32 flags); |
152 | int hidma_ll_setup(struct hidma_lldev *lldev); | 148 | int hidma_ll_setup(struct hidma_lldev *lldev); |
@@ -157,4 +153,6 @@ int hidma_ll_uninit(struct hidma_lldev *llhndl); | |||
157 | irqreturn_t hidma_ll_inthandler(int irq, void *arg); | 153 | irqreturn_t hidma_ll_inthandler(int irq, void *arg); |
158 | void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info, | 154 | void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info, |
159 | u8 err_code); | 155 | u8 err_code); |
156 | int hidma_debug_init(struct hidma_dev *dmadev); | ||
157 | void hidma_debug_uninit(struct hidma_dev *dmadev); | ||
160 | #endif | 158 | #endif |
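The renamed HIDMA_TRE_*_IDX constants describe the 32-bit word layout of the TRE local copy kept in struct hidma_tre. A minimal sketch of how a memcpy TRE's length and 64-bit addresses map onto those words, mirroring what the low-level code does later in this patch (illustration only, assuming hidma.h is on the include path):

    #include <linux/kernel.h>       /* lower_32_bits() / upper_32_bits() */
    #include "hidma.h"              /* HIDMA_TRE_*_IDX */

    /* Fill the length and address words of a memcpy TRE local copy. */
    static void fill_memcpy_tre(u32 *tre_local, dma_addr_t src,
                                dma_addr_t dest, u32 len)
    {
            tre_local[HIDMA_TRE_LEN_IDX]      = len;
            tre_local[HIDMA_TRE_SRC_LOW_IDX]  = lower_32_bits(src);
            tre_local[HIDMA_TRE_SRC_HI_IDX]   = upper_32_bits(src);
            tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
            tre_local[HIDMA_TRE_DEST_HI_IDX]  = upper_32_bits(dest);
    }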
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c new file mode 100644 index 000000000000..fa827e5ffd68 --- /dev/null +++ b/drivers/dma/qcom/hidma_dbg.c | |||
@@ -0,0 +1,217 @@ | |||
1 | /* | ||
2 | * Qualcomm Technologies HIDMA debug file | ||
3 | * | ||
4 | * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 and | ||
8 | * only version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/debugfs.h> | ||
17 | #include <linux/device.h> | ||
18 | #include <linux/list.h> | ||
19 | #include <linux/pm_runtime.h> | ||
20 | |||
21 | #include "hidma.h" | ||
22 | |||
23 | static void hidma_ll_chstats(struct seq_file *s, void *llhndl, u32 tre_ch) | ||
24 | { | ||
25 | struct hidma_lldev *lldev = llhndl; | ||
26 | struct hidma_tre *tre; | ||
27 | u32 length; | ||
28 | dma_addr_t src_start; | ||
29 | dma_addr_t dest_start; | ||
30 | u32 *tre_local; | ||
31 | |||
32 | if (tre_ch >= lldev->nr_tres) { | ||
33 | dev_err(lldev->dev, "invalid TRE number in chstats:%d", tre_ch); | ||
34 | return; | ||
35 | } | ||
36 | tre = &lldev->trepool[tre_ch]; | ||
37 | seq_printf(s, "------Channel %d -----\n", tre_ch); | ||
38 | seq_printf(s, "allocated=%d\n", atomic_read(&tre->allocated)); | ||
39 | seq_printf(s, "queued = 0x%x\n", tre->queued); | ||
40 | seq_printf(s, "err_info = 0x%x\n", tre->err_info); | ||
41 | seq_printf(s, "err_code = 0x%x\n", tre->err_code); | ||
42 | seq_printf(s, "status = 0x%x\n", tre->status); | ||
43 | seq_printf(s, "idx = 0x%x\n", tre->idx); | ||
44 | seq_printf(s, "dma_sig = 0x%x\n", tre->dma_sig); | ||
45 | seq_printf(s, "dev_name=%s\n", tre->dev_name); | ||
46 | seq_printf(s, "callback=%p\n", tre->callback); | ||
47 | seq_printf(s, "data=%p\n", tre->data); | ||
48 | seq_printf(s, "tre_index = 0x%x\n", tre->tre_index); | ||
49 | |||
50 | tre_local = &tre->tre_local[0]; | ||
51 | src_start = tre_local[HIDMA_TRE_SRC_LOW_IDX]; | ||
52 | src_start = ((u64) (tre_local[HIDMA_TRE_SRC_HI_IDX]) << 32) + src_start; | ||
53 | dest_start = tre_local[HIDMA_TRE_DEST_LOW_IDX]; | ||
54 | dest_start += ((u64) (tre_local[HIDMA_TRE_DEST_HI_IDX]) << 32); | ||
55 | length = tre_local[HIDMA_TRE_LEN_IDX]; | ||
56 | |||
57 | seq_printf(s, "src=%pap\n", &src_start); | ||
58 | seq_printf(s, "dest=%pap\n", &dest_start); | ||
59 | seq_printf(s, "length = 0x%x\n", length); | ||
60 | } | ||
61 | |||
62 | static void hidma_ll_devstats(struct seq_file *s, void *llhndl) | ||
63 | { | ||
64 | struct hidma_lldev *lldev = llhndl; | ||
65 | |||
66 | seq_puts(s, "------Device -----\n"); | ||
67 | seq_printf(s, "lldev init = 0x%x\n", lldev->initialized); | ||
68 | seq_printf(s, "trch_state = 0x%x\n", lldev->trch_state); | ||
69 | seq_printf(s, "evch_state = 0x%x\n", lldev->evch_state); | ||
70 | seq_printf(s, "chidx = 0x%x\n", lldev->chidx); | ||
71 | seq_printf(s, "nr_tres = 0x%x\n", lldev->nr_tres); | ||
72 | seq_printf(s, "trca=%p\n", lldev->trca); | ||
73 | seq_printf(s, "tre_ring=%p\n", lldev->tre_ring); | ||
74 | seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma); | ||
75 | seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size); | ||
76 | seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off); | ||
77 | seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count); | ||
78 | seq_printf(s, "evca=%p\n", lldev->evca); | ||
79 | seq_printf(s, "evre_ring=%p\n", lldev->evre_ring); | ||
80 | seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma); | ||
81 | seq_printf(s, "evre_ring_size = 0x%x\n", lldev->evre_ring_size); | ||
82 | seq_printf(s, "evre_processed_off = 0x%x\n", lldev->evre_processed_off); | ||
83 | seq_printf(s, "tre_write_offset = 0x%x\n", lldev->tre_write_offset); | ||
84 | } | ||
85 | |||
86 | /* | ||
87 | * hidma_chan_stats: display HIDMA channel statistics | ||
88 | * | ||
89 | * Display the statistics for the current HIDMA virtual channel device. | ||
90 | */ | ||
91 | static int hidma_chan_stats(struct seq_file *s, void *unused) | ||
92 | { | ||
93 | struct hidma_chan *mchan = s->private; | ||
94 | struct hidma_desc *mdesc; | ||
95 | struct hidma_dev *dmadev = mchan->dmadev; | ||
96 | |||
97 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
98 | seq_printf(s, "paused=%u\n", mchan->paused); | ||
99 | seq_printf(s, "dma_sig=%u\n", mchan->dma_sig); | ||
100 | seq_puts(s, "prepared\n"); | ||
101 | list_for_each_entry(mdesc, &mchan->prepared, node) | ||
102 | hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch); | ||
103 | |||
104 | seq_puts(s, "active\n"); | ||
105 | list_for_each_entry(mdesc, &mchan->active, node) | ||
106 | hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch); | ||
107 | |||
108 | seq_puts(s, "completed\n"); | ||
109 | list_for_each_entry(mdesc, &mchan->completed, node) | ||
110 | hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch); | ||
111 | |||
112 | hidma_ll_devstats(s, mchan->dmadev->lldev); | ||
113 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
114 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * hidma_dma_info: display HIDMA device info | ||
120 | * | ||
121 | * Display the info for the current HIDMA device. | ||
122 | */ | ||
123 | static int hidma_dma_info(struct seq_file *s, void *unused) | ||
124 | { | ||
125 | struct hidma_dev *dmadev = s->private; | ||
126 | resource_size_t sz; | ||
127 | |||
128 | seq_printf(s, "nr_descriptors=%d\n", dmadev->nr_descriptors); | ||
129 | seq_printf(s, "dev_trca=%p\n", &dmadev->dev_trca); | ||
130 | seq_printf(s, "dev_trca_phys=%pa\n", &dmadev->trca_resource->start); | ||
131 | sz = resource_size(dmadev->trca_resource); | ||
132 | seq_printf(s, "dev_trca_size=%pa\n", &sz); | ||
133 | seq_printf(s, "dev_evca=%p\n", &dmadev->dev_evca); | ||
134 | seq_printf(s, "dev_evca_phys=%pa\n", &dmadev->evca_resource->start); | ||
135 | sz = resource_size(dmadev->evca_resource); | ||
136 | seq_printf(s, "dev_evca_size=%pa\n", &sz); | ||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | static int hidma_chan_stats_open(struct inode *inode, struct file *file) | ||
141 | { | ||
142 | return single_open(file, hidma_chan_stats, inode->i_private); | ||
143 | } | ||
144 | |||
145 | static int hidma_dma_info_open(struct inode *inode, struct file *file) | ||
146 | { | ||
147 | return single_open(file, hidma_dma_info, inode->i_private); | ||
148 | } | ||
149 | |||
150 | static const struct file_operations hidma_chan_fops = { | ||
151 | .open = hidma_chan_stats_open, | ||
152 | .read = seq_read, | ||
153 | .llseek = seq_lseek, | ||
154 | .release = single_release, | ||
155 | }; | ||
156 | |||
157 | static const struct file_operations hidma_dma_fops = { | ||
158 | .open = hidma_dma_info_open, | ||
159 | .read = seq_read, | ||
160 | .llseek = seq_lseek, | ||
161 | .release = single_release, | ||
162 | }; | ||
163 | |||
164 | void hidma_debug_uninit(struct hidma_dev *dmadev) | ||
165 | { | ||
166 | debugfs_remove_recursive(dmadev->debugfs); | ||
167 | debugfs_remove_recursive(dmadev->stats); | ||
168 | } | ||
169 | |||
170 | int hidma_debug_init(struct hidma_dev *dmadev) | ||
171 | { | ||
172 | int rc = 0; | ||
173 | int chidx = 0; | ||
174 | struct list_head *position = NULL; | ||
175 | |||
176 | dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL); | ||
177 | if (!dmadev->debugfs) { | ||
178 | rc = -ENODEV; | ||
179 | return rc; | ||
180 | } | ||
181 | |||
182 | /* walk through the virtual channel list */ | ||
183 | list_for_each(position, &dmadev->ddev.channels) { | ||
184 | struct hidma_chan *chan; | ||
185 | |||
186 | chan = list_entry(position, struct hidma_chan, | ||
187 | chan.device_node); | ||
188 | sprintf(chan->dbg_name, "chan%d", chidx); | ||
189 | chan->debugfs = debugfs_create_dir(chan->dbg_name, | ||
190 | dmadev->debugfs); | ||
191 | if (!chan->debugfs) { | ||
192 | rc = -ENOMEM; | ||
193 | goto cleanup; | ||
194 | } | ||
195 | chan->stats = debugfs_create_file("stats", S_IRUGO, | ||
196 | chan->debugfs, chan, | ||
197 | &hidma_chan_fops); | ||
198 | if (!chan->stats) { | ||
199 | rc = -ENOMEM; | ||
200 | goto cleanup; | ||
201 | } | ||
202 | chidx++; | ||
203 | } | ||
204 | |||
205 | dmadev->stats = debugfs_create_file("stats", S_IRUGO, | ||
206 | dmadev->debugfs, dmadev, | ||
207 | &hidma_dma_fops); | ||
208 | if (!dmadev->stats) { | ||
209 | rc = -ENOMEM; | ||
210 | goto cleanup; | ||
211 | } | ||
212 | |||
213 | return 0; | ||
214 | cleanup: | ||
215 | hidma_debug_uninit(dmadev); | ||
216 | return rc; | ||
217 | } | ||
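The new hidma_dbg.c creates one debugfs directory per HIDMA device, with a per-channel subdirectory and read-only "stats" files. With debugfs mounted at the usual /sys/kernel/debug, the resulting layout looks roughly like this (illustrative; <dev> is a placeholder for the platform device name):

    /sys/kernel/debug/<dev>/stats          device dump (hidma_dma_info)
    /sys/kernel/debug/<dev>/chan0/stats    per-channel dump (hidma_chan_stats)
    /sys/kernel/debug/<dev>/chan1/stats
    ...

Each stats read takes a runtime PM reference around the register/state dump, matching the pm_runtime_get_sync()/put_autosuspend() pairing in hidma_chan_stats() above.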
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c new file mode 100644 index 000000000000..f3929001539b --- /dev/null +++ b/drivers/dma/qcom/hidma_ll.c | |||
@@ -0,0 +1,872 @@ | |||
1 | /* | ||
2 | * Qualcomm Technologies HIDMA DMA engine low level code | ||
3 | * | ||
4 | * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 and | ||
8 | * only version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/dmaengine.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/highmem.h> | ||
21 | #include <linux/dma-mapping.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/atomic.h> | ||
24 | #include <linux/iopoll.h> | ||
25 | #include <linux/kfifo.h> | ||
26 | #include <linux/bitops.h> | ||
27 | |||
28 | #include "hidma.h" | ||
29 | |||
30 | #define HIDMA_EVRE_SIZE 16 /* each EVRE is 16 bytes */ | ||
31 | |||
32 | #define HIDMA_TRCA_CTRLSTS_REG 0x000 | ||
33 | #define HIDMA_TRCA_RING_LOW_REG 0x008 | ||
34 | #define HIDMA_TRCA_RING_HIGH_REG 0x00C | ||
35 | #define HIDMA_TRCA_RING_LEN_REG 0x010 | ||
36 | #define HIDMA_TRCA_DOORBELL_REG 0x400 | ||
37 | |||
38 | #define HIDMA_EVCA_CTRLSTS_REG 0x000 | ||
39 | #define HIDMA_EVCA_INTCTRL_REG 0x004 | ||
40 | #define HIDMA_EVCA_RING_LOW_REG 0x008 | ||
41 | #define HIDMA_EVCA_RING_HIGH_REG 0x00C | ||
42 | #define HIDMA_EVCA_RING_LEN_REG 0x010 | ||
43 | #define HIDMA_EVCA_WRITE_PTR_REG 0x020 | ||
44 | #define HIDMA_EVCA_DOORBELL_REG 0x400 | ||
45 | |||
46 | #define HIDMA_EVCA_IRQ_STAT_REG 0x100 | ||
47 | #define HIDMA_EVCA_IRQ_CLR_REG 0x108 | ||
48 | #define HIDMA_EVCA_IRQ_EN_REG 0x110 | ||
49 | |||
50 | #define HIDMA_EVRE_CFG_IDX 0 | ||
51 | |||
52 | #define HIDMA_EVRE_ERRINFO_BIT_POS 24 | ||
53 | #define HIDMA_EVRE_CODE_BIT_POS 28 | ||
54 | |||
55 | #define HIDMA_EVRE_ERRINFO_MASK GENMASK(3, 0) | ||
56 | #define HIDMA_EVRE_CODE_MASK GENMASK(3, 0) | ||
57 | |||
58 | #define HIDMA_CH_CONTROL_MASK GENMASK(7, 0) | ||
59 | #define HIDMA_CH_STATE_MASK GENMASK(7, 0) | ||
60 | #define HIDMA_CH_STATE_BIT_POS 0x8 | ||
61 | |||
62 | #define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS 0 | ||
63 | #define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS 1 | ||
64 | #define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS 9 | ||
65 | #define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS 10 | ||
66 | #define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS 11 | ||
67 | #define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS 14 | ||
68 | |||
69 | #define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS) | \ | ||
70 | BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \ | ||
71 | BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \ | ||
72 | BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \ | ||
73 | BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS) | \ | ||
74 | BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS)) | ||
75 | |||
76 | #define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size) \ | ||
77 | do { \ | ||
78 | iter += size; \ | ||
79 | if (iter >= ring_size) \ | ||
80 | iter -= ring_size; \ | ||
81 | } while (0) | ||
82 | |||
83 | #define HIDMA_CH_STATE(val) \ | ||
84 | ((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK) | ||
85 | |||
86 | #define HIDMA_ERR_INT_MASK \ | ||
87 | (BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS) | \ | ||
88 | BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \ | ||
89 | BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \ | ||
90 | BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \ | ||
91 | BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS)) | ||
92 | |||
93 | enum ch_command { | ||
94 | HIDMA_CH_DISABLE = 0, | ||
95 | HIDMA_CH_ENABLE = 1, | ||
96 | HIDMA_CH_SUSPEND = 2, | ||
97 | HIDMA_CH_RESET = 9, | ||
98 | }; | ||
99 | |||
100 | enum ch_state { | ||
101 | HIDMA_CH_DISABLED = 0, | ||
102 | HIDMA_CH_ENABLED = 1, | ||
103 | HIDMA_CH_RUNNING = 2, | ||
104 | HIDMA_CH_SUSPENDED = 3, | ||
105 | HIDMA_CH_STOPPED = 4, | ||
106 | }; | ||
107 | |||
108 | enum tre_type { | ||
109 | HIDMA_TRE_MEMCPY = 3, | ||
110 | }; | ||
111 | |||
112 | enum err_code { | ||
113 | HIDMA_EVRE_STATUS_COMPLETE = 1, | ||
114 | HIDMA_EVRE_STATUS_ERROR = 4, | ||
115 | }; | ||
116 | |||
117 | static int hidma_is_chan_enabled(int state) | ||
118 | { | ||
119 | switch (state) { | ||
120 | case HIDMA_CH_ENABLED: | ||
121 | case HIDMA_CH_RUNNING: | ||
122 | return true; | ||
123 | default: | ||
124 | return false; | ||
125 | } | ||
126 | } | ||
127 | |||
128 | void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch) | ||
129 | { | ||
130 | struct hidma_tre *tre; | ||
131 | |||
132 | if (tre_ch >= lldev->nr_tres) { | ||
133 | dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch); | ||
134 | return; | ||
135 | } | ||
136 | |||
137 | tre = &lldev->trepool[tre_ch]; | ||
138 | if (atomic_read(&tre->allocated) != true) { | ||
139 | dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch); | ||
140 | return; | ||
141 | } | ||
142 | |||
143 | atomic_set(&tre->allocated, 0); | ||
144 | } | ||
145 | |||
146 | int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name, | ||
147 | void (*callback)(void *data), void *data, u32 *tre_ch) | ||
148 | { | ||
149 | unsigned int i; | ||
150 | struct hidma_tre *tre; | ||
151 | u32 *tre_local; | ||
152 | |||
153 | if (!tre_ch || !lldev) | ||
154 | return -EINVAL; | ||
155 | |||
156 | /* need to have at least one empty spot in the queue */ | ||
157 | for (i = 0; i < lldev->nr_tres - 1; i++) { | ||
158 | if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1)) | ||
159 | break; | ||
160 | } | ||
161 | |||
162 | if (i == (lldev->nr_tres - 1)) | ||
163 | return -ENOMEM; | ||
164 | |||
165 | tre = &lldev->trepool[i]; | ||
166 | tre->dma_sig = sig; | ||
167 | tre->dev_name = dev_name; | ||
168 | tre->callback = callback; | ||
169 | tre->data = data; | ||
170 | tre->idx = i; | ||
171 | tre->status = 0; | ||
172 | tre->queued = 0; | ||
173 | tre->err_code = 0; | ||
174 | tre->err_info = 0; | ||
175 | tre->lldev = lldev; | ||
176 | tre_local = &tre->tre_local[0]; | ||
177 | tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY; | ||
178 | tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8; | ||
179 | tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16); /* set IEOB */ | ||
180 | *tre_ch = i; | ||
181 | if (callback) | ||
182 | callback(data); | ||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | /* | ||
187 | * Multiple TREs may be queued and waiting in the pending queue. | ||
188 | */ | ||
189 | static void hidma_ll_tre_complete(unsigned long arg) | ||
190 | { | ||
191 | struct hidma_lldev *lldev = (struct hidma_lldev *)arg; | ||
192 | struct hidma_tre *tre; | ||
193 | |||
194 | while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) { | ||
195 | /* call the user if it has been read by the hardware */ | ||
196 | if (tre->callback) | ||
197 | tre->callback(tre->data); | ||
198 | } | ||
199 | } | ||
200 | |||
201 | static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator, | ||
202 | u8 err_info, u8 err_code) | ||
203 | { | ||
204 | struct hidma_tre *tre; | ||
205 | unsigned long flags; | ||
206 | |||
207 | spin_lock_irqsave(&lldev->lock, flags); | ||
208 | tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE]; | ||
209 | if (!tre) { | ||
210 | spin_unlock_irqrestore(&lldev->lock, flags); | ||
211 | dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n", | ||
212 | tre_iterator / HIDMA_TRE_SIZE); | ||
213 | return -EINVAL; | ||
214 | } | ||
215 | lldev->pending_tre_list[tre->tre_index] = NULL; | ||
216 | |||
217 | /* | ||
218 | * Keep track of pending TREs that SW is expecting to receive | ||
219 | * from HW. We got one now. Decrement our counter. | ||
220 | */ | ||
221 | lldev->pending_tre_count--; | ||
222 | if (lldev->pending_tre_count < 0) { | ||
223 | dev_warn(lldev->dev, "tre count mismatch on completion"); | ||
224 | lldev->pending_tre_count = 0; | ||
225 | } | ||
226 | |||
227 | spin_unlock_irqrestore(&lldev->lock, flags); | ||
228 | |||
229 | tre->err_info = err_info; | ||
230 | tre->err_code = err_code; | ||
231 | tre->queued = 0; | ||
232 | |||
233 | kfifo_put(&lldev->handoff_fifo, tre); | ||
234 | tasklet_schedule(&lldev->task); | ||
235 | |||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | /* | ||
240 | * Called to handle the interrupt for the channel. | ||
241 | * Return a positive number if TRE or EVRE were consumed on this run. | ||
242 | * Return a positive number if there are pending TREs or EVREs. | ||
243 | * Return 0 if there is nothing to consume or no pending TREs/EVREs found. | ||
244 | */ | ||
245 | static int hidma_handle_tre_completion(struct hidma_lldev *lldev) | ||
246 | { | ||
247 | u32 evre_ring_size = lldev->evre_ring_size; | ||
248 | u32 tre_ring_size = lldev->tre_ring_size; | ||
249 | u32 err_info, err_code, evre_write_off; | ||
250 | u32 tre_iterator, evre_iterator; | ||
251 | u32 num_completed = 0; | ||
252 | |||
253 | evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG); | ||
254 | tre_iterator = lldev->tre_processed_off; | ||
255 | evre_iterator = lldev->evre_processed_off; | ||
256 | |||
257 | if ((evre_write_off > evre_ring_size) || | ||
258 | (evre_write_off % HIDMA_EVRE_SIZE)) { | ||
259 | dev_err(lldev->dev, "HW reports invalid EVRE write offset\n"); | ||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | /* | ||
264 | * By the time control reaches here the number of EVREs and TREs | ||
265 | * may not match. Only consume the ones that hardware told us. | ||
266 | */ | ||
267 | while ((evre_iterator != evre_write_off)) { | ||
268 | u32 *current_evre = lldev->evre_ring + evre_iterator; | ||
269 | u32 cfg; | ||
270 | |||
271 | cfg = current_evre[HIDMA_EVRE_CFG_IDX]; | ||
272 | err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS; | ||
273 | err_info &= HIDMA_EVRE_ERRINFO_MASK; | ||
274 | err_code = | ||
275 | (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK; | ||
276 | |||
277 | if (hidma_post_completed(lldev, tre_iterator, err_info, | ||
278 | err_code)) | ||
279 | break; | ||
280 | |||
281 | HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE, | ||
282 | tre_ring_size); | ||
283 | HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE, | ||
284 | evre_ring_size); | ||
285 | |||
286 | /* | ||
287 | * Read the new event descriptor written by the HW. | ||
288 | * As we are processing the delivered events, other events | ||
289 | * get queued to the SW for processing. | ||
290 | */ | ||
291 | evre_write_off = | ||
292 | readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG); | ||
293 | num_completed++; | ||
294 | } | ||
295 | |||
296 | if (num_completed) { | ||
297 | u32 evre_read_off = (lldev->evre_processed_off + | ||
298 | HIDMA_EVRE_SIZE * num_completed); | ||
299 | u32 tre_read_off = (lldev->tre_processed_off + | ||
300 | HIDMA_TRE_SIZE * num_completed); | ||
301 | |||
302 | evre_read_off = evre_read_off % evre_ring_size; | ||
303 | tre_read_off = tre_read_off % tre_ring_size; | ||
304 | |||
305 | writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG); | ||
306 | |||
307 | /* record the last processed tre offset */ | ||
308 | lldev->tre_processed_off = tre_read_off; | ||
309 | lldev->evre_processed_off = evre_read_off; | ||
310 | } | ||
311 | |||
312 | return num_completed; | ||
313 | } | ||
314 | |||
315 | void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info, | ||
316 | u8 err_code) | ||
317 | { | ||
318 | u32 tre_iterator; | ||
319 | u32 tre_ring_size = lldev->tre_ring_size; | ||
320 | int num_completed = 0; | ||
321 | u32 tre_read_off; | ||
322 | |||
323 | tre_iterator = lldev->tre_processed_off; | ||
324 | while (lldev->pending_tre_count) { | ||
325 | if (hidma_post_completed(lldev, tre_iterator, err_info, | ||
326 | err_code)) | ||
327 | break; | ||
328 | HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE, | ||
329 | tre_ring_size); | ||
330 | num_completed++; | ||
331 | } | ||
332 | tre_read_off = (lldev->tre_processed_off + | ||
333 | HIDMA_TRE_SIZE * num_completed); | ||
334 | |||
335 | tre_read_off = tre_read_off % tre_ring_size; | ||
336 | |||
337 | /* record the last processed tre offset */ | ||
338 | lldev->tre_processed_off = tre_read_off; | ||
339 | } | ||
340 | |||
341 | static int hidma_ll_reset(struct hidma_lldev *lldev) | ||
342 | { | ||
343 | u32 val; | ||
344 | int ret; | ||
345 | |||
346 | val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
347 | val &= ~(HIDMA_CH_CONTROL_MASK << 16); | ||
348 | val |= HIDMA_CH_RESET << 16; | ||
349 | writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
350 | |||
351 | /* | ||
352 | * Delay 10ms after reset to allow DMA logic to quiesce. | ||
353 | * Do a polled read up to 1ms and 10ms maximum. | ||
354 | */ | ||
355 | ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val, | ||
356 | HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED, | ||
357 | 1000, 10000); | ||
358 | if (ret) { | ||
359 | dev_err(lldev->dev, "transfer channel did not reset\n"); | ||
360 | return ret; | ||
361 | } | ||
362 | |||
363 | val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
364 | val &= ~(HIDMA_CH_CONTROL_MASK << 16); | ||
365 | val |= HIDMA_CH_RESET << 16; | ||
366 | writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
367 | |||
368 | /* | ||
369 | * Delay 10ms after reset to allow DMA logic to quiesce. | ||
370 | * Do a polled read up to 1ms and 10ms maximum. | ||
371 | */ | ||
372 | ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val, | ||
373 | HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED, | ||
374 | 1000, 10000); | ||
375 | if (ret) | ||
376 | return ret; | ||
377 | |||
378 | lldev->trch_state = HIDMA_CH_DISABLED; | ||
379 | lldev->evch_state = HIDMA_CH_DISABLED; | ||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | /* | ||
384 | * Abort all transactions and perform a reset. | ||
385 | */ | ||
386 | static void hidma_ll_abort(unsigned long arg) | ||
387 | { | ||
388 | struct hidma_lldev *lldev = (struct hidma_lldev *)arg; | ||
389 | u8 err_code = HIDMA_EVRE_STATUS_ERROR; | ||
390 | u8 err_info = 0xFF; | ||
391 | int rc; | ||
392 | |||
393 | hidma_cleanup_pending_tre(lldev, err_info, err_code); | ||
394 | |||
395 | /* reset the channel for recovery */ | ||
396 | rc = hidma_ll_setup(lldev); | ||
397 | if (rc) { | ||
398 | dev_err(lldev->dev, "channel reinitialize failed after error\n"); | ||
399 | return; | ||
400 | } | ||
401 | writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
402 | } | ||
403 | |||
404 | /* | ||
405 | * The interrupt handler for HIDMA will try to consume as many pending | ||
406 | * EVRE from the event queue as possible. Each EVRE has an associated | ||
407 | * TRE that holds the user interface parameters. EVRE reports the | ||
408 | * result of the transaction. Hardware guarantees ordering between EVREs | ||
409 | * and TREs. We use last processed offset to figure out which TRE is | ||
410 | * associated with which EVRE. If two TREs are consumed by HW, the EVREs | ||
411 | * are in order in the event ring. | ||
412 | * | ||
413 | * This handler will do a one pass for consuming EVREs. Other EVREs may | ||
414 | * be delivered while we are working. It will try to consume incoming | ||
415 | * EVREs one more time and return. | ||
416 | * | ||
417 | * For unprocessed EVREs, hardware will trigger another interrupt until | ||
418 | * all the interrupt bits are cleared. | ||
419 | * | ||
420 | * Hardware guarantees that by the time interrupt is observed, all data | ||
421 | * transactions in flight are delivered to their respective places and | ||
422 | * are visible to the CPU. | ||
423 | * | ||
424 | * On demand paging for IOMMU is only supported for PCIe via PRI | ||
425 | * (Page Request Interface) not for HIDMA. All other hardware instances | ||
426 | * including HIDMA work on pinned DMA addresses. | ||
427 | * | ||
428 | * HIDMA is not aware of IOMMU presence since it follows the DMA API. All | ||
429 | * IOMMU latency will be built into the data movement time. By the time | ||
430 | * interrupt happens, IOMMU lookups + data movement has already taken place. | ||
431 | * | ||
432 | * While the first read in a typical PCI endpoint ISR flushes all outstanding | ||
433 | * requests traditionally to the destination, this concept does not apply | ||
434 | * here for this HW. | ||
435 | */ | ||
436 | irqreturn_t hidma_ll_inthandler(int chirq, void *arg) | ||
437 | { | ||
438 | struct hidma_lldev *lldev = arg; | ||
439 | u32 status; | ||
440 | u32 enable; | ||
441 | u32 cause; | ||
442 | |||
443 | /* | ||
444 | * Fine tuned for this HW... | ||
445 | * | ||
446 | * This ISR has been designed for this particular hardware. Relaxed | ||
447 | * read and write accessors are used for performance reasons due to | ||
448 | * interrupt delivery guarantees. Do not copy this code blindly and | ||
449 | * expect that to work. | ||
450 | */ | ||
451 | status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); | ||
452 | enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
453 | cause = status & enable; | ||
454 | |||
455 | while (cause) { | ||
456 | if (cause & HIDMA_ERR_INT_MASK) { | ||
457 | dev_err(lldev->dev, "error 0x%x, resetting...\n", | ||
458 | cause); | ||
459 | |||
460 | /* Clear out pending interrupts */ | ||
461 | writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
462 | |||
463 | tasklet_schedule(&lldev->rst_task); | ||
464 | goto out; | ||
465 | } | ||
466 | |||
467 | /* | ||
468 | * Try to consume as many EVREs as possible. | ||
469 | */ | ||
470 | hidma_handle_tre_completion(lldev); | ||
471 | |||
472 | /* We consumed TREs or there are pending TREs or EVREs. */ | ||
473 | writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
474 | |||
475 | /* | ||
476 | * Another interrupt might have arrived while we are | ||
477 | * processing this one. Read the new cause. | ||
478 | */ | ||
479 | status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); | ||
480 | enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
481 | cause = status & enable; | ||
482 | } | ||
483 | |||
484 | out: | ||
485 | return IRQ_HANDLED; | ||
486 | } | ||
487 | |||
488 | int hidma_ll_enable(struct hidma_lldev *lldev) | ||
489 | { | ||
490 | u32 val; | ||
491 | int ret; | ||
492 | |||
493 | val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
494 | val &= ~(HIDMA_CH_CONTROL_MASK << 16); | ||
495 | val |= HIDMA_CH_ENABLE << 16; | ||
496 | writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
497 | |||
498 | ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val, | ||
499 | hidma_is_chan_enabled(HIDMA_CH_STATE(val)), | ||
500 | 1000, 10000); | ||
501 | if (ret) { | ||
502 | dev_err(lldev->dev, "event channel did not get enabled\n"); | ||
503 | return ret; | ||
504 | } | ||
505 | |||
506 | val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
507 | val &= ~(HIDMA_CH_CONTROL_MASK << 16); | ||
508 | val |= HIDMA_CH_ENABLE << 16; | ||
509 | writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
510 | |||
511 | ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val, | ||
512 | hidma_is_chan_enabled(HIDMA_CH_STATE(val)), | ||
513 | 1000, 10000); | ||
514 | if (ret) { | ||
515 | dev_err(lldev->dev, "transfer channel did not get enabled\n"); | ||
516 | return ret; | ||
517 | } | ||
518 | |||
519 | lldev->trch_state = HIDMA_CH_ENABLED; | ||
520 | lldev->evch_state = HIDMA_CH_ENABLED; | ||
521 | |||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | void hidma_ll_start(struct hidma_lldev *lldev) | ||
526 | { | ||
527 | unsigned long irqflags; | ||
528 | |||
529 | spin_lock_irqsave(&lldev->lock, irqflags); | ||
530 | writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG); | ||
531 | spin_unlock_irqrestore(&lldev->lock, irqflags); | ||
532 | } | ||
533 | |||
534 | bool hidma_ll_isenabled(struct hidma_lldev *lldev) | ||
535 | { | ||
536 | u32 val; | ||
537 | |||
538 | val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
539 | lldev->trch_state = HIDMA_CH_STATE(val); | ||
540 | val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
541 | lldev->evch_state = HIDMA_CH_STATE(val); | ||
542 | |||
543 | /* both channels have to be enabled before calling this function */ | ||
544 | if (hidma_is_chan_enabled(lldev->trch_state) && | ||
545 | hidma_is_chan_enabled(lldev->evch_state)) | ||
546 | return true; | ||
547 | |||
548 | return false; | ||
549 | } | ||
550 | |||
551 | void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch) | ||
552 | { | ||
553 | struct hidma_tre *tre; | ||
554 | unsigned long flags; | ||
555 | |||
556 | tre = &lldev->trepool[tre_ch]; | ||
557 | |||
558 | /* copy the TRE into its location in the TRE ring */ | ||
559 | spin_lock_irqsave(&lldev->lock, flags); | ||
560 | tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE; | ||
561 | lldev->pending_tre_list[tre->tre_index] = tre; | ||
562 | memcpy(lldev->tre_ring + lldev->tre_write_offset, | ||
563 | &tre->tre_local[0], HIDMA_TRE_SIZE); | ||
564 | tre->err_code = 0; | ||
565 | tre->err_info = 0; | ||
566 | tre->queued = 1; | ||
567 | lldev->pending_tre_count++; | ||
568 | lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE) | ||
569 | % lldev->tre_ring_size; | ||
570 | spin_unlock_irqrestore(&lldev->lock, flags); | ||
571 | } | ||
572 | |||
573 | /* | ||
574 | * Note that even though we stop this channel if there is a pending transaction | ||
575 | * in flight it will complete and follow the callback. This request will | ||
576 | * prevent further requests to be made. | ||
577 | */ | ||
578 | int hidma_ll_disable(struct hidma_lldev *lldev) | ||
579 | { | ||
580 | u32 val; | ||
581 | int ret; | ||
582 | |||
583 | val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
584 | lldev->evch_state = HIDMA_CH_STATE(val); | ||
585 | val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
586 | lldev->trch_state = HIDMA_CH_STATE(val); | ||
587 | |||
588 | /* already suspended by this OS */ | ||
589 | if ((lldev->trch_state == HIDMA_CH_SUSPENDED) || | ||
590 | (lldev->evch_state == HIDMA_CH_SUSPENDED)) | ||
591 | return 0; | ||
592 | |||
593 | /* already stopped by the manager */ | ||
594 | if ((lldev->trch_state == HIDMA_CH_STOPPED) || | ||
595 | (lldev->evch_state == HIDMA_CH_STOPPED)) | ||
596 | return 0; | ||
597 | |||
598 | val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
599 | val &= ~(HIDMA_CH_CONTROL_MASK << 16); | ||
600 | val |= HIDMA_CH_SUSPEND << 16; | ||
601 | writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG); | ||
602 | |||
603 | /* | ||
604 | * Start the wait right after the suspend is confirmed. | ||
605 | * Do a polled read up to 1ms and 10ms maximum. | ||
606 | */ | ||
607 | ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val, | ||
608 | HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED, | ||
609 | 1000, 10000); | ||
610 | if (ret) | ||
611 | return ret; | ||
612 | |||
613 | val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
614 | val &= ~(HIDMA_CH_CONTROL_MASK << 16); | ||
615 | val |= HIDMA_CH_SUSPEND << 16; | ||
616 | writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG); | ||
617 | |||
618 | /* | ||
619 | * Start the wait right after the suspend is requested. | ||
620 | * Poll in 1ms intervals for up to 10ms to let the DMA logic quiesce. | ||
621 | */ | ||
622 | ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val, | ||
623 | HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED, | ||
624 | 1000, 10000); | ||
625 | if (ret) | ||
626 | return ret; | ||
627 | |||
628 | lldev->trch_state = HIDMA_CH_SUSPENDED; | ||
629 | lldev->evch_state = HIDMA_CH_SUSPENDED; | ||
630 | return 0; | ||
631 | } | ||
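
The disable path above issues the same request-then-poll sequence twice, once per channel: overwrite the control field of the CTRLSTS register with HIDMA_CH_SUSPEND, then poll until the channel state reads back as HIDMA_CH_SUSPENDED. A minimal sketch of that pattern using the register and field macros from the listing above; hidma_ch_suspend() is an illustrative helper name, not a function in the driver:

#include <linux/io.h>
#include <linux/iopoll.h>

/* illustrative helper: ask one channel to suspend and wait for it */
static int hidma_ch_suspend(void __iomem *ctrlsts)
{
	u32 val = readl(ctrlsts);

	/* replace the control field with the SUSPEND command */
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, ctrlsts);

	/* poll in 1ms steps, time out after 10ms */
	return readl_poll_timeout(ctrlsts, val,
				  HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				  1000, 10000);
}
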
632 | |||
633 | void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch, | ||
634 | dma_addr_t src, dma_addr_t dest, u32 len, | ||
635 | u32 flags) | ||
636 | { | ||
637 | struct hidma_tre *tre; | ||
638 | u32 *tre_local; | ||
639 | |||
640 | if (tre_ch >= lldev->nr_tres) { | ||
641 | dev_err(lldev->dev, "invalid TRE number in transfer params:%d", | ||
642 | tre_ch); | ||
643 | return; | ||
644 | } | ||
645 | |||
646 | tre = &lldev->trepool[tre_ch]; | ||
647 | if (atomic_read(&tre->allocated) != true) { | ||
648 | dev_err(lldev->dev, "trying to set params on an unused TRE:%d", | ||
649 | tre_ch); | ||
650 | return; | ||
651 | } | ||
652 | |||
653 | tre_local = &tre->tre_local[0]; | ||
654 | tre_local[HIDMA_TRE_LEN_IDX] = len; | ||
655 | tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src); | ||
656 | tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src); | ||
657 | tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest); | ||
658 | tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest); | ||
659 | tre->int_flags = flags; | ||
660 | } | ||
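
Taken together with hidma_ll_queue_request() and hidma_ll_start() above, a single transfer submission from the upper layer reduces to three calls. A sketch, assuming tre_ch was allocated earlier (TRE allocation happens elsewhere in the driver and is not part of this excerpt); hidma_submit_one() is an illustrative name:

/* illustrative submission sequence; not a function from the driver */
static void hidma_submit_one(struct hidma_lldev *lldev, u32 tre_ch,
			     dma_addr_t src, dma_addr_t dest, u32 len,
			     u32 flags)
{
	/* stage the transfer parameters in the TRE's local copy */
	hidma_ll_set_transfer_params(lldev, tre_ch, src, dest, len, flags);

	/* copy the TRE into the ring and account it as pending */
	hidma_ll_queue_request(lldev, tre_ch);

	/* ring the doorbell so the hardware sees the new write offset */
	hidma_ll_start(lldev);
}
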
661 | |||
662 | /* | ||
663 | * Called during initialization and after an error condition | ||
664 | * to restore hardware state. | ||
665 | */ | ||
666 | int hidma_ll_setup(struct hidma_lldev *lldev) | ||
667 | { | ||
668 | int rc; | ||
669 | u64 addr; | ||
670 | u32 val; | ||
671 | u32 nr_tres = lldev->nr_tres; | ||
672 | |||
673 | lldev->pending_tre_count = 0; | ||
674 | lldev->tre_processed_off = 0; | ||
675 | lldev->evre_processed_off = 0; | ||
676 | lldev->tre_write_offset = 0; | ||
677 | |||
678 | /* disable interrupts */ | ||
679 | writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
680 | |||
681 | /* clear all pending interrupts */ | ||
682 | val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); | ||
683 | writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
684 | |||
685 | rc = hidma_ll_reset(lldev); | ||
686 | if (rc) | ||
687 | return rc; | ||
688 | |||
689 | /* | ||
690 | * Clear all pending interrupts again. | ||
691 | * Otherwise, stale reset-complete interrupts get delivered. | ||
692 | */ | ||
693 | val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); | ||
694 | writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
695 | |||
696 | /* disable interrupts again after reset */ | ||
697 | writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
698 | |||
699 | addr = lldev->tre_dma; | ||
700 | writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG); | ||
701 | writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG); | ||
702 | writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG); | ||
703 | |||
704 | addr = lldev->evre_dma; | ||
705 | writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG); | ||
706 | writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG); | ||
707 | writel(HIDMA_EVRE_SIZE * nr_tres, | ||
708 | lldev->evca + HIDMA_EVCA_RING_LEN_REG); | ||
709 | |||
710 | /* support IRQ only for now */ | ||
711 | val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG); | ||
712 | val &= ~0xF; | ||
713 | val |= 0x1; | ||
714 | writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG); | ||
715 | |||
716 | /* clear all pending interrupts and enable them */ | ||
717 | writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
718 | writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
719 | |||
720 | return hidma_ll_enable(lldev); | ||
721 | } | ||
722 | |||
723 | struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres, | ||
724 | void __iomem *trca, void __iomem *evca, | ||
725 | u8 chidx) | ||
726 | { | ||
727 | u32 required_bytes; | ||
728 | struct hidma_lldev *lldev; | ||
729 | int rc; | ||
730 | size_t sz; | ||
731 | |||
732 | if (!trca || !evca || !dev || !nr_tres) | ||
733 | return NULL; | ||
734 | |||
735 | /* need at least four TREs */ | ||
736 | if (nr_tres < 4) | ||
737 | return NULL; | ||
738 | |||
739 | /* reserve one extra TRE slot */ | ||
740 | nr_tres += 1; | ||
741 | |||
742 | lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL); | ||
743 | if (!lldev) | ||
744 | return NULL; | ||
745 | |||
746 | lldev->evca = evca; | ||
747 | lldev->trca = trca; | ||
748 | lldev->dev = dev; | ||
749 | sz = sizeof(struct hidma_tre); | ||
750 | lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL); | ||
751 | if (!lldev->trepool) | ||
752 | return NULL; | ||
753 | |||
754 | required_bytes = sizeof(lldev->pending_tre_list[0]); | ||
755 | lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes, | ||
756 | GFP_KERNEL); | ||
757 | if (!lldev->pending_tre_list) | ||
758 | return NULL; | ||
759 | |||
760 | sz = (HIDMA_TRE_SIZE + 1) * nr_tres; | ||
761 | lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma, | ||
762 | GFP_KERNEL); | ||
763 | if (!lldev->tre_ring) | ||
764 | return NULL; | ||
765 | |||
766 | memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres); | ||
767 | lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres; | ||
768 | lldev->nr_tres = nr_tres; | ||
769 | |||
770 | /* the TRE ring has to be TRE_SIZE aligned */ | ||
771 | if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) { | ||
772 | u8 tre_ring_shift; | ||
773 | |||
774 | tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE; | ||
775 | tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift; | ||
776 | lldev->tre_dma += tre_ring_shift; | ||
777 | lldev->tre_ring += tre_ring_shift; | ||
778 | } | ||
779 | |||
780 | sz = (HIDMA_EVRE_SIZE + 1) * nr_tres; | ||
781 | lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma, | ||
782 | GFP_KERNEL); | ||
783 | if (!lldev->evre_ring) | ||
784 | return NULL; | ||
785 | |||
786 | memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres); | ||
787 | lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres; | ||
788 | |||
789 | /* the EVRE ring has to be EVRE_SIZE aligned */ | ||
790 | if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) { | ||
791 | u8 evre_ring_shift; | ||
792 | |||
793 | evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE; | ||
794 | evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift; | ||
795 | lldev->evre_dma += evre_ring_shift; | ||
796 | lldev->evre_ring += evre_ring_shift; | ||
797 | } | ||
798 | lldev->nr_tres = nr_tres; | ||
799 | lldev->chidx = chidx; | ||
800 | |||
801 | sz = nr_tres * sizeof(struct hidma_tre *); | ||
802 | rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL); | ||
803 | if (rc) | ||
804 | return NULL; | ||
805 | |||
806 | rc = hidma_ll_setup(lldev); | ||
807 | if (rc) | ||
808 | return NULL; | ||
809 | |||
810 | spin_lock_init(&lldev->lock); | ||
811 | tasklet_init(&lldev->rst_task, hidma_ll_abort, (unsigned long)lldev); | ||
812 | tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev); | ||
813 | lldev->initialized = 1; | ||
814 | writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
815 | return lldev; | ||
816 | } | ||
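
On the probe side, hidma_ll_init() is the single entry point and returns NULL on any failure, so a caller only needs a NULL check. A sketch, assuming the TRCA and EVCA register windows were already ioremapped and nr_tres/chidx were read from firmware; the function name is illustrative:

static int hidma_probe_sketch(struct platform_device *pdev,
			      void __iomem *trca, void __iomem *evca,
			      u32 nr_tres, u8 chidx)
{
	struct hidma_lldev *lldev;

	lldev = hidma_ll_init(&pdev->dev, nr_tres, trca, evca, chidx);
	if (!lldev)
		return -ENODEV;

	platform_set_drvdata(pdev, lldev);
	return 0;
}
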
817 | |||
818 | int hidma_ll_uninit(struct hidma_lldev *lldev) | ||
819 | { | ||
820 | u32 required_bytes; | ||
821 | int rc = 0; | ||
822 | u32 val; | ||
823 | |||
824 | if (!lldev) | ||
825 | return -ENODEV; | ||
826 | |||
827 | if (!lldev->initialized) | ||
828 | return 0; | ||
829 | |||
830 | lldev->initialized = 0; | ||
831 | |||
832 | required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres; | ||
833 | tasklet_kill(&lldev->task); | ||
834 | memset(lldev->trepool, 0, required_bytes); | ||
835 | lldev->trepool = NULL; | ||
836 | lldev->pending_tre_count = 0; | ||
837 | lldev->tre_write_offset = 0; | ||
838 | |||
839 | rc = hidma_ll_reset(lldev); | ||
840 | |||
841 | /* | ||
842 | * Clear all pending interrupts again. | ||
843 | * Otherwise, stale reset-complete interrupts get delivered. | ||
844 | */ | ||
845 | val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG); | ||
846 | writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); | ||
847 | writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); | ||
848 | return rc; | ||
849 | } | ||
850 | |||
851 | enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch) | ||
852 | { | ||
853 | enum dma_status ret = DMA_ERROR; | ||
854 | struct hidma_tre *tre; | ||
855 | unsigned long flags; | ||
856 | u8 err_code; | ||
857 | |||
858 | spin_lock_irqsave(&lldev->lock, flags); | ||
859 | |||
860 | tre = &lldev->trepool[tre_ch]; | ||
861 | err_code = tre->err_code; | ||
862 | |||
863 | if (err_code & HIDMA_EVRE_STATUS_COMPLETE) | ||
864 | ret = DMA_COMPLETE; | ||
865 | else if (err_code & HIDMA_EVRE_STATUS_ERROR) | ||
866 | ret = DMA_ERROR; | ||
867 | else | ||
868 | ret = DMA_IN_PROGRESS; | ||
869 | spin_unlock_irqrestore(&lldev->lock, flags); | ||
870 | |||
871 | return ret; | ||
872 | } | ||
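
hidma_ll_status() maps a TRE's recorded err_code onto the generic dma_status values, so a caller could poll it directly instead of waiting for the completion tasklet (lldev->task above). A minimal, illustrative polling wait; the driver itself completes descriptors from the tasklet rather than like this:

#include <linux/delay.h>
#include <linux/dmaengine.h>

/* illustrative busy-wait on one TRE; timeout counted in milliseconds */
static int hidma_wait_for_tre(struct hidma_lldev *lldev, u32 tre_ch,
			      unsigned int timeout_ms)
{
	while (timeout_ms--) {
		enum dma_status st = hidma_ll_status(lldev, tre_ch);

		if (st == DMA_COMPLETE)
			return 0;
		if (st == DMA_ERROR)
			return -EIO;
		usleep_range(1000, 2000);	/* roughly 1ms per iteration */
	}
	return -ETIMEDOUT;
}
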
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c index ef491b893f40..c0e365321310 100644 --- a/drivers/dma/qcom/hidma_mgmt.c +++ b/drivers/dma/qcom/hidma_mgmt.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Qualcomm Technologies HIDMA DMA engine Management interface | 2 | * Qualcomm Technologies HIDMA DMA engine Management interface |
3 | * | 3 | * |
4 | * Copyright (c) 2015, The Linux Foundation. All rights reserved. | 4 | * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 and | 7 | * it under the terms of the GNU General Public License version 2 and |
@@ -17,13 +17,14 @@ | |||
17 | #include <linux/acpi.h> | 17 | #include <linux/acpi.h> |
18 | #include <linux/of.h> | 18 | #include <linux/of.h> |
19 | #include <linux/property.h> | 19 | #include <linux/property.h> |
20 | #include <linux/interrupt.h> | 20 | #include <linux/of_irq.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/of_platform.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/pm_runtime.h> | 25 | #include <linux/pm_runtime.h> |
26 | #include <linux/bitops.h> | 26 | #include <linux/bitops.h> |
27 | #include <linux/dma-mapping.h> | ||
27 | 28 | ||
28 | #include "hidma_mgmt.h" | 29 | #include "hidma_mgmt.h" |
29 | 30 | ||
@@ -298,5 +299,109 @@ static struct platform_driver hidma_mgmt_driver = { | |||
298 | }, | 299 | }, |
299 | }; | 300 | }; |
300 | 301 | ||
301 | module_platform_driver(hidma_mgmt_driver); | 302 | #if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) |
303 | static int object_counter; | ||
304 | |||
305 | static int __init hidma_mgmt_of_populate_channels(struct device_node *np) | ||
306 | { | ||
307 | struct platform_device *pdev_parent = of_find_device_by_node(np); | ||
308 | struct platform_device_info pdevinfo; | ||
309 | struct of_phandle_args out_irq; | ||
310 | struct device_node *child; | ||
311 | struct resource *res; | ||
312 | const __be32 *cell; | ||
313 | int ret = 0, size, i, num; | ||
314 | u64 addr, addr_size; | ||
315 | |||
316 | for_each_available_child_of_node(np, child) { | ||
317 | struct resource *res_iter; | ||
318 | struct platform_device *new_pdev; | ||
319 | |||
320 | cell = of_get_property(child, "reg", &size); | ||
321 | if (!cell) { | ||
322 | ret = -EINVAL; | ||
323 | goto out; | ||
324 | } | ||
325 | |||
326 | size /= sizeof(*cell); | ||
327 | num = size / | ||
328 | (of_n_addr_cells(child) + of_n_size_cells(child)) + 1; | ||
329 | |||
330 | /* allocate a resource array */ | ||
331 | res = kcalloc(num, sizeof(*res), GFP_KERNEL); | ||
332 | if (!res) { | ||
333 | ret = -ENOMEM; | ||
334 | goto out; | ||
335 | } | ||
336 | |||
337 | /* read each reg value */ | ||
338 | i = 0; | ||
339 | res_iter = res; | ||
340 | while (i < size) { | ||
341 | addr = of_read_number(&cell[i], | ||
342 | of_n_addr_cells(child)); | ||
343 | i += of_n_addr_cells(child); | ||
344 | |||
345 | addr_size = of_read_number(&cell[i], | ||
346 | of_n_size_cells(child)); | ||
347 | i += of_n_size_cells(child); | ||
348 | |||
349 | res_iter->start = addr; | ||
350 | res_iter->end = res_iter->start + addr_size - 1; | ||
351 | res_iter->flags = IORESOURCE_MEM; | ||
352 | res_iter++; | ||
353 | } | ||
354 | |||
355 | ret = of_irq_parse_one(child, 0, &out_irq); | ||
356 | if (ret) | ||
357 | goto out; | ||
358 | |||
359 | res_iter->start = irq_create_of_mapping(&out_irq); | ||
360 | res_iter->name = "hidma event irq"; | ||
361 | res_iter->flags = IORESOURCE_IRQ; | ||
362 | |||
363 | memset(&pdevinfo, 0, sizeof(pdevinfo)); | ||
364 | pdevinfo.fwnode = &child->fwnode; | ||
365 | pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL; | ||
366 | pdevinfo.name = child->name; | ||
367 | pdevinfo.id = object_counter++; | ||
368 | pdevinfo.res = res; | ||
369 | pdevinfo.num_res = num; | ||
370 | pdevinfo.data = NULL; | ||
371 | pdevinfo.size_data = 0; | ||
372 | pdevinfo.dma_mask = DMA_BIT_MASK(64); | ||
373 | new_pdev = platform_device_register_full(&pdevinfo); | ||
374 | if (!new_pdev) { | ||
375 | ret = -ENODEV; | ||
376 | goto out; | ||
377 | } | ||
378 | of_dma_configure(&new_pdev->dev, child); | ||
379 | |||
380 | kfree(res); | ||
381 | res = NULL; | ||
382 | } | ||
383 | out: | ||
384 | kfree(res); | ||
385 | |||
386 | return ret; | ||
387 | } | ||
388 | #endif | ||
389 | |||
390 | static int __init hidma_mgmt_init(void) | ||
391 | { | ||
392 | #if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) | ||
393 | struct device_node *child; | ||
394 | |||
395 | for (child = of_find_matching_node(NULL, hidma_mgmt_match); child; | ||
396 | child = of_find_matching_node(child, hidma_mgmt_match)) { | ||
397 | /* firmware described via device tree: populate the channel children */ | ||
398 | hidma_mgmt_of_populate_channels(child); | ||
399 | of_node_put(child); | ||
400 | } | ||
401 | #endif | ||
402 | platform_driver_register(&hidma_mgmt_driver); | ||
403 | |||
404 | return 0; | ||
405 | } | ||
406 | module_init(hidma_mgmt_init); | ||
302 | MODULE_LICENSE("GPL v2"); | 407 | MODULE_LICENSE("GPL v2"); |
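
Each child node is turned into a platform device whose resources follow a fixed order: the memory regions from the child's "reg" property first, then the mapped event interrupt (named "hidma event irq"). On the channel-driver side those resources come back through the usual platform helpers; a sketch, with the function name invented for illustration:

#include <linux/platform_device.h>

static int hidma_chan_get_resources(struct platform_device *pdev)
{
	/* first memory region built from the child's "reg" property */
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* the event interrupt appended by hidma_mgmt_of_populate_channels() */
	int irq = platform_get_irq(pdev, 0);

	if (!mem || irq < 0)
		return -ENODEV;

	return 0;
}
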
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index 520ed1dd5780..4fd7f9802f1b 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c | |||
@@ -144,16 +144,16 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, | |||
144 | struct dw_dma_slave *slave = c->tx_param; | 144 | struct dw_dma_slave *slave = c->tx_param; |
145 | 145 | ||
146 | slave->dma_dev = &dma_dev->dev; | 146 | slave->dma_dev = &dma_dev->dev; |
147 | slave->src_master = 1; | 147 | slave->m_master = 0; |
148 | slave->dst_master = 0; | 148 | slave->p_master = 1; |
149 | } | 149 | } |
150 | 150 | ||
151 | if (c->rx_param) { | 151 | if (c->rx_param) { |
152 | struct dw_dma_slave *slave = c->rx_param; | 152 | struct dw_dma_slave *slave = c->rx_param; |
153 | 153 | ||
154 | slave->dma_dev = &dma_dev->dev; | 154 | slave->dma_dev = &dma_dev->dev; |
155 | slave->src_master = 1; | 155 | slave->m_master = 0; |
156 | slave->dst_master = 0; | 156 | slave->p_master = 1; |
157 | } | 157 | } |
158 | 158 | ||
159 | spi_pdata.dma_filter = lpss_dma_filter; | 159 | spi_pdata.dma_filter = lpss_dma_filter; |
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 98862aa5bb58..5eea74d7f9f4 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
@@ -1454,13 +1454,13 @@ byt_serial_setup(struct serial_private *priv, | |||
1454 | return -EINVAL; | 1454 | return -EINVAL; |
1455 | } | 1455 | } |
1456 | 1456 | ||
1457 | rx_param->src_master = 1; | 1457 | rx_param->m_master = 0; |
1458 | rx_param->dst_master = 0; | 1458 | rx_param->p_master = 1; |
1459 | 1459 | ||
1460 | dma->rxconf.src_maxburst = 16; | 1460 | dma->rxconf.src_maxburst = 16; |
1461 | 1461 | ||
1462 | tx_param->src_master = 1; | 1462 | tx_param->m_master = 0; |
1463 | tx_param->dst_master = 0; | 1463 | tx_param->p_master = 1; |
1464 | 1464 | ||
1465 | dma->txconf.dst_maxburst = 16; | 1465 | dma->txconf.dst_maxburst = 16; |
1466 | 1466 | ||
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 10fe2a211c2e..27e9ec8778eb 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h | |||
@@ -86,7 +86,7 @@ struct pl08x_channel_data { | |||
86 | * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 | 86 | * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 |
87 | */ | 87 | */ |
88 | struct pl08x_platform_data { | 88 | struct pl08x_platform_data { |
89 | const struct pl08x_channel_data *slave_channels; | 89 | struct pl08x_channel_data *slave_channels; |
90 | unsigned int num_slave_channels; | 90 | unsigned int num_slave_channels; |
91 | struct pl08x_channel_data memcpy_channel; | 91 | struct pl08x_channel_data memcpy_channel; |
92 | int (*get_xfer_signal)(const struct pl08x_channel_data *); | 92 | int (*get_xfer_signal)(const struct pl08x_channel_data *); |
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h index 71456442ebe3..f2e538aaddad 100644 --- a/include/linux/dma/dw.h +++ b/include/linux/dma/dw.h | |||
@@ -27,6 +27,7 @@ struct dw_dma; | |||
27 | * @regs: memory mapped I/O space | 27 | * @regs: memory mapped I/O space |
28 | * @clk: hclk clock | 28 | * @clk: hclk clock |
29 | * @dw: struct dw_dma that is filed by dw_dma_probe() | 29 | * @dw: struct dw_dma that is filed by dw_dma_probe() |
30 | * @pdata: pointer to platform data | ||
30 | */ | 31 | */ |
31 | struct dw_dma_chip { | 32 | struct dw_dma_chip { |
32 | struct device *dev; | 33 | struct device *dev; |
@@ -34,10 +35,12 @@ struct dw_dma_chip { | |||
34 | void __iomem *regs; | 35 | void __iomem *regs; |
35 | struct clk *clk; | 36 | struct clk *clk; |
36 | struct dw_dma *dw; | 37 | struct dw_dma *dw; |
38 | |||
39 | const struct dw_dma_platform_data *pdata; | ||
37 | }; | 40 | }; |
38 | 41 | ||
39 | /* Export to the platform drivers */ | 42 | /* Export to the platform drivers */ |
40 | int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata); | 43 | int dw_dma_probe(struct dw_dma_chip *chip); |
41 | int dw_dma_remove(struct dw_dma_chip *chip); | 44 | int dw_dma_remove(struct dw_dma_chip *chip); |
42 | 45 | ||
43 | /* DMA API extensions */ | 46 | /* DMA API extensions */ |
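
With this change a glue driver no longer passes platform data as a second argument; it stores it in the chip structure, or leaves it NULL as the sst-firmware.c caller further below does, before calling dw_dma_probe(). A sketch with an invented wrapper name:

#include <linux/dma/dw.h>

/* illustrative wrapper; dw_dma_probe() now takes only the chip */
static int my_dw_attach(struct dw_dma_chip *chip,
			const struct dw_dma_platform_data *pdata)
{
	chip->pdata = pdata;	/* may be NULL, as in the sst-firmware.c caller below */

	return dw_dma_probe(chip);
}
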
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 017433712833..30de0197263a 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -804,6 +804,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( | |||
804 | sg_dma_address(&sg) = buf; | 804 | sg_dma_address(&sg) = buf; |
805 | sg_dma_len(&sg) = len; | 805 | sg_dma_len(&sg) = len; |
806 | 806 | ||
807 | if (!chan || !chan->device || !chan->device->device_prep_slave_sg) | ||
808 | return NULL; | ||
809 | |||
807 | return chan->device->device_prep_slave_sg(chan, &sg, 1, | 810 | return chan->device->device_prep_slave_sg(chan, &sg, 1, |
808 | dir, flags, NULL); | 811 | dir, flags, NULL); |
809 | } | 812 | } |
@@ -812,6 +815,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg( | |||
812 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | 815 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, |
813 | enum dma_transfer_direction dir, unsigned long flags) | 816 | enum dma_transfer_direction dir, unsigned long flags) |
814 | { | 817 | { |
818 | if (!chan || !chan->device || !chan->device->device_prep_slave_sg) | ||
819 | return NULL; | ||
820 | |||
815 | return chan->device->device_prep_slave_sg(chan, sgl, sg_len, | 821 | return chan->device->device_prep_slave_sg(chan, sgl, sg_len, |
816 | dir, flags, NULL); | 822 | dir, flags, NULL); |
817 | } | 823 | } |
@@ -823,6 +829,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg( | |||
823 | enum dma_transfer_direction dir, unsigned long flags, | 829 | enum dma_transfer_direction dir, unsigned long flags, |
824 | struct rio_dma_ext *rio_ext) | 830 | struct rio_dma_ext *rio_ext) |
825 | { | 831 | { |
832 | if (!chan || !chan->device || !chan->device->device_prep_slave_sg) | ||
833 | return NULL; | ||
834 | |||
826 | return chan->device->device_prep_slave_sg(chan, sgl, sg_len, | 835 | return chan->device->device_prep_slave_sg(chan, sgl, sg_len, |
827 | dir, flags, rio_ext); | 836 | dir, flags, rio_ext); |
828 | } | 837 | } |
@@ -833,6 +842,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( | |||
833 | size_t period_len, enum dma_transfer_direction dir, | 842 | size_t period_len, enum dma_transfer_direction dir, |
834 | unsigned long flags) | 843 | unsigned long flags) |
835 | { | 844 | { |
845 | if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic) | ||
846 | return NULL; | ||
847 | |||
836 | return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, | 848 | return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, |
837 | period_len, dir, flags); | 849 | period_len, dir, flags); |
838 | } | 850 | } |
@@ -841,6 +853,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( | |||
841 | struct dma_chan *chan, struct dma_interleaved_template *xt, | 853 | struct dma_chan *chan, struct dma_interleaved_template *xt, |
842 | unsigned long flags) | 854 | unsigned long flags) |
843 | { | 855 | { |
856 | if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma) | ||
857 | return NULL; | ||
858 | |||
844 | return chan->device->device_prep_interleaved_dma(chan, xt, flags); | 859 | return chan->device->device_prep_interleaved_dma(chan, xt, flags); |
845 | } | 860 | } |
846 | 861 | ||
@@ -848,7 +863,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset( | |||
848 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, | 863 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, |
849 | unsigned long flags) | 864 | unsigned long flags) |
850 | { | 865 | { |
851 | if (!chan || !chan->device) | 866 | if (!chan || !chan->device || !chan->device->device_prep_dma_memset) |
852 | return NULL; | 867 | return NULL; |
853 | 868 | ||
854 | return chan->device->device_prep_dma_memset(chan, dest, value, | 869 | return chan->device->device_prep_dma_memset(chan, dest, value, |
@@ -861,6 +876,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( | |||
861 | struct scatterlist *src_sg, unsigned int src_nents, | 876 | struct scatterlist *src_sg, unsigned int src_nents, |
862 | unsigned long flags) | 877 | unsigned long flags) |
863 | { | 878 | { |
879 | if (!chan || !chan->device || !chan->device->device_prep_dma_sg) | ||
880 | return NULL; | ||
881 | |||
864 | return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents, | 882 | return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents, |
865 | src_sg, src_nents, flags); | 883 | src_sg, src_nents, flags); |
866 | } | 884 | } |
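
These wrappers previously dereferenced chan->device->device_prep_* unconditionally; now a missing channel, device, or callback surfaces as a NULL descriptor instead of a crash. Client code should therefore keep checking the return value, as in this sketch (only the dmaengine calls are real; the surrounding function is illustrative):

#include <linux/dmaengine.h>

static int my_issue_rx(struct dma_chan *chan, dma_addr_t buf, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc)		/* channel cannot prepare this transfer */
		return -EBUSY;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
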
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 03b6095d3b18..d15d8ba8cc24 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h | |||
@@ -21,15 +21,15 @@ | |||
21 | * @dma_dev: required DMA master device | 21 | * @dma_dev: required DMA master device |
22 | * @src_id: src request line | 22 | * @src_id: src request line |
23 | * @dst_id: dst request line | 23 | * @dst_id: dst request line |
24 | * @src_master: src master for transfers on allocated channel. | 24 | * @m_master: memory master for transfers on allocated channel |
25 | * @dst_master: dest master for transfers on allocated channel. | 25 | * @p_master: peripheral master for transfers on allocated channel |
26 | */ | 26 | */ |
27 | struct dw_dma_slave { | 27 | struct dw_dma_slave { |
28 | struct device *dma_dev; | 28 | struct device *dma_dev; |
29 | u8 src_id; | 29 | u8 src_id; |
30 | u8 dst_id; | 30 | u8 dst_id; |
31 | u8 src_master; | 31 | u8 m_master; |
32 | u8 dst_master; | 32 | u8 p_master; |
33 | }; | 33 | }; |
34 | 34 | ||
35 | /** | 35 | /** |
@@ -43,7 +43,7 @@ struct dw_dma_slave { | |||
43 | * @block_size: Maximum block size supported by the controller | 43 | * @block_size: Maximum block size supported by the controller |
44 | * @nr_masters: Number of AHB masters supported by the controller | 44 | * @nr_masters: Number of AHB masters supported by the controller |
45 | * @data_width: Maximum data width supported by hardware per AHB master | 45 | * @data_width: Maximum data width supported by hardware per AHB master |
46 | * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) | 46 | * (in bytes, power of 2) |
47 | */ | 47 | */ |
48 | struct dw_dma_platform_data { | 48 | struct dw_dma_platform_data { |
49 | unsigned int nr_channels; | 49 | unsigned int nr_channels; |
@@ -55,7 +55,7 @@ struct dw_dma_platform_data { | |||
55 | #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ | 55 | #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ |
56 | #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ | 56 | #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ |
57 | unsigned char chan_priority; | 57 | unsigned char chan_priority; |
58 | unsigned short block_size; | 58 | unsigned int block_size; |
59 | unsigned char nr_masters; | 59 | unsigned char nr_masters; |
60 | unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; | 60 | unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; |
61 | }; | 61 | }; |
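
Two conventions change in this header: slaves now name the memory-side and peripheral-side masters explicitly, and data_width[] is specified in bytes (a power of two) instead of the old encoded value. An illustrative initialisation under the new conventions; all values below are made up for the example:

#include <linux/platform_data/dma-dw.h>

static struct dw_dma_platform_data example_pdata = {
	.nr_channels	= 8,
	.chan_priority	= CHAN_PRIORITY_ASCENDING,
	.block_size	= 4095,
	.nr_masters	= 2,
	.data_width	= { 4, 4 },	/* two 32-bit AHB masters */
};

static struct dw_dma_slave example_slave = {
	/* .dma_dev is filled in at runtime with the controller's device */
	.src_id		= 0,
	.dst_id		= 1,
	.m_master	= 0,	/* memory-side master */
	.p_master	= 1,	/* peripheral-side master */
};
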
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c index ef4881e7753a..25993527370b 100644 --- a/sound/soc/intel/common/sst-firmware.c +++ b/sound/soc/intel/common/sst-firmware.c | |||
@@ -203,7 +203,7 @@ static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem, | |||
203 | 203 | ||
204 | chip->dev = dev; | 204 | chip->dev = dev; |
205 | 205 | ||
206 | err = dw_dma_probe(chip, NULL); | 206 | err = dw_dma_probe(chip); |
207 | if (err) | 207 | if (err) |
208 | return ERR_PTR(err); | 208 | return ERR_PTR(err); |
209 | 209 | ||