author		Linus Torvalds <torvalds@linux-foundation.org>	2016-03-17 15:34:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-17 15:34:54 -0400
commit		b5b131c7473e17275debcdf1c226f452dc3876ed (patch)
tree		a272e947c38213d4ee989bb3f863a8091d50426b /drivers/dma/qcom
parent		c7eec380e85a427983782df744f0fb745d867170 (diff)
parent		896e041e8e8efb34520d033a693ef25391f9c9f0 (diff)
Merge tag 'dmaengine-4.6-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
"This is smallish update with minor changes to core and new driver and
usual updates. Nothing super exciting here..
- We have made slave address as physical to enable driver to do the
mapping.
- We now expose the maxburst for slave dma as new capability so
clients can know this and program accordingly
- addition of device synchronize callbacks on omap and edma.
- pl330 updates to support DMAFLUSHP for Rockchip platforms.
- Updates and improved sg handling in Xilinx VDMA driver.
- New hidma qualcomm dma driver, though some bits are still in
progress"
* tag 'dmaengine-4.6-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (40 commits)
dmaengine: IOATDMA: revise channel reset workaround on CB3.3 platforms
dmaengine: add Qualcomm Technologies HIDMA channel driver
dmaengine: add Qualcomm Technologies HIDMA management driver
dmaengine: hidma: Add Device Tree binding
dmaengine: qcom_bam_dma: move to qcom directory
dmaengine: tegra: Move of_device_id table near to its user
dmaengine: xilinx_vdma: Remove unnecessary variable initializations
dmaengine: sirf: use __maybe_unused to hide pm functions
dmaengine: rcar-dmac: clear pertinence number of channels
dmaengine: sh: shdmac: don't open code of_device_get_match_data()
dmaengine: tegra: don't open code of_device_get_match_data()
dmaengine: qcom_bam_dma: Make driver work for BE
dmaengine: sun4i: support module autoloading
dma/mic_x100_dma: IS_ERR() vs PTR_ERR() typo
dmaengine: xilinx_vdma: Use readl_poll_timeout instead of do while loop's
dmaengine: xilinx_vdma: Simplify spin lock handling
dmaengine: xilinx_vdma: Fix issues with non-parking mode
dmaengine: xilinx_vdma: Improve SG engine handling
dmaengine: pl330: fix to support the burst mode
dmaengine: make slave address physical
...
Diffstat (limited to 'drivers/dma/qcom')
-rw-r--r--  drivers/dma/qcom/Kconfig          |   29
-rw-r--r--  drivers/dma/qcom/Makefile         |    3
-rw-r--r--  drivers/dma/qcom/bam_dma.c        | 1262
-rw-r--r--  drivers/dma/qcom/hidma.c          |  706
-rw-r--r--  drivers/dma/qcom/hidma.h          |  160
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c     |  302
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.h     |   39
-rw-r--r--  drivers/dma/qcom/hidma_mgmt_sys.c |  295
8 files changed, 2796 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
new file mode 100644
index 000000000000..a7761c4025f4
--- /dev/null
+++ b/drivers/dma/qcom/Kconfig
@@ -0,0 +1,29 @@
1 | config QCOM_BAM_DMA | ||
2 | tristate "QCOM BAM DMA support" | ||
3 | depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM) | ||
4 | select DMA_ENGINE | ||
5 | select DMA_VIRTUAL_CHANNELS | ||
6 | ---help--- | ||
7 | Enable support for the QCOM BAM DMA controller. This controller | ||
8 | provides DMA capabilities for a variety of on-chip devices. | ||
9 | |||
10 | config QCOM_HIDMA_MGMT | ||
11 | tristate "Qualcomm Technologies HIDMA Management support" | ||
12 | select DMA_ENGINE | ||
13 | help | ||
14 | Enable support for the Qualcomm Technologies HIDMA Management. | ||
15 | Each DMA device requires one management interface driver | ||
16 | for basic initialization before QCOM_HIDMA channel driver can | ||
17 | start managing the channels. In a virtualized environment, | ||
18 | the guest OS would run QCOM_HIDMA channel driver and the | ||
19 | host would run the QCOM_HIDMA_MGMT management driver. | ||
20 | |||
21 | config QCOM_HIDMA | ||
22 | tristate "Qualcomm Technologies HIDMA Channel support" | ||
23 | select DMA_ENGINE | ||
24 | help | ||
25 | Enable support for the Qualcomm Technologies HIDMA controller. | ||
26 | The HIDMA controller supports optimized buffer copies | ||
27 | (user to kernel, kernel to kernel, etc.). It only supports | ||
28 | memcpy interface. The core is not intended for general | ||
29 | purpose slave DMA. | ||
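
Since QCOM_HIDMA exposes only a memcpy interface, a client would drive it
through the generic dmaengine memcpy API rather than the slave API. A
minimal, busy-waiting sketch (assumed usage, error paths trimmed; dst and
src are pre-mapped DMA addresses):

	#include <linux/dmaengine.h>

	static int sketch_dma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *chan;
		dma_cap_mask_t mask;
		dma_cookie_t cookie;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		/* grab any channel advertising DMA_MEMCPY, e.g. hidma */
		chan = dma_request_channel(mask, NULL, NULL);
		if (!chan)
			return -ENODEV;

		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					       DMA_PREP_INTERRUPT);
		if (!tx) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		dma_sync_wait(chan, cookie);	/* poll until complete */

		dma_release_channel(chan);
		return 0;
	}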
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile
new file mode 100644
index 000000000000..bfea6990229f
--- /dev/null
+++ b/drivers/dma/qcom/Makefile
@@ -0,0 +1,3 @@
1 | obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o | ||
2 | obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o | ||
3 | hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o | ||
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
new file mode 100644
index 000000000000..d5e0a9c3ad5d
--- /dev/null
+++ b/drivers/dma/qcom/bam_dma.c
@@ -0,0 +1,1262 @@
1 | /* | ||
2 | * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | */ | ||
14 | /* | ||
15 | * QCOM BAM DMA engine driver | ||
16 | * | ||
17 | * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
18 | * peripherals on the MSM 8x74. The configuration of the channels is dependent
19 | * on the way they are hardwired to that specific peripheral. The peripheral
20 | * device tree entries specify the configuration of each channel.
21 | * | ||
22 | * The DMA controller requires the use of external memory for storage of the | ||
23 | * hardware descriptors for each channel. The descriptor FIFO is accessed as a | ||
24 | * circular buffer and operations are managed according to the offset within the | ||
25 | * FIFO. After pipe/channel reset, all of the pipe registers and internal state | ||
26 | * are back to defaults. | ||
27 | * | ||
28 | * During DMA operations, we write descriptors to the FIFO, being careful to | ||
29 | * handle wrapping and then write the last FIFO offset to that channel's | ||
30 | * P_EVNT_REG register to kick off the transaction. The P_SW_OFSTS register | ||
31 | * indicates the current FIFO offset that is being processed, so there is some | ||
32 | * indication of where the hardware is currently working. | ||
33 | */ | ||
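
In short, submission reduces to modular arithmetic on descriptor slots. An
illustrative condensation of what bam_start_dma() below does when appending
n descriptors (n is a stand-in, not additional driver code):

	/* advance the write pointer, then kick the pipe */
	bchan->tail = (bchan->tail + n) % MAX_DESCRIPTORS;
	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
		       bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));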
34 | |||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/io.h> | ||
37 | #include <linux/init.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <linux/module.h> | ||
40 | #include <linux/interrupt.h> | ||
41 | #include <linux/dma-mapping.h> | ||
42 | #include <linux/scatterlist.h> | ||
43 | #include <linux/device.h> | ||
44 | #include <linux/platform_device.h> | ||
45 | #include <linux/of.h> | ||
46 | #include <linux/of_address.h> | ||
47 | #include <linux/of_irq.h> | ||
48 | #include <linux/of_dma.h> | ||
49 | #include <linux/clk.h> | ||
50 | #include <linux/dmaengine.h> | ||
51 | |||
52 | #include "../dmaengine.h" | ||
53 | #include "../virt-dma.h" | ||
54 | |||
55 | struct bam_desc_hw { | ||
56 | __le32 addr; /* Buffer physical address */ | ||
57 | __le16 size; /* Buffer size in bytes */ | ||
58 | __le16 flags; | ||
59 | }; | ||
60 | |||
61 | #define DESC_FLAG_INT BIT(15) | ||
62 | #define DESC_FLAG_EOT BIT(14) | ||
63 | #define DESC_FLAG_EOB BIT(13) | ||
64 | #define DESC_FLAG_NWD BIT(12) | ||
65 | |||
66 | struct bam_async_desc { | ||
67 | struct virt_dma_desc vd; | ||
68 | |||
69 | u32 num_desc; | ||
70 | u32 xfer_len; | ||
71 | |||
72 | /* transaction flags, EOT|EOB|NWD */ | ||
73 | u16 flags; | ||
74 | |||
75 | struct bam_desc_hw *curr_desc; | ||
76 | |||
77 | enum dma_transfer_direction dir; | ||
78 | size_t length; | ||
79 | struct bam_desc_hw desc[0]; | ||
80 | }; | ||
81 | |||
82 | enum bam_reg { | ||
83 | BAM_CTRL, | ||
84 | BAM_REVISION, | ||
85 | BAM_NUM_PIPES, | ||
86 | BAM_DESC_CNT_TRSHLD, | ||
87 | BAM_IRQ_SRCS, | ||
88 | BAM_IRQ_SRCS_MSK, | ||
89 | BAM_IRQ_SRCS_UNMASKED, | ||
90 | BAM_IRQ_STTS, | ||
91 | BAM_IRQ_CLR, | ||
92 | BAM_IRQ_EN, | ||
93 | BAM_CNFG_BITS, | ||
94 | BAM_IRQ_SRCS_EE, | ||
95 | BAM_IRQ_SRCS_MSK_EE, | ||
96 | BAM_P_CTRL, | ||
97 | BAM_P_RST, | ||
98 | BAM_P_HALT, | ||
99 | BAM_P_IRQ_STTS, | ||
100 | BAM_P_IRQ_CLR, | ||
101 | BAM_P_IRQ_EN, | ||
102 | BAM_P_EVNT_DEST_ADDR, | ||
103 | BAM_P_EVNT_REG, | ||
104 | BAM_P_SW_OFSTS, | ||
105 | BAM_P_DATA_FIFO_ADDR, | ||
106 | BAM_P_DESC_FIFO_ADDR, | ||
107 | BAM_P_EVNT_GEN_TRSHLD, | ||
108 | BAM_P_FIFO_SIZES, | ||
109 | }; | ||
110 | |||
111 | struct reg_offset_data { | ||
112 | u32 base_offset; | ||
113 | unsigned int pipe_mult, evnt_mult, ee_mult; | ||
114 | }; | ||
115 | |||
116 | static const struct reg_offset_data bam_v1_3_reg_info[] = { | ||
117 | [BAM_CTRL] = { 0x0F80, 0x00, 0x00, 0x00 }, | ||
118 | [BAM_REVISION] = { 0x0F84, 0x00, 0x00, 0x00 }, | ||
119 | [BAM_NUM_PIPES] = { 0x0FBC, 0x00, 0x00, 0x00 }, | ||
120 | [BAM_DESC_CNT_TRSHLD] = { 0x0F88, 0x00, 0x00, 0x00 }, | ||
121 | [BAM_IRQ_SRCS] = { 0x0F8C, 0x00, 0x00, 0x00 }, | ||
122 | [BAM_IRQ_SRCS_MSK] = { 0x0F90, 0x00, 0x00, 0x00 }, | ||
123 | [BAM_IRQ_SRCS_UNMASKED] = { 0x0FB0, 0x00, 0x00, 0x00 }, | ||
124 | [BAM_IRQ_STTS] = { 0x0F94, 0x00, 0x00, 0x00 }, | ||
125 | [BAM_IRQ_CLR] = { 0x0F98, 0x00, 0x00, 0x00 }, | ||
126 | [BAM_IRQ_EN] = { 0x0F9C, 0x00, 0x00, 0x00 }, | ||
127 | [BAM_CNFG_BITS] = { 0x0FFC, 0x00, 0x00, 0x00 }, | ||
128 | [BAM_IRQ_SRCS_EE] = { 0x1800, 0x00, 0x00, 0x80 }, | ||
129 | [BAM_IRQ_SRCS_MSK_EE] = { 0x1804, 0x00, 0x00, 0x80 }, | ||
130 | [BAM_P_CTRL] = { 0x0000, 0x80, 0x00, 0x00 }, | ||
131 | [BAM_P_RST] = { 0x0004, 0x80, 0x00, 0x00 }, | ||
132 | [BAM_P_HALT] = { 0x0008, 0x80, 0x00, 0x00 }, | ||
133 | [BAM_P_IRQ_STTS] = { 0x0010, 0x80, 0x00, 0x00 }, | ||
134 | [BAM_P_IRQ_CLR] = { 0x0014, 0x80, 0x00, 0x00 }, | ||
135 | [BAM_P_IRQ_EN] = { 0x0018, 0x80, 0x00, 0x00 }, | ||
136 | [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x40, 0x00 }, | ||
137 | [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x40, 0x00 }, | ||
138 | [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x40, 0x00 }, | ||
139 | [BAM_P_DATA_FIFO_ADDR] = { 0x1024, 0x00, 0x40, 0x00 }, | ||
140 | [BAM_P_DESC_FIFO_ADDR] = { 0x101C, 0x00, 0x40, 0x00 }, | ||
141 | [BAM_P_EVNT_GEN_TRSHLD] = { 0x1028, 0x00, 0x40, 0x00 }, | ||
142 | [BAM_P_FIFO_SIZES] = { 0x1020, 0x00, 0x40, 0x00 }, | ||
143 | }; | ||
144 | |||
145 | static const struct reg_offset_data bam_v1_4_reg_info[] = { | ||
146 | [BAM_CTRL] = { 0x0000, 0x00, 0x00, 0x00 }, | ||
147 | [BAM_REVISION] = { 0x0004, 0x00, 0x00, 0x00 }, | ||
148 | [BAM_NUM_PIPES] = { 0x003C, 0x00, 0x00, 0x00 }, | ||
149 | [BAM_DESC_CNT_TRSHLD] = { 0x0008, 0x00, 0x00, 0x00 }, | ||
150 | [BAM_IRQ_SRCS] = { 0x000C, 0x00, 0x00, 0x00 }, | ||
151 | [BAM_IRQ_SRCS_MSK] = { 0x0010, 0x00, 0x00, 0x00 }, | ||
152 | [BAM_IRQ_SRCS_UNMASKED] = { 0x0030, 0x00, 0x00, 0x00 }, | ||
153 | [BAM_IRQ_STTS] = { 0x0014, 0x00, 0x00, 0x00 }, | ||
154 | [BAM_IRQ_CLR] = { 0x0018, 0x00, 0x00, 0x00 }, | ||
155 | [BAM_IRQ_EN] = { 0x001C, 0x00, 0x00, 0x00 }, | ||
156 | [BAM_CNFG_BITS] = { 0x007C, 0x00, 0x00, 0x00 }, | ||
157 | [BAM_IRQ_SRCS_EE] = { 0x0800, 0x00, 0x00, 0x80 }, | ||
158 | [BAM_IRQ_SRCS_MSK_EE] = { 0x0804, 0x00, 0x00, 0x80 }, | ||
159 | [BAM_P_CTRL] = { 0x1000, 0x1000, 0x00, 0x00 }, | ||
160 | [BAM_P_RST] = { 0x1004, 0x1000, 0x00, 0x00 }, | ||
161 | [BAM_P_HALT] = { 0x1008, 0x1000, 0x00, 0x00 }, | ||
162 | [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 }, | ||
163 | [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 }, | ||
164 | [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 }, | ||
165 | [BAM_P_EVNT_DEST_ADDR] = { 0x182C, 0x00, 0x1000, 0x00 }, | ||
166 | [BAM_P_EVNT_REG] = { 0x1818, 0x00, 0x1000, 0x00 }, | ||
167 | [BAM_P_SW_OFSTS] = { 0x1800, 0x00, 0x1000, 0x00 }, | ||
168 | [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 }, | ||
169 | [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 }, | ||
170 | [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 }, | ||
171 | [BAM_P_FIFO_SIZES] = { 0x1820, 0x00, 0x1000, 0x00 }, | ||
172 | }; | ||
173 | |||
174 | static const struct reg_offset_data bam_v1_7_reg_info[] = { | ||
175 | [BAM_CTRL] = { 0x00000, 0x00, 0x00, 0x00 }, | ||
176 | [BAM_REVISION] = { 0x01000, 0x00, 0x00, 0x00 }, | ||
177 | [BAM_NUM_PIPES] = { 0x01008, 0x00, 0x00, 0x00 }, | ||
178 | [BAM_DESC_CNT_TRSHLD] = { 0x00008, 0x00, 0x00, 0x00 }, | ||
179 | [BAM_IRQ_SRCS] = { 0x03010, 0x00, 0x00, 0x00 }, | ||
180 | [BAM_IRQ_SRCS_MSK] = { 0x03014, 0x00, 0x00, 0x00 }, | ||
181 | [BAM_IRQ_SRCS_UNMASKED] = { 0x03018, 0x00, 0x00, 0x00 }, | ||
182 | [BAM_IRQ_STTS] = { 0x00014, 0x00, 0x00, 0x00 }, | ||
183 | [BAM_IRQ_CLR] = { 0x00018, 0x00, 0x00, 0x00 }, | ||
184 | [BAM_IRQ_EN] = { 0x0001C, 0x00, 0x00, 0x00 }, | ||
185 | [BAM_CNFG_BITS] = { 0x0007C, 0x00, 0x00, 0x00 }, | ||
186 | [BAM_IRQ_SRCS_EE] = { 0x03000, 0x00, 0x00, 0x1000 }, | ||
187 | [BAM_IRQ_SRCS_MSK_EE] = { 0x03004, 0x00, 0x00, 0x1000 }, | ||
188 | [BAM_P_CTRL] = { 0x13000, 0x1000, 0x00, 0x00 }, | ||
189 | [BAM_P_RST] = { 0x13004, 0x1000, 0x00, 0x00 }, | ||
190 | [BAM_P_HALT] = { 0x13008, 0x1000, 0x00, 0x00 }, | ||
191 | [BAM_P_IRQ_STTS] = { 0x13010, 0x1000, 0x00, 0x00 }, | ||
192 | [BAM_P_IRQ_CLR] = { 0x13014, 0x1000, 0x00, 0x00 }, | ||
193 | [BAM_P_IRQ_EN] = { 0x13018, 0x1000, 0x00, 0x00 }, | ||
194 | [BAM_P_EVNT_DEST_ADDR] = { 0x1382C, 0x00, 0x1000, 0x00 }, | ||
195 | [BAM_P_EVNT_REG] = { 0x13818, 0x00, 0x1000, 0x00 }, | ||
196 | [BAM_P_SW_OFSTS] = { 0x13800, 0x00, 0x1000, 0x00 }, | ||
197 | [BAM_P_DATA_FIFO_ADDR] = { 0x13824, 0x00, 0x1000, 0x00 }, | ||
198 | [BAM_P_DESC_FIFO_ADDR] = { 0x1381C, 0x00, 0x1000, 0x00 }, | ||
199 | [BAM_P_EVNT_GEN_TRSHLD] = { 0x13828, 0x00, 0x1000, 0x00 }, | ||
200 | [BAM_P_FIFO_SIZES] = { 0x13820, 0x00, 0x1000, 0x00 }, | ||
201 | }; | ||
202 | |||
203 | /* BAM CTRL */ | ||
204 | #define BAM_SW_RST BIT(0) | ||
205 | #define BAM_EN BIT(1) | ||
206 | #define BAM_EN_ACCUM BIT(4) | ||
207 | #define BAM_TESTBUS_SEL_SHIFT 5 | ||
208 | #define BAM_TESTBUS_SEL_MASK 0x3F | ||
209 | #define BAM_DESC_CACHE_SEL_SHIFT 13 | ||
210 | #define BAM_DESC_CACHE_SEL_MASK 0x3 | ||
211 | #define BAM_CACHED_DESC_STORE BIT(15) | ||
212 | #define IBC_DISABLE BIT(16) | ||
213 | |||
214 | /* BAM REVISION */ | ||
215 | #define REVISION_SHIFT 0 | ||
216 | #define REVISION_MASK 0xFF | ||
217 | #define NUM_EES_SHIFT 8 | ||
218 | #define NUM_EES_MASK 0xF | ||
219 | #define CE_BUFFER_SIZE BIT(13) | ||
220 | #define AXI_ACTIVE BIT(14) | ||
221 | #define USE_VMIDMT BIT(15) | ||
222 | #define SECURED BIT(16) | ||
223 | #define BAM_HAS_NO_BYPASS BIT(17) | ||
224 | #define HIGH_FREQUENCY_BAM BIT(18) | ||
225 | #define INACTIV_TMRS_EXST BIT(19) | ||
226 | #define NUM_INACTIV_TMRS BIT(20) | ||
227 | #define DESC_CACHE_DEPTH_SHIFT 21 | ||
228 | #define DESC_CACHE_DEPTH_1 (0 << DESC_CACHE_DEPTH_SHIFT) | ||
229 | #define DESC_CACHE_DEPTH_2 (1 << DESC_CACHE_DEPTH_SHIFT) | ||
230 | #define DESC_CACHE_DEPTH_3 (2 << DESC_CACHE_DEPTH_SHIFT) | ||
231 | #define DESC_CACHE_DEPTH_4 (3 << DESC_CACHE_DEPTH_SHIFT) | ||
232 | #define CMD_DESC_EN BIT(23) | ||
233 | #define INACTIV_TMR_BASE_SHIFT 24 | ||
234 | #define INACTIV_TMR_BASE_MASK 0xFF | ||
235 | |||
236 | /* BAM NUM PIPES */ | ||
237 | #define BAM_NUM_PIPES_SHIFT 0 | ||
238 | #define BAM_NUM_PIPES_MASK 0xFF | ||
239 | #define PERIPH_NON_PIPE_GRP_SHIFT 16 | ||
240 | #define PERIPH_NON_PIP_GRP_MASK 0xFF | ||
241 | #define BAM_NON_PIPE_GRP_SHIFT 24 | ||
242 | #define BAM_NON_PIPE_GRP_MASK 0xFF | ||
243 | |||
244 | /* BAM CNFG BITS */ | ||
245 | #define BAM_PIPE_CNFG BIT(2) | ||
246 | #define BAM_FULL_PIPE BIT(11) | ||
247 | #define BAM_NO_EXT_P_RST BIT(12) | ||
248 | #define BAM_IBC_DISABLE BIT(13) | ||
249 | #define BAM_SB_CLK_REQ BIT(14) | ||
250 | #define BAM_PSM_CSW_REQ BIT(15) | ||
251 | #define BAM_PSM_P_RES BIT(16) | ||
252 | #define BAM_AU_P_RES BIT(17) | ||
253 | #define BAM_SI_P_RES BIT(18) | ||
254 | #define BAM_WB_P_RES BIT(19) | ||
255 | #define BAM_WB_BLK_CSW BIT(20) | ||
256 | #define BAM_WB_CSW_ACK_IDL BIT(21) | ||
257 | #define BAM_WB_RETR_SVPNT BIT(22) | ||
258 | #define BAM_WB_DSC_AVL_P_RST BIT(23) | ||
259 | #define BAM_REG_P_EN BIT(24) | ||
260 | #define BAM_PSM_P_HD_DATA BIT(25) | ||
261 | #define BAM_AU_ACCUMED BIT(26) | ||
262 | #define BAM_CMD_ENABLE BIT(27) | ||
263 | |||
264 | #define BAM_CNFG_BITS_DEFAULT (BAM_PIPE_CNFG | \ | ||
265 | BAM_NO_EXT_P_RST | \ | ||
266 | BAM_IBC_DISABLE | \ | ||
267 | BAM_SB_CLK_REQ | \ | ||
268 | BAM_PSM_CSW_REQ | \ | ||
269 | BAM_PSM_P_RES | \ | ||
270 | BAM_AU_P_RES | \ | ||
271 | BAM_SI_P_RES | \ | ||
272 | BAM_WB_P_RES | \ | ||
273 | BAM_WB_BLK_CSW | \ | ||
274 | BAM_WB_CSW_ACK_IDL | \ | ||
275 | BAM_WB_RETR_SVPNT | \ | ||
276 | BAM_WB_DSC_AVL_P_RST | \ | ||
277 | BAM_REG_P_EN | \ | ||
278 | BAM_PSM_P_HD_DATA | \ | ||
279 | BAM_AU_ACCUMED | \ | ||
280 | BAM_CMD_ENABLE) | ||
281 | |||
282 | /* PIPE CTRL */ | ||
283 | #define P_EN BIT(1) | ||
284 | #define P_DIRECTION BIT(3) | ||
285 | #define P_SYS_STRM BIT(4) | ||
286 | #define P_SYS_MODE BIT(5) | ||
287 | #define P_AUTO_EOB BIT(6) | ||
288 | #define P_AUTO_EOB_SEL_SHIFT 7 | ||
289 | #define P_AUTO_EOB_SEL_512 (0 << P_AUTO_EOB_SEL_SHIFT) | ||
290 | #define P_AUTO_EOB_SEL_256 (1 << P_AUTO_EOB_SEL_SHIFT) | ||
291 | #define P_AUTO_EOB_SEL_128 (2 << P_AUTO_EOB_SEL_SHIFT) | ||
292 | #define P_AUTO_EOB_SEL_64 (3 << P_AUTO_EOB_SEL_SHIFT) | ||
293 | #define P_PREFETCH_LIMIT_SHIFT 9 | ||
294 | #define P_PREFETCH_LIMIT_32 (0 << P_PREFETCH_LIMIT_SHIFT) | ||
295 | #define P_PREFETCH_LIMIT_16 (1 << P_PREFETCH_LIMIT_SHIFT) | ||
296 | #define P_PREFETCH_LIMIT_4 (2 << P_PREFETCH_LIMIT_SHIFT) | ||
297 | #define P_WRITE_NWD BIT(11) | ||
298 | #define P_LOCK_GROUP_SHIFT 16 | ||
299 | #define P_LOCK_GROUP_MASK 0x1F | ||
300 | |||
301 | /* BAM_DESC_CNT_TRSHLD */ | ||
302 | #define CNT_TRSHLD 0xffff | ||
303 | #define DEFAULT_CNT_THRSHLD 0x4 | ||
304 | |||
305 | /* BAM_IRQ_SRCS */ | ||
306 | #define BAM_IRQ BIT(31) | ||
307 | #define P_IRQ 0x7fffffff | ||
308 | |||
309 | /* BAM_IRQ_SRCS_MSK */ | ||
310 | #define BAM_IRQ_MSK BAM_IRQ | ||
311 | #define P_IRQ_MSK P_IRQ | ||
312 | |||
313 | /* BAM_IRQ_STTS */ | ||
314 | #define BAM_TIMER_IRQ BIT(4) | ||
315 | #define BAM_EMPTY_IRQ BIT(3) | ||
316 | #define BAM_ERROR_IRQ BIT(2) | ||
317 | #define BAM_HRESP_ERR_IRQ BIT(1) | ||
318 | |||
319 | /* BAM_IRQ_CLR */ | ||
320 | #define BAM_TIMER_CLR BIT(4) | ||
321 | #define BAM_EMPTY_CLR BIT(3) | ||
322 | #define BAM_ERROR_CLR BIT(2) | ||
323 | #define BAM_HRESP_ERR_CLR BIT(1) | ||
324 | |||
325 | /* BAM_IRQ_EN */ | ||
326 | #define BAM_TIMER_EN BIT(4) | ||
327 | #define BAM_EMPTY_EN BIT(3) | ||
328 | #define BAM_ERROR_EN BIT(2) | ||
329 | #define BAM_HRESP_ERR_EN BIT(1) | ||
330 | |||
331 | /* BAM_P_IRQ_EN */ | ||
332 | #define P_PRCSD_DESC_EN BIT(0) | ||
333 | #define P_TIMER_EN BIT(1) | ||
334 | #define P_WAKE_EN BIT(2) | ||
335 | #define P_OUT_OF_DESC_EN BIT(3) | ||
336 | #define P_ERR_EN BIT(4) | ||
337 | #define P_TRNSFR_END_EN BIT(5) | ||
338 | #define P_DEFAULT_IRQS_EN (P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN) | ||
339 | |||
340 | /* BAM_P_SW_OFSTS */ | ||
341 | #define P_SW_OFSTS_MASK 0xffff | ||
342 | |||
343 | #define BAM_DESC_FIFO_SIZE SZ_32K | ||
344 | #define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1) | ||
345 | #define BAM_MAX_DATA_SIZE (SZ_32K - 8) | ||
346 | |||
347 | struct bam_chan { | ||
348 | struct virt_dma_chan vc; | ||
349 | |||
350 | struct bam_device *bdev; | ||
351 | |||
352 | /* configuration from device tree */ | ||
353 | u32 id; | ||
354 | |||
355 | struct bam_async_desc *curr_txd; /* current running dma */ | ||
356 | |||
357 | /* runtime configuration */ | ||
358 | struct dma_slave_config slave; | ||
359 | |||
360 | /* fifo storage */ | ||
361 | struct bam_desc_hw *fifo_virt; | ||
362 | dma_addr_t fifo_phys; | ||
363 | |||
364 | /* fifo markers */ | ||
365 | unsigned short head; /* start of active descriptor entries */ | ||
366 | unsigned short tail; /* end of active descriptor entries */ | ||
367 | |||
368 | unsigned int initialized; /* is the channel hw initialized? */ | ||
369 | unsigned int paused; /* is the channel paused? */ | ||
370 | unsigned int reconfigure; /* new slave config? */ | ||
371 | |||
372 | struct list_head node; | ||
373 | }; | ||
374 | |||
375 | static inline struct bam_chan *to_bam_chan(struct dma_chan *common) | ||
376 | { | ||
377 | return container_of(common, struct bam_chan, vc.chan); | ||
378 | } | ||
379 | |||
380 | struct bam_device { | ||
381 | void __iomem *regs; | ||
382 | struct device *dev; | ||
383 | struct dma_device common; | ||
384 | struct device_dma_parameters dma_parms; | ||
385 | struct bam_chan *channels; | ||
386 | u32 num_channels; | ||
387 | |||
388 | /* execution environment ID, from DT */ | ||
389 | u32 ee; | ||
390 | |||
391 | const struct reg_offset_data *layout; | ||
392 | |||
393 | struct clk *bamclk; | ||
394 | int irq; | ||
395 | |||
396 | /* dma start transaction tasklet */ | ||
397 | struct tasklet_struct task; | ||
398 | }; | ||
399 | |||
400 | /** | ||
401 | * bam_addr - returns BAM register address | ||
402 | * @bdev: bam device | ||
403 | * @pipe: pipe instance (ignored when register doesn't have multiple instances) | ||
404 | * @reg: register enum | ||
405 | */ | ||
406 | static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe, | ||
407 | enum bam_reg reg) | ||
408 | { | ||
409 | const struct reg_offset_data r = bdev->layout[reg]; | ||
410 | |||
411 | return bdev->regs + r.base_offset + | ||
412 | r.pipe_mult * pipe + | ||
413 | r.evnt_mult * pipe + | ||
414 | r.ee_mult * bdev->ee; | ||
415 | } | ||
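
A worked example of the offset scheme, reading the bam_v1_4_reg_info table
above (illustrative only):

	/* BAM v1.4, BAM_P_CTRL for pipe 3:
	 * base_offset 0x1000 + 3 * pipe_mult 0x1000 = regs + 0x4000
	 */
	void __iomem *p_ctrl = bam_addr(bdev, 3, BAM_P_CTRL);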
416 | |||
417 | /** | ||
418 | * bam_reset_channel - Reset individual BAM DMA channel | ||
419 | * @bchan: bam channel | ||
420 | * | ||
421 | * This function resets a specific BAM channel | ||
422 | */ | ||
423 | static void bam_reset_channel(struct bam_chan *bchan) | ||
424 | { | ||
425 | struct bam_device *bdev = bchan->bdev; | ||
426 | |||
427 | lockdep_assert_held(&bchan->vc.lock); | ||
428 | |||
429 | /* reset channel */ | ||
430 | writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST)); | ||
431 | writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST)); | ||
432 | |||
433 | /* don't allow cpu to reorder BAM register accesses done after this */ | ||
434 | wmb(); | ||
435 | |||
436 | /* make sure hw is initialized when channel is used the first time */ | ||
437 | bchan->initialized = 0; | ||
438 | } | ||
439 | |||
440 | /** | ||
441 | * bam_chan_init_hw - Initialize channel hardware | ||
442 | * @bchan: bam channel | ||
443 | * | ||
444 | * This function resets and initializes the BAM channel | ||
445 | */ | ||
446 | static void bam_chan_init_hw(struct bam_chan *bchan, | ||
447 | enum dma_transfer_direction dir) | ||
448 | { | ||
449 | struct bam_device *bdev = bchan->bdev; | ||
450 | u32 val; | ||
451 | |||
452 | /* Reset the channel to clear internal state of the FIFO */ | ||
453 | bam_reset_channel(bchan); | ||
454 | |||
455 | /* | ||
456 | * write out 8 byte aligned address. We have enough space for this | ||
457 | * because we allocated 1 more descriptor (8 bytes) than we can use | ||
458 | */ | ||
459 | writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)), | ||
460 | bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR)); | ||
461 | writel_relaxed(BAM_DESC_FIFO_SIZE, | ||
462 | bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES)); | ||
463 | |||
464 | /* enable the per-pipe interrupts: processed descriptor, error, transfer end */
465 | writel_relaxed(P_DEFAULT_IRQS_EN, | ||
466 | bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); | ||
467 | |||
468 | /* unmask the specific pipe and EE combo */ | ||
469 | val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); | ||
470 | val |= BIT(bchan->id); | ||
471 | writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); | ||
472 | |||
473 | /* don't allow cpu to reorder the channel enable done below */ | ||
474 | wmb(); | ||
475 | |||
476 | /* set fixed direction and mode, then enable channel */ | ||
477 | val = P_EN | P_SYS_MODE; | ||
478 | if (dir == DMA_DEV_TO_MEM) | ||
479 | val |= P_DIRECTION; | ||
480 | |||
481 | writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL)); | ||
482 | |||
483 | bchan->initialized = 1; | ||
484 | |||
485 | /* init FIFO pointers */ | ||
486 | bchan->head = 0; | ||
487 | bchan->tail = 0; | ||
488 | } | ||
489 | |||
490 | /** | ||
491 | * bam_alloc_chan - Allocate channel resources for DMA channel. | ||
492 | * @chan: specified channel | ||
493 | * | ||
494 | * This function allocates the FIFO descriptor memory | ||
495 | */ | ||
496 | static int bam_alloc_chan(struct dma_chan *chan) | ||
497 | { | ||
498 | struct bam_chan *bchan = to_bam_chan(chan); | ||
499 | struct bam_device *bdev = bchan->bdev; | ||
500 | |||
501 | if (bchan->fifo_virt) | ||
502 | return 0; | ||
503 | |||
504 | /* allocate FIFO descriptor space, but only if necessary */ | ||
505 | bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE, | ||
506 | &bchan->fifo_phys, GFP_KERNEL); | ||
507 | |||
508 | if (!bchan->fifo_virt) { | ||
509 | dev_err(bdev->dev, "Failed to allocate desc fifo\n"); | ||
510 | return -ENOMEM; | ||
511 | } | ||
512 | |||
513 | return 0; | ||
514 | } | ||
515 | |||
516 | /** | ||
517 | * bam_free_chan - Frees dma resources associated with specific channel | ||
518 | * @chan: specified channel | ||
519 | * | ||
520 | * Free the allocated fifo descriptor memory and channel resources | ||
521 | * | ||
522 | */ | ||
523 | static void bam_free_chan(struct dma_chan *chan) | ||
524 | { | ||
525 | struct bam_chan *bchan = to_bam_chan(chan); | ||
526 | struct bam_device *bdev = bchan->bdev; | ||
527 | u32 val; | ||
528 | unsigned long flags; | ||
529 | |||
530 | vchan_free_chan_resources(to_virt_chan(chan)); | ||
531 | |||
532 | if (bchan->curr_txd) { | ||
533 | dev_err(bchan->bdev->dev, "Cannot free busy channel\n"); | ||
534 | return; | ||
535 | } | ||
536 | |||
537 | spin_lock_irqsave(&bchan->vc.lock, flags); | ||
538 | bam_reset_channel(bchan); | ||
539 | spin_unlock_irqrestore(&bchan->vc.lock, flags); | ||
540 | |||
541 | dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt, | ||
542 | bchan->fifo_phys); | ||
543 | bchan->fifo_virt = NULL; | ||
544 | |||
545 | /* mask irq for pipe/channel */ | ||
546 | val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); | ||
547 | val &= ~BIT(bchan->id); | ||
548 | writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); | ||
549 | |||
550 | /* disable irq */ | ||
551 | writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); | ||
552 | } | ||
553 | |||
554 | /** | ||
555 | * bam_slave_config - set slave configuration for channel | ||
556 | * @chan: dma channel | ||
557 | * @cfg: slave configuration | ||
558 | * | ||
559 | * Sets slave configuration for channel | ||
560 | * | ||
561 | */ | ||
562 | static int bam_slave_config(struct dma_chan *chan, | ||
563 | struct dma_slave_config *cfg) | ||
564 | { | ||
565 | struct bam_chan *bchan = to_bam_chan(chan); | ||
566 | unsigned long flag; | ||
567 | |||
568 | spin_lock_irqsave(&bchan->vc.lock, flag); | ||
569 | memcpy(&bchan->slave, cfg, sizeof(*cfg)); | ||
570 | bchan->reconfigure = 1; | ||
571 | spin_unlock_irqrestore(&bchan->vc.lock, flag); | ||
572 | |||
573 | return 0; | ||
574 | } | ||
575 | |||
576 | /** | ||
577 | * bam_prep_slave_sg - Prep slave sg transaction | ||
578 | * | ||
579 | * @chan: dma channel | ||
580 | * @sgl: scatter gather list | ||
581 | * @sg_len: length of sg | ||
582 | * @direction: DMA transfer direction | ||
583 | * @flags: DMA flags | ||
584 | * @context: transfer context (unused) | ||
585 | */ | ||
586 | static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, | ||
587 | struct scatterlist *sgl, unsigned int sg_len, | ||
588 | enum dma_transfer_direction direction, unsigned long flags, | ||
589 | void *context) | ||
590 | { | ||
591 | struct bam_chan *bchan = to_bam_chan(chan); | ||
592 | struct bam_device *bdev = bchan->bdev; | ||
593 | struct bam_async_desc *async_desc; | ||
594 | struct scatterlist *sg; | ||
595 | u32 i; | ||
596 | struct bam_desc_hw *desc; | ||
597 | unsigned int num_alloc = 0; | ||
598 | |||
599 | |||
600 | if (!is_slave_direction(direction)) { | ||
601 | dev_err(bdev->dev, "invalid dma direction\n"); | ||
602 | return NULL; | ||
603 | } | ||
604 | |||
605 | /* calculate number of required entries */ | ||
606 | for_each_sg(sgl, sg, sg_len, i) | ||
607 | num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE); | ||
608 | |||
609 | /* allocate enough room to accommodate the number of entries */
610 | async_desc = kzalloc(sizeof(*async_desc) + | ||
611 | (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT); | ||
612 | |||
613 | if (!async_desc) | ||
614 | goto err_out; | ||
615 | |||
616 | if (flags & DMA_PREP_FENCE) | ||
617 | async_desc->flags |= DESC_FLAG_NWD; | ||
618 | |||
619 | if (flags & DMA_PREP_INTERRUPT) | ||
620 | async_desc->flags |= DESC_FLAG_EOT; | ||
621 | else | ||
622 | async_desc->flags |= DESC_FLAG_INT; | ||
623 | |||
624 | async_desc->num_desc = num_alloc; | ||
625 | async_desc->curr_desc = async_desc->desc; | ||
626 | async_desc->dir = direction; | ||
627 | |||
628 | /* fill in temporary descriptors */ | ||
629 | desc = async_desc->desc; | ||
630 | for_each_sg(sgl, sg, sg_len, i) { | ||
631 | unsigned int remainder = sg_dma_len(sg); | ||
632 | unsigned int curr_offset = 0; | ||
633 | |||
634 | do { | ||
635 | desc->addr = cpu_to_le32(sg_dma_address(sg) + | ||
636 | curr_offset); | ||
637 | |||
638 | if (remainder > BAM_MAX_DATA_SIZE) { | ||
639 | desc->size = cpu_to_le16(BAM_MAX_DATA_SIZE); | ||
640 | remainder -= BAM_MAX_DATA_SIZE; | ||
641 | curr_offset += BAM_MAX_DATA_SIZE; | ||
642 | } else { | ||
643 | desc->size = cpu_to_le16(remainder); | ||
644 | remainder = 0; | ||
645 | } | ||
646 | |||
647 | async_desc->length += desc->size; | ||
648 | desc++; | ||
649 | } while (remainder > 0); | ||
650 | } | ||
651 | |||
652 | return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags); | ||
653 | |||
654 | err_out: | ||
655 | kfree(async_desc); | ||
656 | return NULL; | ||
657 | } | ||
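
A peripheral client reaches this prep routine through the generic slave
API; the usual sequence looks roughly like this (a hedged sketch, variable
names and values hypothetical):

	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = 16,	/* ends up in BAM_DESC_CNT_TRSHLD */
	};
	struct dma_async_tx_descriptor *tx;

	dmaengine_slave_config(chan, &cfg);	/* -> bam_slave_config() */
	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT);
	if (tx) {
		dmaengine_submit(tx);
		dma_async_issue_pending(chan);	/* -> bam_start_dma() */
	}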
658 | |||
659 | /** | ||
660 | * bam_dma_terminate_all - terminate all transactions on a channel | ||
661 | * @bchan: bam dma channel | ||
662 | * | ||
663 | * Dequeues and frees all transactions | ||
664 | * No callbacks are done | ||
665 | * | ||
666 | */ | ||
667 | static int bam_dma_terminate_all(struct dma_chan *chan) | ||
668 | { | ||
669 | struct bam_chan *bchan = to_bam_chan(chan); | ||
670 | unsigned long flag; | ||
671 | LIST_HEAD(head); | ||
672 | |||
673 | /* remove all transactions, including active transaction */ | ||
674 | spin_lock_irqsave(&bchan->vc.lock, flag); | ||
675 | if (bchan->curr_txd) { | ||
676 | list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued); | ||
677 | bchan->curr_txd = NULL; | ||
678 | } | ||
679 | |||
680 | vchan_get_all_descriptors(&bchan->vc, &head); | ||
681 | spin_unlock_irqrestore(&bchan->vc.lock, flag); | ||
682 | |||
683 | vchan_dma_desc_free_list(&bchan->vc, &head); | ||
684 | |||
685 | return 0; | ||
686 | } | ||
687 | |||
688 | /** | ||
689 | * bam_pause - Pause DMA channel | ||
690 | * @chan: dma channel | ||
691 | * | ||
692 | */ | ||
693 | static int bam_pause(struct dma_chan *chan) | ||
694 | { | ||
695 | struct bam_chan *bchan = to_bam_chan(chan); | ||
696 | struct bam_device *bdev = bchan->bdev; | ||
697 | unsigned long flag; | ||
698 | |||
699 | spin_lock_irqsave(&bchan->vc.lock, flag); | ||
700 | writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); | ||
701 | bchan->paused = 1; | ||
702 | spin_unlock_irqrestore(&bchan->vc.lock, flag); | ||
703 | |||
704 | return 0; | ||
705 | } | ||
706 | |||
707 | /** | ||
708 | * bam_resume - Resume DMA channel operations | ||
709 | * @chan: dma channel | ||
710 | * | ||
711 | */ | ||
712 | static int bam_resume(struct dma_chan *chan) | ||
713 | { | ||
714 | struct bam_chan *bchan = to_bam_chan(chan); | ||
715 | struct bam_device *bdev = bchan->bdev; | ||
716 | unsigned long flag; | ||
717 | |||
718 | spin_lock_irqsave(&bchan->vc.lock, flag); | ||
719 | writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); | ||
720 | bchan->paused = 0; | ||
721 | spin_unlock_irqrestore(&bchan->vc.lock, flag); | ||
722 | |||
723 | return 0; | ||
724 | } | ||
725 | |||
726 | /** | ||
727 | * process_channel_irqs - processes the channel interrupts | ||
728 | * @bdev: bam controller | ||
729 | * | ||
730 | * This function processes the channel interrupts | ||
731 | * | ||
732 | */ | ||
733 | static u32 process_channel_irqs(struct bam_device *bdev) | ||
734 | { | ||
735 | u32 i, srcs, pipe_stts; | ||
736 | unsigned long flags; | ||
737 | struct bam_async_desc *async_desc; | ||
738 | |||
739 | srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE)); | ||
740 | |||
741 | /* return early if no pipe/channel interrupts are present */ | ||
742 | if (!(srcs & P_IRQ)) | ||
743 | return srcs; | ||
744 | |||
745 | for (i = 0; i < bdev->num_channels; i++) { | ||
746 | struct bam_chan *bchan = &bdev->channels[i]; | ||
747 | |||
748 | if (!(srcs & BIT(i))) | ||
749 | continue; | ||
750 | |||
751 | /* clear pipe irq */ | ||
752 | pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS)); | ||
753 | |||
754 | writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR)); | ||
755 | |||
756 | spin_lock_irqsave(&bchan->vc.lock, flags); | ||
757 | async_desc = bchan->curr_txd; | ||
758 | |||
759 | if (async_desc) { | ||
760 | async_desc->num_desc -= async_desc->xfer_len; | ||
761 | async_desc->curr_desc += async_desc->xfer_len; | ||
762 | bchan->curr_txd = NULL; | ||
763 | |||
764 | /* manage FIFO */ | ||
765 | bchan->head += async_desc->xfer_len; | ||
766 | bchan->head %= MAX_DESCRIPTORS; | ||
767 | |||
768 | /* | ||
769 | * if complete, process cookie. Otherwise | ||
770 | * push back to front of desc_issued so that | ||
771 | * it gets restarted by the tasklet | ||
772 | */ | ||
773 | if (!async_desc->num_desc) | ||
774 | vchan_cookie_complete(&async_desc->vd); | ||
775 | else | ||
776 | list_add(&async_desc->vd.node, | ||
777 | &bchan->vc.desc_issued); | ||
778 | } | ||
779 | |||
780 | spin_unlock_irqrestore(&bchan->vc.lock, flags); | ||
781 | } | ||
782 | |||
783 | return srcs; | ||
784 | } | ||
785 | |||
786 | /** | ||
787 | * bam_dma_irq - irq handler for bam controller | ||
788 | * @irq: IRQ of interrupt | ||
789 | * @data: callback data | ||
790 | * | ||
791 | * IRQ handler for the bam controller | ||
792 | */ | ||
793 | static irqreturn_t bam_dma_irq(int irq, void *data) | ||
794 | { | ||
795 | struct bam_device *bdev = data; | ||
796 | u32 clr_mask = 0, srcs = 0; | ||
797 | |||
798 | srcs |= process_channel_irqs(bdev); | ||
799 | |||
800 | /* kick off tasklet to start next dma transfer */ | ||
801 | if (srcs & P_IRQ) | ||
802 | tasklet_schedule(&bdev->task); | ||
803 | |||
804 | if (srcs & BAM_IRQ) | ||
805 | clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS)); | ||
806 | |||
807 | /* don't allow reorder of the various accesses to the BAM registers */ | ||
808 | mb(); | ||
809 | |||
810 | writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR)); | ||
811 | |||
812 | return IRQ_HANDLED; | ||
813 | } | ||
814 | |||
815 | /** | ||
816 | * bam_tx_status - returns status of transaction | ||
817 | * @chan: dma channel | ||
818 | * @cookie: transaction cookie | ||
819 | * @txstate: DMA transaction state | ||
820 | * | ||
821 | * Return status of dma transaction | ||
822 | */ | ||
823 | static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | ||
824 | struct dma_tx_state *txstate) | ||
825 | { | ||
826 | struct bam_chan *bchan = to_bam_chan(chan); | ||
827 | struct virt_dma_desc *vd; | ||
828 | int ret; | ||
829 | size_t residue = 0; | ||
830 | unsigned int i; | ||
831 | unsigned long flags; | ||
832 | |||
833 | ret = dma_cookie_status(chan, cookie, txstate); | ||
834 | if (ret == DMA_COMPLETE) | ||
835 | return ret; | ||
836 | |||
837 | if (!txstate) | ||
838 | return bchan->paused ? DMA_PAUSED : ret; | ||
839 | |||
840 | spin_lock_irqsave(&bchan->vc.lock, flags); | ||
841 | vd = vchan_find_desc(&bchan->vc, cookie); | ||
842 | if (vd) | ||
843 | residue = container_of(vd, struct bam_async_desc, vd)->length; | ||
844 | else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie) | ||
845 | for (i = 0; i < bchan->curr_txd->num_desc; i++) | ||
846 | residue += bchan->curr_txd->curr_desc[i].size; | ||
847 | |||
848 | spin_unlock_irqrestore(&bchan->vc.lock, flags); | ||
849 | |||
850 | dma_set_residue(txstate, residue); | ||
851 | |||
852 | if (ret == DMA_IN_PROGRESS && bchan->paused) | ||
853 | ret = DMA_PAUSED; | ||
854 | |||
855 | return ret; | ||
856 | } | ||
857 | |||
858 | /** | ||
859 | * bam_apply_new_config - apply the latest slave configuration to the channel
860 | * @bchan: bam dma channel | ||
861 | * @dir: DMA direction | ||
862 | */ | ||
863 | static void bam_apply_new_config(struct bam_chan *bchan, | ||
864 | enum dma_transfer_direction dir) | ||
865 | { | ||
866 | struct bam_device *bdev = bchan->bdev; | ||
867 | u32 maxburst; | ||
868 | |||
869 | if (dir == DMA_DEV_TO_MEM) | ||
870 | maxburst = bchan->slave.src_maxburst; | ||
871 | else | ||
872 | maxburst = bchan->slave.dst_maxburst; | ||
873 | |||
874 | writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); | ||
875 | |||
876 | bchan->reconfigure = 0; | ||
877 | } | ||
878 | |||
879 | /** | ||
880 | * bam_start_dma - start next transaction | ||
881 | * @bchan: bam dma channel
882 | */ | ||
883 | static void bam_start_dma(struct bam_chan *bchan) | ||
884 | { | ||
885 | struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc); | ||
886 | struct bam_device *bdev = bchan->bdev; | ||
887 | struct bam_async_desc *async_desc; | ||
888 | struct bam_desc_hw *desc; | ||
889 | struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt, | ||
890 | sizeof(struct bam_desc_hw)); | ||
891 | |||
892 | lockdep_assert_held(&bchan->vc.lock); | ||
893 | |||
894 | if (!vd) | ||
895 | return; | ||
896 | |||
897 | list_del(&vd->node); | ||
898 | |||
899 | async_desc = container_of(vd, struct bam_async_desc, vd); | ||
900 | bchan->curr_txd = async_desc; | ||
901 | |||
902 | /* on first use, initialize the channel hardware */ | ||
903 | if (!bchan->initialized) | ||
904 | bam_chan_init_hw(bchan, async_desc->dir); | ||
905 | |||
906 | /* apply new slave config changes, if necessary */ | ||
907 | if (bchan->reconfigure) | ||
908 | bam_apply_new_config(bchan, async_desc->dir); | ||
909 | |||
910 | desc = bchan->curr_txd->curr_desc; | ||
911 | |||
912 | if (async_desc->num_desc > MAX_DESCRIPTORS) | ||
913 | async_desc->xfer_len = MAX_DESCRIPTORS; | ||
914 | else | ||
915 | async_desc->xfer_len = async_desc->num_desc; | ||
916 | |||
917 | /* set any special flags on the last descriptor */ | ||
918 | if (async_desc->num_desc == async_desc->xfer_len) | ||
919 | desc[async_desc->xfer_len - 1].flags = | ||
920 | cpu_to_le16(async_desc->flags); | ||
921 | else | ||
922 | desc[async_desc->xfer_len - 1].flags |= | ||
923 | cpu_to_le16(DESC_FLAG_INT); | ||
924 | |||
925 | if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { | ||
926 | u32 partial = MAX_DESCRIPTORS - bchan->tail; | ||
927 | |||
928 | memcpy(&fifo[bchan->tail], desc, | ||
929 | partial * sizeof(struct bam_desc_hw)); | ||
930 | memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) * | ||
931 | sizeof(struct bam_desc_hw)); | ||
932 | } else { | ||
933 | memcpy(&fifo[bchan->tail], desc, | ||
934 | async_desc->xfer_len * sizeof(struct bam_desc_hw)); | ||
935 | } | ||
936 | |||
937 | bchan->tail += async_desc->xfer_len; | ||
938 | bchan->tail %= MAX_DESCRIPTORS; | ||
939 | |||
940 | /* ensure descriptor writes and dma start not reordered */ | ||
941 | wmb(); | ||
942 | writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw), | ||
943 | bam_addr(bdev, bchan->id, BAM_P_EVNT_REG)); | ||
944 | } | ||
945 | |||
946 | /** | ||
947 | * dma_tasklet - DMA IRQ tasklet | ||
948 | * @data: tasklet argument (bam controller structure) | ||
949 | * | ||
950 | * Sets up next DMA operation and then processes all completed transactions | ||
951 | */ | ||
952 | static void dma_tasklet(unsigned long data) | ||
953 | { | ||
954 | struct bam_device *bdev = (struct bam_device *)data; | ||
955 | struct bam_chan *bchan; | ||
956 | unsigned long flags; | ||
957 | unsigned int i; | ||
958 | |||
959 | /* go through the channels and kick off transactions */ | ||
960 | for (i = 0; i < bdev->num_channels; i++) { | ||
961 | bchan = &bdev->channels[i]; | ||
962 | spin_lock_irqsave(&bchan->vc.lock, flags); | ||
963 | |||
964 | if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd) | ||
965 | bam_start_dma(bchan); | ||
966 | spin_unlock_irqrestore(&bchan->vc.lock, flags); | ||
967 | } | ||
968 | } | ||
969 | |||
970 | /** | ||
971 | * bam_issue_pending - starts pending transactions | ||
972 | * @chan: dma channel | ||
973 | * | ||
974 | * Calls tasklet directly which in turn starts any pending transactions | ||
975 | */ | ||
976 | static void bam_issue_pending(struct dma_chan *chan) | ||
977 | { | ||
978 | struct bam_chan *bchan = to_bam_chan(chan); | ||
979 | unsigned long flags; | ||
980 | |||
981 | spin_lock_irqsave(&bchan->vc.lock, flags); | ||
982 | |||
983 | /* if work pending and idle, start a transaction */ | ||
984 | if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd) | ||
985 | bam_start_dma(bchan); | ||
986 | |||
987 | spin_unlock_irqrestore(&bchan->vc.lock, flags); | ||
988 | } | ||
989 | |||
990 | /** | ||
991 | * bam_dma_free_desc - free descriptor memory | ||
992 | * @vd: virtual descriptor | ||
993 | * | ||
994 | */ | ||
995 | static void bam_dma_free_desc(struct virt_dma_desc *vd) | ||
996 | { | ||
997 | struct bam_async_desc *async_desc = container_of(vd, | ||
998 | struct bam_async_desc, vd); | ||
999 | |||
1000 | kfree(async_desc); | ||
1001 | } | ||
1002 | |||
1003 | static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec, | ||
1004 | struct of_dma *of) | ||
1005 | { | ||
1006 | struct bam_device *bdev = container_of(of->of_dma_data, | ||
1007 | struct bam_device, common); | ||
1008 | unsigned int request; | ||
1009 | |||
1010 | if (dma_spec->args_count != 1) | ||
1011 | return NULL; | ||
1012 | |||
1013 | request = dma_spec->args[0]; | ||
1014 | if (request >= bdev->num_channels) | ||
1015 | return NULL; | ||
1016 | |||
1017 | return dma_get_slave_channel(&(bdev->channels[request].vc.chan)); | ||
1018 | } | ||
1019 | |||
1020 | /** | ||
1021 | * bam_init | ||
1022 | * @bdev: bam device | ||
1023 | * | ||
1024 | * Initialization helper for global bam registers | ||
1025 | */ | ||
1026 | static int bam_init(struct bam_device *bdev) | ||
1027 | { | ||
1028 | u32 val; | ||
1029 | |||
1030 | /* read revision and configuration information */ | ||
1031 | val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT; | ||
1032 | val &= NUM_EES_MASK; | ||
1033 | |||
1034 | /* check that configured EE is within range */ | ||
1035 | if (bdev->ee >= val) | ||
1036 | return -EINVAL; | ||
1037 | |||
1038 | val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); | ||
1039 | bdev->num_channels = val & BAM_NUM_PIPES_MASK; | ||
1040 | |||
1041 | /* s/w reset bam */ | ||
1042 | /* after reset all pipes are disabled and idle */ | ||
1043 | val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL)); | ||
1044 | val |= BAM_SW_RST; | ||
1045 | writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); | ||
1046 | val &= ~BAM_SW_RST; | ||
1047 | writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); | ||
1048 | |||
1049 | /* make sure previous stores are visible before enabling BAM */ | ||
1050 | wmb(); | ||
1051 | |||
1052 | /* enable bam */ | ||
1053 | val |= BAM_EN; | ||
1054 | writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); | ||
1055 | |||
1056 | /* set descriptor threshold, start with 4 bytes */
1057 | writel_relaxed(DEFAULT_CNT_THRSHLD, | ||
1058 | bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); | ||
1059 | |||
1060 | /* Enable default set of h/w workarounds, i.e. all except BAM_FULL_PIPE */
1061 | writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS)); | ||
1062 | |||
1063 | /* enable irqs for errors */ | ||
1064 | writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN, | ||
1065 | bam_addr(bdev, 0, BAM_IRQ_EN)); | ||
1066 | |||
1067 | /* unmask global bam interrupt */ | ||
1068 | writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); | ||
1069 | |||
1070 | return 0; | ||
1071 | } | ||
1072 | |||
1073 | static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan, | ||
1074 | u32 index) | ||
1075 | { | ||
1076 | bchan->id = index; | ||
1077 | bchan->bdev = bdev; | ||
1078 | |||
1079 | vchan_init(&bchan->vc, &bdev->common); | ||
1080 | bchan->vc.desc_free = bam_dma_free_desc; | ||
1081 | } | ||
1082 | |||
1083 | static const struct of_device_id bam_of_match[] = { | ||
1084 | { .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info }, | ||
1085 | { .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info }, | ||
1086 | { .compatible = "qcom,bam-v1.7.0", .data = &bam_v1_7_reg_info }, | ||
1087 | {} | ||
1088 | }; | ||
1089 | |||
1090 | MODULE_DEVICE_TABLE(of, bam_of_match); | ||
1091 | |||
1092 | static int bam_dma_probe(struct platform_device *pdev) | ||
1093 | { | ||
1094 | struct bam_device *bdev; | ||
1095 | const struct of_device_id *match; | ||
1096 | struct resource *iores; | ||
1097 | int ret, i; | ||
1098 | |||
1099 | bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL); | ||
1100 | if (!bdev) | ||
1101 | return -ENOMEM; | ||
1102 | |||
1103 | bdev->dev = &pdev->dev; | ||
1104 | |||
1105 | match = of_match_node(bam_of_match, pdev->dev.of_node); | ||
1106 | if (!match) { | ||
1107 | dev_err(&pdev->dev, "Unsupported BAM module\n"); | ||
1108 | return -ENODEV; | ||
1109 | } | ||
1110 | |||
1111 | bdev->layout = match->data; | ||
1112 | |||
1113 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1114 | bdev->regs = devm_ioremap_resource(&pdev->dev, iores); | ||
1115 | if (IS_ERR(bdev->regs)) | ||
1116 | return PTR_ERR(bdev->regs); | ||
1117 | |||
1118 | bdev->irq = platform_get_irq(pdev, 0); | ||
1119 | if (bdev->irq < 0) | ||
1120 | return bdev->irq; | ||
1121 | |||
1122 | ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee); | ||
1123 | if (ret) { | ||
1124 | dev_err(bdev->dev, "Execution environment unspecified\n"); | ||
1125 | return ret; | ||
1126 | } | ||
1127 | |||
1128 | bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk"); | ||
1129 | if (IS_ERR(bdev->bamclk)) | ||
1130 | return PTR_ERR(bdev->bamclk); | ||
1131 | |||
1132 | ret = clk_prepare_enable(bdev->bamclk); | ||
1133 | if (ret) { | ||
1134 | dev_err(bdev->dev, "failed to prepare/enable clock\n"); | ||
1135 | return ret; | ||
1136 | } | ||
1137 | |||
1138 | ret = bam_init(bdev); | ||
1139 | if (ret) | ||
1140 | goto err_disable_clk; | ||
1141 | |||
1142 | tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev); | ||
1143 | |||
1144 | bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels, | ||
1145 | sizeof(*bdev->channels), GFP_KERNEL); | ||
1146 | |||
1147 | if (!bdev->channels) { | ||
1148 | ret = -ENOMEM; | ||
1149 | goto err_tasklet_kill; | ||
1150 | } | ||
1151 | |||
1152 | /* allocate and initialize channels */ | ||
1153 | INIT_LIST_HEAD(&bdev->common.channels); | ||
1154 | |||
1155 | for (i = 0; i < bdev->num_channels; i++) | ||
1156 | bam_channel_init(bdev, &bdev->channels[i], i); | ||
1157 | |||
1158 | ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq, | ||
1159 | IRQF_TRIGGER_HIGH, "bam_dma", bdev); | ||
1160 | if (ret) | ||
1161 | goto err_bam_channel_exit; | ||
1162 | |||
1163 | /* set max dma segment size */ | ||
1164 | bdev->common.dev = bdev->dev; | ||
1165 | bdev->common.dev->dma_parms = &bdev->dma_parms; | ||
1166 | ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE); | ||
1167 | if (ret) { | ||
1168 | dev_err(bdev->dev, "cannot set maximum segment size\n"); | ||
1169 | goto err_bam_channel_exit; | ||
1170 | } | ||
1171 | |||
1172 | platform_set_drvdata(pdev, bdev); | ||
1173 | |||
1174 | /* set capabilities */ | ||
1175 | dma_cap_zero(bdev->common.cap_mask); | ||
1176 | dma_cap_set(DMA_SLAVE, bdev->common.cap_mask); | ||
1177 | |||
1178 | /* initialize dmaengine apis */ | ||
1179 | bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
1180 | bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | ||
1181 | bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
1182 | bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
1183 | bdev->common.device_alloc_chan_resources = bam_alloc_chan; | ||
1184 | bdev->common.device_free_chan_resources = bam_free_chan; | ||
1185 | bdev->common.device_prep_slave_sg = bam_prep_slave_sg; | ||
1186 | bdev->common.device_config = bam_slave_config; | ||
1187 | bdev->common.device_pause = bam_pause; | ||
1188 | bdev->common.device_resume = bam_resume; | ||
1189 | bdev->common.device_terminate_all = bam_dma_terminate_all; | ||
1190 | bdev->common.device_issue_pending = bam_issue_pending; | ||
1191 | bdev->common.device_tx_status = bam_tx_status; | ||
1192 | bdev->common.dev = bdev->dev; | ||
1193 | |||
1194 | ret = dma_async_device_register(&bdev->common); | ||
1195 | if (ret) { | ||
1196 | dev_err(bdev->dev, "failed to register dma async device\n"); | ||
1197 | goto err_bam_channel_exit; | ||
1198 | } | ||
1199 | |||
1200 | ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate, | ||
1201 | &bdev->common); | ||
1202 | if (ret) | ||
1203 | goto err_unregister_dma; | ||
1204 | |||
1205 | return 0; | ||
1206 | |||
1207 | err_unregister_dma: | ||
1208 | dma_async_device_unregister(&bdev->common); | ||
1209 | err_bam_channel_exit: | ||
1210 | for (i = 0; i < bdev->num_channels; i++) | ||
1211 | tasklet_kill(&bdev->channels[i].vc.task); | ||
1212 | err_tasklet_kill: | ||
1213 | tasklet_kill(&bdev->task); | ||
1214 | err_disable_clk: | ||
1215 | clk_disable_unprepare(bdev->bamclk); | ||
1216 | |||
1217 | return ret; | ||
1218 | } | ||
1219 | |||
1220 | static int bam_dma_remove(struct platform_device *pdev) | ||
1221 | { | ||
1222 | struct bam_device *bdev = platform_get_drvdata(pdev); | ||
1223 | u32 i; | ||
1224 | |||
1225 | of_dma_controller_free(pdev->dev.of_node); | ||
1226 | dma_async_device_unregister(&bdev->common); | ||
1227 | |||
1228 | /* mask all interrupts for this execution environment */ | ||
1229 | writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); | ||
1230 | |||
1231 | devm_free_irq(bdev->dev, bdev->irq, bdev); | ||
1232 | |||
1233 | for (i = 0; i < bdev->num_channels; i++) { | ||
1234 | bam_dma_terminate_all(&bdev->channels[i].vc.chan); | ||
1235 | tasklet_kill(&bdev->channels[i].vc.task); | ||
1236 | |||
1237 | dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, | ||
1238 | bdev->channels[i].fifo_virt, | ||
1239 | bdev->channels[i].fifo_phys); | ||
1240 | } | ||
1241 | |||
1242 | tasklet_kill(&bdev->task); | ||
1243 | |||
1244 | clk_disable_unprepare(bdev->bamclk); | ||
1245 | |||
1246 | return 0; | ||
1247 | } | ||
1248 | |||
1249 | static struct platform_driver bam_dma_driver = { | ||
1250 | .probe = bam_dma_probe, | ||
1251 | .remove = bam_dma_remove, | ||
1252 | .driver = { | ||
1253 | .name = "bam-dma-engine", | ||
1254 | .of_match_table = bam_of_match, | ||
1255 | }, | ||
1256 | }; | ||
1257 | |||
1258 | module_platform_driver(bam_dma_driver); | ||
1259 | |||
1260 | MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>"); | ||
1261 | MODULE_DESCRIPTION("QCOM BAM DMA engine driver"); | ||
1262 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
new file mode 100644
index 000000000000..cccc78efbca9
--- /dev/null
+++ b/drivers/dma/qcom/hidma.c
@@ -0,0 +1,706 @@
1 | /* | ||
2 | * Qualcomm Technologies HIDMA DMA engine interface | ||
3 | * | ||
4 | * Copyright (c) 2015, The Linux Foundation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 and | ||
8 | * only version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | /* | ||
17 | * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
18 | * Copyright (C) Semihalf 2009 | ||
19 | * Copyright (C) Ilya Yanok, Emcraft Systems 2010 | ||
20 | * Copyright (C) Alexander Popov, Promcontroller 2014 | ||
21 | * | ||
22 | * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description | ||
23 | * (defines, structures and comments) was taken from MPC5121 DMA driver | ||
24 | * written by Hongjun Chen <hong-jun.chen@freescale.com>. | ||
25 | * | ||
26 | * Approved as OSADL project by a majority of OSADL members and funded | ||
27 | * by OSADL membership fees in 2009; for details see www.osadl.org. | ||
28 | * | ||
29 | * This program is free software; you can redistribute it and/or modify it | ||
30 | * under the terms of the GNU General Public License as published by the Free | ||
31 | * Software Foundation; either version 2 of the License, or (at your option) | ||
32 | * any later version. | ||
33 | * | ||
34 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
35 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
36 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
37 | * more details. | ||
38 | * | ||
39 | * The full GNU General Public License is included in this distribution in the | ||
40 | * file called COPYING. | ||
41 | */ | ||
42 | |||
43 | /* Linux Foundation elects GPLv2 license only. */ | ||
44 | |||
45 | #include <linux/dmaengine.h> | ||
46 | #include <linux/dma-mapping.h> | ||
47 | #include <linux/list.h> | ||
48 | #include <linux/module.h> | ||
49 | #include <linux/platform_device.h> | ||
50 | #include <linux/slab.h> | ||
51 | #include <linux/spinlock.h> | ||
52 | #include <linux/of_dma.h> | ||
53 | #include <linux/property.h> | ||
54 | #include <linux/delay.h> | ||
55 | #include <linux/acpi.h> | ||
56 | #include <linux/irq.h> | ||
57 | #include <linux/atomic.h> | ||
58 | #include <linux/pm_runtime.h> | ||
59 | |||
60 | #include "../dmaengine.h" | ||
61 | #include "hidma.h" | ||
62 | |||
63 | /* | ||
64 | * Default idle time is 2 seconds. This parameter can
65 | * be overridden at run time by writing to
66 | * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
67 | * after the device has probed.
68 | */ | ||
69 | #define HIDMA_AUTOSUSPEND_TIMEOUT 2000 | ||
70 | #define HIDMA_ERR_INFO_SW 0xFF | ||
71 | #define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0 | ||
72 | #define HIDMA_NR_DEFAULT_DESC 10 | ||
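
The delay above is presumably hooked into runtime PM at probe time, outside
this excerpt; the standard wiring would be:

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);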
73 | |||
74 | static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev) | ||
75 | { | ||
76 | return container_of(dmadev, struct hidma_dev, ddev); | ||
77 | } | ||
78 | |||
79 | static inline | ||
80 | struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp) | ||
81 | { | ||
82 | return container_of(_lldevp, struct hidma_dev, lldev); | ||
83 | } | ||
84 | |||
85 | static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach) | ||
86 | { | ||
87 | return container_of(dmach, struct hidma_chan, chan); | ||
88 | } | ||
89 | |||
90 | static inline | ||
91 | struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t) | ||
92 | { | ||
93 | return container_of(t, struct hidma_desc, desc); | ||
94 | } | ||
95 | |||
96 | static void hidma_free(struct hidma_dev *dmadev) | ||
97 | { | ||
98 | INIT_LIST_HEAD(&dmadev->ddev.channels); | ||
99 | } | ||
100 | |||
101 | static unsigned int nr_desc_prm; | ||
102 | module_param(nr_desc_prm, uint, 0644); | ||
103 | MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)"); | ||
104 | |||
105 | |||
106 | /* process completed descriptors */ | ||
107 | static void hidma_process_completed(struct hidma_chan *mchan) | ||
108 | { | ||
109 | struct dma_device *ddev = mchan->chan.device; | ||
110 | struct hidma_dev *mdma = to_hidma_dev(ddev); | ||
111 | struct dma_async_tx_descriptor *desc; | ||
112 | dma_cookie_t last_cookie; | ||
113 | struct hidma_desc *mdesc; | ||
114 | unsigned long irqflags; | ||
115 | struct list_head list; | ||
116 | |||
117 | INIT_LIST_HEAD(&list); | ||
118 | |||
119 | /* Get all completed descriptors */ | ||
120 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
121 | list_splice_tail_init(&mchan->completed, &list); | ||
122 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
123 | |||
124 | /* Execute callbacks and run dependencies */ | ||
125 | list_for_each_entry(mdesc, &list, node) { | ||
126 | enum dma_status llstat; | ||
127 | |||
128 | desc = &mdesc->desc; | ||
129 | |||
130 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
131 | dma_cookie_complete(desc); | ||
132 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
133 | |||
134 | llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); | ||
135 | if (desc->callback && (llstat == DMA_COMPLETE)) | ||
136 | desc->callback(desc->callback_param); | ||
137 | |||
138 | last_cookie = desc->cookie; | ||
139 | dma_run_dependencies(desc); | ||
140 | } | ||
141 | |||
142 | /* Free descriptors */ | ||
143 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
144 | list_splice_tail_init(&list, &mchan->free); | ||
145 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
146 | |||
147 | } | ||
148 | |||
149 | /* | ||
150 | * Called once for each submitted descriptor. | ||
151 | * PM is locked once for each descriptor that is currently | ||
152 | * in execution. | ||
153 | */ | ||
154 | static void hidma_callback(void *data) | ||
155 | { | ||
156 | struct hidma_desc *mdesc = data; | ||
157 | struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan); | ||
158 | struct dma_device *ddev = mchan->chan.device; | ||
159 | struct hidma_dev *dmadev = to_hidma_dev(ddev); | ||
160 | unsigned long irqflags; | ||
161 | bool queued = false; | ||
162 | |||
163 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
164 | if (mdesc->node.next) { | ||
165 | /* Delete from the active list, add to completed list */ | ||
166 | list_move_tail(&mdesc->node, &mchan->completed); | ||
167 | queued = true; | ||
168 | |||
169 | /* calculate the next running descriptor */ | ||
170 | mchan->running = list_first_entry(&mchan->active, | ||
171 | struct hidma_desc, node); | ||
172 | } | ||
173 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
174 | |||
175 | hidma_process_completed(mchan); | ||
176 | |||
177 | if (queued) { | ||
178 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
179 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
180 | } | ||
181 | } | ||
182 | |||
183 | static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig) | ||
184 | { | ||
185 | struct hidma_chan *mchan; | ||
186 | struct dma_device *ddev; | ||
187 | |||
188 | mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL); | ||
189 | if (!mchan) | ||
190 | return -ENOMEM; | ||
191 | |||
192 | ddev = &dmadev->ddev; | ||
193 | mchan->dma_sig = dma_sig; | ||
194 | mchan->dmadev = dmadev; | ||
195 | mchan->chan.device = ddev; | ||
196 | dma_cookie_init(&mchan->chan); | ||
197 | |||
198 | INIT_LIST_HEAD(&mchan->free); | ||
199 | INIT_LIST_HEAD(&mchan->prepared); | ||
200 | INIT_LIST_HEAD(&mchan->active); | ||
201 | INIT_LIST_HEAD(&mchan->completed); | ||
202 | |||
203 | spin_lock_init(&mchan->lock); | ||
204 | list_add_tail(&mchan->chan.device_node, &ddev->channels); | ||
205 | dmadev->ddev.chancnt++; | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static void hidma_issue_task(unsigned long arg) | ||
210 | { | ||
211 | struct hidma_dev *dmadev = (struct hidma_dev *)arg; | ||
212 | |||
213 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
214 | hidma_ll_start(dmadev->lldev); | ||
215 | } | ||
216 | |||
217 | static void hidma_issue_pending(struct dma_chan *dmach) | ||
218 | { | ||
219 | struct hidma_chan *mchan = to_hidma_chan(dmach); | ||
220 | struct hidma_dev *dmadev = mchan->dmadev; | ||
221 | unsigned long flags; | ||
222 | int status; | ||
223 | |||
224 | spin_lock_irqsave(&mchan->lock, flags); | ||
225 | if (!mchan->running) { | ||
226 | struct hidma_desc *desc = list_first_entry(&mchan->active, | ||
227 | struct hidma_desc, | ||
228 | node); | ||
229 | mchan->running = desc; | ||
230 | } | ||
231 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
232 | |||
233 | /* PM will be released in hidma_callback function. */ | ||
234 | status = pm_runtime_get(dmadev->ddev.dev); | ||
235 | if (status < 0) | ||
236 | tasklet_schedule(&dmadev->task); | ||
237 | else | ||
238 | hidma_ll_start(dmadev->lldev); | ||
239 | } | ||
240 | |||
241 | static enum dma_status hidma_tx_status(struct dma_chan *dmach, | ||
242 | dma_cookie_t cookie, | ||
243 | struct dma_tx_state *txstate) | ||
244 | { | ||
245 | struct hidma_chan *mchan = to_hidma_chan(dmach); | ||
246 | enum dma_status ret; | ||
247 | |||
248 | ret = dma_cookie_status(dmach, cookie, txstate); | ||
249 | if (ret == DMA_COMPLETE) | ||
250 | return ret; | ||
251 | |||
252 | if (mchan->paused && (ret == DMA_IN_PROGRESS)) { | ||
253 | unsigned long flags; | ||
254 | dma_cookie_t runcookie; | ||
255 | |||
256 | spin_lock_irqsave(&mchan->lock, flags); | ||
257 | if (mchan->running) | ||
258 | runcookie = mchan->running->desc.cookie; | ||
259 | else | ||
260 | runcookie = -EINVAL; | ||
261 | |||
262 | if (runcookie == cookie) | ||
263 | ret = DMA_PAUSED; | ||
264 | |||
265 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
266 | } | ||
267 | |||
268 | return ret; | ||
269 | } | ||
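
A dmaengine client normally reaches this callback through the generic cookie helpers rather than calling device_tx_status directly. A minimal client-side sketch, assuming a channel and cookie were obtained elsewhere (the function name is illustrative):

#include <linux/dmaengine.h>

/* Illustrative only: check whether a submitted transfer has finished. */
static bool example_memcpy_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	/* For a HIDMA channel this routes into hidma_tx_status(). */
	return dmaengine_tx_status(chan, cookie, &state) == DMA_COMPLETE;
}
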
270 | |||
271 | /* | ||
272 | * Submit descriptor to hardware. | ||
273 | * Lock the PM for each descriptor we are sending. | ||
274 | */ | ||
275 | static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd) | ||
276 | { | ||
277 | struct hidma_chan *mchan = to_hidma_chan(txd->chan); | ||
278 | struct hidma_dev *dmadev = mchan->dmadev; | ||
279 | struct hidma_desc *mdesc; | ||
280 | unsigned long irqflags; | ||
281 | dma_cookie_t cookie; | ||
282 | |||
283 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
284 | if (!hidma_ll_isenabled(dmadev->lldev)) { | ||
285 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
286 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
287 | return -ENODEV; | ||
288 | } | ||
289 | |||
290 | mdesc = container_of(txd, struct hidma_desc, desc); | ||
291 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
292 | |||
293 | /* Move descriptor to active */ | ||
294 | list_move_tail(&mdesc->node, &mchan->active); | ||
295 | |||
296 | /* Update cookie */ | ||
297 | cookie = dma_cookie_assign(txd); | ||
298 | |||
299 | hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch); | ||
300 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
301 | |||
302 | return cookie; | ||
303 | } | ||
304 | |||
305 | static int hidma_alloc_chan_resources(struct dma_chan *dmach) | ||
306 | { | ||
307 | struct hidma_chan *mchan = to_hidma_chan(dmach); | ||
308 | struct hidma_dev *dmadev = mchan->dmadev; | ||
309 | struct hidma_desc *mdesc, *tmp; | ||
310 | unsigned long irqflags; | ||
311 | LIST_HEAD(descs); | ||
312 | unsigned int i; | ||
313 | int rc = 0; | ||
314 | |||
315 | if (mchan->allocated) | ||
316 | return 0; | ||
317 | |||
318 | /* Alloc descriptors for this channel */ | ||
319 | for (i = 0; i < dmadev->nr_descriptors; i++) { | ||
320 | mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT); | ||
321 | if (!mdesc) { | ||
322 | rc = -ENOMEM; | ||
323 | break; | ||
324 | } | ||
325 | dma_async_tx_descriptor_init(&mdesc->desc, dmach); | ||
326 | mdesc->desc.tx_submit = hidma_tx_submit; | ||
327 | |||
328 | rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig, | ||
329 | "DMA engine", hidma_callback, mdesc, | ||
330 | &mdesc->tre_ch); | ||
331 | if (rc) { | ||
332 | dev_err(dmach->device->dev, | ||
333 | "channel alloc failed at %u\n", i); | ||
334 | kfree(mdesc); | ||
335 | break; | ||
336 | } | ||
337 | list_add_tail(&mdesc->node, &descs); | ||
338 | } | ||
339 | |||
340 | if (rc) { | ||
341 | /* return the allocated descriptors */ | ||
342 | list_for_each_entry_safe(mdesc, tmp, &descs, node) { | ||
343 | hidma_ll_free(dmadev->lldev, mdesc->tre_ch); | ||
344 | kfree(mdesc); | ||
345 | } | ||
346 | return rc; | ||
347 | } | ||
348 | |||
349 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
350 | list_splice_tail_init(&descs, &mchan->free); | ||
351 | mchan->allocated = true; | ||
352 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
353 | return 1; | ||
354 | } | ||
355 | |||
356 | static struct dma_async_tx_descriptor * | ||
357 | hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src, | ||
358 | size_t len, unsigned long flags) | ||
359 | { | ||
360 | struct hidma_chan *mchan = to_hidma_chan(dmach); | ||
361 | struct hidma_desc *mdesc = NULL; | ||
362 | struct hidma_dev *mdma = mchan->dmadev; | ||
363 | unsigned long irqflags; | ||
364 | |||
365 | /* Get free descriptor */ | ||
366 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
367 | if (!list_empty(&mchan->free)) { | ||
368 | mdesc = list_first_entry(&mchan->free, struct hidma_desc, node); | ||
369 | list_del(&mdesc->node); | ||
370 | } | ||
371 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
372 | |||
373 | if (!mdesc) | ||
374 | return NULL; | ||
375 | |||
376 | hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, | ||
377 | src, dest, len, flags); | ||
378 | |||
379 | /* Place descriptor in prepared list */ | ||
380 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
381 | list_add_tail(&mdesc->node, &mchan->prepared); | ||
382 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
383 | |||
384 | return &mdesc->desc; | ||
385 | } | ||
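
For context, a hedged sketch of the standard client sequence that exercises this hook: grab a DMA_MEMCPY-capable channel, prepare a descriptor (which lands in hidma_prep_dma_memcpy here), submit, then kick the engine. The addresses are assumed to be valid DMA addresses and error handling is abbreviated:

#include <linux/dmaengine.h>

static int example_issue_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);		/* -> hidma_tx_submit() */
	dma_async_issue_pending(chan);		/* -> hidma_issue_pending() */
	return dma_submit_error(cookie) ? -EINVAL : 0;
}
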
386 | |||
387 | static int hidma_terminate_channel(struct dma_chan *chan) | ||
388 | { | ||
389 | struct hidma_chan *mchan = to_hidma_chan(chan); | ||
390 | struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device); | ||
391 | struct hidma_desc *tmp, *mdesc; | ||
392 | unsigned long irqflags; | ||
393 | LIST_HEAD(list); | ||
394 | int rc; | ||
395 | |||
396 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
397 | /* give completed requests a chance to finish */ | ||
398 | hidma_process_completed(mchan); | ||
399 | |||
400 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
401 | list_splice_init(&mchan->active, &list); | ||
402 | list_splice_init(&mchan->prepared, &list); | ||
403 | list_splice_init(&mchan->completed, &list); | ||
404 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
405 | |||
406 | /* this suspends the existing transfer */ | ||
407 | rc = hidma_ll_pause(dmadev->lldev); | ||
408 | if (rc) { | ||
409 | dev_err(dmadev->ddev.dev, "channel did not pause\n"); | ||
410 | goto out; | ||
411 | } | ||
412 | |||
413 | /* return all user requests */ | ||
414 | list_for_each_entry_safe(mdesc, tmp, &list, node) { | ||
415 | struct dma_async_tx_descriptor *txd = &mdesc->desc; | ||
416 | dma_async_tx_callback callback = mdesc->desc.callback; | ||
417 | void *param = mdesc->desc.callback_param; | ||
418 | |||
419 | dma_descriptor_unmap(txd); | ||
420 | |||
421 | if (callback) | ||
422 | callback(param); | ||
423 | |||
424 | dma_run_dependencies(txd); | ||
425 | |||
426 | /* move myself to free_list */ | ||
427 | list_move(&mdesc->node, &mchan->free); | ||
428 | } | ||
429 | |||
430 | rc = hidma_ll_resume(dmadev->lldev); | ||
431 | out: | ||
432 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
433 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
434 | return rc; | ||
435 | } | ||
436 | |||
437 | static int hidma_terminate_all(struct dma_chan *chan) | ||
438 | { | ||
439 | struct hidma_chan *mchan = to_hidma_chan(chan); | ||
440 | struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device); | ||
441 | int rc; | ||
442 | |||
443 | rc = hidma_terminate_channel(chan); | ||
444 | if (rc) | ||
445 | return rc; | ||
446 | |||
447 | /* reinitialize the hardware */ | ||
448 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
449 | rc = hidma_ll_setup(dmadev->lldev); | ||
450 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
451 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
452 | return rc; | ||
453 | } | ||
454 | |||
455 | static void hidma_free_chan_resources(struct dma_chan *dmach) | ||
456 | { | ||
457 | struct hidma_chan *mchan = to_hidma_chan(dmach); | ||
458 | struct hidma_dev *mdma = mchan->dmadev; | ||
459 | struct hidma_desc *mdesc, *tmp; | ||
460 | unsigned long irqflags; | ||
461 | LIST_HEAD(descs); | ||
462 | |||
463 | /* terminate running transactions and free descriptors */ | ||
464 | hidma_terminate_channel(dmach); | ||
465 | |||
466 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
467 | |||
468 | /* Move data */ | ||
469 | list_splice_tail_init(&mchan->free, &descs); | ||
470 | |||
471 | /* Free descriptors */ | ||
472 | list_for_each_entry_safe(mdesc, tmp, &descs, node) { | ||
473 | hidma_ll_free(mdma->lldev, mdesc->tre_ch); | ||
474 | list_del(&mdesc->node); | ||
475 | kfree(mdesc); | ||
476 | } | ||
477 | |||
478 | mchan->allocated = 0; | ||
479 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
480 | } | ||
481 | |||
482 | static int hidma_pause(struct dma_chan *chan) | ||
483 | { | ||
484 | struct hidma_chan *mchan; | ||
485 | struct hidma_dev *dmadev; | ||
486 | |||
487 | mchan = to_hidma_chan(chan); | ||
488 | dmadev = to_hidma_dev(mchan->chan.device); | ||
489 | if (!mchan->paused) { | ||
490 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
491 | if (hidma_ll_pause(dmadev->lldev)) | ||
492 | dev_warn(dmadev->ddev.dev, "channel did not stop\n"); | ||
493 | mchan->paused = true; | ||
494 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
495 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
496 | } | ||
497 | return 0; | ||
498 | } | ||
499 | |||
500 | static int hidma_resume(struct dma_chan *chan) | ||
501 | { | ||
502 | struct hidma_chan *mchan; | ||
503 | struct hidma_dev *dmadev; | ||
504 | int rc = 0; | ||
505 | |||
506 | mchan = to_hidma_chan(chan); | ||
507 | dmadev = to_hidma_dev(mchan->chan.device); | ||
508 | if (mchan->paused) { | ||
509 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
510 | rc = hidma_ll_resume(dmadev->lldev); | ||
511 | if (!rc) | ||
512 | mchan->paused = false; | ||
513 | else | ||
514 | dev_err(dmadev->ddev.dev, | ||
515 | "failed to resume the channel"); | ||
516 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
517 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
518 | } | ||
519 | return rc; | ||
520 | } | ||
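
Both callbacks are reached through the generic dmaengine wrappers; a minimal sketch, assuming a channel already held by the client:

/* Illustrative only: quiesce the channel around a critical section. */
static int example_pause_resume(struct dma_chan *chan)
{
	int ret;

	ret = dmaengine_pause(chan);		/* -> hidma_pause() */
	if (ret)
		return ret;

	/* ... transfers are suspended here; channel state is retained ... */

	return dmaengine_resume(chan);		/* -> hidma_resume() */
}
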
521 | |||
522 | static irqreturn_t hidma_chirq_handler(int chirq, void *arg) | ||
523 | { | ||
524 | struct hidma_lldev *lldev = arg; | ||
525 | |||
526 | /* | ||
527 | * All interrupts are request driven. | ||
528 | * HW doesn't send an interrupt by itself. | ||
529 | */ | ||
530 | return hidma_ll_inthandler(chirq, lldev); | ||
531 | } | ||
532 | |||
533 | static int hidma_probe(struct platform_device *pdev) | ||
534 | { | ||
535 | struct hidma_dev *dmadev; | ||
536 | struct resource *trca_resource; | ||
537 | struct resource *evca_resource; | ||
538 | int chirq; | ||
539 | void __iomem *evca; | ||
540 | void __iomem *trca; | ||
541 | int rc; | ||
542 | |||
543 | pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT); | ||
544 | pm_runtime_use_autosuspend(&pdev->dev); | ||
545 | pm_runtime_set_active(&pdev->dev); | ||
546 | pm_runtime_enable(&pdev->dev); | ||
547 | |||
548 | trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
549 | trca = devm_ioremap_resource(&pdev->dev, trca_resource); | ||
550 | if (IS_ERR(trca)) { | ||
551 | rc = PTR_ERR(trca); | ||
552 | goto bailout; | ||
553 | } | ||
554 | |||
555 | evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
556 | evca = devm_ioremap_resource(&pdev->dev, evca_resource); | ||
557 | if (IS_ERR(evca)) { | ||
558 | rc = PTR_ERR(evca); | ||
559 | goto bailout; | ||
560 | } | ||
561 | |||
562 | /* | ||
563 | * This driver only handles the channel IRQs. | ||
564 | * Common IRQ is handled by the management driver. | ||
565 | */ | ||
566 | chirq = platform_get_irq(pdev, 0); | ||
567 | if (chirq < 0) { | ||
568 | rc = chirq; | ||
569 | goto bailout; | ||
570 | } | ||
571 | |||
572 | dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL); | ||
573 | if (!dmadev) { | ||
574 | rc = -ENOMEM; | ||
575 | goto bailout; | ||
576 | } | ||
577 | |||
578 | INIT_LIST_HEAD(&dmadev->ddev.channels); | ||
579 | spin_lock_init(&dmadev->lock); | ||
580 | dmadev->ddev.dev = &pdev->dev; | ||
581 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
582 | |||
583 | dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask); | ||
584 | if (WARN_ON(!pdev->dev.dma_mask)) { | ||
585 | rc = -ENXIO; | ||
586 | goto dmafree; | ||
587 | } | ||
588 | |||
589 | dmadev->dev_evca = evca; | ||
590 | dmadev->evca_resource = evca_resource; | ||
591 | dmadev->dev_trca = trca; | ||
592 | dmadev->trca_resource = trca_resource; | ||
593 | dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy; | ||
594 | dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources; | ||
595 | dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources; | ||
596 | dmadev->ddev.device_tx_status = hidma_tx_status; | ||
597 | dmadev->ddev.device_issue_pending = hidma_issue_pending; | ||
598 | dmadev->ddev.device_pause = hidma_pause; | ||
599 | dmadev->ddev.device_resume = hidma_resume; | ||
600 | dmadev->ddev.device_terminate_all = hidma_terminate_all; | ||
601 | dmadev->ddev.copy_align = 8; | ||
602 | |||
603 | device_property_read_u32(&pdev->dev, "desc-count", | ||
604 | &dmadev->nr_descriptors); | ||
605 | |||
606 | if (!dmadev->nr_descriptors && nr_desc_prm) | ||
607 | dmadev->nr_descriptors = nr_desc_prm; | ||
608 | |||
609 | if (!dmadev->nr_descriptors) | ||
610 | dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC; | ||
611 | |||
612 | dmadev->chidx = readl(dmadev->dev_trca + 0x28); | ||
613 | |||
614 | /* Set DMA mask to 64 bits. */ | ||
615 | rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); | ||
616 | if (rc) { | ||
617 | dev_warn(&pdev->dev, "unable to set coherent mask to 64 bits\n"); | ||
618 | rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | ||
619 | if (rc) | ||
620 | goto dmafree; | ||
621 | } | ||
622 | |||
623 | dmadev->lldev = hidma_ll_init(dmadev->ddev.dev, | ||
624 | dmadev->nr_descriptors, dmadev->dev_trca, | ||
625 | dmadev->dev_evca, dmadev->chidx); | ||
626 | if (!dmadev->lldev) { | ||
627 | rc = -EPROBE_DEFER; | ||
628 | goto dmafree; | ||
629 | } | ||
630 | |||
631 | rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0, | ||
632 | "qcom-hidma", dmadev->lldev); | ||
633 | if (rc) | ||
634 | goto uninit; | ||
635 | |||
636 | INIT_LIST_HEAD(&dmadev->ddev.channels); | ||
637 | rc = hidma_chan_init(dmadev, 0); | ||
638 | if (rc) | ||
639 | goto uninit; | ||
640 | |||
641 | rc = dma_async_device_register(&dmadev->ddev); | ||
642 | if (rc) | ||
643 | goto uninit; | ||
644 | |||
645 | dmadev->irq = chirq; | ||
646 | tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev); | ||
647 | dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n"); | ||
648 | platform_set_drvdata(pdev, dmadev); | ||
649 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
650 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
651 | return 0; | ||
652 | |||
653 | uninit: | ||
654 | hidma_ll_uninit(dmadev->lldev); | ||
655 | dmafree: | ||
656 | if (dmadev) | ||
657 | hidma_free(dmadev); | ||
658 | bailout: | ||
659 | pm_runtime_put_sync(&pdev->dev); | ||
660 | pm_runtime_disable(&pdev->dev); | ||
661 | return rc; | ||
662 | } | ||
663 | |||
664 | static int hidma_remove(struct platform_device *pdev) | ||
665 | { | ||
666 | struct hidma_dev *dmadev = platform_get_drvdata(pdev); | ||
667 | |||
668 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
669 | dma_async_device_unregister(&dmadev->ddev); | ||
670 | devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev); | ||
671 | hidma_ll_uninit(dmadev->lldev); | ||
672 | hidma_free(dmadev); | ||
673 | |||
674 | dev_info(&pdev->dev, "HI-DMA engine removed\n"); | ||
675 | pm_runtime_put_sync_suspend(&pdev->dev); | ||
676 | pm_runtime_disable(&pdev->dev); | ||
677 | |||
678 | return 0; | ||
679 | } | ||
680 | |||
681 | #if IS_ENABLED(CONFIG_ACPI) | ||
682 | static const struct acpi_device_id hidma_acpi_ids[] = { | ||
683 | {"QCOM8061"}, | ||
684 | {}, | ||
685 | }; | ||
686 | #endif | ||
687 | |||
688 | static const struct of_device_id hidma_match[] = { | ||
689 | {.compatible = "qcom,hidma-1.0",}, | ||
690 | {}, | ||
691 | }; | ||
692 | |||
693 | MODULE_DEVICE_TABLE(of, hidma_match); | ||
694 | |||
695 | static struct platform_driver hidma_driver = { | ||
696 | .probe = hidma_probe, | ||
697 | .remove = hidma_remove, | ||
698 | .driver = { | ||
699 | .name = "hidma", | ||
700 | .of_match_table = hidma_match, | ||
701 | .acpi_match_table = ACPI_PTR(hidma_acpi_ids), | ||
702 | }, | ||
703 | }; | ||
704 | |||
705 | module_platform_driver(hidma_driver); | ||
706 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h new file mode 100644 index 000000000000..231e306f6d87 --- /dev/null +++ b/drivers/dma/qcom/hidma.h | |||
@@ -0,0 +1,160 @@ | |||
1 | /* | ||
2 | * Qualcomm Technologies HIDMA data structures | ||
3 | * | ||
4 | * Copyright (c) 2014, The Linux Foundation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 and | ||
8 | * only version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef QCOM_HIDMA_H | ||
17 | #define QCOM_HIDMA_H | ||
18 | |||
19 | #include <linux/kfifo.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/dmaengine.h> | ||
22 | |||
23 | #define TRE_SIZE 32 /* each TRE is 32 bytes */ | ||
24 | #define TRE_CFG_IDX 0 | ||
25 | #define TRE_LEN_IDX 1 | ||
26 | #define TRE_SRC_LOW_IDX 2 | ||
27 | #define TRE_SRC_HI_IDX 3 | ||
28 | #define TRE_DEST_LOW_IDX 4 | ||
29 | #define TRE_DEST_HI_IDX 5 | ||
30 | |||
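
These indices name 32-bit word slots inside a 32-byte TRE. A hedged illustration of how a copy request could be laid out in such an array; the real encoding, including the config word, lives in the low-level hidma_ll code that is not part of this diff:

/* Illustrative only: pack a memcpy request into TRE word slots. */
static void example_fill_tre(u32 *tre, dma_addr_t src, dma_addr_t dest, u32 len)
{
	tre[TRE_LEN_IDX]      = len;
	tre[TRE_SRC_LOW_IDX]  = lower_32_bits(src);
	tre[TRE_SRC_HI_IDX]   = upper_32_bits(src);
	tre[TRE_DEST_LOW_IDX] = lower_32_bits(dest);
	tre[TRE_DEST_HI_IDX]  = upper_32_bits(dest);
	/* tre[TRE_CFG_IDX] would carry hardware-specific config bits. */
}
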
31 | struct hidma_tx_status { | ||
32 | u8 err_info; /* error record in this transfer */ | ||
33 | u8 err_code; /* completion code */ | ||
34 | }; | ||
35 | |||
36 | struct hidma_tre { | ||
37 | atomic_t allocated; /* if this channel is allocated */ | ||
38 | bool queued; /* flag whether this is pending */ | ||
39 | u16 status; /* status */ | ||
40 | u32 chidx; /* index of the tre */ | ||
41 | u32 dma_sig; /* signature of the tre */ | ||
42 | const char *dev_name; /* name of the device */ | ||
43 | void (*callback)(void *data); /* requester callback */ | ||
44 | void *data; /* data associated with this channel */ | ||
45 | struct hidma_lldev *lldev; /* low-level DMA device pointer */ | ||
46 | u32 tre_local[TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */ | ||
47 | u32 tre_index; /* the offset where this was written */ | ||
48 | u32 int_flags; /* interrupt flags */ | ||
49 | }; | ||
50 | |||
51 | struct hidma_lldev { | ||
52 | bool initialized; /* initialized flag */ | ||
53 | u8 trch_state; /* trch_state of the device */ | ||
54 | u8 evch_state; /* evch_state of the device */ | ||
55 | u8 chidx; /* channel index in the core */ | ||
56 | u32 nr_tres; /* max number of configs */ | ||
57 | spinlock_t lock; /* reentrancy */ | ||
58 | struct hidma_tre *trepool; /* trepool of user configs */ | ||
59 | struct device *dev; /* device */ | ||
60 | void __iomem *trca; /* Transfer Channel address */ | ||
61 | void __iomem *evca; /* Event Channel address */ | ||
62 | struct hidma_tre | ||
63 | **pending_tre_list; /* Pointers to pending TREs */ | ||
64 | struct hidma_tx_status | ||
65 | *tx_status_list; /* pointers to pending TREs' status */ | ||
66 | s32 pending_tre_count; /* Number of TREs pending */ | ||
67 | |||
68 | void *tre_ring; /* TRE ring */ | ||
69 | dma_addr_t tre_ring_handle; /* TRE ring to be shared with HW */ | ||
70 | u32 tre_ring_size; /* Byte size of the ring */ | ||
71 | u32 tre_processed_off; /* last processed TRE */ | ||
72 | |||
73 | void *evre_ring; /* EVRE ring */ | ||
74 | dma_addr_t evre_ring_handle; /* EVRE ring to be shared with HW */ | ||
75 | u32 evre_ring_size; /* Byte size of the ring */ | ||
76 | u32 evre_processed_off; /* last processed EVRE */ | ||
77 | |||
78 | u32 tre_write_offset; /* TRE write location */ | ||
79 | struct tasklet_struct task; /* task delivering notifications */ | ||
80 | DECLARE_KFIFO_PTR(handoff_fifo, | ||
81 | struct hidma_tre *); /* pending TREs FIFO */ | ||
82 | }; | ||
83 | |||
84 | struct hidma_desc { | ||
85 | struct dma_async_tx_descriptor desc; | ||
86 | /* linked-list node for this channel */ | ||
87 | struct list_head node; | ||
88 | u32 tre_ch; | ||
89 | }; | ||
90 | |||
91 | struct hidma_chan { | ||
92 | bool paused; | ||
93 | bool allocated; | ||
94 | char dbg_name[16]; | ||
95 | u32 dma_sig; | ||
96 | |||
97 | /* | ||
98 | * active descriptor on this channel | ||
99 | * It is used by the DMA complete notification to | ||
100 | * locate the descriptor that initiated the transfer. | ||
101 | */ | ||
102 | struct dentry *debugfs; | ||
103 | struct dentry *stats; | ||
104 | struct hidma_dev *dmadev; | ||
105 | struct hidma_desc *running; | ||
106 | |||
107 | struct dma_chan chan; | ||
108 | struct list_head free; | ||
109 | struct list_head prepared; | ||
110 | struct list_head active; | ||
111 | struct list_head completed; | ||
112 | |||
113 | /* Lock for this structure */ | ||
114 | spinlock_t lock; | ||
115 | }; | ||
116 | |||
117 | struct hidma_dev { | ||
118 | int irq; | ||
119 | int chidx; | ||
120 | u32 nr_descriptors; | ||
121 | |||
122 | struct hidma_lldev *lldev; | ||
123 | void __iomem *dev_trca; | ||
124 | struct resource *trca_resource; | ||
125 | void __iomem *dev_evca; | ||
126 | struct resource *evca_resource; | ||
127 | |||
128 | /* used to protect the pending channel list */ | ||
129 | spinlock_t lock; | ||
130 | struct dma_device ddev; | ||
131 | |||
132 | struct dentry *debugfs; | ||
133 | struct dentry *stats; | ||
134 | |||
135 | /* Task delivering issue_pending */ | ||
136 | struct tasklet_struct task; | ||
137 | }; | ||
138 | |||
139 | int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id, | ||
140 | const char *dev_name, | ||
141 | void (*callback)(void *data), void *data, u32 *tre_ch); | ||
142 | |||
143 | void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch); | ||
144 | enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch); | ||
145 | bool hidma_ll_isenabled(struct hidma_lldev *llhndl); | ||
146 | void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch); | ||
147 | void hidma_ll_start(struct hidma_lldev *llhndl); | ||
148 | int hidma_ll_pause(struct hidma_lldev *llhndl); | ||
149 | int hidma_ll_resume(struct hidma_lldev *llhndl); | ||
150 | void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch, | ||
151 | dma_addr_t src, dma_addr_t dest, u32 len, u32 flags); | ||
152 | int hidma_ll_setup(struct hidma_lldev *lldev); | ||
153 | struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels, | ||
154 | void __iomem *trca, void __iomem *evca, | ||
155 | u8 chidx); | ||
156 | int hidma_ll_uninit(struct hidma_lldev *llhndl); | ||
157 | irqreturn_t hidma_ll_inthandler(int irq, void *arg); | ||
158 | void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info, | ||
159 | u8 err_code); | ||
160 | #endif | ||
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c new file mode 100644 index 000000000000..ef491b893f40 --- /dev/null +++ b/drivers/dma/qcom/hidma_mgmt.c | |||
@@ -0,0 +1,302 @@ | |||
1 | /* | ||
2 | * Qualcomm Technologies HIDMA DMA engine Management interface | ||
3 | * | ||
4 | * Copyright (c) 2015, The Linux Foundation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 and | ||
8 | * only version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/dmaengine.h> | ||
17 | #include <linux/acpi.h> | ||
18 | #include <linux/of.h> | ||
19 | #include <linux/property.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/pm_runtime.h> | ||
26 | #include <linux/bitops.h> | ||
27 | |||
28 | #include "hidma_mgmt.h" | ||
29 | |||
30 | #define HIDMA_QOS_N_OFFSET 0x300 | ||
31 | #define HIDMA_CFG_OFFSET 0x400 | ||
32 | #define HIDMA_MAX_BUS_REQ_LEN_OFFSET 0x41C | ||
33 | #define HIDMA_MAX_XACTIONS_OFFSET 0x420 | ||
34 | #define HIDMA_HW_VERSION_OFFSET 0x424 | ||
35 | #define HIDMA_CHRESET_TIMEOUT_OFFSET 0x418 | ||
36 | |||
37 | #define HIDMA_MAX_WR_XACTIONS_MASK GENMASK(4, 0) | ||
38 | #define HIDMA_MAX_RD_XACTIONS_MASK GENMASK(4, 0) | ||
39 | #define HIDMA_WEIGHT_MASK GENMASK(6, 0) | ||
40 | #define HIDMA_MAX_BUS_REQ_LEN_MASK GENMASK(15, 0) | ||
41 | #define HIDMA_CHRESET_TIMEOUT_MASK GENMASK(19, 0) | ||
42 | |||
43 | #define HIDMA_MAX_WR_XACTIONS_BIT_POS 16 | ||
44 | #define HIDMA_MAX_BUS_WR_REQ_BIT_POS 16 | ||
45 | #define HIDMA_WRR_BIT_POS 8 | ||
46 | #define HIDMA_PRIORITY_BIT_POS 15 | ||
47 | |||
48 | #define HIDMA_AUTOSUSPEND_TIMEOUT 2000 | ||
49 | #define HIDMA_MAX_CHANNEL_WEIGHT 15 | ||
50 | |||
51 | int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev) | ||
52 | { | ||
53 | unsigned int i; | ||
54 | u32 val; | ||
55 | |||
56 | if (!is_power_of_2(mgmtdev->max_write_request) || | ||
57 | (mgmtdev->max_write_request < 128) || | ||
58 | (mgmtdev->max_write_request > 1024)) { | ||
59 | dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n", | ||
60 | mgmtdev->max_write_request); | ||
61 | return -EINVAL; | ||
62 | } | ||
63 | |||
64 | if (!is_power_of_2(mgmtdev->max_read_request) || | ||
65 | (mgmtdev->max_read_request < 128) || | ||
66 | (mgmtdev->max_read_request > 1024)) { | ||
67 | dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n", | ||
68 | mgmtdev->max_read_request); | ||
69 | return -EINVAL; | ||
70 | } | ||
71 | |||
72 | if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) { | ||
73 | dev_err(&mgmtdev->pdev->dev, | ||
74 | "max_wr_xactions cannot be bigger than %ld\n", | ||
75 | HIDMA_MAX_WR_XACTIONS_MASK); | ||
76 | return -EINVAL; | ||
77 | } | ||
78 | |||
79 | if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) { | ||
80 | dev_err(&mgmtdev->pdev->dev, | ||
81 | "max_rd_xactions cannot be bigger than %ld\n", | ||
82 | HIDMA_MAX_RD_XACTIONS_MASK); | ||
83 | return -EINVAL; | ||
84 | } | ||
85 | |||
86 | for (i = 0; i < mgmtdev->dma_channels; i++) { | ||
87 | if (mgmtdev->priority[i] > 1) { | ||
88 | dev_err(&mgmtdev->pdev->dev, | ||
89 | "priority can be 0 or 1\n"); | ||
90 | return -EINVAL; | ||
91 | } | ||
92 | |||
93 | if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) { | ||
94 | dev_err(&mgmtdev->pdev->dev, | ||
95 | "max value of weight can be %d.\n", | ||
96 | HIDMA_MAX_CHANNEL_WEIGHT); | ||
97 | return -EINVAL; | ||
98 | } | ||
99 | |||
100 | /* weight needs to be at least one */ | ||
101 | if (mgmtdev->weight[i] == 0) | ||
102 | mgmtdev->weight[i] = 1; | ||
103 | } | ||
104 | |||
105 | pm_runtime_get_sync(&mgmtdev->pdev->dev); | ||
106 | val = readl(mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET); | ||
107 | val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << HIDMA_MAX_BUS_WR_REQ_BIT_POS); | ||
108 | val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS; | ||
109 | val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK; | ||
110 | val |= mgmtdev->max_read_request; | ||
111 | writel(val, mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET); | ||
112 | |||
113 | val = readl(mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET); | ||
114 | val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS); | ||
115 | val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS; | ||
116 | val &= ~HIDMA_MAX_RD_XACTIONS_MASK; | ||
117 | val |= mgmtdev->max_rd_xactions; | ||
118 | writel(val, mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET); | ||
119 | |||
120 | mgmtdev->hw_version = | ||
121 | readl(mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET); | ||
122 | mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF; | ||
123 | mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF; | ||
124 | |||
125 | for (i = 0; i < mgmtdev->dma_channels; i++) { | ||
126 | u32 weight = mgmtdev->weight[i]; | ||
127 | u32 priority = mgmtdev->priority[i]; | ||
128 | |||
129 | val = readl(mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i)); | ||
130 | val &= ~(1 << HIDMA_PRIORITY_BIT_POS); | ||
131 | val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS; | ||
132 | val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS); | ||
133 | val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS; | ||
134 | writel(val, mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i)); | ||
135 | } | ||
136 | |||
137 | val = readl(mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET); | ||
138 | val &= ~HIDMA_CHRESET_TIMEOUT_MASK; | ||
139 | val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK; | ||
140 | writel(val, mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET); | ||
141 | |||
142 | pm_runtime_mark_last_busy(&mgmtdev->pdev->dev); | ||
143 | pm_runtime_put_autosuspend(&mgmtdev->pdev->dev); | ||
144 | return 0; | ||
145 | } | ||
146 | EXPORT_SYMBOL_GPL(hidma_mgmt_setup); | ||
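
The register programming above repeats one read-modify-write idiom: clear a field at its bit position, then OR in the new value. Distilled into a hypothetical helper (not part of the driver):

/* Sketch of the field-update pattern used throughout hidma_mgmt_setup(). */
static void example_rmw_field(void __iomem *addr, u32 mask, u32 shift, u32 val)
{
	u32 reg = readl(addr);

	reg &= ~(mask << shift);	/* clear the field */
	reg |= (val & mask) << shift;	/* insert the new value */
	writel(reg, addr);
}
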
147 | |||
148 | static int hidma_mgmt_probe(struct platform_device *pdev) | ||
149 | { | ||
150 | struct hidma_mgmt_dev *mgmtdev; | ||
151 | struct resource *res; | ||
152 | void __iomem *virtaddr; | ||
153 | int irq; | ||
154 | int rc; | ||
155 | u32 val; | ||
156 | |||
157 | pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT); | ||
158 | pm_runtime_use_autosuspend(&pdev->dev); | ||
159 | pm_runtime_set_active(&pdev->dev); | ||
160 | pm_runtime_enable(&pdev->dev); | ||
161 | pm_runtime_get_sync(&pdev->dev); | ||
162 | |||
163 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
164 | virtaddr = devm_ioremap_resource(&pdev->dev, res); | ||
165 | if (IS_ERR(virtaddr)) { | ||
166 | rc = PTR_ERR(virtaddr); | ||
167 | goto out; | ||
168 | } | ||
169 | |||
170 | irq = platform_get_irq(pdev, 0); | ||
171 | if (irq < 0) { | ||
172 | dev_err(&pdev->dev, "irq resources not found\n"); | ||
173 | rc = irq; | ||
174 | goto out; | ||
175 | } | ||
176 | |||
177 | mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL); | ||
178 | if (!mgmtdev) { | ||
179 | rc = -ENOMEM; | ||
180 | goto out; | ||
181 | } | ||
182 | |||
183 | mgmtdev->pdev = pdev; | ||
184 | mgmtdev->addrsize = resource_size(res); | ||
185 | mgmtdev->virtaddr = virtaddr; | ||
186 | |||
187 | rc = device_property_read_u32(&pdev->dev, "dma-channels", | ||
188 | &mgmtdev->dma_channels); | ||
189 | if (rc) { | ||
190 | dev_err(&pdev->dev, "number of channels missing\n"); | ||
191 | goto out; | ||
192 | } | ||
193 | |||
194 | rc = device_property_read_u32(&pdev->dev, | ||
195 | "channel-reset-timeout-cycles", | ||
196 | &mgmtdev->chreset_timeout_cycles); | ||
197 | if (rc) { | ||
198 | dev_err(&pdev->dev, "channel reset timeout missing\n"); | ||
199 | goto out; | ||
200 | } | ||
201 | |||
202 | rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes", | ||
203 | &mgmtdev->max_write_request); | ||
204 | if (rc) { | ||
205 | dev_err(&pdev->dev, "max-write-burst-bytes missing\n"); | ||
206 | goto out; | ||
207 | } | ||
208 | |||
209 | rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes", | ||
210 | &mgmtdev->max_read_request); | ||
211 | if (rc) { | ||
212 | dev_err(&pdev->dev, "max-read-burst-bytes missing\n"); | ||
213 | goto out; | ||
214 | } | ||
215 | |||
216 | rc = device_property_read_u32(&pdev->dev, "max-write-transactions", | ||
217 | &mgmtdev->max_wr_xactions); | ||
218 | if (rc) { | ||
219 | dev_err(&pdev->dev, "max-write-transactions missing\n"); | ||
220 | goto out; | ||
221 | } | ||
222 | |||
223 | rc = device_property_read_u32(&pdev->dev, "max-read-transactions", | ||
224 | &mgmtdev->max_rd_xactions); | ||
225 | if (rc) { | ||
226 | dev_err(&pdev->dev, "max-read-transactions missing\n"); | ||
227 | goto out; | ||
228 | } | ||
229 | |||
230 | mgmtdev->priority = devm_kcalloc(&pdev->dev, | ||
231 | mgmtdev->dma_channels, | ||
232 | sizeof(*mgmtdev->priority), | ||
233 | GFP_KERNEL); | ||
234 | if (!mgmtdev->priority) { | ||
235 | rc = -ENOMEM; | ||
236 | goto out; | ||
237 | } | ||
238 | |||
239 | mgmtdev->weight = devm_kcalloc(&pdev->dev, | ||
240 | mgmtdev->dma_channels, | ||
241 | sizeof(*mgmtdev->weight), GFP_KERNEL); | ||
242 | if (!mgmtdev->weight) { | ||
243 | rc = -ENOMEM; | ||
244 | goto out; | ||
245 | } | ||
246 | |||
247 | rc = hidma_mgmt_setup(mgmtdev); | ||
248 | if (rc) { | ||
249 | dev_err(&pdev->dev, "setup failed\n"); | ||
250 | goto out; | ||
251 | } | ||
252 | |||
253 | /* start the HW */ | ||
254 | val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET); | ||
255 | val |= 1; | ||
256 | writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET); | ||
257 | |||
258 | rc = hidma_mgmt_init_sys(mgmtdev); | ||
259 | if (rc) { | ||
260 | dev_err(&pdev->dev, "sysfs setup failed\n"); | ||
261 | goto out; | ||
262 | } | ||
263 | |||
264 | dev_info(&pdev->dev, | ||
265 | "HW rev: %d.%d @ %pa with %d physical channels\n", | ||
266 | mgmtdev->hw_version_major, mgmtdev->hw_version_minor, | ||
267 | &res->start, mgmtdev->dma_channels); | ||
268 | |||
269 | platform_set_drvdata(pdev, mgmtdev); | ||
270 | pm_runtime_mark_last_busy(&pdev->dev); | ||
271 | pm_runtime_put_autosuspend(&pdev->dev); | ||
272 | return 0; | ||
273 | out: | ||
274 | pm_runtime_put_sync_suspend(&pdev->dev); | ||
275 | pm_runtime_disable(&pdev->dev); | ||
276 | return rc; | ||
277 | } | ||
278 | |||
279 | #if IS_ENABLED(CONFIG_ACPI) | ||
280 | static const struct acpi_device_id hidma_mgmt_acpi_ids[] = { | ||
281 | {"QCOM8060"}, | ||
282 | {}, | ||
283 | }; | ||
284 | #endif | ||
285 | |||
286 | static const struct of_device_id hidma_mgmt_match[] = { | ||
287 | {.compatible = "qcom,hidma-mgmt-1.0",}, | ||
288 | {}, | ||
289 | }; | ||
290 | MODULE_DEVICE_TABLE(of, hidma_mgmt_match); | ||
291 | |||
292 | static struct platform_driver hidma_mgmt_driver = { | ||
293 | .probe = hidma_mgmt_probe, | ||
294 | .driver = { | ||
295 | .name = "hidma-mgmt", | ||
296 | .of_match_table = hidma_mgmt_match, | ||
297 | .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids), | ||
298 | }, | ||
299 | }; | ||
300 | |||
301 | module_platform_driver(hidma_mgmt_driver); | ||
302 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/dma/qcom/hidma_mgmt.h b/drivers/dma/qcom/hidma_mgmt.h new file mode 100644 index 000000000000..f7daf33769f4 --- /dev/null +++ b/drivers/dma/qcom/hidma_mgmt.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Qualcomm Technologies HIDMA Management common header | ||
3 | * | ||
4 | * Copyright (c) 2015, The Linux Foundation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 and | ||
8 | * only version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | struct hidma_mgmt_dev { | ||
17 | u8 hw_version_major; | ||
18 | u8 hw_version_minor; | ||
19 | |||
20 | u32 max_wr_xactions; | ||
21 | u32 max_rd_xactions; | ||
22 | u32 max_write_request; | ||
23 | u32 max_read_request; | ||
24 | u32 dma_channels; | ||
25 | u32 chreset_timeout_cycles; | ||
26 | u32 hw_version; | ||
27 | u32 *priority; | ||
28 | u32 *weight; | ||
29 | |||
30 | /* Hardware device constants */ | ||
31 | void __iomem *virtaddr; | ||
32 | resource_size_t addrsize; | ||
33 | |||
34 | struct kobject **chroots; | ||
35 | struct platform_device *pdev; | ||
36 | }; | ||
37 | |||
38 | int hidma_mgmt_init_sys(struct hidma_mgmt_dev *dev); | ||
39 | int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev); | ||
diff --git a/drivers/dma/qcom/hidma_mgmt_sys.c b/drivers/dma/qcom/hidma_mgmt_sys.c new file mode 100644 index 000000000000..d61f1068a34b --- /dev/null +++ b/drivers/dma/qcom/hidma_mgmt_sys.c | |||
@@ -0,0 +1,295 @@ | |||
1 | /* | ||
2 | * Qualcomm Technologies HIDMA Management SYS interface | ||
3 | * | ||
4 | * Copyright (c) 2015, The Linux Foundation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 and | ||
8 | * only version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/sysfs.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | |||
19 | #include "hidma_mgmt.h" | ||
20 | |||
21 | struct hidma_chan_attr { | ||
22 | struct hidma_mgmt_dev *mdev; | ||
23 | int index; | ||
24 | struct kobj_attribute attr; | ||
25 | }; | ||
26 | |||
27 | struct hidma_mgmt_fileinfo { | ||
28 | char *name; | ||
29 | int mode; | ||
30 | int (*get)(struct hidma_mgmt_dev *mdev); | ||
31 | int (*set)(struct hidma_mgmt_dev *mdev, u64 val); | ||
32 | }; | ||
33 | |||
34 | #define IMPLEMENT_GETSET(name) \ | ||
35 | static int get_##name(struct hidma_mgmt_dev *mdev) \ | ||
36 | { \ | ||
37 | return mdev->name; \ | ||
38 | } \ | ||
39 | static int set_##name(struct hidma_mgmt_dev *mdev, u64 val) \ | ||
40 | { \ | ||
41 | u64 tmp; \ | ||
42 | int rc; \ | ||
43 | \ | ||
44 | tmp = mdev->name; \ | ||
45 | mdev->name = val; \ | ||
46 | rc = hidma_mgmt_setup(mdev); \ | ||
47 | if (rc) \ | ||
48 | mdev->name = tmp; \ | ||
49 | return rc; \ | ||
50 | } | ||
51 | |||
52 | #define DECLARE_ATTRIBUTE(name, mode) \ | ||
53 | {#name, mode, get_##name, set_##name} | ||
54 | |||
55 | IMPLEMENT_GETSET(hw_version_major) | ||
56 | IMPLEMENT_GETSET(hw_version_minor) | ||
57 | IMPLEMENT_GETSET(max_wr_xactions) | ||
58 | IMPLEMENT_GETSET(max_rd_xactions) | ||
59 | IMPLEMENT_GETSET(max_write_request) | ||
60 | IMPLEMENT_GETSET(max_read_request) | ||
61 | IMPLEMENT_GETSET(dma_channels) | ||
62 | IMPLEMENT_GETSET(chreset_timeout_cycles) | ||
63 | |||
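
For readability, here is roughly what one invocation expands to; IMPLEMENT_GETSET(dma_channels), with whitespace added, produces:

static int get_dma_channels(struct hidma_mgmt_dev *mdev)
{
	return mdev->dma_channels;
}

static int set_dma_channels(struct hidma_mgmt_dev *mdev, u64 val)
{
	u64 tmp;
	int rc;

	tmp = mdev->dma_channels;
	mdev->dma_channels = val;
	rc = hidma_mgmt_setup(mdev);	/* reprogram HW; roll back on failure */
	if (rc)
		mdev->dma_channels = tmp;
	return rc;
}
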
64 | static int set_priority(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val) | ||
65 | { | ||
66 | u64 tmp; | ||
67 | int rc; | ||
68 | |||
69 | if (i >= mdev->dma_channels) | ||
70 | return -EINVAL; | ||
71 | |||
72 | tmp = mdev->priority[i]; | ||
73 | mdev->priority[i] = val; | ||
74 | rc = hidma_mgmt_setup(mdev); | ||
75 | if (rc) | ||
76 | mdev->priority[i] = tmp; | ||
77 | return rc; | ||
78 | } | ||
79 | |||
80 | static int set_weight(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val) | ||
81 | { | ||
82 | u64 tmp; | ||
83 | int rc; | ||
84 | |||
85 | if (i >= mdev->dma_channels) | ||
86 | return -EINVAL; | ||
87 | |||
88 | tmp = mdev->weight[i]; | ||
89 | mdev->weight[i] = val; | ||
90 | rc = hidma_mgmt_setup(mdev); | ||
91 | if (rc) | ||
92 | mdev->weight[i] = tmp; | ||
93 | return rc; | ||
94 | } | ||
95 | |||
96 | static struct hidma_mgmt_fileinfo hidma_mgmt_files[] = { | ||
97 | DECLARE_ATTRIBUTE(hw_version_major, S_IRUGO), | ||
98 | DECLARE_ATTRIBUTE(hw_version_minor, S_IRUGO), | ||
99 | DECLARE_ATTRIBUTE(dma_channels, S_IRUGO), | ||
100 | DECLARE_ATTRIBUTE(chreset_timeout_cycles, S_IRUGO), | ||
101 | DECLARE_ATTRIBUTE(max_wr_xactions, S_IRUGO), | ||
102 | DECLARE_ATTRIBUTE(max_rd_xactions, S_IRUGO), | ||
103 | DECLARE_ATTRIBUTE(max_write_request, S_IRUGO), | ||
104 | DECLARE_ATTRIBUTE(max_read_request, S_IRUGO), | ||
105 | }; | ||
106 | |||
107 | static ssize_t show_values(struct device *dev, struct device_attribute *attr, | ||
108 | char *buf) | ||
109 | { | ||
110 | struct platform_device *pdev = to_platform_device(dev); | ||
111 | struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev); | ||
112 | unsigned int i; | ||
113 | |||
114 | buf[0] = 0; | ||
115 | |||
116 | for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) { | ||
117 | if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) { | ||
118 | sprintf(buf, "%d\n", hidma_mgmt_files[i].get(mdev)); | ||
119 | break; | ||
120 | } | ||
121 | } | ||
122 | return strlen(buf); | ||
123 | } | ||
124 | |||
125 | static ssize_t set_values(struct device *dev, struct device_attribute *attr, | ||
126 | const char *buf, size_t count) | ||
127 | { | ||
128 | struct platform_device *pdev = to_platform_device(dev); | ||
129 | struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev); | ||
130 | unsigned long tmp; | ||
131 | unsigned int i; | ||
132 | int rc; | ||
133 | |||
134 | rc = kstrtoul(buf, 0, &tmp); | ||
135 | if (rc) | ||
136 | return rc; | ||
137 | |||
138 | for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) { | ||
139 | if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) { | ||
140 | rc = hidma_mgmt_files[i].set(mdev, tmp); | ||
141 | if (rc) | ||
142 | return rc; | ||
143 | |||
144 | break; | ||
145 | } | ||
146 | } | ||
147 | return count; | ||
148 | } | ||
149 | |||
150 | static ssize_t show_values_channel(struct kobject *kobj, | ||
151 | struct kobj_attribute *attr, char *buf) | ||
152 | { | ||
153 | struct hidma_chan_attr *chattr; | ||
154 | struct hidma_mgmt_dev *mdev; | ||
155 | |||
156 | buf[0] = 0; | ||
157 | chattr = container_of(attr, struct hidma_chan_attr, attr); | ||
158 | mdev = chattr->mdev; | ||
159 | if (strcmp(attr->attr.name, "priority") == 0) | ||
160 | sprintf(buf, "%d\n", mdev->priority[chattr->index]); | ||
161 | else if (strcmp(attr->attr.name, "weight") == 0) | ||
162 | sprintf(buf, "%d\n", mdev->weight[chattr->index]); | ||
163 | |||
164 | return strlen(buf); | ||
165 | } | ||
166 | |||
167 | static ssize_t set_values_channel(struct kobject *kobj, | ||
168 | struct kobj_attribute *attr, const char *buf, | ||
169 | size_t count) | ||
170 | { | ||
171 | struct hidma_chan_attr *chattr; | ||
172 | struct hidma_mgmt_dev *mdev; | ||
173 | unsigned long tmp; | ||
174 | int rc; | ||
175 | |||
176 | chattr = container_of(attr, struct hidma_chan_attr, attr); | ||
177 | mdev = chattr->mdev; | ||
178 | |||
179 | rc = kstrtoul(buf, 0, &tmp); | ||
180 | if (rc) | ||
181 | return rc; | ||
182 | |||
183 | if (strcmp(attr->attr.name, "priority") == 0) { | ||
184 | rc = set_priority(mdev, chattr->index, tmp); | ||
185 | if (rc) | ||
186 | return rc; | ||
187 | } else if (strcmp(attr->attr.name, "weight") == 0) { | ||
188 | rc = set_weight(mdev, chattr->index, tmp); | ||
189 | if (rc) | ||
190 | return rc; | ||
191 | } | ||
192 | return count; | ||
193 | } | ||
194 | |||
195 | static int create_sysfs_entry(struct hidma_mgmt_dev *dev, char *name, int mode) | ||
196 | { | ||
197 | struct device_attribute *attrs; | ||
198 | char *name_copy; | ||
199 | |||
200 | attrs = devm_kmalloc(&dev->pdev->dev, | ||
201 | sizeof(struct device_attribute), GFP_KERNEL); | ||
202 | if (!attrs) | ||
203 | return -ENOMEM; | ||
204 | |||
205 | name_copy = devm_kstrdup(&dev->pdev->dev, name, GFP_KERNEL); | ||
206 | if (!name_copy) | ||
207 | return -ENOMEM; | ||
208 | |||
209 | attrs->attr.name = name_copy; | ||
210 | attrs->attr.mode = mode; | ||
211 | attrs->show = show_values; | ||
212 | attrs->store = set_values; | ||
213 | sysfs_attr_init(&attrs->attr); | ||
214 | |||
215 | return device_create_file(&dev->pdev->dev, attrs); | ||
216 | } | ||
217 | |||
218 | static int create_sysfs_entry_channel(struct hidma_mgmt_dev *mdev, char *name, | ||
219 | int mode, int index, | ||
220 | struct kobject *parent) | ||
221 | { | ||
222 | struct hidma_chan_attr *chattr; | ||
223 | char *name_copy; | ||
224 | |||
225 | chattr = devm_kmalloc(&mdev->pdev->dev, sizeof(*chattr), GFP_KERNEL); | ||
226 | if (!chattr) | ||
227 | return -ENOMEM; | ||
228 | |||
229 | name_copy = devm_kstrdup(&mdev->pdev->dev, name, GFP_KERNEL); | ||
230 | if (!name_copy) | ||
231 | return -ENOMEM; | ||
232 | |||
233 | chattr->mdev = mdev; | ||
234 | chattr->index = index; | ||
235 | chattr->attr.attr.name = name_copy; | ||
236 | chattr->attr.attr.mode = mode; | ||
237 | chattr->attr.show = show_values_channel; | ||
238 | chattr->attr.store = set_values_channel; | ||
239 | sysfs_attr_init(&chattr->attr.attr); | ||
240 | |||
241 | return sysfs_create_file(parent, &chattr->attr.attr); | ||
242 | } | ||
243 | |||
244 | int hidma_mgmt_init_sys(struct hidma_mgmt_dev *mdev) | ||
245 | { | ||
246 | unsigned int i; | ||
247 | int rc; | ||
248 | int required; | ||
249 | struct kobject *chanops; | ||
250 | |||
251 | required = sizeof(*mdev->chroots) * mdev->dma_channels; | ||
252 | mdev->chroots = devm_kmalloc(&mdev->pdev->dev, required, GFP_KERNEL); | ||
253 | if (!mdev->chroots) | ||
254 | return -ENOMEM; | ||
255 | |||
256 | chanops = kobject_create_and_add("chanops", &mdev->pdev->dev.kobj); | ||
257 | if (!chanops) | ||
258 | return -ENOMEM; | ||
259 | |||
260 | /* create each channel directory here */ | ||
261 | for (i = 0; i < mdev->dma_channels; i++) { | ||
262 | char name[20]; | ||
263 | |||
264 | snprintf(name, sizeof(name), "chan%d", i); | ||
265 | mdev->chroots[i] = kobject_create_and_add(name, chanops); | ||
266 | if (!mdev->chroots[i]) | ||
267 | return -ENOMEM; | ||
268 | } | ||
269 | |||
270 | /* populate common parameters */ | ||
271 | for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) { | ||
272 | rc = create_sysfs_entry(mdev, hidma_mgmt_files[i].name, | ||
273 | hidma_mgmt_files[i].mode); | ||
274 | if (rc) | ||
275 | return rc; | ||
276 | } | ||
277 | |||
278 | /* populate parameters that are per channel */ | ||
279 | for (i = 0; i < mdev->dma_channels; i++) { | ||
280 | rc = create_sysfs_entry_channel(mdev, "priority", | ||
281 | (S_IRUGO | S_IWUGO), i, | ||
282 | mdev->chroots[i]); | ||
283 | if (rc) | ||
284 | return rc; | ||
285 | |||
286 | rc = create_sysfs_entry_channel(mdev, "weight", | ||
287 | (S_IRUGO | S_IWUGO), i, | ||
288 | mdev->chroots[i]); | ||
289 | if (rc) | ||
290 | return rc; | ||
291 | } | ||
292 | |||
293 | return 0; | ||
294 | } | ||
295 | EXPORT_SYMBOL_GPL(hidma_mgmt_init_sys); | ||
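
The resulting sysfs layout places chanops/chan<N>/priority and chanops/chan<N>/weight under the management device, next to flat attributes such as dma_channels. A hedged userspace sketch; the QCOM8060:00 instance name is an assumption and depends on how the platform enumerates the device:

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; substitute the enumerated device name. */
	const char *path =
		"/sys/devices/platform/QCOM8060:00/chanops/chan0/weight";
	FILE *f = fopen(path, "r+");
	unsigned int weight;

	if (!f)
		return 1;
	if (fscanf(f, "%u", &weight) == 1)
		printf("chan0 weight: %u\n", weight);
	rewind(f);
	fprintf(f, "8\n");	/* -> set_weight() -> hidma_mgmt_setup() */
	fclose(f);
	return 0;
}
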