Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig          |   11
-rw-r--r--  drivers/dma/Makefile         |    4
-rw-r--r--  drivers/dma/amba-pl08x.c     | 1215
-rw-r--r--  drivers/dma/at_hdmac.c       |   21
-rw-r--r--  drivers/dma/fsldma.c         |   10
-rw-r--r--  drivers/dma/fsldma.h         |    9
-rw-r--r--  drivers/dma/imx-dma.c        |   28
-rw-r--r--  drivers/dma/imx-sdma.c       |  264
-rw-r--r--  drivers/dma/intel_mid_dma.c  |   47
-rw-r--r--  drivers/dma/ioat/Makefile    |    2
-rw-r--r--  drivers/dma/iop-adma.c       |    4
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c  |   50
-rw-r--r--  drivers/dma/mpc512x_dma.c    |  187
-rw-r--r--  drivers/dma/mv_xor.c         |    2
-rw-r--r--  drivers/dma/pch_dma.c        |   34
-rw-r--r--  drivers/dma/ppc4xx/adma.c    |    5
-rw-r--r--  drivers/dma/shdma.c          |  131
-rw-r--r--  drivers/dma/shdma.h          |    1
-rw-r--r--  drivers/dma/ste_dma40.c      | 1437
-rw-r--r--  drivers/dma/ste_dma40_ll.c   |  302
-rw-r--r--  drivers/dma/ste_dma40_ll.h   |   66
21 files changed, 1965 insertions(+), 1865 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6ee23592700..1c28816152f 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -109,7 +109,7 @@ config FSL_DMA
 
 config MPC512X_DMA
 	tristate "Freescale MPC512x built-in DMA engine support"
-	depends on PPC_MPC512x
+	depends on PPC_MPC512x || PPC_MPC831x
 	select DMA_ENGINE
 	---help---
 	  Enable support for the Freescale MPC512x built-in DMA engine.
@@ -200,11 +200,16 @@ config PL330_DMA
 	  platform_data for a dma-pl330 device.
 
 config PCH_DMA
-	tristate "Topcliff (Intel EG20T) PCH DMA support"
+	tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
 	help
-	  Enable support for the Topcliff (Intel EG20T) PCH DMA engine.
+	  Enable support for Intel EG20T PCH DMA engine.
+
+	  This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/
+	  Output Hub) which is for IVI(In-Vehicle Infotainment) use.
+	  ML7213 is companion chip for Intel Atom E6xx series.
+	  ML7213 is completely compatible for Intel EG20T PCH.
 
 config IMX_SDMA
 	tristate "i.MX SDMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a8a84f4587f..64b21f5cd74 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,8 +1,8 @@
 ifeq ($(CONFIG_DMADEVICES_DEBUG),y)
-	EXTRA_CFLAGS += -DDEBUG
+	ccflags-y += -DDEBUG
 endif
 ifeq ($(CONFIG_DMADEVICES_VDEBUG),y)
-	EXTRA_CFLAGS += -DVERBOSE_DEBUG
+	ccflags-y += -DVERBOSE_DEBUG
 endif
 
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b605cc9ac3a..07bca4970e5 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -19,14 +19,14 @@
  * this program; if not, write to the Free Software Foundation, Inc., 59
  * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
- * The full GNU General Public License is iin this distribution in the
- * file called COPYING.
+ * The full GNU General Public License is in this distribution in the file
+ * called COPYING.
  *
  * Documentation: ARM DDI 0196G == PL080
  * Documentation: ARM DDI 0218E == PL081
  *
- * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
- * any channel.
+ * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
+ * channel.
  *
  * The PL080 has 8 channels available for simultaneous use, and the PL081
  * has only two channels. So on these DMA controllers the number of channels
@@ -53,7 +53,23 @@
  *
  * ASSUMES default (little) endianness for DMA transfers
  *
- * Only DMAC flow control is implemented
+ * The PL08x has two flow control settings:
+ *  - DMAC flow control: the transfer size defines the number of transfers
+ *    which occur for the current LLI entry, and the DMAC raises TC at the
+ *    end of every LLI entry.  Observed behaviour shows the DMAC listening
+ *    to both the BREQ and SREQ signals (contrary to documented),
+ *    transferring data if either is active.  The LBREQ and LSREQ signals
+ *    are ignored.
+ *
+ *  - Peripheral flow control: the transfer size is ignored (and should be
+ *    zero).  The data is transferred from the current LLI entry, until
+ *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
+ *    will then move to the next LLI entry.
+ *
+ * Only the former works sanely with scatter lists, so we only implement
+ * the DMAC flow control method.  However, peripherals which use the LBREQ
+ * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
+ * these hardware restrictions prevents them from using scatter DMA.
  *
  * Global TODO:
  * - Break out common code from arch/arm/mach-s3c64xx and share
@@ -61,50 +77,40 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 #include <linux/dmapool.h>
-#include <linux/amba/bus.h>
 #include <linux/dmaengine.h>
+#include <linux/amba/bus.h>
 #include <linux/amba/pl08x.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 
 #include <asm/hardware/pl080.h>
-#include <asm/dma.h>
-#include <asm/mach/dma.h>
-#include <asm/atomic.h>
-#include <asm/processor.h>
-#include <asm/cacheflush.h>
 
 #define DRIVER_NAME	"pl08xdmac"
 
 /**
- * struct vendor_data - vendor-specific config parameters
- * for PL08x derivates
- * @name: the name of this specific variant
+ * struct vendor_data - vendor-specific config parameters for PL08x derivatives
  * @channels: the number of channels available in this variant
- * @dualmaster: whether this version supports dual AHB masters
- * or not.
+ * @dualmaster: whether this version supports dual AHB masters or not.
  */
 struct vendor_data {
-	char *name;
 	u8 channels;
 	bool dualmaster;
 };
 
 /*
  * PL08X private data structures
- * An LLI struct - see pl08x TRM
- * Note that next uses bit[0] as a bus bit,
- * start & end do not - their bus bit info
- * is in cctl
+ * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
+ * start & end do not - their bus bit info is in cctl.  Also note that these
+ * are fixed 32-bit quantities.
  */
-struct lli {
-	dma_addr_t src;
-	dma_addr_t dst;
-	dma_addr_t next;
+struct pl08x_lli {
+	u32 src;
+	u32 dst;
+	u32 lli;
 	u32 cctl;
 };
 
@@ -119,6 +125,8 @@ struct lli {
  * @phy_chans: array of data for the physical channels
  * @pool: a pool for the LLI descriptors
  * @pool_ctr: counter of LLIs in the pool
+ * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches
+ * @mem_buses: set to indicate memory transfers on AHB2.
  * @lock: a spinlock for this struct
  */
 struct pl08x_driver_data {
@@ -126,11 +134,13 @@ struct pl08x_driver_data {
 	struct dma_device memcpy;
 	void __iomem *base;
 	struct amba_device *adev;
-	struct vendor_data *vd;
+	const struct vendor_data *vd;
 	struct pl08x_platform_data *pd;
 	struct pl08x_phy_chan *phy_chans;
 	struct dma_pool *pool;
 	int pool_ctr;
+	u8 lli_buses;
+	u8 mem_buses;
 	spinlock_t lock;
 };
 
@@ -152,9 +162,9 @@ struct pl08x_driver_data {
 /* Size (bytes) of each LLI buffer allocated for one transfer */
 # define PL08X_LLI_TSFR_SIZE	0x2000
 
-/* Maximimum times we call dma_pool_alloc on this pool without freeing */
+/* Maximum times we call dma_pool_alloc on this pool without freeing */
 #define PL08X_MAX_ALLOCS	0x40
-#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct lli))
+#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
 #define PL08X_ALIGN		8
 
 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@ -162,6 +172,11 @@ static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
 	return container_of(chan, struct pl08x_dma_chan, chan);
 }
 
+static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
+{
+	return container_of(tx, struct pl08x_txd, tx);
+}
+
 /*
  * Physical channel handling
  */
@@ -177,103 +192,63 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
 
 /*
  * Set the initial DMA register values i.e. those for the first LLI
- * The next lli pointer and the configuration interrupt bit have
- * been set when the LLIs were constructed
+ * The next LLI pointer and the configuration interrupt bit have
+ * been set when the LLIs were constructed.  Poke them into the hardware
+ * and start the transfer.
  */
-static void pl08x_set_cregs(struct pl08x_driver_data *pl08x,
-	struct pl08x_phy_chan *ch)
+static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
+	struct pl08x_txd *txd)
 {
-	/* Wait for channel inactive */
-	while (pl08x_phy_channel_busy(ch))
-		;
-
-	dev_vdbg(&pl08x->adev->dev,
-		"WRITE channel %d: csrc=%08x, cdst=%08x, "
-		"cctl=%08x, clli=%08x, ccfg=%08x\n",
-		ch->id,
-		ch->csrc,
-		ch->cdst,
-		ch->cctl,
-		ch->clli,
-		ch->ccfg);
-
-	writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
-	writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
-	writel(ch->clli, ch->base + PL080_CH_LLI);
-	writel(ch->cctl, ch->base + PL080_CH_CONTROL);
-	writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
-}
-
-static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
-{
-	struct pl08x_channel_data *cd = plchan->cd;
+	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_phy_chan *phychan = plchan->phychan;
-	struct pl08x_txd *txd = plchan->at;
-
-	/* Copy the basic control register calculated at transfer config */
-	phychan->csrc = txd->csrc;
-	phychan->cdst = txd->cdst;
-	phychan->clli = txd->clli;
-	phychan->cctl = txd->cctl;
-
-	/* Assign the signal to the proper control registers */
-	phychan->ccfg = cd->ccfg;
-	phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
-	phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
-	/* If it wasn't set from AMBA, ignore it */
-	if (txd->direction == DMA_TO_DEVICE)
-		/* Select signal as destination */
-		phychan->ccfg |=
-			(phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
-	else if (txd->direction == DMA_FROM_DEVICE)
-		/* Select signal as source */
-		phychan->ccfg |=
-			(phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
-	/* Always enable error interrupts */
-	phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
-	/* Always enable terminal interrupts */
-	phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
-}
-
-/*
- * Enable the DMA channel
- * Assumes all other configuration bits have been set
- * as desired before this code is called
- */
-static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
-	struct pl08x_phy_chan *ch)
-{
+	struct pl08x_lli *lli = &txd->llis_va[0];
 	u32 val;
 
-	/*
-	 * Do not access config register until channel shows as disabled
-	 */
-	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
-		;
+	plchan->at = txd;
 
-	/*
-	 * Do not access config register until channel shows as inactive
-	 */
-	val = readl(ch->base + PL080_CH_CONFIG);
+	/* Wait for channel inactive */
+	while (pl08x_phy_channel_busy(phychan))
+		cpu_relax();
+
+	dev_vdbg(&pl08x->adev->dev,
+		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
+		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
+		txd->ccfg);
+
+	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
+	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
+	writel(lli->lli, phychan->base + PL080_CH_LLI);
+	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
+	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
+
+	/* Enable the DMA channel */
+	/* Do not access config register until channel shows as disabled */
+	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
+		cpu_relax();
+
+	/* Do not access config register until channel shows as inactive */
+	val = readl(phychan->base + PL080_CH_CONFIG);
 	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
-		val = readl(ch->base + PL080_CH_CONFIG);
+		val = readl(phychan->base + PL080_CH_CONFIG);
 
-	writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG);
+	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
 }
 
 /*
- * Overall DMAC remains enabled always.
+ * Pause the channel by setting the HALT bit.
  *
- * Disabling individual channels could lose data.
- *
- * Disable the peripheral DMA after disabling the DMAC
- * in order to allow the DMAC FIFO to drain, and
- * hence allow the channel to show inactive
+ * For M->P transfers, pause the DMAC first and then stop the peripheral -
+ * the FIFO can only drain if the peripheral is still requesting data.
+ * (note: this can still timeout if the DMAC FIFO never drains of data.)
  *
+ * For P->M transfers, disable the peripheral first to stop it filling
+ * the DMAC FIFO, and then pause the DMAC.
  */
 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 {
 	u32 val;
+	int timeout;
 
 	/* Set the HALT bit and wait for the FIFO to drain */
 	val = readl(ch->base + PL080_CH_CONFIG);
@@ -281,8 +256,13 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 	writel(val, ch->base + PL080_CH_CONFIG);
 
 	/* Wait for channel inactive */
-	while (pl08x_phy_channel_busy(ch))
-		;
+	for (timeout = 1000; timeout; timeout--) {
+		if (!pl08x_phy_channel_busy(ch))
+			break;
+		udelay(1);
+	}
+	if (pl08x_phy_channel_busy(ch))
+		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
 }
 
 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -296,19 +276,24 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
 }
 
 
-/* Stops the channel */
-static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
+/*
+ * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
+ * clears any pending interrupt status.  This should not be used for
+ * an on-going transfer, but as a method of shutting down a channel
+ * (eg, when it's no longer used) or terminating a transfer.
+ */
+static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
+	struct pl08x_phy_chan *ch)
 {
-	u32 val;
+	u32 val = readl(ch->base + PL080_CH_CONFIG);
 
-	pl08x_pause_phy_chan(ch);
+	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
+		 PL080_CONFIG_TC_IRQ_MASK);
 
-	/* Disable channel */
-	val = readl(ch->base + PL080_CH_CONFIG);
-	val &= ~PL080_CONFIG_ENABLE;
-	val &= ~PL080_CONFIG_ERR_IRQ_MASK;
-	val &= ~PL080_CONFIG_TC_IRQ_MASK;
 	writel(val, ch->base + PL080_CH_CONFIG);
+
+	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
+	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
 }
 
 static inline u32 get_bytes_in_cctl(u32 cctl)
@@ -333,54 +318,56 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_phy_chan *ch;
-	struct pl08x_txd *txdi = NULL;
 	struct pl08x_txd *txd;
 	unsigned long flags;
-	u32 bytes = 0;
+	size_t bytes = 0;
 
 	spin_lock_irqsave(&plchan->lock, flags);
-
 	ch = plchan->phychan;
 	txd = plchan->at;
 
 	/*
-	 * Next follow the LLIs to get the number of pending bytes in the
-	 * currently active transaction.
+	 * Follow the LLIs to get the number of remaining
+	 * bytes in the currently active transaction.
 	 */
 	if (ch && txd) {
-		struct lli *llis_va = txd->llis_va;
-		struct lli *llis_bus = (struct lli *) txd->llis_bus;
-		u32 clli = readl(ch->base + PL080_CH_LLI);
+		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
 
-		/* First get the bytes in the current active LLI */
+		/* First get the remaining bytes in the active transfer */
 		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
 
 		if (clli) {
-			int i = 0;
+			struct pl08x_lli *llis_va = txd->llis_va;
+			dma_addr_t llis_bus = txd->llis_bus;
+			int index;
+
+			BUG_ON(clli < llis_bus || clli >= llis_bus +
+				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
+
+			/*
+			 * Locate the next LLI - as this is an array,
+			 * it's simple maths to find.
+			 */
+			index = (clli - llis_bus) / sizeof(struct pl08x_lli);
 
-			/* Forward to the LLI pointed to by clli */
-			while ((clli != (u32) &(llis_bus[i])) &&
-				(i < MAX_NUM_TSFR_LLIS))
-				i++;
+			for (; index < MAX_NUM_TSFR_LLIS; index++) {
+				bytes += get_bytes_in_cctl(llis_va[index].cctl);
 
-			while (clli) {
-				bytes += get_bytes_in_cctl(llis_va[i].cctl);
 				/*
-				 * A clli of 0x00000000 will terminate the
-				 * LLI list
+				 * A LLI pointer of 0 terminates the LLI list
 				 */
-				clli = llis_va[i].next;
-				i++;
+				if (!llis_va[index].lli)
+					break;
 			}
 		}
 	}
 
 	/* Sum up all queued transactions */
-	if (!list_empty(&plchan->desc_list)) {
-		list_for_each_entry(txdi, &plchan->desc_list, node) {
+	if (!list_empty(&plchan->pend_list)) {
+		struct pl08x_txd *txdi;
+		list_for_each_entry(txdi, &plchan->pend_list, node) {
 			bytes += txdi->len;
 		}
-
 	}
 
 	spin_unlock_irqrestore(&plchan->lock, flags);
@@ -390,6 +377,10 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 
 /*
  * Allocate a physical channel for a virtual channel
+ *
+ * Try to locate a physical channel to be used for this transfer. If all
+ * are taken return NULL and the requester will have to cope by using
+ * some fallback PIO mode or retrying later.
  */
 static struct pl08x_phy_chan *
 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
@@ -399,12 +390,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 	unsigned long flags;
 	int i;
 
-	/*
-	 * Try to locate a physical channel to be used for
-	 * this transfer. If all are taken return NULL and
-	 * the requester will have to cope by using some fallback
-	 * PIO mode or retrying later.
-	 */
 	for (i = 0; i < pl08x->vd->channels; i++) {
 		ch = &pl08x->phy_chans[i];
 
@@ -433,13 +418,12 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
 {
 	unsigned long flags;
 
+	spin_lock_irqsave(&ch->lock, flags);
+
 	/* Stop the channel and clear its interrupts */
-	pl08x_stop_phy_chan(ch);
-	writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
-	writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
+	pl08x_terminate_phy_chan(pl08x, ch);
 
 	/* Mark it as free */
-	spin_lock_irqsave(&ch->lock, flags);
 	ch->serving = NULL;
 	spin_unlock_irqrestore(&ch->lock, flags);
 }
@@ -465,11 +449,11 @@ static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
 }
 
 static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
-	u32 tsize)
+	size_t tsize)
 {
 	u32 retbits = cctl;
 
-	/* Remove all src, dst and transfersize bits */
+	/* Remove all src, dst and transfer size bits */
 	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
 	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
 	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
@@ -509,95 +493,87 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
 	return retbits;
 }
 
+struct pl08x_lli_build_data {
+	struct pl08x_txd *txd;
+	struct pl08x_driver_data *pl08x;
+	struct pl08x_bus_data srcbus;
+	struct pl08x_bus_data dstbus;
+	size_t remainder;
+};
+
 /*
- * Autoselect a master bus to use for the transfer
- * this prefers the destination bus if both available
- * if fixed address on one bus the other will be chosen
+ * Autoselect a master bus to use for the transfer this prefers the
+ * destination bus if both available if fixed address on one bus the
+ * other will be chosen
  */
-void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
-	struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
-	struct pl08x_bus_data **sbus, u32 cctl)
+static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
+	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
 {
 	if (!(cctl & PL080_CONTROL_DST_INCR)) {
-		*mbus = src_bus;
-		*sbus = dst_bus;
+		*mbus = &bd->srcbus;
+		*sbus = &bd->dstbus;
 	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
-		*mbus = dst_bus;
-		*sbus = src_bus;
+		*mbus = &bd->dstbus;
+		*sbus = &bd->srcbus;
 	} else {
-		if (dst_bus->buswidth == 4) {
-			*mbus = dst_bus;
-			*sbus = src_bus;
-		} else if (src_bus->buswidth == 4) {
-			*mbus = src_bus;
-			*sbus = dst_bus;
-		} else if (dst_bus->buswidth == 2) {
-			*mbus = dst_bus;
-			*sbus = src_bus;
-		} else if (src_bus->buswidth == 2) {
-			*mbus = src_bus;
-			*sbus = dst_bus;
+		if (bd->dstbus.buswidth == 4) {
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
+		} else if (bd->srcbus.buswidth == 4) {
+			*mbus = &bd->srcbus;
+			*sbus = &bd->dstbus;
+		} else if (bd->dstbus.buswidth == 2) {
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
+		} else if (bd->srcbus.buswidth == 2) {
+			*mbus = &bd->srcbus;
+			*sbus = &bd->dstbus;
 		} else {
-			/* src_bus->buswidth == 1 */
-			*mbus = dst_bus;
-			*sbus = src_bus;
+			/* bd->srcbus.buswidth == 1 */
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
 		}
 	}
 }
 
 /*
- * Fills in one LLI for a certain transfer descriptor
- * and advance the counter
+ * Fills in one LLI for a certain transfer descriptor and advance the counter
  */
-int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
-	struct pl08x_txd *txd, int num_llis, int len,
-	u32 cctl, u32 *remainder)
+static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
+	int num_llis, int len, u32 cctl)
 {
-	struct lli *llis_va = txd->llis_va;
-	struct lli *llis_bus = (struct lli *) txd->llis_bus;
+	struct pl08x_lli *llis_va = bd->txd->llis_va;
+	dma_addr_t llis_bus = bd->txd->llis_bus;
 
 	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
 
 	llis_va[num_llis].cctl = cctl;
-	llis_va[num_llis].src = txd->srcbus.addr;
-	llis_va[num_llis].dst = txd->dstbus.addr;
-
-	/*
-	 * On versions with dual masters, you can optionally AND on
-	 * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
-	 * in new LLIs with that controller, but we always try to
-	 * choose AHB1 to point into memory. The idea is to have AHB2
-	 * fixed on the peripheral and AHB1 messing around in the
-	 * memory. So we don't manipulate this bit currently.
-	 */
-
-	llis_va[num_llis].next =
-		(dma_addr_t)((u32) &(llis_bus[num_llis + 1]));
+	llis_va[num_llis].src = bd->srcbus.addr;
+	llis_va[num_llis].dst = bd->dstbus.addr;
+	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
+	if (bd->pl08x->lli_buses & PL08X_AHB2)
+		llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
 
 	if (cctl & PL080_CONTROL_SRC_INCR)
-		txd->srcbus.addr += len;
+		bd->srcbus.addr += len;
 	if (cctl & PL080_CONTROL_DST_INCR)
-		txd->dstbus.addr += len;
+		bd->dstbus.addr += len;
 
-	*remainder -= len;
+	BUG_ON(bd->remainder < len);
 
-	return num_llis + 1;
+	bd->remainder -= len;
 }
 
 /*
- * Return number of bytes to fill to boundary, or len
+ * Return number of bytes to fill to boundary, or len.
+ * This calculation works for any value of addr.
  */
-static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
+static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
 {
-	u32 boundary;
+	size_t boundary_len = PL08X_BOUNDARY_SIZE -
+		(addr & (PL08X_BOUNDARY_SIZE - 1));
 
-	boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
-		<< PL08X_BOUNDARY_SHIFT;
-
-	if (boundary < addr + len)
-		return boundary - addr;
-	else
-		return len;
+	return min(boundary_len, len);
 }
 
 /*
@@ -608,20 +584,13 @@ static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
 static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 	struct pl08x_txd *txd)
 {
-	struct pl08x_channel_data *cd = txd->cd;
 	struct pl08x_bus_data *mbus, *sbus;
-	u32 remainder;
+	struct pl08x_lli_build_data bd;
 	int num_llis = 0;
 	u32 cctl;
-	int max_bytes_per_lli;
-	int total_bytes = 0;
-	struct lli *llis_va;
-	struct lli *llis_bus;
-
-	if (!txd) {
-		dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
-		return 0;
-	}
+	size_t max_bytes_per_lli;
+	size_t total_bytes = 0;
+	struct pl08x_lli *llis_va;
 
 	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
 		&txd->llis_bus);
@@ -632,121 +601,79 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
 	pl08x->pool_ctr++;
 
-	/*
-	 * Initialize bus values for this transfer
-	 * from the passed optimal values
-	 */
-	if (!cd) {
-		dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
-		return 0;
-	}
-
-	/* Get the default CCTL from the platform data */
-	cctl = cd->cctl;
+	/* Get the default CCTL */
+	cctl = txd->cctl;
 
-	/*
-	 * On the PL080 we have two bus masters and we
-	 * should select one for source and one for
-	 * destination. We try to use AHB2 for the
-	 * bus which does not increment (typically the
-	 * peripheral) else we just choose something.
-	 */
-	cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
-	if (pl08x->vd->dualmaster) {
-		if (cctl & PL080_CONTROL_SRC_INCR)
-			/* Source increments, use AHB2 for destination */
-			cctl |= PL080_CONTROL_DST_AHB2;
-		else if (cctl & PL080_CONTROL_DST_INCR)
-			/* Destination increments, use AHB2 for source */
-			cctl |= PL080_CONTROL_SRC_AHB2;
-		else
-			/* Just pick something, source AHB1 dest AHB2 */
-			cctl |= PL080_CONTROL_DST_AHB2;
-	}
+	bd.txd = txd;
+	bd.pl08x = pl08x;
+	bd.srcbus.addr = txd->src_addr;
+	bd.dstbus.addr = txd->dst_addr;
 
 	/* Find maximum width of the source bus */
-	txd->srcbus.maxwidth =
+	bd.srcbus.maxwidth =
 		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
 			PL080_CONTROL_SWIDTH_SHIFT);
 
 	/* Find maximum width of the destination bus */
-	txd->dstbus.maxwidth =
+	bd.dstbus.maxwidth =
 		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
 			PL080_CONTROL_DWIDTH_SHIFT);
 
 	/* Set up the bus widths to the maximum */
-	txd->srcbus.buswidth = txd->srcbus.maxwidth;
-	txd->dstbus.buswidth = txd->dstbus.maxwidth;
+	bd.srcbus.buswidth = bd.srcbus.maxwidth;
+	bd.dstbus.buswidth = bd.dstbus.maxwidth;
 	dev_vdbg(&pl08x->adev->dev,
 		"%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
-		__func__, txd->srcbus.buswidth, txd->dstbus.buswidth);
+		__func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
 
 
 	/*
 	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
 	 */
-	max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
+	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
 		PL080_CONTROL_TRANSFER_SIZE_MASK;
 	dev_vdbg(&pl08x->adev->dev,
-		"%s max bytes per lli = %d\n",
+		"%s max bytes per lli = %zu\n",
 		__func__, max_bytes_per_lli);
 
 	/* We need to count this down to zero */
-	remainder = txd->len;
+	bd.remainder = txd->len;
 	dev_vdbg(&pl08x->adev->dev,
-		"%s remainder = %d\n",
-		__func__, remainder);
+		"%s remainder = %zu\n",
+		__func__, bd.remainder);
 
 	/*
 	 * Choose bus to align to
 	 * - prefers destination bus if both available
 	 * - if fixed address on one bus chooses other
-	 * - modifies cctl to choose an apropriate master
-	 */
-	pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
-		&mbus, &sbus, cctl);
-
-
-	/*
-	 * The lowest bit of the LLI register
-	 * is also used to indicate which master to
-	 * use for reading the LLIs.
 	 */
+	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
 
 	if (txd->len < mbus->buswidth) {
-		/*
-		 * Less than a bus width available
-		 * - send as single bytes
-		 */
-		while (remainder) {
+		/* Less than a bus width available - send as single bytes */
+		while (bd.remainder) {
 			dev_vdbg(&pl08x->adev->dev,
 				"%s single byte LLIs for a transfer of "
-				"less than a bus width (remain %08x)\n",
-				__func__, remainder);
+				"less than a bus width (remain 0x%08x)\n",
+				__func__, bd.remainder);
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			num_llis =
-				pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
-					cctl, &remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 	} else {
-		/*
-		 * Make one byte LLIs until master bus is aligned
-		 * - slave will then be aligned also
-		 */
+		/* Make one byte LLIs until master bus is aligned */
 		while ((mbus->addr) % (mbus->buswidth)) {
 			dev_vdbg(&pl08x->adev->dev,
 				"%s adjustment lli for less than bus width "
-				"(remain %08x)\n",
-				__func__, remainder);
+				"(remain 0x%08x)\n",
+				__func__, bd.remainder);
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			num_llis = pl08x_fill_lli_for_desc
-				(pl08x, txd, num_llis, 1, cctl, &remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 
 		/*
 		 * Master now aligned
 		 * - if slave is not then we must set its width down
 		 */
 		if (sbus->addr % sbus->buswidth) {
@@ -761,63 +688,51 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		 * Make largest possible LLIs until less than one bus
 		 * width left
 		 */
-		while (remainder > (mbus->buswidth - 1)) {
-			int lli_len, target_len;
-			int tsize;
-			int odd_bytes;
+		while (bd.remainder > (mbus->buswidth - 1)) {
+			size_t lli_len, target_len, tsize, odd_bytes;
 
 			/*
 			 * If enough left try to send max possible,
 			 * otherwise try to send the remainder
 			 */
-			target_len = remainder;
-			if (remainder > max_bytes_per_lli)
-				target_len = max_bytes_per_lli;
+			target_len = min(bd.remainder, max_bytes_per_lli);
 
 			/*
-			 * Set bus lengths for incrementing busses
-			 * to number of bytes which fill to next memory
-			 * boundary
+			 * Set bus lengths for incrementing buses to the
+			 * number of bytes which fill to next memory boundary,
+			 * limiting on the target length calculated above.
 			 */
 			if (cctl & PL080_CONTROL_SRC_INCR)
-				txd->srcbus.fill_bytes =
-					pl08x_pre_boundary(
-						txd->srcbus.addr,
-						remainder);
+				bd.srcbus.fill_bytes =
+					pl08x_pre_boundary(bd.srcbus.addr,
+						target_len);
 			else
-				txd->srcbus.fill_bytes =
-					max_bytes_per_lli;
+				bd.srcbus.fill_bytes = target_len;
 
 			if (cctl & PL080_CONTROL_DST_INCR)
-				txd->dstbus.fill_bytes =
-					pl08x_pre_boundary(
-						txd->dstbus.addr,
-						remainder);
+				bd.dstbus.fill_bytes =
+					pl08x_pre_boundary(bd.dstbus.addr,
+						target_len);
 			else
-				txd->dstbus.fill_bytes =
-					max_bytes_per_lli;
+				bd.dstbus.fill_bytes = target_len;
 
-			/*
-			 * Find the nearest
-			 */
-			lli_len = min(txd->srcbus.fill_bytes,
-				txd->dstbus.fill_bytes);
+			/* Find the nearest */
+			lli_len = min(bd.srcbus.fill_bytes,
+				bd.dstbus.fill_bytes);
 
-			BUG_ON(lli_len > remainder);
+			BUG_ON(lli_len > bd.remainder);
 
 			if (lli_len <= 0) {
 				dev_err(&pl08x->adev->dev,
-					"%s lli_len is %d, <= 0\n",
+					"%s lli_len is %zu, <= 0\n",
 					__func__, lli_len);
 				return 0;
 			}
 
 			if (lli_len == target_len) {
 				/*
-				 * Can send what we wanted
-				 */
-				/*
-				 * Maintain alignment
+				 * Can send what we wanted.
+				 * Maintain alignment
 				 */
 				lli_len = (lli_len/mbus->buswidth) *
 					mbus->buswidth;
@@ -825,17 +740,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 			} else {
 				/*
 				 * So now we know how many bytes to transfer
-				 * to get to the nearest boundary
-				 * The next lli will past the boundary
-				 * - however we may be working to a boundary
-				 * on the slave bus
-				 * We need to ensure the master stays aligned
+				 * to get to the nearest boundary.  The next
+				 * LLI will past the boundary.  However, we
+				 * may be working to a boundary on the slave
+				 * bus.  We need to ensure the master stays
+				 * aligned, and that we are working in
+				 * multiples of the bus widths.
 				 */
 				odd_bytes = lli_len % mbus->buswidth;
-				/*
-				 * - and that we are working in multiples
-				 * of the bus widths
-				 */
 				lli_len -= odd_bytes;
 
 			}
@@ -855,41 +767,38 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
 			if (target_len != lli_len) {
 				dev_vdbg(&pl08x->adev->dev,
-					"%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n",
+					"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
 					__func__, target_len, lli_len, txd->len);
 			}
 
 			cctl = pl08x_cctl_bits(cctl,
-					       txd->srcbus.buswidth,
-					       txd->dstbus.buswidth,
+					       bd.srcbus.buswidth,
+					       bd.dstbus.buswidth,
 					       tsize);
 
 			dev_vdbg(&pl08x->adev->dev,
-				"%s fill lli with single lli chunk of size %08x (remainder %08x)\n",
-				__func__, lli_len, remainder);
-			num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
-					num_llis, lli_len, cctl,
-					&remainder);
+				"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
+				__func__, lli_len, bd.remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++,
+					lli_len, cctl);
 			total_bytes += lli_len;
 		}
 
 
 		if (odd_bytes) {
 			/*
-			 * Creep past the boundary,
-			 * maintaining master alignment
+			 * Creep past the boundary, maintaining
+			 * master alignment
 			 */
 			int j;
 			for (j = 0; (j < mbus->buswidth)
-					&& (remainder); j++) {
+					&& (bd.remainder); j++) {
 				cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
 				dev_vdbg(&pl08x->adev->dev,
-					"%s align with boundardy, single byte (remain %08x)\n",
-					__func__, remainder);
-				num_llis =
-					pl08x_fill_lli_for_desc(pl08x,
-						txd, num_llis, 1,
-						cctl, &remainder);
+					"%s align with boundary, single byte (remain 0x%08zx)\n",
+					__func__, bd.remainder);
+				pl08x_fill_lli_for_desc(&bd,
+					num_llis++, 1, cctl);
 				total_bytes++;
 			}
 		}
@@ -898,25 +807,18 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		/*
 		 * Send any odd bytes
 		 */
-		if (remainder < 0) {
-			dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
-				__func__, remainder);
-			return 0;
-		}
-
-		while (remainder) {
+		while (bd.remainder) {
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
 			dev_vdbg(&pl08x->adev->dev,
-				"%s align with boundardy, single odd byte (remain %d)\n",
-				__func__, remainder);
-			num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
-				1, cctl, &remainder);
+				"%s align with boundary, single odd byte (remain %zu)\n",
+				__func__, bd.remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 	}
 	if (total_bytes != txd->len) {
 		dev_err(&pl08x->adev->dev,
-			"%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n",
+			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
 			__func__, total_bytes, txd->len);
 		return 0;
 	}
@@ -927,41 +829,12 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 			__func__, (u32) MAX_NUM_TSFR_LLIS);
 		return 0;
 	}
-	/*
-	 * Decide whether this is a loop or a terminated transfer
-	 */
-	llis_va = txd->llis_va;
-	llis_bus = (struct lli *) txd->llis_bus;
-
-	if (cd->circular_buffer) {
-		/*
-		 * Loop the circular buffer so that the next element
-		 * points back to the beginning of the LLI.
-		 */
-		llis_va[num_llis - 1].next =
-			(dma_addr_t)((unsigned int)&(llis_bus[0]));
-	} else {
-		/*
-		 * On non-circular buffers, the final LLI terminates
-		 * the LLI.
-		 */
-		llis_va[num_llis - 1].next = 0;
-		/*
-		 * The final LLI element shall also fire an interrupt
-		 */
-		llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
-	}
-
-	/* Now store the channel register values */
-	txd->csrc = llis_va[0].src;
-	txd->cdst = llis_va[0].dst;
-	if (num_llis > 1)
-		txd->clli = llis_va[0].next;
-	else
-		txd->clli = 0;
 
-	txd->cctl = llis_va[0].cctl;
-	/* ccfg will be set at physical channel allocation time */
+	llis_va = txd->llis_va;
+	/* The final LLI terminates the LLI. */
+	llis_va[num_llis - 1].lli = 0;
+	/* The final LLI element shall also fire an interrupt. */
+	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
 
 #ifdef VERBOSE_DEBUG
 	{
@@ -969,13 +842,13 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
 		for (i = 0; i < num_llis; i++) {
 			dev_vdbg(&pl08x->adev->dev,
-				"lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n",
+				"lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
 				i,
 				&llis_va[i],
 				llis_va[i].src,
 				llis_va[i].dst,
 				llis_va[i].cctl,
-				llis_va[i].next
+				llis_va[i].lli
 				);
 		}
 	}
@@ -988,14 +861,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 	struct pl08x_txd *txd)
 {
-	if (!txd)
-		dev_err(&pl08x->adev->dev,
-			"%s no descriptor to free\n",
-			__func__);
-
 	/* Free the LLI */
-	dma_pool_free(pl08x->pool, txd->llis_va,
-		txd->llis_bus);
+	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
 
 	pl08x->pool_ctr--;
 
@@ -1008,13 +875,12 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 	struct pl08x_txd *txdi = NULL;
 	struct pl08x_txd *next;
 
-	if (!list_empty(&plchan->desc_list)) {
+	if (!list_empty(&plchan->pend_list)) {
 		list_for_each_entry_safe(txdi,
-				next, &plchan->desc_list, node) {
+				next, &plchan->pend_list, node) {
 			list_del(&txdi->node);
 			pl08x_free_txd(pl08x, txdi);
 		}
-
 	}
 }
 
@@ -1069,6 +935,12 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 			return -EBUSY;
 		}
 		ch->signal = ret;
+
+		/* Assign the flow control signal to this channel */
+		if (txd->direction == DMA_TO_DEVICE)
+			txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+		else if (txd->direction == DMA_FROM_DEVICE)
+			txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
 	}
 
 	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
@@ -1076,19 +948,54 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan, | |||
1076 | ch->signal, | 948 | ch->signal, |
1077 | plchan->name); | 949 | plchan->name); |
1078 | 950 | ||
951 | plchan->phychan_hold++; | ||
1079 | plchan->phychan = ch; | 952 | plchan->phychan = ch; |
1080 | 953 | ||
1081 | return 0; | 954 | return 0; |
1082 | } | 955 | } |
1083 | 956 | ||
957 | static void release_phy_channel(struct pl08x_dma_chan *plchan) | ||
958 | { | ||
959 | struct pl08x_driver_data *pl08x = plchan->host; | ||
960 | |||
961 | if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) { | ||
962 | pl08x->pd->put_signal(plchan); | ||
963 | plchan->phychan->signal = -1; | ||
964 | } | ||
965 | pl08x_put_phy_channel(pl08x, plchan->phychan); | ||
966 | plchan->phychan = NULL; | ||
967 | } | ||
968 | |||
1084 | static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) | 969 | static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) |
1085 | { | 970 | { |
1086 | struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); | 971 | struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); |
972 | struct pl08x_txd *txd = to_pl08x_txd(tx); | ||
973 | unsigned long flags; | ||
974 | |||
975 | spin_lock_irqsave(&plchan->lock, flags); | ||
976 | |||
977 | plchan->chan.cookie += 1; | ||
978 | if (plchan->chan.cookie < 0) | ||
979 | plchan->chan.cookie = 1; | ||
980 | tx->cookie = plchan->chan.cookie; | ||
981 | |||
982 | /* Put this onto the pending list */ | ||
983 | list_add_tail(&txd->node, &plchan->pend_list); | ||
984 | |||
985 | /* | ||
986 | * If there was no physical channel available for this memcpy, | ||
987 | * stack the request up and indicate that the channel is waiting | ||
988 | * for a free physical channel. | ||
989 | */ | ||
990 | if (!plchan->slave && !plchan->phychan) { | ||
991 | /* Do this memcpy whenever there is a channel ready */ | ||
992 | plchan->state = PL08X_CHAN_WAITING; | ||
993 | plchan->waiting = txd; | ||
994 | } else { | ||
995 | plchan->phychan_hold--; | ||
996 | } | ||
1087 | 997 | ||
1088 | atomic_inc(&plchan->last_issued); | 998 | spin_unlock_irqrestore(&plchan->lock, flags); |
1089 | tx->cookie = atomic_read(&plchan->last_issued); | ||
1090 | /* This unlock follows the lock in the prep() function */ | ||
1091 | spin_unlock_irqrestore(&plchan->lock, plchan->lockflags); | ||
1092 | 999 | ||
1093 | return tx->cookie; | 1000 | return tx->cookie; |
1094 | } | 1001 | } |
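pl08x_tx_submit() now hands out cookies from chan.cookie under the channel lock instead of the driver-private atomic last_issued. dma_cookie_t is a signed int and negative values are reserved for errors, so on wrap the counter restarts at 1. A standalone sketch of the wrap rule (unsigned arithmetic stands in for the kernel's no-strict-overflow build):

#include <assert.h>
#include <limits.h>

typedef int dma_cookie_t;

/* Advance a cookie; anything <= 0 after the increment becomes 1 */
static dma_cookie_t cookie_advance(dma_cookie_t cookie)
{
	cookie = (dma_cookie_t)((unsigned int)cookie + 1u);
	if (cookie <= 0)
		cookie = 1;
	return cookie;
}

int main(void)
{
	assert(cookie_advance(41) == 42);
	assert(cookie_advance(INT_MAX) == 1);	/* wraps past the error range */
	return 0;
}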
@@ -1102,10 +1009,9 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( | |||
1102 | } | 1009 | } |
1103 | 1010 | ||
1104 | /* | 1011 | /* |
1105 | * Code accessing dma_async_is_complete() in a tight loop | 1012 | * Code accessing dma_async_is_complete() in a tight loop may give problems. |
1106 | * may give problems - could schedule where indicated. | 1013 | * If slaves are relying on interrupts to signal completion this function |
1107 | * If slaves are relying on interrupts to signal completion this | 1014 | * must not be called with interrupts disabled. |
1108 | * function must not be called with interrupts disabled | ||
1109 | */ | 1015 | */ |
1110 | static enum dma_status | 1016 | static enum dma_status |
1111 | pl08x_dma_tx_status(struct dma_chan *chan, | 1017 | pl08x_dma_tx_status(struct dma_chan *chan, |
@@ -1118,7 +1024,7 @@ pl08x_dma_tx_status(struct dma_chan *chan, | |||
1118 | enum dma_status ret; | 1024 | enum dma_status ret; |
1119 | u32 bytesleft = 0; | 1025 | u32 bytesleft = 0; |
1120 | 1026 | ||
1121 | last_used = atomic_read(&plchan->last_issued); | 1027 | last_used = plchan->chan.cookie; |
1122 | last_complete = plchan->lc; | 1028 | last_complete = plchan->lc; |
1123 | 1029 | ||
1124 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 1030 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
@@ -1128,13 +1034,9 @@ pl08x_dma_tx_status(struct dma_chan *chan, | |||
1128 | } | 1034 | } |
1129 | 1035 | ||
1130 | /* | 1036 | /* |
1131 | * schedule(); could be inserted here | ||
1132 | */ | ||
1133 | |||
1134 | /* | ||
1135 | * This cookie is not complete yet | 1037 |
1136 | */ | 1038 | */ |
1137 | last_used = atomic_read(&plchan->last_issued); | 1039 | last_used = plchan->chan.cookie; |
1138 | last_complete = plchan->lc; | 1040 | last_complete = plchan->lc; |
1139 | 1041 | ||
1140 | /* Get number of bytes left in the active transactions and queue */ | 1042 | /* Get number of bytes left in the active transactions and queue */ |
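pl08x_dma_tx_status() compares the queried cookie against the last completed one (plchan->lc) and the last issued one, which is now simply chan.cookie. A standalone model of that window check, mirroring the dma_async_is_complete() helper from dmaengine.h: cookies inside the half-open window (last_complete, last_used] are in flight, anything else is reported complete (finished, or from a previous wrap):

#include <stdio.h>

enum dma_status { DMA_IN_PROGRESS, DMA_SUCCESS };

static enum dma_status is_complete(int cookie, int last_complete,
				   int last_used)
{
	if (last_complete <= last_used) {
		if (cookie <= last_complete || cookie > last_used)
			return DMA_SUCCESS;
	} else {		/* window straddles the wrap point */
		if (cookie <= last_complete && cookie > last_used)
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}

int main(void)
{
	/* with lc = 5 and last issued = 8, cookie 5 is done, 7 is not */
	printf("5: %s\n", is_complete(5, 5, 8) == DMA_SUCCESS ?
	       "complete" : "in progress");
	printf("7: %s\n", is_complete(7, 5, 8) == DMA_SUCCESS ?
	       "complete" : "in progress");
	return 0;
}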
@@ -1199,37 +1101,35 @@ static const struct burst_table burst_sizes[] = { | |||
1199 | }, | 1101 | }, |
1200 | }; | 1102 | }; |
1201 | 1103 | ||
1202 | static void dma_set_runtime_config(struct dma_chan *chan, | 1104 | static int dma_set_runtime_config(struct dma_chan *chan, |
1203 | struct dma_slave_config *config) | 1105 | struct dma_slave_config *config) |
1204 | { | 1106 | { |
1205 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1107 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1206 | struct pl08x_driver_data *pl08x = plchan->host; | 1108 | struct pl08x_driver_data *pl08x = plchan->host; |
1207 | struct pl08x_channel_data *cd = plchan->cd; | 1109 | struct pl08x_channel_data *cd = plchan->cd; |
1208 | enum dma_slave_buswidth addr_width; | 1110 | enum dma_slave_buswidth addr_width; |
1111 | dma_addr_t addr; | ||
1209 | u32 maxburst; | 1112 | u32 maxburst; |
1210 | u32 cctl = 0; | 1113 | u32 cctl = 0; |
1211 | /* Mask out all except src and dst channel */ | 1114 | int i; |
1212 | u32 ccfg = cd->ccfg & 0x000003DEU; | 1115 | |
1213 | int i = 0; | 1116 | if (!plchan->slave) |
1117 | return -EINVAL; | ||
1214 | 1118 | ||
1215 | /* Transfer direction */ | 1119 | /* Transfer direction */ |
1216 | plchan->runtime_direction = config->direction; | 1120 | plchan->runtime_direction = config->direction; |
1217 | if (config->direction == DMA_TO_DEVICE) { | 1121 | if (config->direction == DMA_TO_DEVICE) { |
1218 | plchan->runtime_addr = config->dst_addr; | 1122 | addr = config->dst_addr; |
1219 | cctl |= PL080_CONTROL_SRC_INCR; | ||
1220 | ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; | ||
1221 | addr_width = config->dst_addr_width; | 1123 | addr_width = config->dst_addr_width; |
1222 | maxburst = config->dst_maxburst; | 1124 | maxburst = config->dst_maxburst; |
1223 | } else if (config->direction == DMA_FROM_DEVICE) { | 1125 | } else if (config->direction == DMA_FROM_DEVICE) { |
1224 | plchan->runtime_addr = config->src_addr; | 1126 | addr = config->src_addr; |
1225 | cctl |= PL080_CONTROL_DST_INCR; | ||
1226 | ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; | ||
1227 | addr_width = config->src_addr_width; | 1127 | addr_width = config->src_addr_width; |
1228 | maxburst = config->src_maxburst; | 1128 | maxburst = config->src_maxburst; |
1229 | } else { | 1129 | } else { |
1230 | dev_err(&pl08x->adev->dev, | 1130 | dev_err(&pl08x->adev->dev, |
1231 | "bad runtime_config: alien transfer direction\n"); | 1131 | "bad runtime_config: alien transfer direction\n"); |
1232 | return; | 1132 | return -EINVAL; |
1233 | } | 1133 | } |
1234 | 1134 | ||
1235 | switch (addr_width) { | 1135 | switch (addr_width) { |
@@ -1248,42 +1148,40 @@ static void dma_set_runtime_config(struct dma_chan *chan, | |||
1248 | default: | 1148 | default: |
1249 | dev_err(&pl08x->adev->dev, | 1149 | dev_err(&pl08x->adev->dev, |
1250 | "bad runtime_config: alien address width\n"); | 1150 | "bad runtime_config: alien address width\n"); |
1251 | return; | 1151 | return -EINVAL; |
1252 | } | 1152 | } |
1253 | 1153 | ||
1254 | /* | 1154 | /* |
1255 | * Now decide on a maxburst: | 1155 | * Now decide on a maxburst: |
1256 | * If this channel will only request single transfers, set | 1156 | * If this channel will only request single transfers, set this |
1257 | * this down to ONE element. | 1157 | * down to ONE element. Also select one element if no maxburst |
1158 | * is specified. | ||
1258 | */ | 1159 | */ |
1259 | if (plchan->cd->single) { | 1160 | if (plchan->cd->single || maxburst == 0) { |
1260 | cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1161 | cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | |
1261 | (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); | 1162 | (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); |
1262 | } else { | 1163 | } else { |
1263 | while (i < ARRAY_SIZE(burst_sizes)) { | 1164 | for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) |
1264 | if (burst_sizes[i].burstwords <= maxburst) | 1165 | if (burst_sizes[i].burstwords <= maxburst) |
1265 | break; | 1166 | break; |
1266 | i++; | ||
1267 | } | ||
1268 | cctl |= burst_sizes[i].reg; | 1167 | cctl |= burst_sizes[i].reg; |
1269 | } | 1168 | } |
1270 | 1169 | ||
1271 | /* Access the cell in privileged mode, non-bufferable, non-cacheable */ | 1170 | plchan->runtime_addr = addr; |
1272 | cctl &= ~PL080_CONTROL_PROT_MASK; | ||
1273 | cctl |= PL080_CONTROL_PROT_SYS; | ||
1274 | 1171 | ||
1275 | /* Modify the default channel data to fit PrimeCell request */ | 1172 | /* Modify the default channel data to fit PrimeCell request */ |
1276 | cd->cctl = cctl; | 1173 | cd->cctl = cctl; |
1277 | cd->ccfg = ccfg; | ||
1278 | 1174 | ||
1279 | dev_dbg(&pl08x->adev->dev, | 1175 | dev_dbg(&pl08x->adev->dev, |
1280 | "configured channel %s (%s) for %s, data width %d, " | 1176 | "configured channel %s (%s) for %s, data width %d, " |
1281 | "maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n", | 1177 | "maxburst %d words, LE, CCTL=0x%08x\n", |
1282 | dma_chan_name(chan), plchan->name, | 1178 | dma_chan_name(chan), plchan->name, |
1283 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", | 1179 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", |
1284 | addr_width, | 1180 | addr_width, |
1285 | maxburst, | 1181 | maxburst, |
1286 | cctl, ccfg); | 1182 | cctl); |
1183 | |||
1184 | return 0; | ||
1287 | } | 1185 | } |
1288 | 1186 | ||
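dma_set_runtime_config() is reached via the DMA_SLAVE_CONFIG control operation and now refuses non-slave channels and propagates errors to the caller. Note that the burst_sizes[] scan above only terminates correctly because the table is ordered from largest to smallest burstwords. A hedged client-side sketch; the FIFO address is a placeholder, not a real peripheral:

#include <linux/dmaengine.h>

/* Configure a channel for memory-to-peripheral transfers (sketch) */
static int example_config_tx(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= 0x80100020,	/* hypothetical FIFO */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
	};

	return dmaengine_slave_config(chan, &cfg);
}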
1289 | /* | 1187 | /* |
@@ -1293,35 +1191,26 @@ static void dma_set_runtime_config(struct dma_chan *chan, | |||
1293 | static void pl08x_issue_pending(struct dma_chan *chan) | 1191 | static void pl08x_issue_pending(struct dma_chan *chan) |
1294 | { | 1192 | { |
1295 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1193 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1296 | struct pl08x_driver_data *pl08x = plchan->host; | ||
1297 | unsigned long flags; | 1194 | unsigned long flags; |
1298 | 1195 | ||
1299 | spin_lock_irqsave(&plchan->lock, flags); | 1196 | spin_lock_irqsave(&plchan->lock, flags); |
1300 | /* Something is already active */ | 1197 | /* Something is already active, or we're waiting for a channel... */ |
1301 | if (plchan->at) { | 1198 | if (plchan->at || plchan->state == PL08X_CHAN_WAITING) { |
1302 | spin_unlock_irqrestore(&plchan->lock, flags); | 1199 | spin_unlock_irqrestore(&plchan->lock, flags); |
1303 | return; | ||
1304 | } | ||
1305 | |||
1306 | /* Didn't get a physical channel so waiting for it ... */ | ||
1307 | if (plchan->state == PL08X_CHAN_WAITING) | ||
1308 | return; | 1200 | return; |
1201 | } | ||
1309 | 1202 | ||
1310 | /* Take the first element in the queue and execute it */ | 1203 | /* Take the first element in the queue and execute it */ |
1311 | if (!list_empty(&plchan->desc_list)) { | 1204 | if (!list_empty(&plchan->pend_list)) { |
1312 | struct pl08x_txd *next; | 1205 | struct pl08x_txd *next; |
1313 | 1206 | ||
1314 | next = list_first_entry(&plchan->desc_list, | 1207 | next = list_first_entry(&plchan->pend_list, |
1315 | struct pl08x_txd, | 1208 | struct pl08x_txd, |
1316 | node); | 1209 | node); |
1317 | list_del(&next->node); | 1210 | list_del(&next->node); |
1318 | plchan->at = next; | ||
1319 | plchan->state = PL08X_CHAN_RUNNING; | 1211 | plchan->state = PL08X_CHAN_RUNNING; |
1320 | 1212 | ||
1321 | /* Configure the physical channel for the active txd */ | 1213 | pl08x_start_txd(plchan, next); |
1322 | pl08x_config_phychan_for_txd(plchan); | ||
1323 | pl08x_set_cregs(pl08x, plchan->phychan); | ||
1324 | pl08x_enable_phy_chan(pl08x, plchan->phychan); | ||
1325 | } | 1214 | } |
1326 | 1215 | ||
1327 | spin_unlock_irqrestore(&plchan->lock, flags); | 1216 | spin_unlock_irqrestore(&plchan->lock, flags); |
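pl08x_issue_pending() only ever starts the head of pend_list and backs off while something is active or the channel is still waiting for a physical channel. The generic client flow that drives it, sketched for a memcpy (kernel context, channel already acquired via dma_request_channel()):

#include <linux/dmaengine.h>

static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = chan->device->device_prep_dma_memcpy(chan, dst, src,
						    len, DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	cookie = desc->tx_submit(desc);		/* onto pend_list */
	if (dma_submit_error(cookie))
		return cookie;

	dma_async_issue_pending(chan);		/* start the head txd */
	return 0;
}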
@@ -1330,30 +1219,17 @@ static void pl08x_issue_pending(struct dma_chan *chan) | |||
1330 | static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, | 1219 | static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, |
1331 | struct pl08x_txd *txd) | 1220 | struct pl08x_txd *txd) |
1332 | { | 1221 | { |
1333 | int num_llis; | ||
1334 | struct pl08x_driver_data *pl08x = plchan->host; | 1222 | struct pl08x_driver_data *pl08x = plchan->host; |
1335 | int ret; | 1223 | unsigned long flags; |
1224 | int num_llis, ret; | ||
1336 | 1225 | ||
1337 | num_llis = pl08x_fill_llis_for_desc(pl08x, txd); | 1226 | num_llis = pl08x_fill_llis_for_desc(pl08x, txd); |
1338 | 1227 | if (!num_llis) { | |
1339 | if (!num_llis) | 1228 | kfree(txd); |
1340 | return -EINVAL; | 1229 | return -EINVAL; |
1230 | } | ||
1341 | 1231 | ||
1342 | spin_lock_irqsave(&plchan->lock, plchan->lockflags); | 1232 | spin_lock_irqsave(&plchan->lock, flags); |
1343 | |||
1344 | /* | ||
1345 | * If this device is not using a circular buffer then | ||
1346 | * queue this new descriptor for transfer. | ||
1347 | * The descriptor for a circular buffer continues | ||
1348 | * to be used until the channel is freed. | ||
1349 | */ | ||
1350 | if (txd->cd->circular_buffer) | ||
1351 | dev_err(&pl08x->adev->dev, | ||
1352 | "%s attempting to queue a circular buffer\n", | ||
1353 | __func__); | ||
1354 | else | ||
1355 | list_add_tail(&txd->node, | ||
1356 | &plchan->desc_list); | ||
1357 | 1233 | ||
1358 | /* | 1234 | /* |
1359 | * See if we already have a physical channel allocated, | 1235 | * See if we already have a physical channel allocated, |
@@ -1362,45 +1238,74 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, | |||
1362 | ret = prep_phy_channel(plchan, txd); | 1238 | ret = prep_phy_channel(plchan, txd); |
1363 | if (ret) { | 1239 | if (ret) { |
1364 | /* | 1240 | /* |
1365 | * No physical channel available, we will | 1241 | * No physical channel was available. |
1366 | * stack up the memcpy channels until there is a channel | 1242 | * |
1367 | * available to handle it whereas slave transfers may | 1243 | * memcpy transfers can be sorted out at submission time. |
1368 | * have been denied due to platform channel muxing restrictions | 1244 | * |
1369 | * and since there is no guarantee that this will ever be | 1245 | * Slave transfers may have been denied due to platform |
1370 | * resolved, and since the signal must be aquired AFTER | 1246 | * channel muxing restrictions. Since there is no guarantee |
1371 | * aquiring the physical channel, we will let them be NACK:ed | 1247 | * that this will ever be resolved, and the signal must be |
1372 | * with -EBUSY here. The drivers can alway retry the prep() | 1248 | * acquired AFTER acquiring the physical channel, we will let |
1373 | * call if they are eager on doing this using DMA. | 1249 | * them be NACK:ed with -EBUSY here. The drivers can retry |
1250 | * the prep() call if they are eager on doing this using DMA. | ||
1374 | */ | 1251 | */ |
1375 | if (plchan->slave) { | 1252 | if (plchan->slave) { |
1376 | pl08x_free_txd_list(pl08x, plchan); | 1253 | pl08x_free_txd_list(pl08x, plchan); |
1377 | spin_unlock_irqrestore(&plchan->lock, plchan->lockflags); | 1254 | pl08x_free_txd(pl08x, txd); |
1255 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1378 | return -EBUSY; | 1256 | return -EBUSY; |
1379 | } | 1257 | } |
1380 | /* Do this memcpy whenever there is a channel ready */ | ||
1381 | plchan->state = PL08X_CHAN_WAITING; | ||
1382 | plchan->waiting = txd; | ||
1383 | } else | 1258 | } else |
1384 | /* | 1259 | /* |
1385 | * Else we're all set, paused and ready to roll, | 1260 | * Else we're all set, paused and ready to roll, status |
1386 | * status will switch to PL08X_CHAN_RUNNING when | 1261 | * will switch to PL08X_CHAN_RUNNING when we call |
1387 | * we call issue_pending(). If there is something | 1262 | * issue_pending(). If there is something running on the |
1388 | * running on the channel already we don't change | 1263 | * channel already we don't change its state. |
1389 | * its state. | ||
1390 | */ | 1264 | */ |
1391 | if (plchan->state == PL08X_CHAN_IDLE) | 1265 | if (plchan->state == PL08X_CHAN_IDLE) |
1392 | plchan->state = PL08X_CHAN_PAUSED; | 1266 | plchan->state = PL08X_CHAN_PAUSED; |
1393 | 1267 | ||
1394 | /* | 1268 | spin_unlock_irqrestore(&plchan->lock, flags); |
1395 | * Notice that we leave plchan->lock locked on purpose: | ||
1396 | * it will be unlocked in the subsequent tx_submit() | ||
1397 | * call. This is a consequence of the current API. | ||
1398 | */ | ||
1399 | 1269 | ||
1400 | return 0; | 1270 | return 0; |
1401 | } | 1271 | } |
1402 | 1272 | ||
1403 | /* | 1273 | /* |
1274 | * Given the source and destination available bus masks, select which | ||
1275 | * will be routed to each port. We try to have source and destination | ||
1276 | * on separate ports, but always respect the allowable settings. | ||
1277 | */ | ||
1278 | static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst) | ||
1279 | { | ||
1280 | u32 cctl = 0; | ||
1281 | |||
1282 | if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) | ||
1283 | cctl |= PL080_CONTROL_DST_AHB2; | ||
1284 | if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) | ||
1285 | cctl |= PL080_CONTROL_SRC_AHB2; | ||
1286 | |||
1287 | return cctl; | ||
1288 | } | ||
1289 | |||
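pl08x_select_bus() tries to route source and destination through different AHB masters whenever the masks permit, falling back to whatever is allowed. A standalone model that enumerates the four mask combinations; the bit values are illustrative stand-ins for PL08X_AHB1/PL08X_AHB2 and the PL080_CONTROL_*_AHB2 defines:

#include <stdio.h>

#define AHB1		(1 << 0)	/* illustrative mask bits */
#define AHB2		(1 << 1)
#define CCTL_SRC_AHB2	(1 << 24)	/* illustrative CCTL positions */
#define CCTL_DST_AHB2	(1 << 25)

/* Mirrors the pl08x_select_bus() decision for allowable master masks */
static unsigned int select_bus(unsigned int src, unsigned int dst)
{
	unsigned int cctl = 0;

	if (!(dst & AHB1) || ((dst & AHB2) && (src & AHB1)))
		cctl |= CCTL_DST_AHB2;
	if (!(src & AHB1) || ((src & AHB2) && !(dst & AHB2)))
		cctl |= CCTL_SRC_AHB2;
	return cctl;
}

int main(void)
{
	unsigned int src, dst, cctl;

	for (src = AHB1; src <= (AHB1 | AHB2); src++)
		for (dst = AHB1; dst <= (AHB1 | AHB2); dst++) {
			cctl = select_bus(src, dst);
			printf("src mask %u, dst mask %u -> src on AHB%d, "
			       "dst on AHB%d\n", src, dst,
			       cctl & CCTL_SRC_AHB2 ? 2 : 1,
			       cctl & CCTL_DST_AHB2 ? 2 : 1);
		}
	return 0;
}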
1290 | static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, | ||
1291 | unsigned long flags) | ||
1292 | { | ||
1293 | struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); | ||
1294 | |||
1295 | if (txd) { | ||
1296 | dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); | ||
1297 | txd->tx.flags = flags; | ||
1298 | txd->tx.tx_submit = pl08x_tx_submit; | ||
1299 | INIT_LIST_HEAD(&txd->node); | ||
1300 | |||
1301 | /* Always enable error and terminal interrupts */ | ||
1302 | txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | | ||
1303 | PL080_CONFIG_TC_IRQ_MASK; | ||
1304 | } | ||
1305 | return txd; | ||
1306 | } | ||
1307 | |||
1308 | /* | ||
1404 | * Initialize a descriptor to be used by memcpy submit | 1309 | * Initialize a descriptor to be used by memcpy submit |
1405 | */ | 1310 | */ |
1406 | static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( | 1311 | static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( |
@@ -1412,40 +1317,38 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( | |||
1412 | struct pl08x_txd *txd; | 1317 | struct pl08x_txd *txd; |
1413 | int ret; | 1318 | int ret; |
1414 | 1319 | ||
1415 | txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); | 1320 | txd = pl08x_get_txd(plchan, flags); |
1416 | if (!txd) { | 1321 | if (!txd) { |
1417 | dev_err(&pl08x->adev->dev, | 1322 | dev_err(&pl08x->adev->dev, |
1418 | "%s no memory for descriptor\n", __func__); | 1323 | "%s no memory for descriptor\n", __func__); |
1419 | return NULL; | 1324 | return NULL; |
1420 | } | 1325 | } |
1421 | 1326 | ||
1422 | dma_async_tx_descriptor_init(&txd->tx, chan); | ||
1423 | txd->direction = DMA_NONE; | 1327 | txd->direction = DMA_NONE; |
1424 | txd->srcbus.addr = src; | 1328 | txd->src_addr = src; |
1425 | txd->dstbus.addr = dest; | 1329 | txd->dst_addr = dest; |
1330 | txd->len = len; | ||
1426 | 1331 | ||
1427 | /* Set platform data for m2m */ | 1332 | /* Set platform data for m2m */ |
1428 | txd->cd = &pl08x->pd->memcpy_channel; | 1333 | txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; |
1334 | txd->cctl = pl08x->pd->memcpy_channel.cctl & | ||
1335 | ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); | ||
1336 | |||
1429 | /* Both to be incremented or the code will break */ | 1337 | /* Both to be incremented or the code will break */ |
1430 | txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; | 1338 | txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; |
1431 | txd->tx.tx_submit = pl08x_tx_submit; | 1339 | |
1432 | txd->tx.callback = NULL; | 1340 | if (pl08x->vd->dualmaster) |
1433 | txd->tx.callback_param = NULL; | 1341 | txd->cctl |= pl08x_select_bus(pl08x, |
1434 | txd->len = len; | 1342 | pl08x->mem_buses, pl08x->mem_buses); |
1435 | 1343 | ||
1436 | INIT_LIST_HEAD(&txd->node); | ||
1437 | ret = pl08x_prep_channel_resources(plchan, txd); | 1344 | ret = pl08x_prep_channel_resources(plchan, txd); |
1438 | if (ret) | 1345 | if (ret) |
1439 | return NULL; | 1346 | return NULL; |
1440 | /* | ||
1441 | * NB: the channel lock is held at this point so tx_submit() | ||
1442 | * must be called in direct succession. | ||
1443 | */ | ||
1444 | 1347 | ||
1445 | return &txd->tx; | 1348 | return &txd->tx; |
1446 | } | 1349 | } |
1447 | 1350 | ||
1448 | struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | 1351 | static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( |
1449 | struct dma_chan *chan, struct scatterlist *sgl, | 1352 | struct dma_chan *chan, struct scatterlist *sgl, |
1450 | unsigned int sg_len, enum dma_data_direction direction, | 1353 | unsigned int sg_len, enum dma_data_direction direction, |
1451 | unsigned long flags) | 1354 | unsigned long flags) |
@@ -1453,6 +1356,7 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1453 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1356 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1454 | struct pl08x_driver_data *pl08x = plchan->host; | 1357 | struct pl08x_driver_data *pl08x = plchan->host; |
1455 | struct pl08x_txd *txd; | 1358 | struct pl08x_txd *txd; |
1359 | u8 src_buses, dst_buses; | ||
1456 | int ret; | 1360 | int ret; |
1457 | 1361 | ||
1458 | /* | 1362 | /* |
@@ -1467,14 +1371,12 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1467 | dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", | 1371 | dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", |
1468 | __func__, sgl->length, plchan->name); | 1372 | __func__, sgl->length, plchan->name); |
1469 | 1373 | ||
1470 | txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); | 1374 | txd = pl08x_get_txd(plchan, flags); |
1471 | if (!txd) { | 1375 | if (!txd) { |
1472 | dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); | 1376 | dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); |
1473 | return NULL; | 1377 | return NULL; |
1474 | } | 1378 | } |
1475 | 1379 | ||
1476 | dma_async_tx_descriptor_init(&txd->tx, chan); | ||
1477 | |||
1478 | if (direction != plchan->runtime_direction) | 1380 | if (direction != plchan->runtime_direction) |
1479 | dev_err(&pl08x->adev->dev, "%s DMA setup does not match " | 1381 | dev_err(&pl08x->adev->dev, "%s DMA setup does not match " |
1480 | "the direction configured for the PrimeCell\n", | 1382 | "the direction configured for the PrimeCell\n", |
@@ -1486,37 +1388,47 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1486 | * channel target address dynamically at runtime. | 1388 | * channel target address dynamically at runtime. |
1487 | */ | 1389 | */ |
1488 | txd->direction = direction; | 1390 | txd->direction = direction; |
1391 | txd->len = sgl->length; | ||
1392 | |||
1393 | txd->cctl = plchan->cd->cctl & | ||
1394 | ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | | ||
1395 | PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | | ||
1396 | PL080_CONTROL_PROT_MASK); | ||
1397 | |||
1398 | /* Access the cell in privileged mode, non-bufferable, non-cacheable */ | ||
1399 | txd->cctl |= PL080_CONTROL_PROT_SYS; | ||
1400 | |||
1489 | if (direction == DMA_TO_DEVICE) { | 1401 | if (direction == DMA_TO_DEVICE) { |
1490 | txd->srcbus.addr = sgl->dma_address; | 1402 | txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; |
1403 | txd->cctl |= PL080_CONTROL_SRC_INCR; | ||
1404 | txd->src_addr = sgl->dma_address; | ||
1491 | if (plchan->runtime_addr) | 1405 | if (plchan->runtime_addr) |
1492 | txd->dstbus.addr = plchan->runtime_addr; | 1406 | txd->dst_addr = plchan->runtime_addr; |
1493 | else | 1407 | else |
1494 | txd->dstbus.addr = plchan->cd->addr; | 1408 | txd->dst_addr = plchan->cd->addr; |
1409 | src_buses = pl08x->mem_buses; | ||
1410 | dst_buses = plchan->cd->periph_buses; | ||
1495 | } else if (direction == DMA_FROM_DEVICE) { | 1411 | } else if (direction == DMA_FROM_DEVICE) { |
1412 | txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; | ||
1413 | txd->cctl |= PL080_CONTROL_DST_INCR; | ||
1496 | if (plchan->runtime_addr) | 1414 | if (plchan->runtime_addr) |
1497 | txd->srcbus.addr = plchan->runtime_addr; | 1415 | txd->src_addr = plchan->runtime_addr; |
1498 | else | 1416 | else |
1499 | txd->srcbus.addr = plchan->cd->addr; | 1417 | txd->src_addr = plchan->cd->addr; |
1500 | txd->dstbus.addr = sgl->dma_address; | 1418 | txd->dst_addr = sgl->dma_address; |
1419 | src_buses = plchan->cd->periph_buses; | ||
1420 | dst_buses = pl08x->mem_buses; | ||
1501 | } else { | 1421 | } else { |
1502 | dev_err(&pl08x->adev->dev, | 1422 | dev_err(&pl08x->adev->dev, |
1503 | "%s direction unsupported\n", __func__); | 1423 | "%s direction unsupported\n", __func__); |
1504 | return NULL; | 1424 | return NULL; |
1505 | } | 1425 | } |
1506 | txd->cd = plchan->cd; | 1426 | |
1507 | txd->tx.tx_submit = pl08x_tx_submit; | 1427 | txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses); |
1508 | txd->tx.callback = NULL; | ||
1509 | txd->tx.callback_param = NULL; | ||
1510 | txd->len = sgl->length; | ||
1511 | INIT_LIST_HEAD(&txd->node); | ||
1512 | 1428 | ||
1513 | ret = pl08x_prep_channel_resources(plchan, txd); | 1429 | ret = pl08x_prep_channel_resources(plchan, txd); |
1514 | if (ret) | 1430 | if (ret) |
1515 | return NULL; | 1431 | return NULL; |
1516 | /* | ||
1517 | * NB: the channel lock is held at this point so tx_submit() | ||
1518 | * must be called in direct succession. | ||
1519 | */ | ||
1520 | 1432 | ||
1521 | return &txd->tx; | 1433 | return &txd->tx; |
1522 | } | 1434 | } |
@@ -1531,10 +1443,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1531 | 1443 | ||
1532 | /* Controls applicable to inactive channels */ | 1444 | /* Controls applicable to inactive channels */ |
1533 | if (cmd == DMA_SLAVE_CONFIG) { | 1445 | if (cmd == DMA_SLAVE_CONFIG) { |
1534 | dma_set_runtime_config(chan, | 1446 | return dma_set_runtime_config(chan, |
1535 | (struct dma_slave_config *) | 1447 | (struct dma_slave_config *)arg); |
1536 | arg); | ||
1537 | return 0; | ||
1538 | } | 1448 | } |
1539 | 1449 | ||
1540 | /* | 1450 | /* |
@@ -1552,22 +1462,14 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1552 | plchan->state = PL08X_CHAN_IDLE; | 1462 | plchan->state = PL08X_CHAN_IDLE; |
1553 | 1463 | ||
1554 | if (plchan->phychan) { | 1464 | if (plchan->phychan) { |
1555 | pl08x_stop_phy_chan(plchan->phychan); | 1465 | pl08x_terminate_phy_chan(pl08x, plchan->phychan); |
1556 | 1466 | ||
1557 | /* | 1467 | /* |
1558 | * Mark physical channel as free and free any slave | 1468 | * Mark physical channel as free and free any slave |
1559 | * signal | 1469 | * signal |
1560 | */ | 1470 | */ |
1561 | if ((plchan->phychan->signal >= 0) && | 1471 | release_phy_channel(plchan); |
1562 | pl08x->pd->put_signal) { | ||
1563 | pl08x->pd->put_signal(plchan); | ||
1564 | plchan->phychan->signal = -1; | ||
1565 | } | ||
1566 | pl08x_put_phy_channel(pl08x, plchan->phychan); | ||
1567 | plchan->phychan = NULL; | ||
1568 | } | 1472 | } |
1569 | /* Stop any pending tasklet */ | ||
1570 | tasklet_disable(&plchan->tasklet); | ||
1571 | /* Dequeue jobs and free LLIs */ | 1473 | /* Dequeue jobs and free LLIs */ |
1572 | if (plchan->at) { | 1474 | if (plchan->at) { |
1573 | pl08x_free_txd(pl08x, plchan->at); | 1475 | pl08x_free_txd(pl08x, plchan->at); |
@@ -1609,10 +1511,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) | |||
1609 | 1511 | ||
1610 | /* | 1512 | /* |
1611 | * Just check that the device is there and active | 1513 | * Just check that the device is there and active |
1612 | * TODO: turn this bit on/off depending on the number of | 1514 | * TODO: turn this bit on/off depending on the number of physical channels |
1613 | * physical channels actually used, if it is zero... well | 1515 | * actually used, if it is zero... well shut it off. That will save some |
1614 | * shut it off. That will save some power. Cut the clock | 1516 | * power. Cut the clock at the same time. |
1615 | * at the same time. | ||
1616 | */ | 1517 | */ |
1617 | static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) | 1518 | static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) |
1618 | { | 1519 | { |
@@ -1620,78 +1521,66 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) | |||
1620 | 1521 | ||
1621 | val = readl(pl08x->base + PL080_CONFIG); | 1522 | val = readl(pl08x->base + PL080_CONFIG); |
1622 | val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); | 1523 | val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); |
1623 | /* We implictly clear bit 1 and that means little-endian mode */ | 1524 | /* We implicitly clear bit 1 and that means little-endian mode */ |
1624 | val |= PL080_CONFIG_ENABLE; | 1525 | val |= PL080_CONFIG_ENABLE; |
1625 | writel(val, pl08x->base + PL080_CONFIG); | 1526 | writel(val, pl08x->base + PL080_CONFIG); |
1626 | } | 1527 | } |
1627 | 1528 | ||
1529 | static void pl08x_unmap_buffers(struct pl08x_txd *txd) | ||
1530 | { | ||
1531 | struct device *dev = txd->tx.chan->device->dev; | ||
1532 | |||
1533 | if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
1534 | if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
1535 | dma_unmap_single(dev, txd->src_addr, txd->len, | ||
1536 | DMA_TO_DEVICE); | ||
1537 | else | ||
1538 | dma_unmap_page(dev, txd->src_addr, txd->len, | ||
1539 | DMA_TO_DEVICE); | ||
1540 | } | ||
1541 | if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
1542 | if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
1543 | dma_unmap_single(dev, txd->dst_addr, txd->len, | ||
1544 | DMA_FROM_DEVICE); | ||
1545 | else | ||
1546 | dma_unmap_page(dev, txd->dst_addr, txd->len, | ||
1547 | DMA_FROM_DEVICE); | ||
1548 | } | ||
1549 | } | ||
1550 | |||
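pl08x_unmap_buffers() honours the generic completion flags carried on the descriptor, so a memcpy client that manages its own mappings must opt out when preparing the transfer. A hedged sketch:

#include <linux/dmaengine.h>

/* Keep our own mappings alive: skip the core's unmap on completion */
static struct dma_async_tx_descriptor *prep_keep_mappings(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t len)
{
	unsigned long flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_SRC_UNMAP |
			      DMA_COMPL_SKIP_DEST_UNMAP;

	return chan->device->device_prep_dma_memcpy(chan, dst, src,
						    len, flags);
}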
1628 | static void pl08x_tasklet(unsigned long data) | 1551 | static void pl08x_tasklet(unsigned long data) |
1629 | { | 1552 | { |
1630 | struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; | 1553 | struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; |
1631 | struct pl08x_phy_chan *phychan = plchan->phychan; | ||
1632 | struct pl08x_driver_data *pl08x = plchan->host; | 1554 | struct pl08x_driver_data *pl08x = plchan->host; |
1555 | struct pl08x_txd *txd; | ||
1556 | unsigned long flags; | ||
1633 | 1557 | ||
1634 | if (!plchan) | 1558 | spin_lock_irqsave(&plchan->lock, flags); |
1635 | BUG(); | ||
1636 | |||
1637 | spin_lock(&plchan->lock); | ||
1638 | |||
1639 | if (plchan->at) { | ||
1640 | dma_async_tx_callback callback = | ||
1641 | plchan->at->tx.callback; | ||
1642 | void *callback_param = | ||
1643 | plchan->at->tx.callback_param; | ||
1644 | |||
1645 | /* | ||
1646 | * Update last completed | ||
1647 | */ | ||
1648 | plchan->lc = | ||
1649 | (plchan->at->tx.cookie); | ||
1650 | |||
1651 | /* | ||
1652 | * Callback to signal completion | ||
1653 | */ | ||
1654 | if (callback) | ||
1655 | callback(callback_param); | ||
1656 | 1559 | ||
1657 | /* | 1560 | txd = plchan->at; |
1658 | * Device callbacks should NOT clear | 1561 | plchan->at = NULL; |
1659 | * the current transaction on the channel | ||
1660 | * Linus: sometimes they should? | ||
1661 | */ | ||
1662 | if (!plchan->at) | ||
1663 | BUG(); | ||
1664 | 1562 | ||
1665 | /* | 1563 | if (txd) { |
1666 | * Free the descriptor if it's not for a device | 1564 | /* Update last completed */ |
1667 | * using a circular buffer | 1565 | plchan->lc = txd->tx.cookie; |
1668 | */ | ||
1669 | if (!plchan->at->cd->circular_buffer) { | ||
1670 | pl08x_free_txd(pl08x, plchan->at); | ||
1671 | plchan->at = NULL; | ||
1672 | } | ||
1673 | /* | ||
1674 | * else descriptor for circular | ||
1675 | * buffers only freed when | ||
1676 | * client has disabled dma | ||
1677 | */ | ||
1678 | } | 1566 | } |
1679 | /* | 1567 | |
1680 | * If a new descriptor is queued, set it up | 1568 | /* If a new descriptor is queued, set it up; plchan->at is NULL here */ |
1681 | * plchan->at is NULL here | 1569 | if (!list_empty(&plchan->pend_list)) { |
1682 | */ | ||
1683 | if (!list_empty(&plchan->desc_list)) { | ||
1684 | struct pl08x_txd *next; | 1570 | struct pl08x_txd *next; |
1685 | 1571 | ||
1686 | next = list_first_entry(&plchan->desc_list, | 1572 | next = list_first_entry(&plchan->pend_list, |
1687 | struct pl08x_txd, | 1573 | struct pl08x_txd, |
1688 | node); | 1574 | node); |
1689 | list_del(&next->node); | 1575 | list_del(&next->node); |
1690 | plchan->at = next; | 1576 | |
1691 | /* Configure the physical channel for the next txd */ | 1577 | pl08x_start_txd(plchan, next); |
1692 | pl08x_config_phychan_for_txd(plchan); | 1578 | } else if (plchan->phychan_hold) { |
1693 | pl08x_set_cregs(pl08x, plchan->phychan); | 1579 | /* |
1694 | pl08x_enable_phy_chan(pl08x, plchan->phychan); | 1580 | * This channel is still in use - we have a new txd being |
1581 | * prepared and will soon be queued. Don't give up the | ||
1582 | * physical channel. | ||
1583 | */ | ||
1695 | } else { | 1584 | } else { |
1696 | struct pl08x_dma_chan *waiting = NULL; | 1585 | struct pl08x_dma_chan *waiting = NULL; |
1697 | 1586 | ||
@@ -1699,20 +1588,14 @@ static void pl08x_tasklet(unsigned long data) | |||
1699 | * No more jobs, so free up the physical channel | 1588 | * No more jobs, so free up the physical channel |
1700 | * Free any allocated signal on slave transfers too | 1589 | * Free any allocated signal on slave transfers too |
1701 | */ | 1590 | */ |
1702 | if ((phychan->signal >= 0) && pl08x->pd->put_signal) { | 1591 | release_phy_channel(plchan); |
1703 | pl08x->pd->put_signal(plchan); | ||
1704 | phychan->signal = -1; | ||
1705 | } | ||
1706 | pl08x_put_phy_channel(pl08x, phychan); | ||
1707 | plchan->phychan = NULL; | ||
1708 | plchan->state = PL08X_CHAN_IDLE; | 1592 | plchan->state = PL08X_CHAN_IDLE; |
1709 | 1593 | ||
1710 | /* | 1594 | /* |
1711 | * And NOW before anyone else can grab that free:d | 1595 | * And NOW before anyone else can grab that freed-up |
1712 | * up physical channel, see if there is some memcpy | 1596 | * physical channel, see if there is some memcpy pending |
1713 | * pending that seriously needs to start because of | 1597 | * that seriously needs to start because of being stacked |
1714 | * being stacked up while we were choking the | 1598 | * up while we were choking the physical channels with data. |
1715 | * physical channels with data. | ||
1716 | */ | 1599 | */ |
1717 | list_for_each_entry(waiting, &pl08x->memcpy.channels, | 1600 | list_for_each_entry(waiting, &pl08x->memcpy.channels, |
1718 | chan.device_node) { | 1601 | chan.device_node) { |
@@ -1724,6 +1607,7 @@ static void pl08x_tasklet(unsigned long data) | |||
1724 | ret = prep_phy_channel(waiting, | 1607 | ret = prep_phy_channel(waiting, |
1725 | waiting->waiting); | 1608 | waiting->waiting); |
1726 | BUG_ON(ret); | 1609 | BUG_ON(ret); |
1610 | waiting->phychan_hold--; | ||
1727 | waiting->state = PL08X_CHAN_RUNNING; | 1611 | waiting->state = PL08X_CHAN_RUNNING; |
1728 | waiting->waiting = NULL; | 1612 | waiting->waiting = NULL; |
1729 | pl08x_issue_pending(&waiting->chan); | 1613 | pl08x_issue_pending(&waiting->chan); |
@@ -1732,7 +1616,25 @@ static void pl08x_tasklet(unsigned long data) | |||
1732 | } | 1616 | } |
1733 | } | 1617 | } |
1734 | 1618 | ||
1735 | spin_unlock(&plchan->lock); | 1619 | spin_unlock_irqrestore(&plchan->lock, flags); |
1620 | |||
1621 | if (txd) { | ||
1622 | dma_async_tx_callback callback = txd->tx.callback; | ||
1623 | void *callback_param = txd->tx.callback_param; | ||
1624 | |||
1625 | /* Don't try to unmap buffers on slave channels */ | ||
1626 | if (!plchan->slave) | ||
1627 | pl08x_unmap_buffers(txd); | ||
1628 | |||
1629 | /* Free the descriptor */ | ||
1630 | spin_lock_irqsave(&plchan->lock, flags); | ||
1631 | pl08x_free_txd(pl08x, txd); | ||
1632 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1633 | |||
1634 | /* Callback to signal completion */ | ||
1635 | if (callback) | ||
1636 | callback(callback_param); | ||
1637 | } | ||
1736 | } | 1638 | } |
1737 | 1639 | ||
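The reworked tasklet detaches the finished txd under the channel lock but runs the unmap, the free and the client callback only after dropping it, since the callback is free to prepare and submit new work on the same channel. The pattern in isolation, with hypothetical types:

#include <linux/spinlock.h>

struct mydesc {
	void (*callback)(void *param);
	void *callback_param;
};

struct mychan {
	spinlock_t lock;
	struct mydesc *active;
};

static void complete_one(struct mychan *c)
{
	struct mydesc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	d = c->active;		/* detach while holding the lock */
	c->active = NULL;
	spin_unlock_irqrestore(&c->lock, flags);

	/* notify lock-free; the callback may resubmit on this channel */
	if (d && d->callback)
		d->callback(d->callback_param);
}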
1738 | static irqreturn_t pl08x_irq(int irq, void *dev) | 1640 | static irqreturn_t pl08x_irq(int irq, void *dev) |
@@ -1744,9 +1646,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev) | |||
1744 | 1646 | ||
1745 | val = readl(pl08x->base + PL080_ERR_STATUS); | 1647 | val = readl(pl08x->base + PL080_ERR_STATUS); |
1746 | if (val) { | 1648 | if (val) { |
1747 | /* | 1649 | /* An error interrupt (on one or more channels) */ |
1748 | * An error interrupt (on one or more channels) | ||
1749 | */ | ||
1750 | dev_err(&pl08x->adev->dev, | 1650 | dev_err(&pl08x->adev->dev, |
1751 | "%s error interrupt, register value 0x%08x\n", | 1651 | "%s error interrupt, register value 0x%08x\n", |
1752 | __func__, val); | 1652 | __func__, val); |
@@ -1770,9 +1670,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev) | |||
1770 | mask |= (1 << i); | 1670 | mask |= (1 << i); |
1771 | } | 1671 | } |
1772 | } | 1672 | } |
1773 | /* | 1673 | /* Clear only the terminal interrupts on channels we processed */ |
1774 | * Clear only the terminal interrupts on channels we processed | ||
1775 | */ | ||
1776 | writel(mask, pl08x->base + PL080_TC_CLEAR); | 1674 | writel(mask, pl08x->base + PL080_TC_CLEAR); |
1777 | 1675 | ||
1778 | return mask ? IRQ_HANDLED : IRQ_NONE; | 1676 | return mask ? IRQ_HANDLED : IRQ_NONE; |
@@ -1791,6 +1689,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1791 | int i; | 1689 | int i; |
1792 | 1690 | ||
1793 | INIT_LIST_HEAD(&dmadev->channels); | 1691 | INIT_LIST_HEAD(&dmadev->channels); |
1692 | |||
1794 | /* | 1693 | /* |
1795 | * Register as many memcpy channels as we have physical channels, | 1694 | * Register as many memcpy channels as we have physical channels, |
1796 | * we won't always be able to use them all but the code will have | 1695 | * we won't always be able to use them all but the code will have
@@ -1819,16 +1718,23 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1819 | return -ENOMEM; | 1718 | return -ENOMEM; |
1820 | } | 1719 | } |
1821 | } | 1720 | } |
1721 | if (chan->cd->circular_buffer) { | ||
1722 | dev_err(&pl08x->adev->dev, | ||
1723 | "channel %s: circular buffers not supported\n", | ||
1724 | chan->name); | ||
1725 | kfree(chan); | ||
1726 | continue; | ||
1727 | } | ||
1822 | dev_info(&pl08x->adev->dev, | 1728 | dev_info(&pl08x->adev->dev, |
1823 | "initialize virtual channel \"%s\"\n", | 1729 | "initialize virtual channel \"%s\"\n", |
1824 | chan->name); | 1730 | chan->name); |
1825 | 1731 | ||
1826 | chan->chan.device = dmadev; | 1732 | chan->chan.device = dmadev; |
1827 | atomic_set(&chan->last_issued, 0); | 1733 | chan->chan.cookie = 0; |
1828 | chan->lc = atomic_read(&chan->last_issued); | 1734 | chan->lc = 0; |
1829 | 1735 | ||
1830 | spin_lock_init(&chan->lock); | 1736 | spin_lock_init(&chan->lock); |
1831 | INIT_LIST_HEAD(&chan->desc_list); | 1737 | INIT_LIST_HEAD(&chan->pend_list); |
1832 | tasklet_init(&chan->tasklet, pl08x_tasklet, | 1738 | tasklet_init(&chan->tasklet, pl08x_tasklet, |
1833 | (unsigned long) chan); | 1739 | (unsigned long) chan); |
1834 | 1740 | ||
@@ -1898,7 +1804,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) | |||
1898 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | 1804 | seq_printf(s, "CHANNEL:\tSTATE:\n"); |
1899 | seq_printf(s, "--------\t------\n"); | 1805 | seq_printf(s, "--------\t------\n"); |
1900 | list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { | 1806 | list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { |
1901 | seq_printf(s, "%s\t\t\%s\n", chan->name, | 1807 | seq_printf(s, "%s\t\t%s\n", chan->name, |
1902 | pl08x_state_str(chan->state)); | 1808 | pl08x_state_str(chan->state)); |
1903 | } | 1809 | } |
1904 | 1810 | ||
@@ -1906,7 +1812,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) | |||
1906 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | 1812 | seq_printf(s, "CHANNEL:\tSTATE:\n"); |
1907 | seq_printf(s, "--------\t------\n"); | 1813 | seq_printf(s, "--------\t------\n"); |
1908 | list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { | 1814 | list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { |
1909 | seq_printf(s, "%s\t\t\%s\n", chan->name, | 1815 | seq_printf(s, "%s\t\t%s\n", chan->name, |
1910 | pl08x_state_str(chan->state)); | 1816 | pl08x_state_str(chan->state)); |
1911 | } | 1817 | } |
1912 | 1818 | ||
@@ -1942,7 +1848,7 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) | |||
1942 | static int pl08x_probe(struct amba_device *adev, struct amba_id *id) | 1848 | static int pl08x_probe(struct amba_device *adev, struct amba_id *id) |
1943 | { | 1849 | { |
1944 | struct pl08x_driver_data *pl08x; | 1850 | struct pl08x_driver_data *pl08x; |
1945 | struct vendor_data *vd = id->data; | 1851 | const struct vendor_data *vd = id->data; |
1946 | int ret = 0; | 1852 | int ret = 0; |
1947 | int i; | 1853 | int i; |
1948 | 1854 | ||
@@ -1990,6 +1896,14 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id) | |||
1990 | pl08x->adev = adev; | 1896 | pl08x->adev = adev; |
1991 | pl08x->vd = vd; | 1897 | pl08x->vd = vd; |
1992 | 1898 | ||
1899 | /* By default, AHB1 only. If dualmaster, from platform */ | ||
1900 | pl08x->lli_buses = PL08X_AHB1; | ||
1901 | pl08x->mem_buses = PL08X_AHB1; | ||
1902 | if (pl08x->vd->dualmaster) { | ||
1903 | pl08x->lli_buses = pl08x->pd->lli_buses; | ||
1904 | pl08x->mem_buses = pl08x->pd->mem_buses; | ||
1905 | } | ||
1906 | |||
1993 | /* A DMA memory pool for LLIs, align on 1-byte boundary */ | 1907 | /* A DMA memory pool for LLIs, align on 1-byte boundary */ |
1994 | pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, | 1908 | pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, |
1995 | PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); | 1909 | PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); |
@@ -2009,14 +1923,12 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id) | |||
2009 | /* Turn on the PL08x */ | 1923 | /* Turn on the PL08x */ |
2010 | pl08x_ensure_on(pl08x); | 1924 | pl08x_ensure_on(pl08x); |
2011 | 1925 | ||
2012 | /* | 1926 | /* Attach the interrupt handler */ |
2013 | * Attach the interrupt handler | ||
2014 | */ | ||
2015 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); | 1927 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); |
2016 | writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); | 1928 | writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); |
2017 | 1929 | ||
2018 | ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, | 1930 | ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, |
2019 | vd->name, pl08x); | 1931 | DRIVER_NAME, pl08x); |
2020 | if (ret) { | 1932 | if (ret) { |
2021 | dev_err(&adev->dev, "%s failed to request interrupt %d\n", | 1933 | dev_err(&adev->dev, "%s failed to request interrupt %d\n", |
2022 | __func__, adev->irq[0]); | 1934 | __func__, adev->irq[0]); |
@@ -2087,8 +1999,9 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id) | |||
2087 | 1999 | ||
2088 | amba_set_drvdata(adev, pl08x); | 2000 | amba_set_drvdata(adev, pl08x); |
2089 | init_pl08x_debugfs(pl08x); | 2001 | init_pl08x_debugfs(pl08x); |
2090 | dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n", | 2002 | dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", |
2091 | vd->name, adev->res.start); | 2003 | amba_part(adev), amba_rev(adev), |
2004 | (unsigned long long)adev->res.start, adev->irq[0]); | ||
2092 | return 0; | 2005 | return 0; |
2093 | 2006 | ||
2094 | out_no_slave_reg: | 2007 | out_no_slave_reg: |
@@ -2115,13 +2028,11 @@ out_no_pl08x: | |||
2115 | 2028 | ||
2116 | /* PL080 has 8 channels and the PL081 has just 2 */ | 2029 |
2117 | static struct vendor_data vendor_pl080 = { | 2030 | static struct vendor_data vendor_pl080 = { |
2118 | .name = "PL080", | ||
2119 | .channels = 8, | 2031 | .channels = 8, |
2120 | .dualmaster = true, | 2032 | .dualmaster = true, |
2121 | }; | 2033 | }; |
2122 | 2034 | ||
2123 | static struct vendor_data vendor_pl081 = { | 2035 | static struct vendor_data vendor_pl081 = { |
2124 | .name = "PL081", | ||
2125 | .channels = 2, | 2036 | .channels = 2, |
2126 | .dualmaster = false, | 2037 | .dualmaster = false, |
2127 | }; | 2038 | }; |
@@ -2160,7 +2071,7 @@ static int __init pl08x_init(void) | |||
2160 | retval = amba_driver_register(&pl08x_amba_driver); | 2071 | retval = amba_driver_register(&pl08x_amba_driver); |
2161 | if (retval) | 2072 | if (retval) |
2162 | printk(KERN_WARNING DRIVER_NAME | 2073 | printk(KERN_WARNING DRIVER_NAME |
2163 | "failed to register as an amba device (%d)\n", | 2074 | "failed to register as an AMBA device (%d)\n", |
2164 | retval); | 2075 | retval); |
2165 | return retval; | 2076 | return retval; |
2166 | } | 2077 | } |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index a0f3e6a06e0..3d7d705f026 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -253,7 +253,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) | |||
253 | /* move myself to free_list */ | 253 | /* move myself to free_list */ |
254 | list_move(&desc->desc_node, &atchan->free_list); | 254 | list_move(&desc->desc_node, &atchan->free_list); |
255 | 255 | ||
256 | /* unmap dma addresses */ | 256 | /* unmap dma addresses (not on slave channels) */ |
257 | if (!atchan->chan_common.private) { | 257 | if (!atchan->chan_common.private) { |
258 | struct device *parent = chan2parent(&atchan->chan_common); | 258 | struct device *parent = chan2parent(&atchan->chan_common); |
259 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | 259 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
@@ -583,7 +583,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
583 | desc->lli.ctrlb = ctrlb; | 583 | desc->lli.ctrlb = ctrlb; |
584 | 584 | ||
585 | desc->txd.cookie = 0; | 585 | desc->txd.cookie = 0; |
586 | async_tx_ack(&desc->txd); | ||
587 | 586 | ||
588 | if (!first) { | 587 | if (!first) { |
589 | first = desc; | 588 | first = desc; |
@@ -604,7 +603,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
604 | /* set end-of-link to the last link descriptor of list*/ | 603 | /* set end-of-link to the last link descriptor of list*/ |
605 | set_desc_eol(desc); | 604 | set_desc_eol(desc); |
606 | 605 | ||
607 | desc->txd.flags = flags; /* client is in control of this ack */ | 606 | first->txd.flags = flags; /* client is in control of this ack */ |
608 | 607 | ||
609 | return &first->txd; | 608 | return &first->txd; |
610 | 609 | ||
@@ -670,7 +669,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
670 | if (!desc) | 669 | if (!desc) |
671 | goto err_desc_get; | 670 | goto err_desc_get; |
672 | 671 | ||
673 | mem = sg_phys(sg); | 672 | mem = sg_dma_address(sg); |
674 | len = sg_dma_len(sg); | 673 | len = sg_dma_len(sg); |
675 | mem_width = 2; | 674 | mem_width = 2; |
676 | if (unlikely(mem & 3 || len & 3)) | 675 | if (unlikely(mem & 3 || len & 3)) |
@@ -712,7 +711,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
712 | if (!desc) | 711 | if (!desc) |
713 | goto err_desc_get; | 712 | goto err_desc_get; |
714 | 713 | ||
715 | mem = sg_phys(sg); | 714 | mem = sg_dma_address(sg); |
716 | len = sg_dma_len(sg); | 715 | len = sg_dma_len(sg); |
717 | mem_width = 2; | 716 | mem_width = 2; |
718 | if (unlikely(mem & 3 || len & 3)) | 717 | if (unlikely(mem & 3 || len & 3)) |
@@ -722,7 +721,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
722 | desc->lli.daddr = mem; | 721 | desc->lli.daddr = mem; |
723 | desc->lli.ctrla = ctrla | 722 | desc->lli.ctrla = ctrla |
724 | | ATC_DST_WIDTH(mem_width) | 723 | | ATC_DST_WIDTH(mem_width) |
725 | | len >> mem_width; | 724 | | len >> reg_width; |
726 | desc->lli.ctrlb = ctrlb; | 725 | desc->lli.ctrlb = ctrlb; |
727 | 726 | ||
728 | if (!first) { | 727 | if (!first) { |
@@ -749,8 +748,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
749 | first->txd.cookie = -EBUSY; | 748 | first->txd.cookie = -EBUSY; |
750 | first->len = total_len; | 749 | first->len = total_len; |
751 | 750 | ||
752 | /* last link descriptor of list is responsible of flags */ | 751 | /* first link descriptor of list is responsible for flags */ |
753 | prev->txd.flags = flags; /* client is in control of this ack */ | 752 | first->txd.flags = flags; /* client is in control of this ack */ |
754 | 753 | ||
755 | return &first->txd; | 754 | return &first->txd; |
756 | 755 | ||
@@ -854,11 +853,11 @@ static void atc_issue_pending(struct dma_chan *chan) | |||
854 | 853 | ||
855 | dev_vdbg(chan2dev(chan), "issue_pending\n"); | 854 | dev_vdbg(chan2dev(chan), "issue_pending\n"); |
856 | 855 | ||
856 | spin_lock_bh(&atchan->lock); | ||
857 | if (!atc_chan_is_enabled(atchan)) { | 857 | if (!atc_chan_is_enabled(atchan)) { |
858 | spin_lock_bh(&atchan->lock); | ||
859 | atc_advance_work(atchan); | 858 | atc_advance_work(atchan); |
860 | spin_unlock_bh(&atchan->lock); | ||
861 | } | 859 | } |
860 | spin_unlock_bh(&atchan->lock); | ||
862 | } | 861 | } |
863 | 862 | ||
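The at_hdmac hunk widens the lock so the enabled test and atc_advance_work() form one atomic step; tested outside the lock, the completion tasklet can slip in between and work is then started on a channel that has just gone busy. The race the patch closes, as a comment sketch:

/*
 *   CPU0: atc_issue_pending()        CPU1: completion tasklet
 *   if (!atc_chan_is_enabled())      .
 *       -> sees idle                 lock; start next txd; unlock
 *   lock; atc_advance_work(); unlock
 *       -> kicks hardware that is already running a descriptor
 *
 * Holding the lock across both the test and the advance, as the new
 * code does, serializes issue_pending against the tasklet.
 */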
864 | /** | 863 | /** |
@@ -1210,7 +1209,7 @@ static int __init at_dma_init(void) | |||
1210 | { | 1209 | { |
1211 | return platform_driver_probe(&at_dma_driver, at_dma_probe); | 1210 | return platform_driver_probe(&at_dma_driver, at_dma_probe); |
1212 | } | 1211 | } |
1213 | module_init(at_dma_init); | 1212 | subsys_initcall(at_dma_init); |
1214 | 1213 | ||
1215 | static void __exit at_dma_exit(void) | 1214 | static void __exit at_dma_exit(void) |
1216 | { | 1215 | { |
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 286c3ac6bdc..4de947a450f 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Freescale MPC85xx, MPC83xx DMA Engine support | 2 | * Freescale MPC85xx, MPC83xx DMA Engine support |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | 4 | * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * Author: | 6 | * Author: |
7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | 7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 |
@@ -50,9 +50,11 @@ static void dma_init(struct fsldma_chan *chan) | |||
50 | * EIE - Error interrupt enable | 50 | * EIE - Error interrupt enable |
51 | * EOSIE - End of segments interrupt enable (basic mode) | 51 | * EOSIE - End of segments interrupt enable (basic mode) |
52 | * EOLNIE - End of links interrupt enable | 52 | * EOLNIE - End of links interrupt enable |
53 | * BWC - Bandwidth sharing among channels | ||
53 | */ | 54 | */ |
54 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE | 55 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC |
55 | | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); | 56 | | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE |
57 | | FSL_DMA_MR_EOSIE, 32); | ||
56 | break; | 58 | break; |
57 | case FSL_DMA_IP_83XX: | 59 | case FSL_DMA_IP_83XX: |
58 | /* Set the channel to below modes: | 60 | /* Set the channel to below modes: |
@@ -1322,6 +1324,8 @@ static int __devinit fsldma_of_probe(struct platform_device *op, | |||
1322 | fdev->common.device_control = fsl_dma_device_control; | 1324 | fdev->common.device_control = fsl_dma_device_control; |
1323 | fdev->common.dev = &op->dev; | 1325 | fdev->common.dev = &op->dev; |
1324 | 1326 | ||
1327 | dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); | ||
1328 | |||
1325 | dev_set_drvdata(&op->dev, fdev); | 1329 | dev_set_drvdata(&op->dev, fdev); |
1326 | 1330 | ||
1327 | /* | 1331 | /* |
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index cb4d6ff5159..ba9f403c0fb 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | 2 | * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * Author: | 4 | * Author: |
5 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | 5 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 |
@@ -36,6 +36,13 @@ | |||
36 | #define FSL_DMA_MR_DAHE 0x00002000 | 36 | #define FSL_DMA_MR_DAHE 0x00002000 |
37 | #define FSL_DMA_MR_SAHE 0x00001000 | 37 | #define FSL_DMA_MR_SAHE 0x00001000 |
38 | 38 | ||
39 | /* | ||
40 | * Bandwidth/pause control determines how many bytes a given | ||
41 | * channel is allowed to transfer before the DMA engine pauses | ||
42 | * the current channel and switches to the next channel | ||
43 | */ | ||
44 | #define FSL_DMA_MR_BWC 0x08000000 | ||
45 | |||
39 | /* Special MR definition for MPC8349 */ | 46 | /* Special MR definition for MPC8349 */ |
40 | #define FSL_DMA_MR_EOTIE 0x00000080 | 47 | #define FSL_DMA_MR_EOTIE 0x00000080 |
41 | #define FSL_DMA_MR_PRC_RM 0x00000800 | 48 | #define FSL_DMA_MR_PRC_RM 0x00000800 |
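FSL_DMA_MR_BWC selects the bandwidth/pause value that the 85xx dma_init() now ORs into the mode register alongside the interrupt enables. A standalone sketch of the composition; the BWC value is the one defined above, while the three enable masks are illustrative stand-ins for the FSL_DMA_MR_* bits earlier in this header:

#include <stdio.h>

#define FSL_DMA_MR_BWC		0x08000000	/* from this header */
#define FSL_DMA_MR_EIE		0x00000040	/* illustrative */
#define FSL_DMA_MR_EOLNIE	0x00000100	/* illustrative */
#define FSL_DMA_MR_EOSIE	0x00000200	/* illustrative */

int main(void)
{
	/* matches the 85xx dma_init() write: BWC plus the three enables */
	unsigned int mr = FSL_DMA_MR_BWC | FSL_DMA_MR_EIE |
			  FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE;

	printf("85xx mode register: 0x%08x\n", mr);
	return 0;
}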
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index f629e4961af..e18eaabe92b 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -49,6 +49,7 @@ struct imxdma_channel { | |||
49 | 49 | ||
50 | struct imxdma_engine { | 50 | struct imxdma_engine { |
51 | struct device *dev; | 51 | struct device *dev; |
52 | struct device_dma_parameters dma_parms; | ||
52 | struct dma_device dma_device; | 53 | struct dma_device dma_device; |
53 | struct imxdma_channel channel[MAX_DMA_CHANNELS]; | 54 | struct imxdma_channel channel[MAX_DMA_CHANNELS]; |
54 | }; | 55 | }; |
@@ -242,6 +243,21 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | |||
242 | else | 243 | else |
243 | dmamode = DMA_MODE_WRITE; | 244 | dmamode = DMA_MODE_WRITE; |
244 | 245 | ||
246 | switch (imxdmac->word_size) { | ||
247 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
248 | if (sgl->length & 3 || sgl->dma_address & 3) | ||
249 | return NULL; | ||
250 | break; | ||
251 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
252 | if (sgl->length & 1 || sgl->dma_address & 1) | ||
253 | return NULL; | ||
254 | break; | ||
255 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
256 | break; | ||
257 | default: | ||
258 | return NULL; | ||
259 | } | ||
260 | |||
245 | ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, | 261 | ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, |
246 | dma_length, imxdmac->per_address, dmamode); | 262 | dma_length, imxdmac->per_address, dmamode); |
247 | if (ret) | 263 | if (ret) |
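The switch added above refuses transfers whose length or DMA address is not naturally aligned to the configured slave bus width. A standalone model of the check:

#include <stdbool.h>
#include <stdio.h>

enum buswidth { BUSWIDTH_1 = 1, BUSWIDTH_2 = 2, BUSWIDTH_4 = 4 };

/* Length and DMA address must be aligned to the slave word size */
static bool sg_aligned(unsigned long addr, unsigned int len,
		       enum buswidth width)
{
	switch (width) {
	case BUSWIDTH_4:
		return !(len & 3) && !(addr & 3);
	case BUSWIDTH_2:
		return !(len & 1) && !(addr & 1);
	case BUSWIDTH_1:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("0x1002/6 @2 bytes: %s\n",
	       sg_aligned(0x1002, 6, BUSWIDTH_2) ? "ok" : "rejected");
	printf("0x1002/6 @4 bytes: %s\n",
	       sg_aligned(0x1002, 6, BUSWIDTH_4) ? "ok" : "rejected");
	return 0;
}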
@@ -329,6 +345,9 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
329 | 345 | ||
330 | INIT_LIST_HEAD(&imxdma->dma_device.channels); | 346 | INIT_LIST_HEAD(&imxdma->dma_device.channels); |
331 | 347 | ||
348 | dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); | ||
349 | dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); | ||
350 | |||
332 | /* Initialize channel parameters */ | 351 | /* Initialize channel parameters */ |
333 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | 352 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { |
334 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; | 353 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; |
@@ -346,11 +365,7 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
346 | imxdmac->imxdma = imxdma; | 365 | imxdmac->imxdma = imxdma; |
347 | spin_lock_init(&imxdmac->lock); | 366 | spin_lock_init(&imxdmac->lock); |
348 | 367 | ||
349 | dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); | ||
350 | dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); | ||
351 | |||
352 | imxdmac->chan.device = &imxdma->dma_device; | 368 | imxdmac->chan.device = &imxdma->dma_device; |
353 | imxdmac->chan.chan_id = i; | ||
354 | imxdmac->channel = i; | 369 | imxdmac->channel = i; |
355 | 370 | ||
356 | /* Add the channel to the DMAC list */ | 371 | /* Add the channel to the DMAC list */ |
@@ -370,6 +385,9 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
370 | 385 | ||
371 | platform_set_drvdata(pdev, imxdma); | 386 | platform_set_drvdata(pdev, imxdma); |
372 | 387 | ||
388 | imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms; | ||
389 | dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff); | ||
390 | |||
373 | ret = dma_async_device_register(&imxdma->dma_device); | 391 | ret = dma_async_device_register(&imxdma->dma_device); |
374 | if (ret) { | 392 | if (ret) { |
375 | dev_err(&pdev->dev, "unable to register\n"); | 393 | dev_err(&pdev->dev, "unable to register\n"); |
@@ -379,7 +397,7 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
379 | return 0; | 397 | return 0; |
380 | 398 | ||
381 | err_init: | 399 | err_init: |
382 | while (i-- >= 0) { | 400 | while (--i >= 0) { |
383 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; | 401 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; |
384 | imx_dma_free(imxdmac->imxdma_channel); | 402 | imx_dma_free(imxdmac->imxdma_channel); |
385 | } | 403 | } |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 0834323a059..b6d1455fa93 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -230,7 +230,7 @@ struct sdma_engine; | |||
230 | * struct sdma_channel - housekeeping for a SDMA channel | 230 | * struct sdma_channel - housekeeping for a SDMA channel |
231 | * | 231 | * |
232 | * @sdma pointer to the SDMA engine for this channel | 232 | * @sdma pointer to the SDMA engine for this channel |
233 | * @channel the channel number, matches dmaengine chan_id | 233 | * @channel the channel number, matches dmaengine chan_id + 1 |
234 | * @direction transfer type. Needed for setting SDMA script | 234 | * @direction transfer type. Needed for setting SDMA script |
235 | * @peripheral_type Peripheral type. Needed for setting SDMA script | 235 | * @peripheral_type Peripheral type. Needed for setting SDMA script |
236 | * @event_id0 aka dma request line | 236 | * @event_id0 aka dma request line |
@@ -273,50 +273,6 @@ struct sdma_channel { | |||
273 | #define MXC_SDMA_MIN_PRIORITY 1 | 273 | #define MXC_SDMA_MIN_PRIORITY 1 |
274 | #define MXC_SDMA_MAX_PRIORITY 7 | 274 | #define MXC_SDMA_MAX_PRIORITY 7 |
275 | 275 | ||
276 | /** | ||
277 | * struct sdma_script_start_addrs - SDMA script start pointers | ||
278 | * | ||
279 | * start addresses of the different functions in the physical | ||
280 | * address space of the SDMA engine. | ||
281 | */ | ||
282 | struct sdma_script_start_addrs { | ||
283 | u32 ap_2_ap_addr; | ||
284 | u32 ap_2_bp_addr; | ||
285 | u32 ap_2_ap_fixed_addr; | ||
286 | u32 bp_2_ap_addr; | ||
287 | u32 loopback_on_dsp_side_addr; | ||
288 | u32 mcu_interrupt_only_addr; | ||
289 | u32 firi_2_per_addr; | ||
290 | u32 firi_2_mcu_addr; | ||
291 | u32 per_2_firi_addr; | ||
292 | u32 mcu_2_firi_addr; | ||
293 | u32 uart_2_per_addr; | ||
294 | u32 uart_2_mcu_addr; | ||
295 | u32 per_2_app_addr; | ||
296 | u32 mcu_2_app_addr; | ||
297 | u32 per_2_per_addr; | ||
298 | u32 uartsh_2_per_addr; | ||
299 | u32 uartsh_2_mcu_addr; | ||
300 | u32 per_2_shp_addr; | ||
301 | u32 mcu_2_shp_addr; | ||
302 | u32 ata_2_mcu_addr; | ||
303 | u32 mcu_2_ata_addr; | ||
304 | u32 app_2_per_addr; | ||
305 | u32 app_2_mcu_addr; | ||
306 | u32 shp_2_per_addr; | ||
307 | u32 shp_2_mcu_addr; | ||
308 | u32 mshc_2_mcu_addr; | ||
309 | u32 mcu_2_mshc_addr; | ||
310 | u32 spdif_2_mcu_addr; | ||
311 | u32 mcu_2_spdif_addr; | ||
312 | u32 asrc_2_mcu_addr; | ||
313 | u32 ext_mem_2_ipu_addr; | ||
314 | u32 descrambler_addr; | ||
315 | u32 dptc_dvfs_addr; | ||
316 | u32 utra_addr; | ||
317 | u32 ram_code_start_addr; | ||
318 | }; | ||
319 | |||
320 | #define SDMA_FIRMWARE_MAGIC 0x414d4453 | 276 | #define SDMA_FIRMWARE_MAGIC 0x414d4453 |
321 | 277 | ||
322 | /** | 278 | /** |
@@ -345,6 +301,7 @@ struct sdma_firmware_header { | |||
345 | 301 | ||
346 | struct sdma_engine { | 302 | struct sdma_engine { |
347 | struct device *dev; | 303 | struct device *dev; |
304 | struct device_dma_parameters dma_parms; | ||
348 | struct sdma_channel channel[MAX_DMA_CHANNELS]; | 305 | struct sdma_channel channel[MAX_DMA_CHANNELS]; |
349 | struct sdma_channel_control *channel_control; | 306 | struct sdma_channel_control *channel_control; |
350 | void __iomem *regs; | 307 | void __iomem *regs; |
@@ -493,7 +450,7 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac) | |||
493 | if (bd->mode.status & BD_RROR) | 450 | if (bd->mode.status & BD_RROR) |
494 | sdmac->status = DMA_ERROR; | 451 | sdmac->status = DMA_ERROR; |
495 | else | 452 | else |
496 | sdmac->status = DMA_SUCCESS; | 453 | sdmac->status = DMA_IN_PROGRESS; |
497 | 454 | ||
498 | bd->mode.status |= BD_DONE; | 455 | bd->mode.status |= BD_DONE; |
499 | sdmac->buf_tail++; | 456 | sdmac->buf_tail++; |
@@ -814,15 +771,15 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel) | |||
814 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); | 771 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); |
815 | } | 772 | } |
816 | 773 | ||
817 | static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma) | 774 | static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac) |
818 | { | 775 | { |
819 | dma_cookie_t cookie = sdma->chan.cookie; | 776 | dma_cookie_t cookie = sdmac->chan.cookie; |
820 | 777 | ||
821 | if (++cookie < 0) | 778 | if (++cookie < 0) |
822 | cookie = 1; | 779 | cookie = 1; |
823 | 780 | ||
824 | sdma->chan.cookie = cookie; | 781 | sdmac->chan.cookie = cookie; |
825 | sdma->desc.cookie = cookie; | 782 | sdmac->desc.cookie = cookie; |
826 | 783 | ||
827 | return cookie; | 784 | return cookie; |
828 | } | 785 | } |
@@ -842,7 +799,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
842 | 799 | ||
843 | cookie = sdma_assign_cookie(sdmac); | 800 | cookie = sdma_assign_cookie(sdmac); |
844 | 801 | ||
845 | sdma_enable_channel(sdma, tx->chan->chan_id); | 802 | sdma_enable_channel(sdma, sdmac->channel); |
846 | 803 | ||
847 | spin_unlock_irq(&sdmac->lock); | 804 | spin_unlock_irq(&sdmac->lock); |
848 | 805 | ||
@@ -855,10 +812,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan) | |||
855 | struct imx_dma_data *data = chan->private; | 812 | struct imx_dma_data *data = chan->private; |
856 | int prio, ret; | 813 | int prio, ret; |
857 | 814 | ||
858 | /* No need to execute this for internal channel 0 */ | ||
859 | if (chan->chan_id == 0) | ||
860 | return 0; | ||
861 | |||
862 | if (!data) | 815 | if (!data) |
863 | return -EINVAL; | 816 | return -EINVAL; |
864 | 817 | ||
@@ -923,7 +876,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
923 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 876 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
924 | struct sdma_engine *sdma = sdmac->sdma; | 877 | struct sdma_engine *sdma = sdmac->sdma; |
925 | int ret, i, count; | 878 | int ret, i, count; |
926 | int channel = chan->chan_id; | 879 | int channel = sdmac->channel; |
927 | struct scatterlist *sg; | 880 | struct scatterlist *sg; |
928 | 881 | ||
929 | if (sdmac->status == DMA_IN_PROGRESS) | 882 | if (sdmac->status == DMA_IN_PROGRESS) |
@@ -951,7 +904,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
951 | struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; | 904 | struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; |
952 | int param; | 905 | int param; |
953 | 906 | ||
954 | bd->buffer_addr = sgl->dma_address; | 907 | bd->buffer_addr = sg->dma_address; |
955 | 908 | ||
956 | count = sg->length; | 909 | count = sg->length; |
957 | 910 | ||
@@ -968,22 +921,33 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
968 | ret = -EINVAL; | 921 | ret = -EINVAL; |
969 | goto err_out; | 922 | goto err_out; |
970 | } | 923 | } |
971 | if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) | 924 | |
925 | switch (sdmac->word_size) { | ||
926 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
972 | bd->mode.command = 0; | 927 | bd->mode.command = 0; |
973 | else | 928 | if (count & 3 || sg->dma_address & 3) |
974 | bd->mode.command = sdmac->word_size; | 929 | goto err_out; |
930 | break; | ||
931 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
932 | bd->mode.command = 2; | ||
933 | if (count & 1 || sg->dma_address & 1) | ||
934 | return NULL; | ||
935 | break; | ||
936 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
937 | bd->mode.command = 1; | ||
938 | break; | ||
939 | default: | ||
940 | goto err_out; | ||
941 | } | ||
975 | 942 | ||
976 | param = BD_DONE | BD_EXTD | BD_CONT; | 943 | param = BD_DONE | BD_EXTD | BD_CONT; |
977 | 944 | ||
978 | if (sdmac->flags & IMX_DMA_SG_LOOP) { | 945 | if (i + 1 == sg_len) { |
979 | param |= BD_INTR; | 946 | param |= BD_INTR; |
980 | if (i + 1 == sg_len) | 947 | param |= BD_LAST; |
981 | param |= BD_WRAP; | 948 | param &= ~BD_CONT; |
982 | } | 949 | } |
983 | 950 | ||
984 | if (i + 1 == sg_len) | ||
985 | param |= BD_INTR; | ||
986 | |||
987 | dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", | 951 | dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", |
988 | i, count, sg->dma_address, | 952 | i, count, sg->dma_address, |
989 | param & BD_WRAP ? "wrap" : "", | 953 | param & BD_WRAP ? "wrap" : "", |
@@ -997,6 +961,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
997 | 961 | ||
998 | return &sdmac->desc; | 962 | return &sdmac->desc; |
999 | err_out: | 963 | err_out: |
964 | sdmac->status = DMA_ERROR; | ||
1000 | return NULL; | 965 | return NULL; |
1001 | } | 966 | } |
1002 | 967 | ||
@@ -1007,7 +972,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | |||
1007 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 972 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1008 | struct sdma_engine *sdma = sdmac->sdma; | 973 | struct sdma_engine *sdma = sdmac->sdma; |
1009 | int num_periods = buf_len / period_len; | 974 | int num_periods = buf_len / period_len; |
1010 | int channel = chan->chan_id; | 975 | int channel = sdmac->channel; |
1011 | int ret, i = 0, buf = 0; | 976 | int ret, i = 0, buf = 0; |
1012 | 977 | ||
1013 | dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); | 978 | dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); |
@@ -1110,14 +1075,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, | |||
1110 | { | 1075 | { |
1111 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1076 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1112 | dma_cookie_t last_used; | 1077 | dma_cookie_t last_used; |
1113 | enum dma_status ret; | ||
1114 | 1078 | ||
1115 | last_used = chan->cookie; | 1079 | last_used = chan->cookie; |
1116 | 1080 | ||
1117 | ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used); | ||
1118 | dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); | 1081 | dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); |
1119 | 1082 | ||
1120 | return ret; | 1083 | return sdmac->status; |
1121 | } | 1084 | } |
1122 | 1085 | ||
1123 | static void sdma_issue_pending(struct dma_chan *chan) | 1086 | static void sdma_issue_pending(struct dma_chan *chan) |
@@ -1127,8 +1090,74 @@ static void sdma_issue_pending(struct dma_chan *chan) | |||
1127 | */ | 1090 | */ |
1128 | } | 1091 | } |
1129 | 1092 | ||
1130 | static int __init sdma_init(struct sdma_engine *sdma, | 1093 | #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 |
1131 | void *ram_code, int ram_code_size) | 1094 | |
1095 | static void sdma_add_scripts(struct sdma_engine *sdma, | ||
1096 | const struct sdma_script_start_addrs *addr) | ||
1097 | { | ||
1098 | s32 *addr_arr = (s32 *)addr; | ||
1099 | s32 *saddr_arr = (s32 *)sdma->script_addrs; | ||
1100 | int i; | ||
1101 | |||
1102 | for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) | ||
1103 | if (addr_arr[i] > 0) | ||
1104 | saddr_arr[i] = addr_arr[i]; | ||
1105 | } | ||
1106 | |||
1107 | static int __init sdma_get_firmware(struct sdma_engine *sdma, | ||
1108 | const char *cpu_name, int to_version) | ||
1109 | { | ||
1110 | const struct firmware *fw; | ||
1111 | char *fwname; | ||
1112 | const struct sdma_firmware_header *header; | ||
1113 | int ret; | ||
1114 | const struct sdma_script_start_addrs *addr; | ||
1115 | unsigned short *ram_code; | ||
1116 | |||
1117 | fwname = kasprintf(GFP_KERNEL, "sdma-%s-to%d.bin", cpu_name, to_version); | ||
1118 | if (!fwname) | ||
1119 | return -ENOMEM; | ||
1120 | |||
1121 | ret = request_firmware(&fw, fwname, sdma->dev); | ||
1122 | if (ret) { | ||
1123 | kfree(fwname); | ||
1124 | return ret; | ||
1125 | } | ||
1126 | kfree(fwname); | ||
1127 | |||
1128 | ret = -EINVAL; | ||
1129 | if (fw->size < sizeof(*header)) | ||
1130 | goto err_firmware; | ||
1130 | |||
1131 | header = (struct sdma_firmware_header *)fw->data; | ||
1132 | |||
1133 | if (header->magic != SDMA_FIRMWARE_MAGIC) | ||
1134 | goto err_firmware; | ||
1135 | if (header->ram_code_start + header->ram_code_size > fw->size) | ||
1136 | goto err_firmware; | ||
1137 | |||
1138 | addr = (void *)header + header->script_addrs_start; | ||
1139 | ram_code = (void *)header + header->ram_code_start; | ||
1140 | |||
1141 | clk_enable(sdma->clk); | ||
1142 | /* download the RAM image for SDMA */ | ||
1143 | sdma_load_script(sdma, ram_code, | ||
1144 | header->ram_code_size, | ||
1145 | addr->ram_code_start_addr); | ||
1146 | clk_disable(sdma->clk); | ||
1147 | |||
1148 | sdma_add_scripts(sdma, addr); | ||
1149 | |||
1150 | dev_info(sdma->dev, "loaded firmware %d.%d\n", | ||
1151 | header->version_major, | ||
1152 | header->version_minor); | ||
1153 | |||
1154 | err_firmware: | ||
1155 | release_firmware(fw); | ||
1156 | |||
1157 | return ret; | ||
1158 | } | ||
1159 | |||
1160 | static int __init sdma_init(struct sdma_engine *sdma) | ||
1132 | { | 1161 | { |
1133 | int i, ret; | 1162 | int i, ret; |
1134 | dma_addr_t ccb_phys; | 1163 | dma_addr_t ccb_phys; |
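
sdma_get_firmware() above treats the firmware blob as a header followed by a script-address table and the RAM code image, with all offsets relative to the header itself. A minimal sketch of the same validation, using only the header fields the function reads (magic, ram_code_start, ram_code_size):

    /* sketch: sanity-check an sdma-<cpu>-to<N>.bin blob before parsing it */
    static bool sdma_fw_blob_ok(const void *data, size_t size)
    {
        const struct sdma_firmware_header *header = data;

        if (size < sizeof(*header))
            return false;
        /* 0x414d4453 is "SDMA" in little-endian byte order */
        if (header->magic != SDMA_FIRMWARE_MAGIC)
            return false;
        /* the RAM code image must lie entirely inside the blob */
        return header->ram_code_start + header->ram_code_size <= size;
    }
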
@@ -1192,11 +1221,6 @@ static int __init sdma_init(struct sdma_engine *sdma, | |||
1192 | 1221 | ||
1193 | __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR); | 1222 | __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR); |
1194 | 1223 | ||
1195 | /* download the RAM image for SDMA */ | ||
1196 | sdma_load_script(sdma, ram_code, | ||
1197 | ram_code_size, | ||
1198 | sdma->script_addrs->ram_code_start_addr); | ||
1199 | |||
1200 | /* Set bits of CONFIG register with given context switching mode */ | 1224 | /* Set bits of CONFIG register with given context switching mode */ |
1201 | __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); | 1225 | __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); |
1202 | 1226 | ||
@@ -1216,16 +1240,10 @@ err_dma_alloc: | |||
1216 | static int __init sdma_probe(struct platform_device *pdev) | 1240 | static int __init sdma_probe(struct platform_device *pdev) |
1217 | { | 1241 | { |
1218 | int ret; | 1242 | int ret; |
1219 | const struct firmware *fw; | ||
1220 | const struct sdma_firmware_header *header; | ||
1221 | const struct sdma_script_start_addrs *addr; | ||
1222 | int irq; | 1243 | int irq; |
1223 | unsigned short *ram_code; | ||
1224 | struct resource *iores; | 1244 | struct resource *iores; |
1225 | struct sdma_platform_data *pdata = pdev->dev.platform_data; | 1245 | struct sdma_platform_data *pdata = pdev->dev.platform_data; |
1226 | char *fwname; | ||
1227 | int i; | 1246 | int i; |
1228 | dma_cap_mask_t mask; | ||
1229 | struct sdma_engine *sdma; | 1247 | struct sdma_engine *sdma; |
1230 | 1248 | ||
1231 | sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); | 1249 | sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); |
@@ -1262,41 +1280,15 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1262 | if (ret) | 1280 | if (ret) |
1263 | goto err_request_irq; | 1281 | goto err_request_irq; |
1264 | 1282 | ||
1265 | fwname = kasprintf(GFP_KERNEL, "sdma-%s-to%d.bin", | 1283 | sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); |
1266 | pdata->cpu_name, pdata->to_version); | ||
1267 | if (!fwname) { | ||
1268 | ret = -ENOMEM; | ||
1269 | goto err_cputype; | ||
1270 | } | ||
1271 | |||
1272 | ret = request_firmware(&fw, fwname, &pdev->dev); | ||
1273 | if (ret) { | ||
1274 | dev_err(&pdev->dev, "request firmware \"%s\" failed with %d\n", | ||
1275 | fwname, ret); | ||
1276 | kfree(fwname); | ||
1277 | goto err_cputype; | ||
1278 | } | ||
1279 | kfree(fwname); | ||
1280 | |||
1281 | if (fw->size < sizeof(*header)) | ||
1282 | goto err_firmware; | ||
1283 | |||
1284 | header = (struct sdma_firmware_header *)fw->data; | ||
1285 | |||
1286 | if (header->magic != SDMA_FIRMWARE_MAGIC) | ||
1287 | goto err_firmware; | ||
1288 | if (header->ram_code_start + header->ram_code_size > fw->size) | ||
1289 | goto err_firmware; | ||
1290 | |||
1291 | addr = (void *)header + header->script_addrs_start; | ||
1292 | ram_code = (void *)header + header->ram_code_start; | ||
1293 | sdma->script_addrs = kmalloc(sizeof(*addr), GFP_KERNEL); | ||
1294 | if (!sdma->script_addrs) | 1284 | if (!sdma->script_addrs) |
1295 | goto err_firmware; | 1285 | goto err_alloc; |
1296 | memcpy(sdma->script_addrs, addr, sizeof(*addr)); | ||
1297 | 1286 | ||
1298 | sdma->version = pdata->sdma_version; | 1287 | sdma->version = pdata->sdma_version; |
1299 | 1288 | ||
1289 | dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); | ||
1290 | dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); | ||
1291 | |||
1300 | INIT_LIST_HEAD(&sdma->dma_device.channels); | 1292 | INIT_LIST_HEAD(&sdma->dma_device.channels); |
1301 | /* Initialize channel parameters */ | 1293 | /* Initialize channel parameters */ |
1302 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | 1294 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { |
@@ -1305,21 +1297,28 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1305 | sdmac->sdma = sdma; | 1297 | sdmac->sdma = sdma; |
1306 | spin_lock_init(&sdmac->lock); | 1298 | spin_lock_init(&sdmac->lock); |
1307 | 1299 | ||
1308 | dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); | ||
1309 | dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); | ||
1310 | |||
1311 | sdmac->chan.device = &sdma->dma_device; | 1300 | sdmac->chan.device = &sdma->dma_device; |
1312 | sdmac->chan.chan_id = i; | ||
1313 | sdmac->channel = i; | 1301 | sdmac->channel = i; |
1314 | 1302 | ||
1315 | /* Add the channel to the DMAC list */ | 1303 | /* |
1316 | list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels); | 1304 | * Add the channel to the DMAC list. Do not add channel 0 though |
1305 | * because we need it internally in the SDMA driver. This also means | ||
1306 | * that channel 0 in dmaengine counting matches sdma channel 1. | ||
1307 | */ | ||
1308 | if (i) | ||
1309 | list_add_tail(&sdmac->chan.device_node, | ||
1310 | &sdma->dma_device.channels); | ||
1317 | } | 1311 | } |
1318 | 1312 | ||
1319 | ret = sdma_init(sdma, ram_code, header->ram_code_size); | 1313 | ret = sdma_init(sdma); |
1320 | if (ret) | 1314 | if (ret) |
1321 | goto err_init; | 1315 | goto err_init; |
1322 | 1316 | ||
1317 | if (pdata->script_addrs) | ||
1318 | sdma_add_scripts(sdma, pdata->script_addrs); | ||
1319 | |||
1320 | sdma_get_firmware(sdma, pdata->cpu_name, pdata->to_version); | ||
1321 | |||
1323 | sdma->dma_device.dev = &pdev->dev; | 1322 | sdma->dma_device.dev = &pdev->dev; |
1324 | 1323 | ||
1325 | sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources; | 1324 | sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources; |
@@ -1329,6 +1328,8 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1329 | sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; | 1328 | sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; |
1330 | sdma->dma_device.device_control = sdma_control; | 1329 | sdma->dma_device.device_control = sdma_control; |
1331 | sdma->dma_device.device_issue_pending = sdma_issue_pending; | 1330 | sdma->dma_device.device_issue_pending = sdma_issue_pending; |
1331 | sdma->dma_device.dev->dma_parms = &sdma->dma_parms; | ||
1332 | dma_set_max_seg_size(sdma->dma_device.dev, 65535); | ||
1332 | 1333 | ||
1333 | ret = dma_async_device_register(&sdma->dma_device); | 1334 | ret = dma_async_device_register(&sdma->dma_device); |
1334 | if (ret) { | 1335 | if (ret) { |
@@ -1336,26 +1337,13 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1336 | goto err_init; | 1337 | goto err_init; |
1337 | } | 1338 | } |
1338 | 1339 | ||
1339 | dev_info(&pdev->dev, "initialized (firmware %d.%d)\n", | 1340 | dev_info(sdma->dev, "initialized\n"); |
1340 | header->version_major, | ||
1341 | header->version_minor); | ||
1342 | |||
1343 | /* request channel 0. This is an internal control channel | ||
1344 | * to the SDMA engine and not available to clients. | ||
1345 | */ | ||
1346 | dma_cap_zero(mask); | ||
1347 | dma_cap_set(DMA_SLAVE, mask); | ||
1348 | dma_request_channel(mask, NULL, NULL); | ||
1349 | |||
1350 | release_firmware(fw); | ||
1351 | 1341 | ||
1352 | return 0; | 1342 | return 0; |
1353 | 1343 | ||
1354 | err_init: | 1344 | err_init: |
1355 | kfree(sdma->script_addrs); | 1345 | kfree(sdma->script_addrs); |
1356 | err_firmware: | 1346 | err_alloc: |
1357 | release_firmware(fw); | ||
1358 | err_cputype: | ||
1359 | free_irq(irq, sdma); | 1347 | free_irq(irq, sdma); |
1360 | err_request_irq: | 1348 | err_request_irq: |
1361 | iounmap(sdma->regs); | 1349 | iounmap(sdma->regs); |
@@ -1366,7 +1354,7 @@ err_clk: | |||
1366 | err_request_region: | 1354 | err_request_region: |
1367 | err_irq: | 1355 | err_irq: |
1368 | kfree(sdma); | 1356 | kfree(sdma); |
1369 | return 0; | 1357 | return ret; |
1370 | } | 1358 | } |
1371 | 1359 | ||
1372 | static int __exit sdma_remove(struct platform_device *pdev) | 1360 | static int __exit sdma_remove(struct platform_device *pdev) |
@@ -1385,7 +1373,7 @@ static int __init sdma_module_init(void) | |||
1385 | { | 1373 | { |
1386 | return platform_driver_probe(&sdma_driver, sdma_probe); | 1374 | return platform_driver_probe(&sdma_driver, sdma_probe); |
1387 | } | 1375 | } |
1388 | subsys_initcall(sdma_module_init); | 1376 | module_init(sdma_module_init); |
1389 | 1377 | ||
1390 | MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); | 1378 | MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); |
1391 | MODULE_DESCRIPTION("i.MX SDMA driver"); | 1379 | MODULE_DESCRIPTION("i.MX SDMA driver"); |
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index 338bc4eed1f..798f46a4590 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
@@ -664,11 +664,20 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
664 | /*calculate CTL_LO*/ | 664 | /*calculate CTL_LO*/ |
665 | ctl_lo.ctl_lo = 0; | 665 | ctl_lo.ctl_lo = 0; |
666 | ctl_lo.ctlx.int_en = 1; | 666 | ctl_lo.ctlx.int_en = 1; |
667 | ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width; | ||
668 | ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width; | ||
669 | ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; | 667 | ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; |
670 | ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; | 668 | ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; |
671 | 669 | ||
670 | /* | ||
671 | * Here we need some translation from "enum dma_slave_buswidth" | ||
672 | * to the format used by our DMA controller, i.e. the standard | ||
673 | * intel_mid_dmac encoding: | ||
674 | * 1 Byte 0b000 | ||
675 | * 2 Bytes 0b001 | ||
676 | * 4 Bytes 0b010 | ||
677 | */ | ||
678 | ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2; | ||
679 | ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2; | ||
680 | |||
672 | if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { | 681 | if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { |
673 | ctl_lo.ctlx.tt_fc = 0; | 682 | ctl_lo.ctlx.tt_fc = 0; |
674 | ctl_lo.ctlx.sinc = 0; | 683 | ctl_lo.ctlx.sinc = 0; |
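
The new comment documents why a plain division works here: the generic enum dma_slave_buswidth values are the byte widths themselves (1, 2 and 4), while the controller wants the 3-bit encoding listed in the table. A sketch of the same mapping spelled out, assuming those standard enum values:

    /* sketch: enum dma_slave_buswidth -> intel_mid_dmac tr_width encoding */
    static u32 buswidth_to_tr_width(enum dma_slave_buswidth width)
    {
        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:  return 0; /* 0b000 */
        case DMA_SLAVE_BUSWIDTH_2_BYTES: return 1; /* 0b001 */
        case DMA_SLAVE_BUSWIDTH_4_BYTES: return 2; /* 0b010 */
        default:                         return width / 2; /* as in the patch */
        }
    }
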
@@ -746,8 +755,18 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | |||
746 | BUG_ON(!mids); | 755 | BUG_ON(!mids); |
747 | 756 | ||
748 | if (!midc->dma->pimr_mask) { | 757 | if (!midc->dma->pimr_mask) { |
749 | pr_debug("MDMA: SG list is not supported by this controller\n"); | 758 | /* We can still handle sg list with only one item */ |
750 | return NULL; | 759 | if (sg_len == 1) { |
760 | txd = intel_mid_dma_prep_memcpy(chan, | ||
761 | mids->dma_slave.dst_addr, | ||
762 | mids->dma_slave.src_addr, | ||
763 | sgl->length, | ||
764 | flags); | ||
765 | return txd; | ||
766 | } else { | ||
767 | pr_warn("MDMA: SG list is not supported by this controller\n"); | ||
768 | return NULL; | ||
769 | } | ||
751 | } | 770 | } |
752 | 771 | ||
753 | pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", | 772 | pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", |
@@ -758,6 +777,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | |||
758 | pr_err("MDMA: Prep memcpy failed\n"); | 777 | pr_err("MDMA: Prep memcpy failed\n"); |
759 | return NULL; | 778 | return NULL; |
760 | } | 779 | } |
780 | |||
761 | desc = to_intel_mid_dma_desc(txd); | 781 | desc = to_intel_mid_dma_desc(txd); |
762 | desc->dirn = direction; | 782 | desc->dirn = direction; |
763 | ctl_lo.ctl_lo = desc->ctl_lo; | 783 | ctl_lo.ctl_lo = desc->ctl_lo; |
@@ -1021,11 +1041,6 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) | |||
1021 | 1041 | ||
1022 | /*DMA Interrupt*/ | 1042 | /*DMA Interrupt*/ |
1023 | pr_debug("MDMA:Got an interrupt on irq %d\n", irq); | 1043 | pr_debug("MDMA:Got an interrupt on irq %d\n", irq); |
1024 | if (!mid) { | ||
1025 | pr_err("ERR_MDMA:null pointer mid\n"); | ||
1026 | return -EINVAL; | ||
1027 | } | ||
1028 | |||
1029 | pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); | 1044 | pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); |
1030 | tfr_status &= mid->intr_mask; | 1045 | tfr_status &= mid->intr_mask; |
1031 | if (tfr_status) { | 1046 | if (tfr_status) { |
@@ -1060,8 +1075,8 @@ static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data) | |||
1060 | * mid_setup_dma - Setup the DMA controller | 1075 | * mid_setup_dma - Setup the DMA controller |
1061 | * @pdev: Controller PCI device structure | 1076 | * @pdev: Controller PCI device structure |
1062 | * | 1077 | * |
1063 | * Initilize the DMA controller, channels, registers with DMA engine, | 1078 | * Initialize the DMA controller, channels, registers with DMA engine, |
1064 | * ISR. Initilize DMA controller channels. | 1079 | * ISR. Initialize DMA controller channels. |
1065 | */ | 1080 | */ |
1066 | static int mid_setup_dma(struct pci_dev *pdev) | 1081 | static int mid_setup_dma(struct pci_dev *pdev) |
1067 | { | 1082 | { |
@@ -1075,7 +1090,6 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
1075 | if (NULL == dma->dma_pool) { | 1090 | if (NULL == dma->dma_pool) { |
1076 | pr_err("ERR_MDMA:pci_pool_create failed\n"); | 1091 | pr_err("ERR_MDMA:pci_pool_create failed\n"); |
1077 | err = -ENOMEM; | 1092 | err = -ENOMEM; |
1078 | kfree(dma); | ||
1079 | goto err_dma_pool; | 1093 | goto err_dma_pool; |
1080 | } | 1094 | } |
1081 | 1095 | ||
@@ -1186,7 +1200,6 @@ err_engine: | |||
1186 | free_irq(pdev->irq, dma); | 1200 | free_irq(pdev->irq, dma); |
1187 | err_irq: | 1201 | err_irq: |
1188 | pci_pool_destroy(dma->dma_pool); | 1202 | pci_pool_destroy(dma->dma_pool); |
1189 | kfree(dma); | ||
1190 | err_dma_pool: | 1203 | err_dma_pool: |
1191 | pr_err("ERR_MDMA:setup_dma failed: %d\n", err); | 1204 | pr_err("ERR_MDMA:setup_dma failed: %d\n", err); |
1192 | return err; | 1205 | return err; |
@@ -1219,7 +1232,7 @@ static void middma_shutdown(struct pci_dev *pdev) | |||
1219 | * @pdev: Controller PCI device structure | 1232 | * @pdev: Controller PCI device structure |
1220 | * @id: pci device id structure | 1233 | * @id: pci device id structure |
1221 | * | 1234 | * |
1222 | * Initilize the PCI device, map BARs, query driver data. | 1235 | * Initialize the PCI device, map BARs, query driver data. |
1223 | * Call setup_dma to complete controller and channel initialization | 1236 | * Call setup_dma to complete controller and channel initialization |
1224 | */ | 1237 | */ |
1225 | static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, | 1238 | static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, |
@@ -1413,7 +1426,7 @@ static const struct dev_pm_ops intel_mid_dma_pm = { | |||
1413 | .runtime_idle = dma_runtime_idle, | 1426 | .runtime_idle = dma_runtime_idle, |
1414 | }; | 1427 | }; |
1415 | 1428 | ||
1416 | static struct pci_driver intel_mid_dma_pci = { | 1429 | static struct pci_driver intel_mid_dma_pci_driver = { |
1417 | .name = "Intel MID DMA", | 1430 | .name = "Intel MID DMA", |
1418 | .id_table = intel_mid_dma_ids, | 1431 | .id_table = intel_mid_dma_ids, |
1419 | .probe = intel_mid_dma_probe, | 1432 | .probe = intel_mid_dma_probe, |
@@ -1431,13 +1444,13 @@ static int __init intel_mid_dma_init(void) | |||
1431 | { | 1444 | { |
1432 | pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", | 1445 | pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", |
1433 | INTEL_MID_DMA_DRIVER_VERSION); | 1446 | INTEL_MID_DMA_DRIVER_VERSION); |
1434 | return pci_register_driver(&intel_mid_dma_pci); | 1447 | return pci_register_driver(&intel_mid_dma_pci_driver); |
1435 | } | 1448 | } |
1436 | fs_initcall(intel_mid_dma_init); | 1449 | fs_initcall(intel_mid_dma_init); |
1437 | 1450 | ||
1438 | static void __exit intel_mid_dma_exit(void) | 1451 | static void __exit intel_mid_dma_exit(void) |
1439 | { | 1452 | { |
1440 | pci_unregister_driver(&intel_mid_dma_pci); | 1453 | pci_unregister_driver(&intel_mid_dma_pci_driver); |
1441 | } | 1454 | } |
1442 | module_exit(intel_mid_dma_exit); | 1455 | module_exit(intel_mid_dma_exit); |
1443 | 1456 | ||
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile index 8997d3fb905..0ff7270af25 100644 --- a/drivers/dma/ioat/Makefile +++ b/drivers/dma/ioat/Makefile | |||
@@ -1,2 +1,2 @@ | |||
1 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | 1 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o |
2 | ioatdma-objs := pci.o dma.o dma_v2.o dma_v3.o dca.o | 2 | ioatdma-y := pci.o dma.o dma_v2.o dma_v3.o dca.o |
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 161c452923b..c6b01f535b2 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -1261,7 +1261,7 @@ out: | |||
1261 | return err; | 1261 | return err; |
1262 | } | 1262 | } |
1263 | 1263 | ||
1264 | #ifdef CONFIG_MD_RAID6_PQ | 1264 | #ifdef CONFIG_RAID6_PQ |
1265 | static int __devinit | 1265 | static int __devinit |
1266 | iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) | 1266 | iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) |
1267 | { | 1267 | { |
@@ -1584,7 +1584,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) | |||
1584 | 1584 | ||
1585 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) && | 1585 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) && |
1586 | dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) { | 1586 | dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) { |
1587 | #ifdef CONFIG_MD_RAID6_PQ | 1587 | #ifdef CONFIG_RAID6_PQ |
1588 | ret = iop_adma_pq_zero_sum_self_test(adev); | 1588 | ret = iop_adma_pq_zero_sum_self_test(adev); |
1589 | dev_dbg(&pdev->dev, "pq self test returned %d\n", ret); | 1589 | dev_dbg(&pdev->dev, "pq self test returned %d\n", ret); |
1590 | #else | 1590 | #else |
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index cb26ee9773d..c1a125e7d1d 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -1145,29 +1145,6 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, | |||
1145 | reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); | 1145 | reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); |
1146 | idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN); | 1146 | idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN); |
1147 | 1147 | ||
1148 | /* | ||
1149 | * Problem (observed with channel DMAIC_7): after enabling the channel | ||
1150 | * and initialising buffers, there comes an interrupt with current still | ||
1151 | * pointing at buffer 0, whereas it should use buffer 0 first and only | ||
1152 | * generate an interrupt when it is done, then current should already | ||
1153 | * point to buffer 1. This spurious interrupt also comes on channel | ||
1154 | * DMASDC_0. With DMAIC_7 normally, if we just leave the ISR after the | ||
1155 | * first interrupt, there comes the second with current correctly | ||
1156 | * pointing to buffer 1 this time. But sometimes this second interrupt | ||
1157 | * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling | ||
1158 | * the channel seems to prevent the channel from hanging, but it doesn't | ||
1159 | * prevent the spurious interrupt. This might also be unsafe. Think | ||
1160 | * about the IDMAC controller trying to switch to a buffer, when we | ||
1161 | * clear the ready bit, and re-enable it a moment later. | ||
1162 | */ | ||
1163 | reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY); | ||
1164 | idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY); | ||
1165 | idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY); | ||
1166 | |||
1167 | reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY); | ||
1168 | idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY); | ||
1169 | idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY); | ||
1170 | |||
1171 | spin_unlock_irqrestore(&ipu->lock, flags); | 1148 | spin_unlock_irqrestore(&ipu->lock, flags); |
1172 | 1149 | ||
1173 | return 0; | 1150 | return 0; |
@@ -1246,33 +1223,6 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) | |||
1246 | 1223 | ||
1247 | /* Other interrupts do not interfere with this channel */ | 1224 | /* Other interrupts do not interfere with this channel */ |
1248 | spin_lock(&ichan->lock); | 1225 | spin_lock(&ichan->lock); |
1249 | if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 && | ||
1250 | ((curbuf >> chan_id) & 1) == ichan->active_buffer && | ||
1251 | !list_is_last(ichan->queue.next, &ichan->queue))) { | ||
1252 | int i = 100; | ||
1253 | |||
1254 | /* This doesn't help. See comment in ipu_disable_channel() */ | ||
1255 | while (--i) { | ||
1256 | curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); | ||
1257 | if (((curbuf >> chan_id) & 1) != ichan->active_buffer) | ||
1258 | break; | ||
1259 | cpu_relax(); | ||
1260 | } | ||
1261 | |||
1262 | if (!i) { | ||
1263 | spin_unlock(&ichan->lock); | ||
1264 | dev_dbg(dev, | ||
1265 | "IRQ on active buffer on channel %x, active " | ||
1266 | "%d, ready %x, %x, current %x!\n", chan_id, | ||
1267 | ichan->active_buffer, ready0, ready1, curbuf); | ||
1268 | return IRQ_NONE; | ||
1269 | } else | ||
1270 | dev_dbg(dev, | ||
1271 | "Buffer deactivated on channel %x, active " | ||
1272 | "%d, ready %x, %x, current %x, rest %d!\n", chan_id, | ||
1273 | ichan->active_buffer, ready0, ready1, curbuf, i); | ||
1274 | } | ||
1275 | |||
1276 | if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || | 1226 | if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || |
1277 | (!ichan->active_buffer && (ready0 >> chan_id) & 1) | 1227 | (!ichan->active_buffer && (ready0 >> chan_id) & 1) |
1278 | )) { | 1228 | )) { |
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 4e9cbf30059..59c270192cc 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008. | 2 | * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008. |
3 | * Copyright (C) Semihalf 2009 | 3 | * Copyright (C) Semihalf 2009 |
4 | * Copyright (C) Ilya Yanok, Emcraft Systems 2010 | ||
4 | * | 5 | * |
5 | * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description | 6 | * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description |
6 | * (defines, structures and comments) was taken from MPC5121 DMA driver | 7 | * (defines, structures and comments) was taken from MPC5121 DMA driver |
@@ -70,6 +71,8 @@ | |||
70 | #define MPC_DMA_DMAES_SBE (1 << 1) | 71 | #define MPC_DMA_DMAES_SBE (1 << 1) |
71 | #define MPC_DMA_DMAES_DBE (1 << 0) | 72 | #define MPC_DMA_DMAES_DBE (1 << 0) |
72 | 73 | ||
74 | #define MPC_DMA_DMAGPOR_SNOOP_ENABLE (1 << 6) | ||
75 | |||
73 | #define MPC_DMA_TSIZE_1 0x00 | 76 | #define MPC_DMA_TSIZE_1 0x00 |
74 | #define MPC_DMA_TSIZE_2 0x01 | 77 | #define MPC_DMA_TSIZE_2 0x01 |
75 | #define MPC_DMA_TSIZE_4 0x02 | 78 | #define MPC_DMA_TSIZE_4 0x02 |
@@ -104,7 +107,10 @@ struct __attribute__ ((__packed__)) mpc_dma_regs { | |||
104 | /* 0x30 */ | 107 | /* 0x30 */ |
105 | u32 dmahrsh; /* DMA hw request status high(ch63~32) */ | 108 | u32 dmahrsh; /* DMA hw request status high(ch63~32) */ |
106 | u32 dmahrsl; /* DMA hardware request status low(ch31~0) */ | 109 | u32 dmahrsl; /* DMA hardware request status low(ch31~0) */ |
107 | u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */ | 110 | union { |
111 | u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */ | ||
112 | u32 dmagpor; /* (General purpose register on MPC8308) */ | ||
113 | }; | ||
108 | u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */ | 114 | u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */ |
109 | /* 0x40 ~ 0xff */ | 115 | /* 0x40 ~ 0xff */ |
110 | u32 reserve0[48]; /* Reserved */ | 116 | u32 reserve0[48]; /* Reserved */ |
@@ -195,7 +201,9 @@ struct mpc_dma { | |||
195 | struct mpc_dma_regs __iomem *regs; | 201 | struct mpc_dma_regs __iomem *regs; |
196 | struct mpc_dma_tcd __iomem *tcd; | 202 | struct mpc_dma_tcd __iomem *tcd; |
197 | int irq; | 203 | int irq; |
204 | int irq2; | ||
198 | uint error_status; | 205 | uint error_status; |
206 | int is_mpc8308; | ||
199 | 207 | ||
200 | /* Lock for error_status field in this structure */ | 208 | /* Lock for error_status field in this structure */ |
201 | spinlock_t error_status_lock; | 209 | spinlock_t error_status_lock; |
@@ -252,11 +260,13 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan) | |||
252 | prev = mdesc; | 260 | prev = mdesc; |
253 | } | 261 | } |
254 | 262 | ||
255 | prev->tcd->start = 0; | ||
256 | prev->tcd->int_maj = 1; | 263 | prev->tcd->int_maj = 1; |
257 | 264 | ||
258 | /* Send first descriptor in chain into hardware */ | 265 | /* Send first descriptor in chain into hardware */ |
259 | memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd)); | 266 | memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd)); |
267 | |||
268 | if (first != prev) | ||
269 | mdma->tcd[cid].e_sg = 1; | ||
260 | out_8(&mdma->regs->dmassrt, cid); | 270 | out_8(&mdma->regs->dmassrt, cid); |
261 | } | 271 | } |
262 | 272 | ||
@@ -274,6 +284,9 @@ static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off) | |||
274 | 284 | ||
275 | spin_lock(&mchan->lock); | 285 | spin_lock(&mchan->lock); |
276 | 286 | ||
287 | out_8(&mdma->regs->dmacint, ch + off); | ||
288 | out_8(&mdma->regs->dmacerr, ch + off); | ||
289 | |||
277 | /* Check error status */ | 290 | /* Check error status */ |
278 | if (es & (1 << ch)) | 291 | if (es & (1 << ch)) |
279 | list_for_each_entry(mdesc, &mchan->active, node) | 292 | list_for_each_entry(mdesc, &mchan->active, node) |
@@ -302,36 +315,68 @@ static irqreturn_t mpc_dma_irq(int irq, void *data) | |||
302 | spin_unlock(&mdma->error_status_lock); | 315 | spin_unlock(&mdma->error_status_lock); |
303 | 316 | ||
304 | /* Handle interrupt on each channel */ | 317 | /* Handle interrupt on each channel */ |
305 | mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth), | 318 | if (mdma->dma.chancnt > 32) { |
319 | mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth), | ||
306 | in_be32(&mdma->regs->dmaerrh), 32); | 320 | in_be32(&mdma->regs->dmaerrh), 32); |
321 | } | ||
307 | mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl), | 322 | mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl), |
308 | in_be32(&mdma->regs->dmaerrl), 0); | 323 | in_be32(&mdma->regs->dmaerrl), 0); |
309 | 324 | ||
310 | /* Ack interrupt on all channels */ | ||
311 | out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); | ||
312 | out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); | ||
313 | out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); | ||
314 | out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); | ||
315 | |||
316 | /* Schedule tasklet */ | 325 | /* Schedule tasklet */ |
317 | tasklet_schedule(&mdma->tasklet); | 326 | tasklet_schedule(&mdma->tasklet); |
318 | 327 | ||
319 | return IRQ_HANDLED; | 328 | return IRQ_HANDLED; |
320 | } | 329 | } |
321 | 330 | ||
322 | /* DMA Tasklet */ | 331 | /* process completed descriptors */ |
323 | static void mpc_dma_tasklet(unsigned long data) | 332 | static void mpc_dma_process_completed(struct mpc_dma *mdma) |
324 | { | 333 | { |
325 | struct mpc_dma *mdma = (void *)data; | ||
326 | dma_cookie_t last_cookie = 0; | 334 | dma_cookie_t last_cookie = 0; |
327 | struct mpc_dma_chan *mchan; | 335 | struct mpc_dma_chan *mchan; |
328 | struct mpc_dma_desc *mdesc; | 336 | struct mpc_dma_desc *mdesc; |
329 | struct dma_async_tx_descriptor *desc; | 337 | struct dma_async_tx_descriptor *desc; |
330 | unsigned long flags; | 338 | unsigned long flags; |
331 | LIST_HEAD(list); | 339 | LIST_HEAD(list); |
332 | uint es; | ||
333 | int i; | 340 | int i; |
334 | 341 | ||
342 | for (i = 0; i < mdma->dma.chancnt; i++) { | ||
343 | mchan = &mdma->channels[i]; | ||
344 | |||
345 | /* Get all completed descriptors */ | ||
346 | spin_lock_irqsave(&mchan->lock, flags); | ||
347 | if (!list_empty(&mchan->completed)) | ||
348 | list_splice_tail_init(&mchan->completed, &list); | ||
349 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
350 | |||
351 | if (list_empty(&list)) | ||
352 | continue; | ||
353 | |||
354 | /* Execute callbacks and run dependencies */ | ||
355 | list_for_each_entry(mdesc, &list, node) { | ||
356 | desc = &mdesc->desc; | ||
357 | |||
358 | if (desc->callback) | ||
359 | desc->callback(desc->callback_param); | ||
360 | |||
361 | last_cookie = desc->cookie; | ||
362 | dma_run_dependencies(desc); | ||
363 | } | ||
364 | |||
365 | /* Free descriptors */ | ||
366 | spin_lock_irqsave(&mchan->lock, flags); | ||
367 | list_splice_tail_init(&list, &mchan->free); | ||
368 | mchan->completed_cookie = last_cookie; | ||
369 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
370 | } | ||
371 | } | ||
372 | |||
373 | /* DMA Tasklet */ | ||
374 | static void mpc_dma_tasklet(unsigned long data) | ||
375 | { | ||
376 | struct mpc_dma *mdma = (void *)data; | ||
377 | unsigned long flags; | ||
378 | uint es; | ||
379 | |||
335 | spin_lock_irqsave(&mdma->error_status_lock, flags); | 380 | spin_lock_irqsave(&mdma->error_status_lock, flags); |
336 | es = mdma->error_status; | 381 | es = mdma->error_status; |
337 | mdma->error_status = 0; | 382 | mdma->error_status = 0; |
@@ -370,35 +415,7 @@ static void mpc_dma_tasklet(unsigned long data) | |||
370 | dev_err(mdma->dma.dev, "- Destination Bus Error\n"); | 415 | dev_err(mdma->dma.dev, "- Destination Bus Error\n"); |
371 | } | 416 | } |
372 | 417 | ||
373 | for (i = 0; i < mdma->dma.chancnt; i++) { | 418 | mpc_dma_process_completed(mdma); |
374 | mchan = &mdma->channels[i]; | ||
375 | |||
376 | /* Get all completed descriptors */ | ||
377 | spin_lock_irqsave(&mchan->lock, flags); | ||
378 | if (!list_empty(&mchan->completed)) | ||
379 | list_splice_tail_init(&mchan->completed, &list); | ||
380 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
381 | |||
382 | if (list_empty(&list)) | ||
383 | continue; | ||
384 | |||
385 | /* Execute callbacks and run dependencies */ | ||
386 | list_for_each_entry(mdesc, &list, node) { | ||
387 | desc = &mdesc->desc; | ||
388 | |||
389 | if (desc->callback) | ||
390 | desc->callback(desc->callback_param); | ||
391 | |||
392 | last_cookie = desc->cookie; | ||
393 | dma_run_dependencies(desc); | ||
394 | } | ||
395 | |||
396 | /* Free descriptors */ | ||
397 | spin_lock_irqsave(&mchan->lock, flags); | ||
398 | list_splice_tail_init(&list, &mchan->free); | ||
399 | mchan->completed_cookie = last_cookie; | ||
400 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
401 | } | ||
402 | } | 419 | } |
403 | 420 | ||
404 | /* Submit descriptor to hardware */ | 421 | /* Submit descriptor to hardware */ |
@@ -563,6 +580,7 @@ static struct dma_async_tx_descriptor * | |||
563 | mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | 580 | mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, |
564 | size_t len, unsigned long flags) | 581 | size_t len, unsigned long flags) |
565 | { | 582 | { |
583 | struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); | ||
566 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); | 584 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); |
567 | struct mpc_dma_desc *mdesc = NULL; | 585 | struct mpc_dma_desc *mdesc = NULL; |
568 | struct mpc_dma_tcd *tcd; | 586 | struct mpc_dma_tcd *tcd; |
@@ -577,8 +595,11 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | |||
577 | } | 595 | } |
578 | spin_unlock_irqrestore(&mchan->lock, iflags); | 596 | spin_unlock_irqrestore(&mchan->lock, iflags); |
579 | 597 | ||
580 | if (!mdesc) | 598 | if (!mdesc) { |
599 | /* try to free completed descriptors */ | ||
600 | mpc_dma_process_completed(mdma); | ||
581 | return NULL; | 601 | return NULL; |
602 | } | ||
582 | 603 | ||
583 | mdesc->error = 0; | 604 | mdesc->error = 0; |
584 | tcd = mdesc->tcd; | 605 | tcd = mdesc->tcd; |
@@ -591,7 +612,8 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | |||
591 | tcd->dsize = MPC_DMA_TSIZE_32; | 612 | tcd->dsize = MPC_DMA_TSIZE_32; |
592 | tcd->soff = 32; | 613 | tcd->soff = 32; |
593 | tcd->doff = 32; | 614 | tcd->doff = 32; |
594 | } else if (IS_ALIGNED(src | dst | len, 16)) { | 615 | } else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) { |
616 | /* MPC8308 doesn't support 16 byte transfers */ | ||
595 | tcd->ssize = MPC_DMA_TSIZE_16; | 617 | tcd->ssize = MPC_DMA_TSIZE_16; |
596 | tcd->dsize = MPC_DMA_TSIZE_16; | 618 | tcd->dsize = MPC_DMA_TSIZE_16; |
597 | tcd->soff = 16; | 619 | tcd->soff = 16; |
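
ORing src, dst and len together lets a single IS_ALIGNED() test cover all three values at once; the widest size class whose alignment holds is chosen, and 16-byte bursts are now skipped on MPC8308. A hedged sketch of that selection (the 4-byte-and-below fallthrough is assumed from the surrounding code, which this hunk does not show):

    /* sketch: pick the widest usable transfer-size class */
    static int mpc_dma_pick_tsize(dma_addr_t src, dma_addr_t dst,
                                  size_t len, bool is_mpc8308)
    {
        if (IS_ALIGNED(src | dst | len, 32))
            return MPC_DMA_TSIZE_32;
        if (!is_mpc8308 && IS_ALIGNED(src | dst | len, 16))
            return MPC_DMA_TSIZE_16; /* unsupported on MPC8308 */
        if (IS_ALIGNED(src | dst | len, 4))
            return MPC_DMA_TSIZE_4;
        if (IS_ALIGNED(src | dst | len, 2))
            return MPC_DMA_TSIZE_2;
        return MPC_DMA_TSIZE_1;
    }
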
@@ -651,6 +673,15 @@ static int __devinit mpc_dma_probe(struct platform_device *op, | |||
651 | return -EINVAL; | 673 | return -EINVAL; |
652 | } | 674 | } |
653 | 675 | ||
676 | if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) { | ||
677 | mdma->is_mpc8308 = 1; | ||
678 | mdma->irq2 = irq_of_parse_and_map(dn, 1); | ||
679 | if (mdma->irq2 == NO_IRQ) { | ||
680 | dev_err(dev, "Error mapping IRQ!\n"); | ||
681 | return -EINVAL; | ||
682 | } | ||
683 | } | ||
684 | |||
654 | retval = of_address_to_resource(dn, 0, &res); | 685 | retval = of_address_to_resource(dn, 0, &res); |
655 | if (retval) { | 686 | if (retval) { |
656 | dev_err(dev, "Error parsing memory region!\n"); | 687 | dev_err(dev, "Error parsing memory region!\n"); |
@@ -681,11 +712,23 @@ static int __devinit mpc_dma_probe(struct platform_device *op, | |||
681 | return -EINVAL; | 712 | return -EINVAL; |
682 | } | 713 | } |
683 | 714 | ||
715 | if (mdma->is_mpc8308) { | ||
716 | retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0, | ||
717 | DRV_NAME, mdma); | ||
718 | if (retval) { | ||
719 | dev_err(dev, "Error requesting IRQ2!\n"); | ||
720 | return -EINVAL; | ||
721 | } | ||
722 | } | ||
723 | |||
684 | spin_lock_init(&mdma->error_status_lock); | 724 | spin_lock_init(&mdma->error_status_lock); |
685 | 725 | ||
686 | dma = &mdma->dma; | 726 | dma = &mdma->dma; |
687 | dma->dev = dev; | 727 | dma->dev = dev; |
688 | dma->chancnt = MPC_DMA_CHANNELS; | 728 | if (!mdma->is_mpc8308) |
729 | dma->chancnt = MPC_DMA_CHANNELS; | ||
730 | else | ||
731 | dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */ | ||
689 | dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; | 732 | dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; |
690 | dma->device_free_chan_resources = mpc_dma_free_chan_resources; | 733 | dma->device_free_chan_resources = mpc_dma_free_chan_resources; |
691 | dma->device_issue_pending = mpc_dma_issue_pending; | 734 | dma->device_issue_pending = mpc_dma_issue_pending; |
@@ -721,26 +764,40 @@ static int __devinit mpc_dma_probe(struct platform_device *op, | |||
721 | * - Round-robin group arbitration, | 764 | * - Round-robin group arbitration, |
722 | * - Round-robin channel arbitration. | 765 | * - Round-robin channel arbitration. |
723 | */ | 766 | */ |
724 | out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | | 767 | if (!mdma->is_mpc8308) { |
725 | MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); | 768 | out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | |
726 | 769 | MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); | |
727 | /* Disable hardware DMA requests */ | 770 | |
728 | out_be32(&mdma->regs->dmaerqh, 0); | 771 | /* Disable hardware DMA requests */ |
729 | out_be32(&mdma->regs->dmaerql, 0); | 772 | out_be32(&mdma->regs->dmaerqh, 0); |
730 | 773 | out_be32(&mdma->regs->dmaerql, 0); | |
731 | /* Disable error interrupts */ | 774 | |
732 | out_be32(&mdma->regs->dmaeeih, 0); | 775 | /* Disable error interrupts */ |
733 | out_be32(&mdma->regs->dmaeeil, 0); | 776 | out_be32(&mdma->regs->dmaeeih, 0); |
734 | 777 | out_be32(&mdma->regs->dmaeeil, 0); | |
735 | /* Clear interrupts status */ | 778 | |
736 | out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); | 779 | /* Clear interrupts status */ |
737 | out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); | 780 | out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); |
738 | out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); | 781 | out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); |
739 | out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); | 782 | out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); |
740 | 783 | out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); | |
741 | /* Route interrupts to IPIC */ | 784 | |
742 | out_be32(&mdma->regs->dmaihsa, 0); | 785 | /* Route interrupts to IPIC */ |
743 | out_be32(&mdma->regs->dmailsa, 0); | 786 | out_be32(&mdma->regs->dmaihsa, 0); |
787 | out_be32(&mdma->regs->dmailsa, 0); | ||
788 | } else { | ||
789 | /* MPC8308 has 16 channels and lacks some registers */ | ||
790 | out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA); | ||
791 | |||
792 | /* Enable snooping */ | ||
793 | out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE); | ||
794 | /* Disable error interrupts */ | ||
795 | out_be32(&mdma->regs->dmaeeil, 0); | ||
796 | |||
797 | /* Clear interrupts status */ | ||
798 | out_be32(&mdma->regs->dmaintl, 0xFFFF); | ||
799 | out_be32(&mdma->regs->dmaerrl, 0xFFFF); | ||
800 | } | ||
744 | 801 | ||
745 | /* Register DMA engine */ | 802 | /* Register DMA engine */ |
746 | dev_set_drvdata(dev, mdma); | 803 | dev_set_drvdata(dev, mdma); |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 411d5bf50fc..a25f5f61e0e 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -449,7 +449,7 @@ mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) | |||
449 | static void mv_xor_tasklet(unsigned long data) | 449 | static void mv_xor_tasklet(unsigned long data) |
450 | { | 450 | { |
451 | struct mv_xor_chan *chan = (struct mv_xor_chan *) data; | 451 | struct mv_xor_chan *chan = (struct mv_xor_chan *) data; |
452 | __mv_xor_slot_cleanup(chan); | 452 | mv_xor_slot_cleanup(chan); |
453 | } | 453 | } |
454 | 454 | ||
455 | static struct mv_xor_desc_slot * | 455 | static struct mv_xor_desc_slot * |
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 92b679024fe..1c38418ae61 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Topcliff PCH DMA controller driver | 2 | * Topcliff PCH DMA controller driver |
3 | * Copyright (c) 2010 Intel Corporation | 3 | * Copyright (c) 2010 Intel Corporation |
4 | * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD. | ||
4 | * | 5 | * |
5 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
@@ -259,11 +260,6 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) | |||
259 | return; | 260 | return; |
260 | } | 261 | } |
261 | 262 | ||
262 | channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr); | ||
263 | channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr); | ||
264 | channel_writel(pd_chan, SIZE, desc->regs.size); | ||
265 | channel_writel(pd_chan, NEXT, desc->regs.next); | ||
266 | |||
267 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n", | 263 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n", |
268 | pd_chan->chan.chan_id, desc->regs.dev_addr); | 264 | pd_chan->chan.chan_id, desc->regs.dev_addr); |
269 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n", | 265 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n", |
@@ -273,10 +269,16 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) | |||
273 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n", | 269 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n", |
274 | pd_chan->chan.chan_id, desc->regs.next); | 270 | pd_chan->chan.chan_id, desc->regs.next); |
275 | 271 | ||
276 | if (list_empty(&desc->tx_list)) | 272 | if (list_empty(&desc->tx_list)) { |
273 | channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr); | ||
274 | channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr); | ||
275 | channel_writel(pd_chan, SIZE, desc->regs.size); | ||
276 | channel_writel(pd_chan, NEXT, desc->regs.next); | ||
277 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT); | 277 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT); |
278 | else | 278 | } else { |
279 | channel_writel(pd_chan, NEXT, desc->txd.phys); | ||
279 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG); | 280 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG); |
281 | } | ||
280 | 282 | ||
281 | val = dma_readl(pd, CTL2); | 283 | val = dma_readl(pd, CTL2); |
282 | val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id); | 284 | val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id); |
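
The rewritten start path separates one-shot from scatter-gather transfers: with an empty tx_list the channel registers are programmed directly, while a chained transfer only seeds NEXT with desc->txd.phys so the controller fetches each descriptor from memory itself. The in-memory record it walks mirrors the register block; a sketch using only the fields already referenced above (the chain-terminator encoding is assumed):

    /* sketch: one hardware-visible descriptor in a DMA_CTL0_SG chain */
    struct pch_dma_desc_regs {
        u32 dev_addr; /* device FIFO address */
        u32 mem_addr; /* memory buffer address */
        u32 size;     /* transfer size and control bits */
        u32 next;     /* phys addr of the following descriptor */
    };
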
@@ -920,12 +922,19 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev) | |||
920 | } | 922 | } |
921 | 923 | ||
922 | /* PCI Device ID of DMA device */ | 924 | /* PCI Device ID of DMA device */ |
923 | #define PCI_DEVICE_ID_PCH_DMA_8CH 0x8810 | 925 | #define PCI_VENDOR_ID_ROHM 0x10DB |
924 | #define PCI_DEVICE_ID_PCH_DMA_4CH 0x8815 | 926 | #define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810 |
927 | #define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815 | ||
928 | #define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026 | ||
929 | #define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B | ||
930 | #define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034 | ||
925 | 931 | ||
926 | static const struct pci_device_id pch_dma_id_table[] = { | 932 | static const struct pci_device_id pch_dma_id_table[] = { |
927 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 }, | 933 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, |
928 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 }, | 934 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 }, |
935 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8 }, /* UART Video */ | ||
936 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8 }, /* PCMIF SPI */ | ||
937 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4 }, /* FPGA */ | ||
929 | { 0, }, | 938 | { 0, }, |
930 | }; | 939 | }; |
931 | 940 | ||
@@ -953,6 +962,7 @@ static void __exit pch_dma_exit(void) | |||
953 | module_init(pch_dma_init); | 962 | module_init(pch_dma_init); |
954 | module_exit(pch_dma_exit); | 963 | module_exit(pch_dma_exit); |
955 | 964 | ||
956 | MODULE_DESCRIPTION("Topcliff PCH DMA controller driver"); | 965 | MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH " |
966 | "DMA controller driver"); | ||
957 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); | 967 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); |
958 | MODULE_LICENSE("GPL v2"); | 968 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 0d58a4a4487..cef584533ee 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
@@ -4449,9 +4449,8 @@ static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev, | |||
4449 | 4449 | ||
4450 | if (!request_mem_region(res.start, resource_size(&res), | 4450 | if (!request_mem_region(res.start, resource_size(&res), |
4451 | dev_driver_string(&ofdev->dev))) { | 4451 | dev_driver_string(&ofdev->dev))) { |
4452 | dev_err(&ofdev->dev, "failed to request memory region " | 4452 | dev_err(&ofdev->dev, "failed to request memory region %pR\n", |
4453 | "(0x%016llx-0x%016llx)\n", | 4453 | &res); |
4454 | (u64)res.start, (u64)res.end); | ||
4455 | initcode = PPC_ADMA_INIT_MEMREG; | 4454 | initcode = PPC_ADMA_INIT_MEMREG; |
4456 | ret = -EBUSY; | 4455 | ret = -EBUSY; |
4457 | goto out; | 4456 | goto out; |
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index eb6b54dbb80..28720d3103c 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c | |||
@@ -27,7 +27,10 @@ | |||
27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
28 | #include <linux/pm_runtime.h> | 28 | #include <linux/pm_runtime.h> |
29 | #include <linux/sh_dma.h> | 29 | #include <linux/sh_dma.h> |
30 | 30 | #include <linux/notifier.h> | |
31 | #include <linux/kdebug.h> | ||
32 | #include <linux/spinlock.h> | ||
33 | #include <linux/rculist.h> | ||
31 | #include "shdma.h" | 34 | #include "shdma.h" |
32 | 35 | ||
33 | /* DMA descriptor control */ | 36 | /* DMA descriptor control */ |
@@ -43,6 +46,13 @@ enum sh_dmae_desc_status { | |||
43 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ | 46 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ |
44 | #define LOG2_DEFAULT_XFER_SIZE 2 | 47 | #define LOG2_DEFAULT_XFER_SIZE 2 |
45 | 48 | ||
49 | /* | ||
50 | * Used for write-side mutual exclusion for the global device list, | ||
51 | * read-side synchronization by way of RCU. | ||
52 | */ | ||
53 | static DEFINE_SPINLOCK(sh_dmae_lock); | ||
54 | static LIST_HEAD(sh_dmae_devices); | ||
55 | |||
46 | /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ | 56 | /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ |
47 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)]; | 57 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)]; |
48 | 58 | ||
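
The comment above spells out the locking rules for the new global controller list: writers serialize on sh_dmae_lock, while the NMI-context reader walks the list under RCU. A minimal sketch of the write side under those rules (the placement in probe/remove is assumed, as this hunk does not show it):

    /* sketch: registering a controller on the sh_dmae_devices list */
    spin_lock_irq(&sh_dmae_lock);
    list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
    spin_unlock_irq(&sh_dmae_lock);

    /* ...and removing it again */
    spin_lock_irq(&sh_dmae_lock);
    list_del_rcu(&shdev->node);
    spin_unlock_irq(&sh_dmae_lock);
    synchronize_rcu(); /* no NMI handler may still see shdev afterwards */
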
@@ -817,10 +827,9 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data) | |||
817 | return ret; | 827 | return ret; |
818 | } | 828 | } |
819 | 829 | ||
820 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | 830 | static unsigned int sh_dmae_reset(struct sh_dmae_device *shdev) |
821 | static irqreturn_t sh_dmae_err(int irq, void *data) | ||
822 | { | 831 | { |
823 | struct sh_dmae_device *shdev = (struct sh_dmae_device *)data; | 832 | unsigned int handled = 0; |
824 | int i; | 833 | int i; |
825 | 834 | ||
826 | /* halt the dma controller */ | 835 | /* halt the dma controller */ |
@@ -829,25 +838,35 @@ static irqreturn_t sh_dmae_err(int irq, void *data) | |||
829 | /* We cannot detect which channel caused the error, so we have to reset all */ | 838 | /* We cannot detect which channel caused the error, so we have to reset all */ |
830 | for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) { | 839 | for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) { |
831 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | 840 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; |
832 | if (sh_chan) { | 841 | struct sh_desc *desc; |
833 | struct sh_desc *desc; | 842 | |
834 | /* Stop the channel */ | 843 | if (!sh_chan) |
835 | dmae_halt(sh_chan); | 844 | continue; |
836 | /* Complete all */ | 845 | |
837 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { | 846 | /* Stop the channel */ |
838 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | 847 | dmae_halt(sh_chan); |
839 | desc->mark = DESC_IDLE; | 848 | |
840 | if (tx->callback) | 849 | /* Complete all */ |
841 | tx->callback(tx->callback_param); | 850 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { |
842 | } | 851 | struct dma_async_tx_descriptor *tx = &desc->async_tx; |
843 | list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free); | 852 | desc->mark = DESC_IDLE; |
853 | if (tx->callback) | ||
854 | tx->callback(tx->callback_param); | ||
844 | } | 855 | } |
856 | |||
857 | list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free); | ||
858 | handled++; | ||
845 | } | 859 | } |
860 | |||
846 | sh_dmae_rst(shdev); | 861 | sh_dmae_rst(shdev); |
847 | 862 | ||
848 | return IRQ_HANDLED; | 863 | return !!handled; |
864 | } | ||
865 | |||
866 | static irqreturn_t sh_dmae_err(int irq, void *data) | ||
867 | { | ||
868 | return IRQ_RETVAL(sh_dmae_reset(data)); | ||
849 | } | 869 | } |
850 | #endif | ||
851 | 870 | ||
852 | static void dmae_do_tasklet(unsigned long data) | 871 | static void dmae_do_tasklet(unsigned long data) |
853 | { | 872 | { |
@@ -876,6 +895,60 @@ static void dmae_do_tasklet(unsigned long data) | |||
876 | sh_dmae_chan_ld_cleanup(sh_chan, false); | 895 | sh_dmae_chan_ld_cleanup(sh_chan, false); |
877 | } | 896 | } |
878 | 897 | ||
898 | static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev) | ||
899 | { | ||
900 | unsigned int handled; | ||
901 | |||
902 | /* Fast path out if NMIF is not asserted for this controller */ | ||
903 | if ((dmaor_read(shdev) & DMAOR_NMIF) == 0) | ||
904 | return false; | ||
905 | |||
906 | handled = sh_dmae_reset(shdev); | ||
907 | if (handled) | ||
908 | return true; | ||
909 | |||
910 | return false; | ||
911 | } | ||
912 | |||
913 | static int sh_dmae_nmi_handler(struct notifier_block *self, | ||
914 | unsigned long cmd, void *data) | ||
915 | { | ||
916 | struct sh_dmae_device *shdev; | ||
917 | int ret = NOTIFY_DONE; | ||
918 | bool triggered; | ||
919 | |||
920 | /* | ||
921 | * Only concern ourselves with NMI events. | ||
922 | * | ||
923 | * Normally we would check the die chain value, but as this needs | ||
924 | * to be architecture independent, check for NMI context instead. | ||
925 | */ | ||
926 | if (!in_nmi()) | ||
927 | return NOTIFY_DONE; | ||
928 | |||
929 | rcu_read_lock(); | ||
930 | list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) { | ||
931 | /* | ||
932 | * Only stop if one of the controllers has NMIF asserted; | ||
933 | * we do not want to interfere with regular address error | ||
934 | * handling or NMI events that don't concern the DMACs. | ||
935 | */ | ||
936 | triggered = sh_dmae_nmi_notify(shdev); | ||
937 | if (triggered) | ||
938 | ret = NOTIFY_OK; | ||
939 | } | ||
940 | rcu_read_unlock(); | ||
941 | |||
942 | return ret; | ||
943 | } | ||
944 | |||
945 | static struct notifier_block sh_dmae_nmi_notifier __read_mostly = { | ||
946 | .notifier_call = sh_dmae_nmi_handler, | ||
947 | |||
948 | /* Run before NMI debug handler and KGDB */ | ||
949 | .priority = 1, | ||
950 | }; | ||
951 | |||
879 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, | 952 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, |
880 | int irq, unsigned long flags) | 953 | int irq, unsigned long flags) |
881 | { | 954 | { |
@@ -967,6 +1040,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
967 | struct sh_dmae_pdata *pdata = pdev->dev.platform_data; | 1040 | struct sh_dmae_pdata *pdata = pdev->dev.platform_data; |
968 | unsigned long irqflags = IRQF_DISABLED, | 1041 | unsigned long irqflags = IRQF_DISABLED, |
969 | chan_flag[SH_DMAC_MAX_CHANNELS] = {}; | 1042 | chan_flag[SH_DMAC_MAX_CHANNELS] = {}; |
1043 | unsigned long flags; | ||
970 | int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; | 1044 | int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; |
971 | int err, i, irq_cnt = 0, irqres = 0; | 1045 | int err, i, irq_cnt = 0, irqres = 0; |
972 | struct sh_dmae_device *shdev; | 1046 | struct sh_dmae_device *shdev; |
@@ -1032,6 +1106,10 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
1032 | pm_runtime_enable(&pdev->dev); | 1106 | pm_runtime_enable(&pdev->dev); |
1033 | pm_runtime_get_sync(&pdev->dev); | 1107 | pm_runtime_get_sync(&pdev->dev); |
1034 | 1108 | ||
1109 | spin_lock_irqsave(&sh_dmae_lock, flags); | ||
1110 | list_add_tail_rcu(&shdev->node, &sh_dmae_devices); | ||
1111 | spin_unlock_irqrestore(&sh_dmae_lock, flags); | ||
1112 | |||
1035 | /* reset dma controller */ | 1113 | /* reset dma controller */ |
1036 | err = sh_dmae_rst(shdev); | 1114 | err = sh_dmae_rst(shdev); |
1037 | if (err) | 1115 | if (err) |
@@ -1135,6 +1213,10 @@ eirqres: | |||
1135 | eirq_err: | 1213 | eirq_err: |
1136 | #endif | 1214 | #endif |
1137 | rst_err: | 1215 | rst_err: |
1216 | spin_lock_irqsave(&sh_dmae_lock, flags); | ||
1217 | list_del_rcu(&shdev->node); | ||
1218 | spin_unlock_irqrestore(&sh_dmae_lock, flags); | ||
1219 | |||
1138 | pm_runtime_put(&pdev->dev); | 1220 | pm_runtime_put(&pdev->dev); |
1139 | if (dmars) | 1221 | if (dmars) |
1140 | iounmap(shdev->dmars); | 1222 | iounmap(shdev->dmars); |
@@ -1155,6 +1237,7 @@ static int __exit sh_dmae_remove(struct platform_device *pdev) | |||
1155 | { | 1237 | { |
1156 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | 1238 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); |
1157 | struct resource *res; | 1239 | struct resource *res; |
1240 | unsigned long flags; | ||
1158 | int errirq = platform_get_irq(pdev, 0); | 1241 | int errirq = platform_get_irq(pdev, 0); |
1159 | 1242 | ||
1160 | dma_async_device_unregister(&shdev->common); | 1243 | dma_async_device_unregister(&shdev->common); |
@@ -1162,6 +1245,10 @@ static int __exit sh_dmae_remove(struct platform_device *pdev) | |||
1162 | if (errirq > 0) | 1245 | if (errirq > 0) |
1163 | free_irq(errirq, shdev); | 1246 | free_irq(errirq, shdev); |
1164 | 1247 | ||
1248 | spin_lock_irqsave(&sh_dmae_lock, flags); | ||
1249 | list_del_rcu(&shdev->node); | ||
1250 | spin_unlock_irqrestore(&sh_dmae_lock, flags); | ||
1251 | |||
1165 | /* channel data remove */ | 1252 | /* channel data remove */ |
1166 | sh_dmae_chan_remove(shdev); | 1253 | sh_dmae_chan_remove(shdev); |
1167 | 1254 | ||
@@ -1200,6 +1287,11 @@ static struct platform_driver sh_dmae_driver = { | |||
1200 | 1287 | ||
1201 | static int __init sh_dmae_init(void) | 1288 | static int __init sh_dmae_init(void) |
1202 | { | 1289 | { |
1290 | /* Wire up NMI handling */ | ||
1291 | int err = register_die_notifier(&sh_dmae_nmi_notifier); | ||
1292 | if (err) | ||
1293 | return err; | ||
1294 | |||
1203 | return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); | 1295 | return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); |
1204 | } | 1296 | } |
1205 | module_init(sh_dmae_init); | 1297 | module_init(sh_dmae_init); |
@@ -1207,9 +1299,12 @@ module_init(sh_dmae_init); | |||
1207 | static void __exit sh_dmae_exit(void) | 1299 | static void __exit sh_dmae_exit(void) |
1208 | { | 1300 | { |
1209 | platform_driver_unregister(&sh_dmae_driver); | 1301 | platform_driver_unregister(&sh_dmae_driver); |
1302 | |||
1303 | unregister_die_notifier(&sh_dmae_nmi_notifier); | ||
1210 | } | 1304 | } |
1211 | module_exit(sh_dmae_exit); | 1305 | module_exit(sh_dmae_exit); |
1212 | 1306 | ||
1213 | MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); | 1307 | MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); |
1214 | MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); | 1308 | MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); |
1215 | MODULE_LICENSE("GPL"); | 1309 | MODULE_LICENSE("GPL"); |
1310 | MODULE_ALIAS("platform:sh-dma-engine"); | ||
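The NMI support added above is an instance of a standard kernel pattern: writers of a global device list serialize on a spinlock, the die notifier walks the same list under RCU (an NMI handler must not take the spinlock, since it can fire while the lock is held), and the notifier only claims the event when one of its own devices raised it. A condensed sketch of the pattern, with hypothetical names throughout:

	#include <linux/hardirq.h>
	#include <linux/kdebug.h>
	#include <linux/rculist.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* write-side only */
	static LIST_HEAD(example_devices);	/* readers walk it under RCU */

	struct example_dev {
		struct list_head node;
		/* ... device state ... */
	};

	static bool example_dev_raised_nmi(struct example_dev *edev); /* hypothetical */

	static int example_nmi_handler(struct notifier_block *self,
				       unsigned long cmd, void *data)
	{
		struct example_dev *edev;
		int ret = NOTIFY_DONE;

		if (!in_nmi())		/* only interested in NMI events */
			return NOTIFY_DONE;

		rcu_read_lock();
		list_for_each_entry_rcu(edev, &example_devices, node)
			if (example_dev_raised_nmi(edev))
				ret = NOTIFY_OK;	/* claim the event */
		rcu_read_unlock();

		return ret;
	}

	static struct notifier_block example_nmi_notifier = {
		.notifier_call	= example_nmi_handler,
		.priority	= 1,	/* ahead of the NMI debug handler and KGDB */
	};

Probe and remove add and delete devices with list_add_tail_rcu()/list_del_rcu() under example_lock, and the init/exit pair calls register_die_notifier()/unregister_die_notifier(), exactly as sh_dmae_probe(), sh_dmae_remove(), sh_dmae_init() and sh_dmae_exit() do above. A remover that frees the device would additionally want synchronize_rcu() between list_del_rcu() and the free, so that a concurrent NMI-side walker cannot touch freed memory.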
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h index 4021275a0a4..52e4fb17380 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/shdma.h | |||
@@ -43,6 +43,7 @@ struct sh_dmae_device { | |||
43 | struct dma_device common; | 43 | struct dma_device common; |
44 | struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS]; | 44 | struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS]; |
45 | struct sh_dmae_pdata *pdata; | 45 | struct sh_dmae_pdata *pdata; |
46 | struct list_head node; | ||
46 | u32 __iomem *chan_reg; | 47 | u32 __iomem *chan_reg; |
47 | u16 __iomem *dmars; | 48 | u16 __iomem *dmars; |
48 | }; | 49 | }; |
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index fab68a55320..af955de035f 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) ST-Ericsson SA 2007-2010 | 2 | * Copyright (C) Ericsson AB 2007-2008 |
3 | * Copyright (C) ST-Ericsson SA 2008-2010 | ||
3 | * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson | 4 | * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson |
4 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson | 5 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson |
5 | * License terms: GNU General Public License (GPL) version 2 | 6 | * License terms: GNU General Public License (GPL) version 2 |
@@ -67,6 +68,7 @@ enum d40_command { | |||
67 | * @base: Pointer to memory area when the pre_alloc_lli's are not large | 68 | * @base: Pointer to memory area when the pre_alloc_lli's are not large |
68 | * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if | 69 | * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if |
69 | * pre_alloc_lli is used. | 70 | * pre_alloc_lli is used. |
71 | * @dma_addr: DMA address, if mapped | ||
70 | * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. | 72 | * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. |
71 | * @pre_alloc_lli: Pre allocated area for the most common case of transfers, | 73 | * @pre_alloc_lli: Pre allocated area for the most common case of transfers, |
72 | * one buffer to one buffer. | 74 | * one buffer to one buffer. |
@@ -74,6 +76,7 @@ enum d40_command { | |||
74 | struct d40_lli_pool { | 76 | struct d40_lli_pool { |
75 | void *base; | 77 | void *base; |
76 | int size; | 78 | int size; |
79 | dma_addr_t dma_addr; | ||
77 | /* Space for dst and src, plus an extra for padding */ | 80 | /* Space for dst and src, plus an extra for padding */ |
78 | u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; | 81 | u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; |
79 | }; | 82 | }; |
@@ -93,7 +96,6 @@ struct d40_lli_pool { | |||
93 | * during a transfer. | 96 | * during a transfer. |
94 | * @node: List entry. | 97 | * @node: List entry. |
95 | * @is_in_client_list: true if the client owns this descriptor. | 98 | * @is_in_client_list: true if the client owns this descriptor. |
96 | * @is_hw_linked: true if this job will automatically be continued for | ||
97 | * the previous one. | 99 | * the previous one. |
98 | * | 100 | * |
99 | * This descriptor is used for both logical and physical transfers. | 101 | * This descriptor is used for both logical and physical transfers. |
@@ -113,7 +115,7 @@ struct d40_desc { | |||
113 | struct list_head node; | 115 | struct list_head node; |
114 | 116 | ||
115 | bool is_in_client_list; | 117 | bool is_in_client_list; |
116 | bool is_hw_linked; | 118 | bool cyclic; |
117 | }; | 119 | }; |
118 | 120 | ||
119 | /** | 121 | /** |
@@ -129,6 +131,7 @@ struct d40_desc { | |||
129 | */ | 131 | */ |
130 | struct d40_lcla_pool { | 132 | struct d40_lcla_pool { |
131 | void *base; | 133 | void *base; |
134 | dma_addr_t dma_addr; | ||
132 | void *base_unaligned; | 135 | void *base_unaligned; |
133 | int pages; | 136 | int pages; |
134 | spinlock_t lock; | 137 | spinlock_t lock; |
@@ -302,9 +305,37 @@ struct d40_reg_val { | |||
302 | unsigned int val; | 305 | unsigned int val; |
303 | }; | 306 | }; |
304 | 307 | ||
305 | static int d40_pool_lli_alloc(struct d40_desc *d40d, | 308 | static struct device *chan2dev(struct d40_chan *d40c) |
306 | int lli_len, bool is_log) | ||
307 | { | 309 | { |
310 | return &d40c->chan.dev->device; | ||
311 | } | ||
312 | |||
313 | static bool chan_is_physical(struct d40_chan *chan) | ||
314 | { | ||
315 | return chan->log_num == D40_PHY_CHAN; | ||
316 | } | ||
317 | |||
318 | static bool chan_is_logical(struct d40_chan *chan) | ||
319 | { | ||
320 | return !chan_is_physical(chan); | ||
321 | } | ||
322 | |||
323 | static void __iomem *chan_base(struct d40_chan *chan) | ||
324 | { | ||
325 | return chan->base->virtbase + D40_DREG_PCBASE + | ||
326 | chan->phy_chan->num * D40_DREG_PCDELTA; | ||
327 | } | ||
328 | |||
329 | #define d40_err(dev, format, arg...) \ | ||
330 | dev_err(dev, "[%s] " format, __func__, ## arg) | ||
331 | |||
332 | #define chan_err(d40c, format, arg...) \ | ||
333 | d40_err(chan2dev(d40c), format, ## arg) | ||
334 | |||
335 | static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, | ||
336 | int lli_len) | ||
337 | { | ||
338 | bool is_log = chan_is_logical(d40c); | ||
308 | u32 align; | 339 | u32 align; |
309 | void *base; | 340 | void *base; |
310 | 341 | ||
@@ -318,7 +349,7 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d, | |||
318 | d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); | 349 | d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); |
319 | d40d->lli_pool.base = NULL; | 350 | d40d->lli_pool.base = NULL; |
320 | } else { | 351 | } else { |
321 | d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align); | 352 | d40d->lli_pool.size = lli_len * 2 * align; |
322 | 353 | ||
323 | base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); | 354 | base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); |
324 | d40d->lli_pool.base = base; | 355 | d40d->lli_pool.base = base; |
@@ -328,22 +359,37 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d, | |||
328 | } | 359 | } |
329 | 360 | ||
330 | if (is_log) { | 361 | if (is_log) { |
331 | d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, | 362 | d40d->lli_log.src = PTR_ALIGN(base, align); |
332 | align); | 363 | d40d->lli_log.dst = d40d->lli_log.src + lli_len; |
333 | d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len, | 364 | |
334 | align); | 365 | d40d->lli_pool.dma_addr = 0; |
335 | } else { | 366 | } else { |
336 | d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, | 367 | d40d->lli_phy.src = PTR_ALIGN(base, align); |
337 | align); | 368 | d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; |
338 | d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, | 369 | |
339 | align); | 370 | d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, |
371 | d40d->lli_phy.src, | ||
372 | d40d->lli_pool.size, | ||
373 | DMA_TO_DEVICE); | ||
374 | |||
375 | if (dma_mapping_error(d40c->base->dev, | ||
376 | d40d->lli_pool.dma_addr)) { | ||
377 | kfree(d40d->lli_pool.base); | ||
378 | d40d->lli_pool.base = NULL; | ||
379 | d40d->lli_pool.dma_addr = 0; | ||
380 | return -ENOMEM; | ||
381 | } | ||
340 | } | 382 | } |
341 | 383 | ||
342 | return 0; | 384 | return 0; |
343 | } | 385 | } |
344 | 386 | ||
345 | static void d40_pool_lli_free(struct d40_desc *d40d) | 387 | static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d) |
346 | { | 388 | { |
389 | if (d40d->lli_pool.dma_addr) | ||
390 | dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr, | ||
391 | d40d->lli_pool.size, DMA_TO_DEVICE); | ||
392 | |||
347 | kfree(d40d->lli_pool.base); | 393 | kfree(d40d->lli_pool.base); |
348 | d40d->lli_pool.base = NULL; | 394 | d40d->lli_pool.base = NULL; |
349 | d40d->lli_pool.size = 0; | 395 | d40d->lli_pool.size = 0; |
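The mapping added to d40_pool_lli_alloc()/d40_pool_lli_free() above follows the usual streaming-DMA discipline: every dma_map_single() is checked with dma_mapping_error() before the handle is used, and the stored dma_addr doubles as the mapped/not-mapped flag for the matching dma_unmap_single(). A minimal standalone sketch of that discipline (helper names are illustrative):

	#include <linux/dma-mapping.h>

	/* Map a driver-owned buffer for device reads. */
	static int example_map_buffer(struct device *dev, void *buf, size_t size,
				      dma_addr_t *dma_addr)
	{
		*dma_addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *dma_addr)) {
			*dma_addr = 0;		/* mark "not mapped" */
			return -ENOMEM;
		}

		return 0;
	}

	static void example_unmap_buffer(struct device *dev, dma_addr_t dma_addr,
					 size_t size)
	{
		if (dma_addr)			/* only unmap what was mapped */
			dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
	}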
@@ -390,7 +436,7 @@ static int d40_lcla_free_all(struct d40_chan *d40c, | |||
390 | int i; | 436 | int i; |
391 | int ret = -EINVAL; | 437 | int ret = -EINVAL; |
392 | 438 | ||
393 | if (d40c->log_num == D40_PHY_CHAN) | 439 | if (chan_is_physical(d40c)) |
394 | return 0; | 440 | return 0; |
395 | 441 | ||
396 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); | 442 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); |
@@ -429,7 +475,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) | |||
429 | 475 | ||
430 | list_for_each_entry_safe(d, _d, &d40c->client, node) | 476 | list_for_each_entry_safe(d, _d, &d40c->client, node) |
431 | if (async_tx_test_ack(&d->txd)) { | 477 | if (async_tx_test_ack(&d->txd)) { |
432 | d40_pool_lli_free(d); | 478 | d40_pool_lli_free(d40c, d); |
433 | d40_desc_remove(d); | 479 | d40_desc_remove(d); |
434 | desc = d; | 480 | desc = d; |
435 | memset(desc, 0, sizeof(*desc)); | 481 | memset(desc, 0, sizeof(*desc)); |
@@ -449,6 +495,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) | |||
449 | static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) | 495 | static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) |
450 | { | 496 | { |
451 | 497 | ||
498 | d40_pool_lli_free(d40c, d40d); | ||
452 | d40_lcla_free_all(d40c, d40d); | 499 | d40_lcla_free_all(d40c, d40d); |
453 | kmem_cache_free(d40c->base->desc_slab, d40d); | 500 | kmem_cache_free(d40c->base->desc_slab, d40d); |
454 | } | 501 | } |
@@ -458,57 +505,128 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) | |||
458 | list_add_tail(&desc->node, &d40c->active); | 505 | list_add_tail(&desc->node, &d40c->active); |
459 | } | 506 | } |
460 | 507 | ||
461 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) | 508 | static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) |
462 | { | 509 | { |
463 | int curr_lcla = -EINVAL, next_lcla; | 510 | struct d40_phy_lli *lli_dst = desc->lli_phy.dst; |
511 | struct d40_phy_lli *lli_src = desc->lli_phy.src; | ||
512 | void __iomem *base = chan_base(chan); | ||
513 | |||
514 | writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG); | ||
515 | writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT); | ||
516 | writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR); | ||
517 | writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK); | ||
518 | |||
519 | writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG); | ||
520 | writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT); | ||
521 | writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR); | ||
522 | writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); | ||
523 | } | ||
464 | 524 | ||
465 | if (d40c->log_num == D40_PHY_CHAN) { | 525 | static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) |
466 | d40_phy_lli_write(d40c->base->virtbase, | 526 | { |
467 | d40c->phy_chan->num, | 527 | struct d40_lcla_pool *pool = &chan->base->lcla_pool; |
468 | d40d->lli_phy.dst, | 528 | struct d40_log_lli_bidir *lli = &desc->lli_log; |
469 | d40d->lli_phy.src); | 529 | int lli_current = desc->lli_current; |
470 | d40d->lli_current = d40d->lli_len; | 530 | int lli_len = desc->lli_len; |
471 | } else { | 531 | bool cyclic = desc->cyclic; |
532 | int curr_lcla = -EINVAL; | ||
533 | int first_lcla = 0; | ||
534 | bool linkback; | ||
472 | 535 | ||
473 | if ((d40d->lli_len - d40d->lli_current) > 1) | 536 | /* |
474 | curr_lcla = d40_lcla_alloc_one(d40c, d40d); | 537 | * We may have partially running cyclic transfers, in case we didn't get |
538 | * enough LCLA entries. | ||
539 | */ | ||
540 | linkback = cyclic && lli_current == 0; | ||
475 | 541 | ||
476 | d40_log_lli_lcpa_write(d40c->lcpa, | 542 | /* |
477 | &d40d->lli_log.dst[d40d->lli_current], | 543 | * For linkback, we need one LCLA even with only one link, because we |
478 | &d40d->lli_log.src[d40d->lli_current], | 544 | * can't link back to the one in LCPA space |
479 | curr_lcla); | 545 | */ |
546 | if (linkback || (lli_len - lli_current > 1)) { | ||
547 | curr_lcla = d40_lcla_alloc_one(chan, desc); | ||
548 | first_lcla = curr_lcla; | ||
549 | } | ||
480 | 550 | ||
481 | d40d->lli_current++; | 551 | /* |
482 | for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) { | 552 | * For linkback, we normally load the LCPA in the loop since we need to |
483 | struct d40_log_lli *lcla; | 553 | * link it to the second LCLA and not the first. However, if we |
554 | * couldn't even get a first LCLA, then we have to run in LCPA and | ||
555 | * reload manually. | ||
556 | */ | ||
557 | if (!linkback || curr_lcla == -EINVAL) { | ||
558 | unsigned int flags = 0; | ||
484 | 559 | ||
485 | if (d40d->lli_current + 1 < d40d->lli_len) | 560 | if (curr_lcla == -EINVAL) |
486 | next_lcla = d40_lcla_alloc_one(d40c, d40d); | 561 | flags |= LLI_TERM_INT; |
487 | else | ||
488 | next_lcla = -EINVAL; | ||
489 | 562 | ||
490 | lcla = d40c->base->lcla_pool.base + | 563 | d40_log_lli_lcpa_write(chan->lcpa, |
491 | d40c->phy_chan->num * 1024 + | 564 | &lli->dst[lli_current], |
492 | 8 * curr_lcla * 2; | 565 | &lli->src[lli_current], |
566 | curr_lcla, | ||
567 | flags); | ||
568 | lli_current++; | ||
569 | } | ||
493 | 570 | ||
494 | d40_log_lli_lcla_write(lcla, | 571 | if (curr_lcla < 0) |
495 | &d40d->lli_log.dst[d40d->lli_current], | 572 | goto out; |
496 | &d40d->lli_log.src[d40d->lli_current], | ||
497 | next_lcla); | ||
498 | 573 | ||
499 | (void) dma_map_single(d40c->base->dev, lcla, | 574 | for (; lli_current < lli_len; lli_current++) { |
500 | 2 * sizeof(struct d40_log_lli), | 575 | unsigned int lcla_offset = chan->phy_chan->num * 1024 + |
501 | DMA_TO_DEVICE); | 576 | 8 * curr_lcla * 2; |
577 | struct d40_log_lli *lcla = pool->base + lcla_offset; | ||
578 | unsigned int flags = 0; | ||
579 | int next_lcla; | ||
502 | 580 | ||
503 | curr_lcla = next_lcla; | 581 | if (lli_current + 1 < lli_len) |
582 | next_lcla = d40_lcla_alloc_one(chan, desc); | ||
583 | else | ||
584 | next_lcla = linkback ? first_lcla : -EINVAL; | ||
504 | 585 | ||
505 | if (curr_lcla == -EINVAL) { | 586 | if (cyclic || next_lcla == -EINVAL) |
506 | d40d->lli_current++; | 587 | flags |= LLI_TERM_INT; |
507 | break; | ||
508 | } | ||
509 | 588 | ||
589 | if (linkback && curr_lcla == first_lcla) { | ||
590 | /* First link goes in both LCPA and LCLA */ | ||
591 | d40_log_lli_lcpa_write(chan->lcpa, | ||
592 | &lli->dst[lli_current], | ||
593 | &lli->src[lli_current], | ||
594 | next_lcla, flags); | ||
595 | } | ||
596 | |||
597 | /* | ||
598 | * One unused LCLA in the cyclic case if the very first | ||
599 | * next_lcla fails... | ||
600 | */ | ||
601 | d40_log_lli_lcla_write(lcla, | ||
602 | &lli->dst[lli_current], | ||
603 | &lli->src[lli_current], | ||
604 | next_lcla, flags); | ||
605 | |||
606 | dma_sync_single_range_for_device(chan->base->dev, | ||
607 | pool->dma_addr, lcla_offset, | ||
608 | 2 * sizeof(struct d40_log_lli), | ||
609 | DMA_TO_DEVICE); | ||
610 | |||
611 | curr_lcla = next_lcla; | ||
612 | |||
613 | if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { | ||
614 | lli_current++; | ||
615 | break; | ||
510 | } | 616 | } |
511 | } | 617 | } |
618 | |||
619 | out: | ||
620 | desc->lli_current = lli_current; | ||
621 | } | ||
622 | |||
623 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) | ||
624 | { | ||
625 | if (chan_is_physical(d40c)) { | ||
626 | d40_phy_lli_load(d40c, d40d); | ||
627 | d40d->lli_current = d40d->lli_len; | ||
628 | } else | ||
629 | d40_log_lli_to_lcxa(d40c, d40d); | ||
512 | } | 630 | } |
513 | 631 | ||
514 | static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) | 632 | static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) |
@@ -542,20 +660,66 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c) | |||
542 | return d; | 660 | return d; |
543 | } | 661 | } |
544 | 662 | ||
545 | static struct d40_desc *d40_last_queued(struct d40_chan *d40c) | 663 | static int d40_psize_2_burst_size(bool is_log, int psize) |
546 | { | 664 | { |
547 | struct d40_desc *d; | 665 | if (is_log) { |
666 | if (psize == STEDMA40_PSIZE_LOG_1) | ||
667 | return 1; | ||
668 | } else { | ||
669 | if (psize == STEDMA40_PSIZE_PHY_1) | ||
670 | return 1; | ||
671 | } | ||
548 | 672 | ||
549 | if (list_empty(&d40c->queue)) | 673 | return 2 << psize; |
550 | return NULL; | ||
551 | list_for_each_entry(d, &d40c->queue, node) | ||
552 | if (list_is_last(&d->node, &d40c->queue)) | ||
553 | break; | ||
554 | return d; | ||
555 | } | 674 | } |
556 | 675 | ||
557 | /* Support functions for logical channels */ | 676 | /* |
677 | * The DMA only supports transfers of up to | ||
678 | * STEDMA40_MAX_SEG_SIZE << data_width bytes. Calculate the total | ||
679 | * number of DMA elements required to send the entire sg list. | ||
680 | */ | ||
681 | static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2) | ||
682 | { | ||
683 | int dmalen; | ||
684 | u32 max_w = max(data_width1, data_width2); | ||
685 | u32 min_w = min(data_width1, data_width2); | ||
686 | u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); | ||
558 | 687 | ||
688 | if (seg_max > STEDMA40_MAX_SEG_SIZE) | ||
689 | seg_max -= (1 << max_w); | ||
690 | |||
691 | if (!IS_ALIGNED(size, 1 << max_w)) | ||
692 | return -EINVAL; | ||
693 | |||
694 | if (size <= seg_max) | ||
695 | dmalen = 1; | ||
696 | else { | ||
697 | dmalen = size / seg_max; | ||
698 | if (dmalen * seg_max < size) | ||
699 | dmalen++; | ||
700 | } | ||
701 | return dmalen; | ||
702 | } | ||
703 | |||
704 | static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len, | ||
705 | u32 data_width1, u32 data_width2) | ||
706 | { | ||
707 | struct scatterlist *sg; | ||
708 | int i; | ||
709 | int len = 0; | ||
710 | int ret; | ||
711 | |||
712 | for_each_sg(sgl, sg, sg_len, i) { | ||
713 | ret = d40_size_2_dmalen(sg_dma_len(sg), | ||
714 | data_width1, data_width2); | ||
715 | if (ret < 0) | ||
716 | return ret; | ||
717 | len += ret; | ||
718 | } | ||
719 | return len; | ||
720 | } | ||
721 | |||
722 | /* Support functions for logical channels */ | ||
559 | 723 | ||
560 | static int d40_channel_execute_command(struct d40_chan *d40c, | 724 | static int d40_channel_execute_command(struct d40_chan *d40c, |
561 | enum d40_command command) | 725 | enum d40_command command) |
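The d40_size_2_dmalen() arithmetic a few hunks up is easiest to verify with concrete numbers. A userspace sketch, assuming STEDMA40_MAX_SEG_SIZE is 0xFFFF (the constant's value is not visible in this diff) and restating the original dmalen loop as a ceiling division:

	#include <stdio.h>

	#define MAX_SEG_SIZE	0xFFFFu			/* assumed value */
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	static int size_2_dmalen(unsigned int size, unsigned int w1,
				 unsigned int w2)
	{
		unsigned int max_w = w1 > w2 ? w1 : w2;	/* widths are log2 bytes */
		unsigned int min_w = w1 < w2 ? w1 : w2;
		unsigned int seg_max = ALIGN(MAX_SEG_SIZE << min_w, 1u << max_w);

		if (seg_max > MAX_SEG_SIZE)
			seg_max -= 1u << max_w;	/* stay within the HW limit */

		if (size % (1u << max_w))	/* must be element-aligned */
			return -1;

		return (size + seg_max - 1) / seg_max;	/* ceiling division */
	}

	int main(void)
	{
		/*
		 * 100 KiB with a byte-wide source (w = 0) and a word-wide
		 * destination (w = 2): seg_max = ALIGN(0xFFFF, 4) - 4 = 65532,
		 * so the transfer needs 2 DMA jobs.
		 */
		printf("%d\n", size_2_dmalen(100 * 1024, 0, 2));
		return 0;
	}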
@@ -607,9 +771,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c, | |||
607 | } | 771 | } |
608 | 772 | ||
609 | if (i == D40_SUSPEND_MAX_IT) { | 773 | if (i == D40_SUSPEND_MAX_IT) { |
610 | dev_err(&d40c->chan.dev->device, | 774 | chan_err(d40c, |
611 | "[%s]: unable to suspend the chl %d (log: %d) status %x\n", | 775 | "unable to suspend the chl %d (log: %d) status %x\n", |
612 | __func__, d40c->phy_chan->num, d40c->log_num, | 776 | d40c->phy_chan->num, d40c->log_num, |
613 | status); | 777 | status); |
614 | dump_stack(); | 778 | dump_stack(); |
615 | ret = -EBUSY; | 779 | ret = -EBUSY; |
@@ -642,17 +806,45 @@ static void d40_term_all(struct d40_chan *d40c) | |||
642 | d40c->busy = false; | 806 | d40c->busy = false; |
643 | } | 807 | } |
644 | 808 | ||
809 | static void __d40_config_set_event(struct d40_chan *d40c, bool enable, | ||
810 | u32 event, int reg) | ||
811 | { | ||
812 | void __iomem *addr = chan_base(d40c) + reg; | ||
813 | int tries; | ||
814 | |||
815 | if (!enable) { | ||
816 | writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) | ||
817 | | ~D40_EVENTLINE_MASK(event), addr); | ||
818 | return; | ||
819 | } | ||
820 | |||
821 | /* | ||
822 | * The hardware sometimes doesn't register the enable when src and dst | ||
823 | * event lines are active on the same logical channel. Retry to ensure | ||
824 | * it does. Usually only one retry is sufficient. | ||
825 | */ | ||
826 | tries = 100; | ||
827 | while (--tries) { | ||
828 | writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) | ||
829 | | ~D40_EVENTLINE_MASK(event), addr); | ||
830 | |||
831 | if (readl(addr) & D40_EVENTLINE_MASK(event)) | ||
832 | break; | ||
833 | } | ||
834 | |||
835 | if (tries != 99) | ||
836 | dev_dbg(chan2dev(d40c), | ||
837 | "[%s] workaround enable S%cLNK (%d tries)\n", | ||
838 | __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', | ||
839 | 100 - tries); | ||
840 | |||
841 | WARN_ON(!tries); | ||
842 | } | ||
843 | |||
645 | static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) | 844 | static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) |
646 | { | 845 | { |
647 | u32 val; | ||
648 | unsigned long flags; | 846 | unsigned long flags; |
649 | 847 | ||
650 | /* Notice, that disable requires the physical channel to be stopped */ | ||
651 | if (do_enable) | ||
652 | val = D40_ACTIVATE_EVENTLINE; | ||
653 | else | ||
654 | val = D40_DEACTIVATE_EVENTLINE; | ||
655 | |||
656 | spin_lock_irqsave(&d40c->phy_chan->lock, flags); | 848 | spin_lock_irqsave(&d40c->phy_chan->lock, flags); |
657 | 849 | ||
658 | /* Enable event line connected to device (or memcpy) */ | 850 | /* Enable event line connected to device (or memcpy) */ |
@@ -660,20 +852,15 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) | |||
660 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { | 852 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { |
661 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 853 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
662 | 854 | ||
663 | writel((val << D40_EVENTLINE_POS(event)) | | 855 | __d40_config_set_event(d40c, do_enable, event, |
664 | ~D40_EVENTLINE_MASK(event), | 856 | D40_CHAN_REG_SSLNK); |
665 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
666 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
667 | D40_CHAN_REG_SSLNK); | ||
668 | } | 857 | } |
858 | |||
669 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { | 859 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { |
670 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 860 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
671 | 861 | ||
672 | writel((val << D40_EVENTLINE_POS(event)) | | 862 | __d40_config_set_event(d40c, do_enable, event, |
673 | ~D40_EVENTLINE_MASK(event), | 863 | D40_CHAN_REG_SDLNK); |
674 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
675 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
676 | D40_CHAN_REG_SDLNK); | ||
677 | } | 864 | } |
678 | 865 | ||
679 | spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); | 866 | spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); |
@@ -681,15 +868,12 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) | |||
681 | 868 | ||
682 | static u32 d40_chan_has_events(struct d40_chan *d40c) | 869 | static u32 d40_chan_has_events(struct d40_chan *d40c) |
683 | { | 870 | { |
871 | void __iomem *chanbase = chan_base(d40c); | ||
684 | u32 val; | 872 | u32 val; |
685 | 873 | ||
686 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | 874 | val = readl(chanbase + D40_CHAN_REG_SSLNK); |
687 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 875 | val |= readl(chanbase + D40_CHAN_REG_SDLNK); |
688 | D40_CHAN_REG_SSLNK); | ||
689 | 876 | ||
690 | val |= readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
691 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
692 | D40_CHAN_REG_SDLNK); | ||
693 | return val; | 877 | return val; |
694 | } | 878 | } |
695 | 879 | ||
@@ -712,7 +896,7 @@ static u32 d40_get_prmo(struct d40_chan *d40c) | |||
712 | = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, | 896 | = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, |
713 | }; | 897 | }; |
714 | 898 | ||
715 | if (d40c->log_num == D40_PHY_CHAN) | 899 | if (chan_is_physical(d40c)) |
716 | return phy_map[d40c->dma_cfg.mode_opt]; | 900 | return phy_map[d40c->dma_cfg.mode_opt]; |
717 | else | 901 | else |
718 | return log_map[d40c->dma_cfg.mode_opt]; | 902 | return log_map[d40c->dma_cfg.mode_opt]; |
@@ -726,7 +910,7 @@ static void d40_config_write(struct d40_chan *d40c) | |||
726 | /* Odd addresses are even addresses + 4 */ | 910 | /* Odd addresses are even addresses + 4 */ |
727 | addr_base = (d40c->phy_chan->num % 2) * 4; | 911 | addr_base = (d40c->phy_chan->num % 2) * 4; |
728 | /* Setup channel mode to logical or physical */ | 912 | /* Setup channel mode to logical or physical */ |
729 | var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) << | 913 | var = ((u32)(chan_is_logical(d40c)) + 1) << |
730 | D40_CHAN_POS(d40c->phy_chan->num); | 914 | D40_CHAN_POS(d40c->phy_chan->num); |
731 | writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); | 915 | writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); |
732 | 916 | ||
@@ -735,30 +919,18 @@ static void d40_config_write(struct d40_chan *d40c) | |||
735 | 919 | ||
736 | writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); | 920 | writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); |
737 | 921 | ||
738 | if (d40c->log_num != D40_PHY_CHAN) { | 922 | if (chan_is_logical(d40c)) { |
923 | int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) | ||
924 | & D40_SREG_ELEM_LOG_LIDX_MASK; | ||
925 | void __iomem *chanbase = chan_base(d40c); | ||
926 | |||
739 | /* Set default config for CFG reg */ | 927 | /* Set default config for CFG reg */ |
740 | writel(d40c->src_def_cfg, | 928 | writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG); |
741 | d40c->base->virtbase + D40_DREG_PCBASE + | 929 | writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG); |
742 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
743 | D40_CHAN_REG_SSCFG); | ||
744 | writel(d40c->dst_def_cfg, | ||
745 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
746 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
747 | D40_CHAN_REG_SDCFG); | ||
748 | 930 | ||
749 | /* Set LIDX for lcla */ | 931 | /* Set LIDX for lcla */ |
750 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & | 932 | writel(lidx, chanbase + D40_CHAN_REG_SSELT); |
751 | D40_SREG_ELEM_LOG_LIDX_MASK, | 933 | writel(lidx, chanbase + D40_CHAN_REG_SDELT); |
752 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
753 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
754 | D40_CHAN_REG_SDELT); | ||
755 | |||
756 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & | ||
757 | D40_SREG_ELEM_LOG_LIDX_MASK, | ||
758 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
759 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
760 | D40_CHAN_REG_SSELT); | ||
761 | |||
762 | } | 934 | } |
763 | } | 935 | } |
764 | 936 | ||
@@ -766,15 +938,15 @@ static u32 d40_residue(struct d40_chan *d40c) | |||
766 | { | 938 | { |
767 | u32 num_elt; | 939 | u32 num_elt; |
768 | 940 | ||
769 | if (d40c->log_num != D40_PHY_CHAN) | 941 | if (chan_is_logical(d40c)) |
770 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) | 942 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) |
771 | >> D40_MEM_LCSP2_ECNT_POS; | 943 | >> D40_MEM_LCSP2_ECNT_POS; |
772 | else | 944 | else { |
773 | num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + | 945 | u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT); |
774 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 946 | num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK) |
775 | D40_CHAN_REG_SDELT) & | 947 | >> D40_SREG_ELEM_PHY_ECNT_POS; |
776 | D40_SREG_ELEM_PHY_ECNT_MASK) >> | 948 | } |
777 | D40_SREG_ELEM_PHY_ECNT_POS; | 949 | |
778 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); | 950 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); |
779 | } | 951 | } |
780 | 952 | ||
@@ -782,20 +954,17 @@ static bool d40_tx_is_linked(struct d40_chan *d40c) | |||
782 | { | 954 | { |
783 | bool is_link; | 955 | bool is_link; |
784 | 956 | ||
785 | if (d40c->log_num != D40_PHY_CHAN) | 957 | if (chan_is_logical(d40c)) |
786 | is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; | 958 | is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; |
787 | else | 959 | else |
788 | is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + | 960 | is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK) |
789 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 961 | & D40_SREG_LNK_PHYS_LNK_MASK; |
790 | D40_CHAN_REG_SDLNK) & | 962 | |
791 | D40_SREG_LNK_PHYS_LNK_MASK; | ||
792 | return is_link; | 963 | return is_link; |
793 | } | 964 | } |
794 | 965 | ||
795 | static int d40_pause(struct dma_chan *chan) | 966 | static int d40_pause(struct d40_chan *d40c) |
796 | { | 967 | { |
797 | struct d40_chan *d40c = | ||
798 | container_of(chan, struct d40_chan, chan); | ||
799 | int res = 0; | 968 | int res = 0; |
800 | unsigned long flags; | 969 | unsigned long flags; |
801 | 970 | ||
@@ -806,7 +975,7 @@ static int d40_pause(struct dma_chan *chan) | |||
806 | 975 | ||
807 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 976 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
808 | if (res == 0) { | 977 | if (res == 0) { |
809 | if (d40c->log_num != D40_PHY_CHAN) { | 978 | if (chan_is_logical(d40c)) { |
810 | d40_config_set_event(d40c, false); | 979 | d40_config_set_event(d40c, false); |
811 | /* Resume the other logical channels if any */ | 980 | /* Resume the other logical channels if any */ |
812 | if (d40_chan_has_events(d40c)) | 981 | if (d40_chan_has_events(d40c)) |
@@ -819,10 +988,8 @@ static int d40_pause(struct dma_chan *chan) | |||
819 | return res; | 988 | return res; |
820 | } | 989 | } |
821 | 990 | ||
822 | static int d40_resume(struct dma_chan *chan) | 991 | static int d40_resume(struct d40_chan *d40c) |
823 | { | 992 | { |
824 | struct d40_chan *d40c = | ||
825 | container_of(chan, struct d40_chan, chan); | ||
826 | int res = 0; | 993 | int res = 0; |
827 | unsigned long flags; | 994 | unsigned long flags; |
828 | 995 | ||
@@ -832,7 +999,7 @@ static int d40_resume(struct dma_chan *chan) | |||
832 | spin_lock_irqsave(&d40c->lock, flags); | 999 | spin_lock_irqsave(&d40c->lock, flags); |
833 | 1000 | ||
834 | if (d40c->base->rev == 0) | 1001 | if (d40c->base->rev == 0) |
835 | if (d40c->log_num != D40_PHY_CHAN) { | 1002 | if (chan_is_logical(d40c)) { |
836 | res = d40_channel_execute_command(d40c, | 1003 | res = d40_channel_execute_command(d40c, |
837 | D40_DMA_SUSPEND_REQ); | 1004 | D40_DMA_SUSPEND_REQ); |
838 | goto no_suspend; | 1005 | goto no_suspend; |
@@ -841,7 +1008,7 @@ static int d40_resume(struct dma_chan *chan) | |||
841 | /* If there are bytes left to transfer or a linked tx, resume the job */ | 1008 | /* If there are bytes left to transfer or a linked tx, resume the job */ |
842 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { | 1009 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { |
843 | 1010 | ||
844 | if (d40c->log_num != D40_PHY_CHAN) | 1011 | if (chan_is_logical(d40c)) |
845 | d40_config_set_event(d40c, true); | 1012 | d40_config_set_event(d40c, true); |
846 | 1013 | ||
847 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); | 1014 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); |
@@ -852,75 +1019,20 @@ no_suspend: | |||
852 | return res; | 1019 | return res; |
853 | } | 1020 | } |
854 | 1021 | ||
855 | static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d) | 1022 | static int d40_terminate_all(struct d40_chan *chan) |
856 | { | ||
857 | /* TODO: Write */ | ||
858 | } | ||
859 | |||
860 | static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d) | ||
861 | { | 1023 | { |
862 | struct d40_desc *d40d_prev = NULL; | 1024 | unsigned long flags; |
863 | int i; | 1025 | int ret = 0; |
864 | u32 val; | ||
865 | |||
866 | if (!list_empty(&d40c->queue)) | ||
867 | d40d_prev = d40_last_queued(d40c); | ||
868 | else if (!list_empty(&d40c->active)) | ||
869 | d40d_prev = d40_first_active_get(d40c); | ||
870 | |||
871 | if (!d40d_prev) | ||
872 | return; | ||
873 | |||
874 | /* Here we try to join this job with previous jobs */ | ||
875 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
876 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
877 | D40_CHAN_REG_SSLNK); | ||
878 | |||
879 | /* Figure out which link we're currently transmitting */ | ||
880 | for (i = 0; i < d40d_prev->lli_len; i++) | ||
881 | if (val == d40d_prev->lli_phy.src[i].reg_lnk) | ||
882 | break; | ||
883 | |||
884 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
885 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
886 | D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS; | ||
887 | |||
888 | if (i == (d40d_prev->lli_len - 1) && val > 0) { | ||
889 | /* Change the current one */ | ||
890 | writel(virt_to_phys(d40d->lli_phy.src), | ||
891 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
892 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
893 | D40_CHAN_REG_SSLNK); | ||
894 | writel(virt_to_phys(d40d->lli_phy.dst), | ||
895 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
896 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
897 | D40_CHAN_REG_SDLNK); | ||
898 | |||
899 | d40d->is_hw_linked = true; | ||
900 | |||
901 | } else if (i < d40d_prev->lli_len) { | ||
902 | (void) dma_unmap_single(d40c->base->dev, | ||
903 | virt_to_phys(d40d_prev->lli_phy.src), | ||
904 | d40d_prev->lli_pool.size, | ||
905 | DMA_TO_DEVICE); | ||
906 | 1026 | ||
907 | /* Keep the settings */ | 1027 | ret = d40_pause(chan); |
908 | val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk & | 1028 | if (!ret && chan_is_physical(chan)) |
909 | ~D40_SREG_LNK_PHYS_LNK_MASK; | 1029 | ret = d40_channel_execute_command(chan, D40_DMA_STOP); |
910 | d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk = | ||
911 | val | virt_to_phys(d40d->lli_phy.src); | ||
912 | 1030 | ||
913 | val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk & | 1031 | spin_lock_irqsave(&chan->lock, flags); |
914 | ~D40_SREG_LNK_PHYS_LNK_MASK; | 1032 | d40_term_all(chan); |
915 | d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk = | 1033 | spin_unlock_irqrestore(&chan->lock, flags); |
916 | val | virt_to_phys(d40d->lli_phy.dst); | ||
917 | 1034 | ||
918 | (void) dma_map_single(d40c->base->dev, | 1035 | return ret; |
919 | d40d_prev->lli_phy.src, | ||
920 | d40d_prev->lli_pool.size, | ||
921 | DMA_TO_DEVICE); | ||
922 | d40d->is_hw_linked = true; | ||
923 | } | ||
924 | } | 1036 | } |
925 | 1037 | ||
926 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | 1038 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) |
@@ -931,8 +1043,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | |||
931 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); | 1043 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); |
932 | unsigned long flags; | 1044 | unsigned long flags; |
933 | 1045 | ||
934 | (void) d40_pause(&d40c->chan); | ||
935 | |||
936 | spin_lock_irqsave(&d40c->lock, flags); | 1046 | spin_lock_irqsave(&d40c->lock, flags); |
937 | 1047 | ||
938 | d40c->chan.cookie++; | 1048 | d40c->chan.cookie++; |
@@ -942,17 +1052,10 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | |||
942 | 1052 | ||
943 | d40d->txd.cookie = d40c->chan.cookie; | 1053 | d40d->txd.cookie = d40c->chan.cookie; |
944 | 1054 | ||
945 | if (d40c->log_num == D40_PHY_CHAN) | ||
946 | d40_tx_submit_phy(d40c, d40d); | ||
947 | else | ||
948 | d40_tx_submit_log(d40c, d40d); | ||
949 | |||
950 | d40_desc_queue(d40c, d40d); | 1055 | d40_desc_queue(d40c, d40d); |
951 | 1056 | ||
952 | spin_unlock_irqrestore(&d40c->lock, flags); | 1057 | spin_unlock_irqrestore(&d40c->lock, flags); |
953 | 1058 | ||
954 | (void) d40_resume(&d40c->chan); | ||
955 | |||
956 | return tx->cookie; | 1059 | return tx->cookie; |
957 | } | 1060 | } |
958 | 1061 | ||
@@ -961,7 +1064,7 @@ static int d40_start(struct d40_chan *d40c) | |||
961 | if (d40c->base->rev == 0) { | 1064 | if (d40c->base->rev == 0) { |
962 | int err; | 1065 | int err; |
963 | 1066 | ||
964 | if (d40c->log_num != D40_PHY_CHAN) { | 1067 | if (chan_is_logical(d40c)) { |
965 | err = d40_channel_execute_command(d40c, | 1068 | err = d40_channel_execute_command(d40c, |
966 | D40_DMA_SUSPEND_REQ); | 1069 | D40_DMA_SUSPEND_REQ); |
967 | if (err) | 1070 | if (err) |
@@ -969,7 +1072,7 @@ static int d40_start(struct d40_chan *d40c) | |||
969 | } | 1072 | } |
970 | } | 1073 | } |
971 | 1074 | ||
972 | if (d40c->log_num != D40_PHY_CHAN) | 1075 | if (chan_is_logical(d40c)) |
973 | d40_config_set_event(d40c, true); | 1076 | d40_config_set_event(d40c, true); |
974 | 1077 | ||
975 | return d40_channel_execute_command(d40c, D40_DMA_RUN); | 1078 | return d40_channel_execute_command(d40c, D40_DMA_RUN); |
@@ -992,21 +1095,14 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c) | |||
992 | /* Add to active queue */ | 1095 | /* Add to active queue */ |
993 | d40_desc_submit(d40c, d40d); | 1096 | d40_desc_submit(d40c, d40d); |
994 | 1097 | ||
995 | /* | 1098 | /* Initiate DMA job */ |
996 | * If this job is already linked in hw, | 1099 | d40_desc_load(d40c, d40d); |
997 | * do not submit it. | ||
998 | */ | ||
999 | |||
1000 | if (!d40d->is_hw_linked) { | ||
1001 | /* Initiate DMA job */ | ||
1002 | d40_desc_load(d40c, d40d); | ||
1003 | 1100 | ||
1004 | /* Start dma job */ | 1101 | /* Start dma job */ |
1005 | err = d40_start(d40c); | 1102 | err = d40_start(d40c); |
1006 | 1103 | ||
1007 | if (err) | 1104 | if (err) |
1008 | return NULL; | 1105 | return NULL; |
1009 | } | ||
1010 | } | 1106 | } |
1011 | 1107 | ||
1012 | return d40d; | 1108 | return d40d; |
@@ -1023,17 +1119,36 @@ static void dma_tc_handle(struct d40_chan *d40c) | |||
1023 | if (d40d == NULL) | 1119 | if (d40d == NULL) |
1024 | return; | 1120 | return; |
1025 | 1121 | ||
1026 | d40_lcla_free_all(d40c, d40d); | 1122 | if (d40d->cyclic) { |
1123 | /* | ||
1124 | * If this was a partially loaded list, we need to reload | ||
1125 | * it, but only once the whole list has completed. We must | ||
1126 | * check for completion here because the interrupt fires for | ||
1127 | * every link, not just the last one. | ||
1128 | */ | ||
1129 | if (d40d->lli_current < d40d->lli_len | ||
1130 | && !d40_tx_is_linked(d40c) | ||
1131 | && !d40_residue(d40c)) { | ||
1132 | d40_lcla_free_all(d40c, d40d); | ||
1133 | d40_desc_load(d40c, d40d); | ||
1134 | (void) d40_start(d40c); | ||
1027 | 1135 | ||
1028 | if (d40d->lli_current < d40d->lli_len) { | 1136 | if (d40d->lli_current == d40d->lli_len) |
1029 | d40_desc_load(d40c, d40d); | 1137 | d40d->lli_current = 0; |
1030 | /* Start dma job */ | 1138 | } |
1031 | (void) d40_start(d40c); | 1139 | } else { |
1032 | return; | 1140 | d40_lcla_free_all(d40c, d40d); |
1033 | } | ||
1034 | 1141 | ||
1035 | if (d40_queue_start(d40c) == NULL) | 1142 | if (d40d->lli_current < d40d->lli_len) { |
1036 | d40c->busy = false; | 1143 | d40_desc_load(d40c, d40d); |
1144 | /* Start dma job */ | ||
1145 | (void) d40_start(d40c); | ||
1146 | return; | ||
1147 | } | ||
1148 | |||
1149 | if (d40_queue_start(d40c) == NULL) | ||
1150 | d40c->busy = false; | ||
1151 | } | ||
1037 | 1152 | ||
1038 | d40c->pending_tx++; | 1153 | d40c->pending_tx++; |
1039 | tasklet_schedule(&d40c->tasklet); | 1154 | tasklet_schedule(&d40c->tasklet); |
@@ -1052,11 +1167,11 @@ static void dma_tasklet(unsigned long data) | |||
1052 | 1167 | ||
1053 | /* Get first active entry from list */ | 1168 | /* Get first active entry from list */ |
1054 | d40d = d40_first_active_get(d40c); | 1169 | d40d = d40_first_active_get(d40c); |
1055 | |||
1056 | if (d40d == NULL) | 1170 | if (d40d == NULL) |
1057 | goto err; | 1171 | goto err; |
1058 | 1172 | ||
1059 | d40c->completed = d40d->txd.cookie; | 1173 | if (!d40d->cyclic) |
1174 | d40c->completed = d40d->txd.cookie; | ||
1060 | 1175 | ||
1061 | /* | 1176 | /* |
1062 | * If terminating a channel pending_tx is set to zero. | 1177 | * If terminating a channel pending_tx is set to zero. |
@@ -1071,16 +1186,18 @@ static void dma_tasklet(unsigned long data) | |||
1071 | callback = d40d->txd.callback; | 1186 | callback = d40d->txd.callback; |
1072 | callback_param = d40d->txd.callback_param; | 1187 | callback_param = d40d->txd.callback_param; |
1073 | 1188 | ||
1074 | if (async_tx_test_ack(&d40d->txd)) { | 1189 | if (!d40d->cyclic) { |
1075 | d40_pool_lli_free(d40d); | 1190 | if (async_tx_test_ack(&d40d->txd)) { |
1076 | d40_desc_remove(d40d); | 1191 | d40_pool_lli_free(d40c, d40d); |
1077 | d40_desc_free(d40c, d40d); | ||
1078 | } else { | ||
1079 | if (!d40d->is_in_client_list) { | ||
1080 | d40_desc_remove(d40d); | 1192 | d40_desc_remove(d40d); |
1081 | d40_lcla_free_all(d40c, d40d); | 1193 | d40_desc_free(d40c, d40d); |
1082 | list_add_tail(&d40d->node, &d40c->client); | 1194 | } else { |
1083 | d40d->is_in_client_list = true; | 1195 | if (!d40d->is_in_client_list) { |
1196 | d40_desc_remove(d40d); | ||
1197 | d40_lcla_free_all(d40c, d40d); | ||
1198 | list_add_tail(&d40d->node, &d40c->client); | ||
1199 | d40d->is_in_client_list = true; | ||
1200 | } | ||
1084 | } | 1201 | } |
1085 | } | 1202 | } |
1086 | 1203 | ||
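The cyclic special-casing above matches the dmaengine contract for cyclic transfers: the descriptor never completes, so the cookie is never advanced, and the client's callback instead fires once per period. A client-side sketch against the device_prep_dma_cyclic hook of this era (the channel, buffer and callback names are illustrative):

	#include <linux/dmaengine.h>

	static void example_start_cyclic(struct dma_chan *chan,
					 dma_addr_t buf_dma, size_t buf_len,
					 size_t period_len,
					 dma_async_tx_callback period_done,
					 void *ctx)
	{
		struct dma_async_tx_descriptor *desc;

		desc = chan->device->device_prep_dma_cyclic(chan, buf_dma,
							    buf_len, period_len,
							    DMA_FROM_DEVICE);
		if (!desc)
			return;

		desc->callback = period_done;	/* runs once per period */
		desc->callback_param = ctx;
		desc->tx_submit(desc);		/* cookie never "completes" */
		dma_async_issue_pending(chan);
	}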
@@ -1157,9 +1274,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) | |||
1157 | if (!il[row].is_error) | 1274 | if (!il[row].is_error) |
1158 | dma_tc_handle(d40c); | 1275 | dma_tc_handle(d40c); |
1159 | else | 1276 | else |
1160 | dev_err(base->dev, | 1277 | d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n", |
1161 | "[%s] IRQ chan: %ld offset %d idx %d\n", | 1278 | chan, il[row].offset, idx); |
1162 | __func__, chan, il[row].offset, idx); | ||
1163 | 1279 | ||
1164 | spin_unlock(&d40c->lock); | 1280 | spin_unlock(&d40c->lock); |
1165 | } | 1281 | } |
@@ -1178,8 +1294,7 @@ static int d40_validate_conf(struct d40_chan *d40c, | |||
1178 | bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; | 1294 | bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; |
1179 | 1295 | ||
1180 | if (!conf->dir) { | 1296 | if (!conf->dir) { |
1181 | dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n", | 1297 | chan_err(d40c, "Invalid direction.\n"); |
1182 | __func__); | ||
1183 | res = -EINVAL; | 1298 | res = -EINVAL; |
1184 | } | 1299 | } |
1185 | 1300 | ||
@@ -1187,46 +1302,40 @@ static int d40_validate_conf(struct d40_chan *d40c, | |||
1187 | d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && | 1302 | d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && |
1188 | d40c->runtime_addr == 0) { | 1303 | d40c->runtime_addr == 0) { |
1189 | 1304 | ||
1190 | dev_err(&d40c->chan.dev->device, | 1305 | chan_err(d40c, "Invalid TX channel address (%d)\n", |
1191 | "[%s] Invalid TX channel address (%d)\n", | 1306 | conf->dst_dev_type); |
1192 | __func__, conf->dst_dev_type); | ||
1193 | res = -EINVAL; | 1307 | res = -EINVAL; |
1194 | } | 1308 | } |
1195 | 1309 | ||
1196 | if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && | 1310 | if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && |
1197 | d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && | 1311 | d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && |
1198 | d40c->runtime_addr == 0) { | 1312 | d40c->runtime_addr == 0) { |
1199 | dev_err(&d40c->chan.dev->device, | 1313 | chan_err(d40c, "Invalid RX channel address (%d)\n", |
1200 | "[%s] Invalid RX channel address (%d)\n", | 1314 | conf->src_dev_type); |
1201 | __func__, conf->src_dev_type); | ||
1202 | res = -EINVAL; | 1315 | res = -EINVAL; |
1203 | } | 1316 | } |
1204 | 1317 | ||
1205 | if (conf->dir == STEDMA40_MEM_TO_PERIPH && | 1318 | if (conf->dir == STEDMA40_MEM_TO_PERIPH && |
1206 | dst_event_group == STEDMA40_DEV_DST_MEMORY) { | 1319 | dst_event_group == STEDMA40_DEV_DST_MEMORY) { |
1207 | dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", | 1320 | chan_err(d40c, "Invalid dst\n"); |
1208 | __func__); | ||
1209 | res = -EINVAL; | 1321 | res = -EINVAL; |
1210 | } | 1322 | } |
1211 | 1323 | ||
1212 | if (conf->dir == STEDMA40_PERIPH_TO_MEM && | 1324 | if (conf->dir == STEDMA40_PERIPH_TO_MEM && |
1213 | src_event_group == STEDMA40_DEV_SRC_MEMORY) { | 1325 | src_event_group == STEDMA40_DEV_SRC_MEMORY) { |
1214 | dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", | 1326 | chan_err(d40c, "Invalid src\n"); |
1215 | __func__); | ||
1216 | res = -EINVAL; | 1327 | res = -EINVAL; |
1217 | } | 1328 | } |
1218 | 1329 | ||
1219 | if (src_event_group == STEDMA40_DEV_SRC_MEMORY && | 1330 | if (src_event_group == STEDMA40_DEV_SRC_MEMORY && |
1220 | dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { | 1331 | dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { |
1221 | dev_err(&d40c->chan.dev->device, | 1332 | chan_err(d40c, "No event line\n"); |
1222 | "[%s] No event line\n", __func__); | ||
1223 | res = -EINVAL; | 1333 | res = -EINVAL; |
1224 | } | 1334 | } |
1225 | 1335 | ||
1226 | if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && | 1336 | if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && |
1227 | (src_event_group != dst_event_group)) { | 1337 | (src_event_group != dst_event_group)) { |
1228 | dev_err(&d40c->chan.dev->device, | 1338 | chan_err(d40c, "Invalid event group\n"); |
1229 | "[%s] Invalid event group\n", __func__); | ||
1230 | res = -EINVAL; | 1339 | res = -EINVAL; |
1231 | } | 1340 | } |
1232 | 1341 | ||
@@ -1235,9 +1344,20 @@ static int d40_validate_conf(struct d40_chan *d40c, | |||
1235 | * DMAC HW supports it. Will be added to this driver, | 1344 | * DMAC HW supports it. Will be added to this driver, |
1236 | * in case any dma client requires it. | 1345 | * in case any dma client requires it. |
1237 | */ | 1346 | */ |
1238 | dev_err(&d40c->chan.dev->device, | 1347 | chan_err(d40c, "periph to periph not supported\n"); |
1239 | "[%s] periph to periph not supported\n", | 1348 | res = -EINVAL; |
1240 | __func__); | 1349 | } |
1350 | |||
1351 | if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * | ||
1352 | (1 << conf->src_info.data_width) != | ||
1353 | d40_psize_2_burst_size(is_log, conf->dst_info.psize) * | ||
1354 | (1 << conf->dst_info.data_width)) { | ||
1355 | /* | ||
1356 | * The DMAC hardware only supports | ||
1357 | * src (burst x width) == dst (burst x width) | ||
1358 | */ | ||
1359 | |||
1360 | chan_err(d40c, "src (burst x width) != dst (burst x width)\n"); | ||
1241 | res = -EINVAL; | 1361 | res = -EINVAL; |
1242 | } | 1362 | } |
1243 | 1363 | ||
@@ -1440,8 +1560,7 @@ static int d40_config_memcpy(struct d40_chan *d40c) | |||
1440 | dma_has_cap(DMA_SLAVE, cap)) { | 1560 | dma_has_cap(DMA_SLAVE, cap)) { |
1441 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; | 1561 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; |
1442 | } else { | 1562 | } else { |
1443 | dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n", | 1563 | chan_err(d40c, "No memcpy\n"); |
1444 | __func__); | ||
1445 | return -EINVAL; | 1564 | return -EINVAL; |
1446 | } | 1565 | } |
1447 | 1566 | ||
@@ -1466,21 +1585,19 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1466 | /* Release client owned descriptors */ | 1585 | /* Release client owned descriptors */ |
1467 | if (!list_empty(&d40c->client)) | 1586 | if (!list_empty(&d40c->client)) |
1468 | list_for_each_entry_safe(d, _d, &d40c->client, node) { | 1587 | list_for_each_entry_safe(d, _d, &d40c->client, node) { |
1469 | d40_pool_lli_free(d); | 1588 | d40_pool_lli_free(d40c, d); |
1470 | d40_desc_remove(d); | 1589 | d40_desc_remove(d); |
1471 | d40_desc_free(d40c, d); | 1590 | d40_desc_free(d40c, d); |
1472 | } | 1591 | } |
1473 | 1592 | ||
1474 | if (phy == NULL) { | 1593 | if (phy == NULL) { |
1475 | dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", | 1594 | chan_err(d40c, "phy == null\n"); |
1476 | __func__); | ||
1477 | return -EINVAL; | 1595 | return -EINVAL; |
1478 | } | 1596 | } |
1479 | 1597 | ||
1480 | if (phy->allocated_src == D40_ALLOC_FREE && | 1598 | if (phy->allocated_src == D40_ALLOC_FREE && |
1481 | phy->allocated_dst == D40_ALLOC_FREE) { | 1599 | phy->allocated_dst == D40_ALLOC_FREE) { |
1482 | dev_err(&d40c->chan.dev->device, "[%s] channel already free\n", | 1600 | chan_err(d40c, "channel already free\n"); |
1483 | __func__); | ||
1484 | return -EINVAL; | 1601 | return -EINVAL; |
1485 | } | 1602 | } |
1486 | 1603 | ||
@@ -1492,19 +1609,17 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1492 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 1609 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
1493 | is_src = true; | 1610 | is_src = true; |
1494 | } else { | 1611 | } else { |
1495 | dev_err(&d40c->chan.dev->device, | 1612 | chan_err(d40c, "Unknown direction\n"); |
1496 | "[%s] Unknown direction\n", __func__); | ||
1497 | return -EINVAL; | 1613 | return -EINVAL; |
1498 | } | 1614 | } |
1499 | 1615 | ||
1500 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1616 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
1501 | if (res) { | 1617 | if (res) { |
1502 | dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n", | 1618 | chan_err(d40c, "suspend failed\n"); |
1503 | __func__); | ||
1504 | return res; | 1619 | return res; |
1505 | } | 1620 | } |
1506 | 1621 | ||
1507 | if (d40c->log_num != D40_PHY_CHAN) { | 1622 | if (chan_is_logical(d40c)) { |
1508 | /* Release logical channel, deactivate the event line */ | 1623 | /* Release logical channel, deactivate the event line */ |
1509 | 1624 | ||
1510 | d40_config_set_event(d40c, false); | 1625 | d40_config_set_event(d40c, false); |
@@ -1520,9 +1635,8 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1520 | res = d40_channel_execute_command(d40c, | 1635 | res = d40_channel_execute_command(d40c, |
1521 | D40_DMA_RUN); | 1636 | D40_DMA_RUN); |
1522 | if (res) { | 1637 | if (res) { |
1523 | dev_err(&d40c->chan.dev->device, | 1638 | chan_err(d40c, |
1524 | "[%s] Executing RUN command\n", | 1639 | "Executing RUN command\n"); |
1525 | __func__); | ||
1526 | return res; | 1640 | return res; |
1527 | } | 1641 | } |
1528 | } | 1642 | } |
@@ -1535,8 +1649,7 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1535 | /* Release physical channel */ | 1649 | /* Release physical channel */ |
1536 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); | 1650 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); |
1537 | if (res) { | 1651 | if (res) { |
1538 | dev_err(&d40c->chan.dev->device, | 1652 | chan_err(d40c, "Failed to stop channel\n"); |
1539 | "[%s] Failed to stop channel\n", __func__); | ||
1540 | return res; | 1653 | return res; |
1541 | } | 1654 | } |
1542 | d40c->phy_chan = NULL; | 1655 | d40c->phy_chan = NULL; |
@@ -1548,6 +1661,7 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1548 | 1661 | ||
1549 | static bool d40_is_paused(struct d40_chan *d40c) | 1662 | static bool d40_is_paused(struct d40_chan *d40c) |
1550 | { | 1663 | { |
1664 | void __iomem *chanbase = chan_base(d40c); | ||
1551 | bool is_paused = false; | 1665 | bool is_paused = false; |
1552 | unsigned long flags; | 1666 | unsigned long flags; |
1553 | void __iomem *active_reg; | 1667 | void __iomem *active_reg; |
@@ -1556,7 +1670,7 @@ static bool d40_is_paused(struct d40_chan *d40c) | |||
1556 | 1670 | ||
1557 | spin_lock_irqsave(&d40c->lock, flags); | 1671 | spin_lock_irqsave(&d40c->lock, flags); |
1558 | 1672 | ||
1559 | if (d40c->log_num == D40_PHY_CHAN) { | 1673 | if (chan_is_physical(d40c)) { |
1560 | if (d40c->phy_chan->num % 2 == 0) | 1674 | if (d40c->phy_chan->num % 2 == 0) |
1561 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; | 1675 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; |
1562 | else | 1676 | else |
@@ -1574,17 +1688,12 @@ static bool d40_is_paused(struct d40_chan *d40c) | |||
1574 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 1688 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
1575 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | 1689 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
1576 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 1690 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
1577 | status = readl(d40c->base->virtbase + D40_DREG_PCBASE + | 1691 | status = readl(chanbase + D40_CHAN_REG_SDLNK); |
1578 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
1579 | D40_CHAN_REG_SDLNK); | ||
1580 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | 1692 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { |
1581 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 1693 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
1582 | status = readl(d40c->base->virtbase + D40_DREG_PCBASE + | 1694 | status = readl(chanbase + D40_CHAN_REG_SSLNK); |
1583 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
1584 | D40_CHAN_REG_SSLNK); | ||
1585 | } else { | 1695 | } else { |
1586 | dev_err(&d40c->chan.dev->device, | 1696 | chan_err(d40c, "Unknown direction\n"); |
1587 | "[%s] Unknown direction\n", __func__); | ||
1588 | goto _exit; | 1697 | goto _exit; |
1589 | } | 1698 | } |
1590 | 1699 | ||
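Editor's note: chan_base(), used by the converted readl() sites above, is introduced earlier in this patch. From the open-coded offsets this hunk removes, a plausible minimal form is:

/* Editor's sketch: each physical channel owns a bank of registers
 * D40_DREG_PCDELTA bytes wide, starting at D40_DREG_PCBASE. */
static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}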
@@ -1614,102 +1723,184 @@ static u32 stedma40_residue(struct dma_chan *chan) | |||
1614 | return bytes_left; | 1723 | return bytes_left; |
1615 | } | 1724 | } |
1616 | 1725 | ||
1617 | struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | 1726 | static int |
1618 | struct scatterlist *sgl_dst, | 1727 | d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, |
1619 | struct scatterlist *sgl_src, | 1728 | struct scatterlist *sg_src, struct scatterlist *sg_dst, |
1620 | unsigned int sgl_len, | 1729 | unsigned int sg_len, dma_addr_t src_dev_addr, |
1621 | unsigned long dma_flags) | 1730 | dma_addr_t dst_dev_addr) |
1622 | { | 1731 | { |
1623 | int res; | 1732 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
1624 | struct d40_desc *d40d; | 1733 | struct stedma40_half_channel_info *src_info = &cfg->src_info; |
1625 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 1734 | struct stedma40_half_channel_info *dst_info = &cfg->dst_info; |
1626 | chan); | 1735 | int ret; |
1627 | unsigned long flags; | ||
1628 | 1736 | ||
1629 | if (d40c->phy_chan == NULL) { | 1737 | ret = d40_log_sg_to_lli(sg_src, sg_len, |
1630 | dev_err(&d40c->chan.dev->device, | 1738 | src_dev_addr, |
1631 | "[%s] Unallocated channel.\n", __func__); | 1739 | desc->lli_log.src, |
1632 | return ERR_PTR(-EINVAL); | 1740 | chan->log_def.lcsp1, |
1633 | } | 1741 | src_info->data_width, |
1742 | dst_info->data_width); | ||
1634 | 1743 | ||
1635 | spin_lock_irqsave(&d40c->lock, flags); | 1744 | ret = d40_log_sg_to_lli(sg_dst, sg_len, |
1636 | d40d = d40_desc_get(d40c); | 1745 | dst_dev_addr, |
1746 | desc->lli_log.dst, | ||
1747 | chan->log_def.lcsp3, | ||
1748 | dst_info->data_width, | ||
1749 | src_info->data_width); | ||
1637 | 1750 | ||
1638 | if (d40d == NULL) | 1751 | return ret < 0 ? ret : 0; |
1752 | } | ||
1753 | |||
1754 | static int | ||
1755 | d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, | ||
1756 | struct scatterlist *sg_src, struct scatterlist *sg_dst, | ||
1757 | unsigned int sg_len, dma_addr_t src_dev_addr, | ||
1758 | dma_addr_t dst_dev_addr) | ||
1759 | { | ||
1760 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | ||
1761 | struct stedma40_half_channel_info *src_info = &cfg->src_info; | ||
1762 | struct stedma40_half_channel_info *dst_info = &cfg->dst_info; | ||
1763 | unsigned long flags = 0; | ||
1764 | int ret; | ||
1765 | |||
1766 | if (desc->cyclic) | ||
1767 | flags |= LLI_CYCLIC | LLI_TERM_INT; | ||
1768 | |||
1769 | ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr, | ||
1770 | desc->lli_phy.src, | ||
1771 | virt_to_phys(desc->lli_phy.src), | ||
1772 | chan->src_def_cfg, | ||
1773 | src_info, dst_info, flags); | ||
1774 | |||
1775 | ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, | ||
1776 | desc->lli_phy.dst, | ||
1777 | virt_to_phys(desc->lli_phy.dst), | ||
1778 | chan->dst_def_cfg, | ||
1779 | dst_info, src_info, flags); | ||
1780 | |||
1781 | dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, | ||
1782 | desc->lli_pool.size, DMA_TO_DEVICE); | ||
1783 | |||
1784 | return ret < 0 ? ret : 0; | ||
1785 | } | ||
1786 | |||
1787 | |||
1788 | static struct d40_desc * | ||
1789 | d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, | ||
1790 | unsigned int sg_len, unsigned long dma_flags) | ||
1791 | { | ||
1792 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | ||
1793 | struct d40_desc *desc; | ||
1794 | int ret; | ||
1795 | |||
1796 | desc = d40_desc_get(chan); | ||
1797 | if (!desc) | ||
1798 | return NULL; | ||
1799 | |||
1800 | desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, | ||
1801 | cfg->dst_info.data_width); | ||
1802 | if (desc->lli_len < 0) { | ||
1803 | chan_err(chan, "Unaligned size\n"); | ||
1639 | goto err; | 1804 | goto err; |
1805 | } | ||
1640 | 1806 | ||
1641 | d40d->lli_len = sgl_len; | 1807 | ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); |
1642 | d40d->lli_current = 0; | 1808 | if (ret < 0) { |
1643 | d40d->txd.flags = dma_flags; | 1809 | chan_err(chan, "Could not allocate lli\n"); |
1810 | goto err; | ||
1811 | } | ||
1644 | 1812 | ||
1645 | if (d40c->log_num != D40_PHY_CHAN) { | ||
1646 | 1813 | ||
1647 | if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { | 1814 | desc->lli_current = 0; |
1648 | dev_err(&d40c->chan.dev->device, | 1815 | desc->txd.flags = dma_flags; |
1649 | "[%s] Out of memory\n", __func__); | 1816 | desc->txd.tx_submit = d40_tx_submit; |
1650 | goto err; | ||
1651 | } | ||
1652 | 1817 | ||
1653 | (void) d40_log_sg_to_lli(sgl_src, | 1818 | dma_async_tx_descriptor_init(&desc->txd, &chan->chan); |
1654 | sgl_len, | 1819 | |
1655 | d40d->lli_log.src, | 1820 | return desc; |
1656 | d40c->log_def.lcsp1, | 1821 | |
1657 | d40c->dma_cfg.src_info.data_width); | 1822 | err: |
1658 | 1823 | d40_desc_free(chan, desc); | |
1659 | (void) d40_log_sg_to_lli(sgl_dst, | 1824 | return NULL; |
1660 | sgl_len, | 1825 | } |
1661 | d40d->lli_log.dst, | 1826 | |
1662 | d40c->log_def.lcsp3, | 1827 | static dma_addr_t |
1663 | d40c->dma_cfg.dst_info.data_width); | 1828 | d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) |
1664 | } else { | 1829 | { |
1665 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { | 1830 | struct stedma40_platform_data *plat = chan->base->plat_data; |
1666 | dev_err(&d40c->chan.dev->device, | 1831 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
1667 | "[%s] Out of memory\n", __func__); | 1832 | dma_addr_t addr; |
1668 | goto err; | ||
1669 | } | ||
1670 | 1833 | ||
1671 | res = d40_phy_sg_to_lli(sgl_src, | 1834 | if (chan->runtime_addr) |
1672 | sgl_len, | 1835 | return chan->runtime_addr; |
1673 | 0, | ||
1674 | d40d->lli_phy.src, | ||
1675 | virt_to_phys(d40d->lli_phy.src), | ||
1676 | d40c->src_def_cfg, | ||
1677 | d40c->dma_cfg.src_info.data_width, | ||
1678 | d40c->dma_cfg.src_info.psize); | ||
1679 | 1836 | ||
1680 | if (res < 0) | 1837 | if (direction == DMA_FROM_DEVICE) |
1681 | goto err; | 1838 | addr = plat->dev_rx[cfg->src_dev_type]; |
1839 | else if (direction == DMA_TO_DEVICE) | ||
1840 | addr = plat->dev_tx[cfg->dst_dev_type]; | ||
1682 | 1841 | ||
1683 | res = d40_phy_sg_to_lli(sgl_dst, | 1842 | return addr; |
1684 | sgl_len, | 1843 | } |
1685 | 0, | ||
1686 | d40d->lli_phy.dst, | ||
1687 | virt_to_phys(d40d->lli_phy.dst), | ||
1688 | d40c->dst_def_cfg, | ||
1689 | d40c->dma_cfg.dst_info.data_width, | ||
1690 | d40c->dma_cfg.dst_info.psize); | ||
1691 | 1844 | ||
1692 | if (res < 0) | 1845 | static struct dma_async_tx_descriptor * |
1693 | goto err; | 1846 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, |
1847 | struct scatterlist *sg_dst, unsigned int sg_len, | ||
1848 | enum dma_data_direction direction, unsigned long dma_flags) | ||
1849 | { | ||
1850 | struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); | ||
1851 | dma_addr_t src_dev_addr = 0; | ||
1852 | dma_addr_t dst_dev_addr = 0; | ||
1853 | struct d40_desc *desc; | ||
1854 | unsigned long flags; | ||
1855 | int ret; | ||
1694 | 1856 | ||
1695 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | 1857 | if (!chan->phy_chan) { |
1696 | d40d->lli_pool.size, DMA_TO_DEVICE); | 1858 | chan_err(chan, "Cannot prepare unallocated channel\n"); |
1859 | return NULL; | ||
1697 | } | 1860 | } |
1698 | 1861 | ||
1699 | dma_async_tx_descriptor_init(&d40d->txd, chan); | ||
1700 | 1862 | ||
1701 | d40d->txd.tx_submit = d40_tx_submit; | 1863 | spin_lock_irqsave(&chan->lock, flags); |
1702 | 1864 | ||
1703 | spin_unlock_irqrestore(&d40c->lock, flags); | 1865 | desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); |
1866 | if (desc == NULL) | ||
1867 | goto err; | ||
1868 | |||
1869 | if (sg_next(&sg_src[sg_len - 1]) == sg_src) | ||
1870 | desc->cyclic = true; | ||
1871 | |||
1872 | if (direction != DMA_NONE) { | ||
1873 | dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); | ||
1874 | |||
1875 | if (direction == DMA_FROM_DEVICE) | ||
1876 | src_dev_addr = dev_addr; | ||
1877 | else if (direction == DMA_TO_DEVICE) | ||
1878 | dst_dev_addr = dev_addr; | ||
1879 | } | ||
1880 | |||
1881 | if (chan_is_logical(chan)) | ||
1882 | ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, | ||
1883 | sg_len, src_dev_addr, dst_dev_addr); | ||
1884 | else | ||
1885 | ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, | ||
1886 | sg_len, src_dev_addr, dst_dev_addr); | ||
1887 | |||
1888 | if (ret) { | ||
1889 | chan_err(chan, "Failed to prepare %s sg job: %d\n", | ||
1890 | chan_is_logical(chan) ? "log" : "phy", ret); | ||
1891 | goto err; | ||
1892 | } | ||
1893 | |||
1894 | spin_unlock_irqrestore(&chan->lock, flags); | ||
1895 | |||
1896 | return &desc->txd; | ||
1704 | 1897 | ||
1705 | return &d40d->txd; | ||
1706 | err: | 1898 | err: |
1707 | if (d40d) | 1899 | if (desc) |
1708 | d40_desc_free(d40c, d40d); | 1900 | d40_desc_free(chan, desc); |
1709 | spin_unlock_irqrestore(&d40c->lock, flags); | 1901 | spin_unlock_irqrestore(&chan->lock, flags); |
1710 | return NULL; | 1902 | return NULL; |
1711 | } | 1903 | } |
1712 | EXPORT_SYMBOL(stedma40_memcpy_sg); | ||
1713 | 1904 | ||
1714 | bool stedma40_filter(struct dma_chan *chan, void *data) | 1905 | bool stedma40_filter(struct dma_chan *chan, void *data) |
1715 | { | 1906 | { |
@@ -1732,6 +1923,38 @@ bool stedma40_filter(struct dma_chan *chan, void *data) | |||
1732 | } | 1923 | } |
1733 | EXPORT_SYMBOL(stedma40_filter); | 1924 | EXPORT_SYMBOL(stedma40_filter); |
1734 | 1925 | ||
1926 | static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) | ||
1927 | { | ||
1928 | bool realtime = d40c->dma_cfg.realtime; | ||
1929 | bool highprio = d40c->dma_cfg.high_priority; | ||
1930 | u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1; | ||
1931 | u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1; | ||
1932 | u32 event = D40_TYPE_TO_EVENT(dev_type); | ||
1933 | u32 group = D40_TYPE_TO_GROUP(dev_type); | ||
1934 | u32 bit = 1 << event; | ||
1935 | |||
1936 | /* Destination event lines are stored in the upper halfword */ | ||
1937 | if (!src) | ||
1938 | bit <<= 16; | ||
1939 | |||
1940 | writel(bit, d40c->base->virtbase + prioreg + group * 4); | ||
1941 | writel(bit, d40c->base->virtbase + rtreg + group * 4); | ||
1942 | } | ||
1943 | |||
1944 | static void d40_set_prio_realtime(struct d40_chan *d40c) | ||
1945 | { | ||
1946 | if (d40c->base->rev < 3) | ||
1947 | return; | ||
1948 | |||
1949 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || | ||
1950 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | ||
1951 | __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true); | ||
1952 | |||
1953 | if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || | ||
1954 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | ||
1955 | __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false); | ||
1956 | } | ||
1957 | |||
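Editor's note: a worked example of the register arithmetic above, with assumed macro behaviour (the real D40_TYPE_TO_EVENT/D40_TYPE_TO_GROUP definitions live in ste_dma40_ll.h). For a destination dev_type whose event line is 5 in group 2:

/*
 * Editor's worked example (values assumed):
 *
 *   bit   = 1 << 5;                           0x00000020
 *   bit <<= 16;                               0x00200000  (dst: upper halfword)
 *   writel(bit, virtbase + prioreg + 2 * 4);
 *
 * The bit lands in the third 32-bit register of the selected bank.
 * The PSEG/RSEG registers set the priority/realtime flag for that
 * event line and PCEG/RCEG clear it, so the register choice encodes
 * the configuration and the write is always a single 1.
 */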
1735 | /* DMA ENGINE functions */ | 1958 | /* DMA ENGINE functions */ |
1736 | static int d40_alloc_chan_resources(struct dma_chan *chan) | 1959 | static int d40_alloc_chan_resources(struct dma_chan *chan) |
1737 | { | 1960 | { |
@@ -1748,9 +1971,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
1748 | if (!d40c->configured) { | 1971 | if (!d40c->configured) { |
1749 | err = d40_config_memcpy(d40c); | 1972 | err = d40_config_memcpy(d40c); |
1750 | if (err) { | 1973 | if (err) { |
1751 | dev_err(&d40c->chan.dev->device, | 1974 | chan_err(d40c, "Failed to configure memcpy channel\n"); |
1752 | "[%s] Failed to configure memcpy channel\n", | ||
1753 | __func__); | ||
1754 | goto fail; | 1975 | goto fail; |
1755 | } | 1976 | } |
1756 | } | 1977 | } |
@@ -1758,16 +1979,17 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
1758 | 1979 | ||
1759 | err = d40_allocate_channel(d40c); | 1980 | err = d40_allocate_channel(d40c); |
1760 | if (err) { | 1981 | if (err) { |
1761 | dev_err(&d40c->chan.dev->device, | 1982 | chan_err(d40c, "Failed to allocate channel\n"); |
1762 | "[%s] Failed to allocate channel\n", __func__); | ||
1763 | goto fail; | 1983 | goto fail; |
1764 | } | 1984 | } |
1765 | 1985 | ||
1766 | /* Fill in basic CFG register values */ | 1986 | /* Fill in basic CFG register values */ |
1767 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, | 1987 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, |
1768 | &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); | 1988 | &d40c->dst_def_cfg, chan_is_logical(d40c)); |
1989 | |||
1990 | d40_set_prio_realtime(d40c); | ||
1769 | 1991 | ||
1770 | if (d40c->log_num != D40_PHY_CHAN) { | 1992 | if (chan_is_logical(d40c)) { |
1771 | d40_log_cfg(&d40c->dma_cfg, | 1993 | d40_log_cfg(&d40c->dma_cfg, |
1772 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | 1994 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); |
1773 | 1995 | ||
@@ -1800,8 +2022,7 @@ static void d40_free_chan_resources(struct dma_chan *chan) | |||
1800 | unsigned long flags; | 2022 | unsigned long flags; |
1801 | 2023 | ||
1802 | if (d40c->phy_chan == NULL) { | 2024 | if (d40c->phy_chan == NULL) { |
1803 | dev_err(&d40c->chan.dev->device, | 2025 | chan_err(d40c, "Cannot free unallocated channel\n"); |
1804 | "[%s] Cannot free unallocated channel\n", __func__); | ||
1805 | return; | 2026 | return; |
1806 | } | 2027 | } |
1807 | 2028 | ||
@@ -1811,8 +2032,7 @@ static void d40_free_chan_resources(struct dma_chan *chan) | |||
1811 | err = d40_free_dma(d40c); | 2032 | err = d40_free_dma(d40c); |
1812 | 2033 | ||
1813 | if (err) | 2034 | if (err) |
1814 | dev_err(&d40c->chan.dev->device, | 2035 | chan_err(d40c, "Failed to free channel\n"); |
1815 | "[%s] Failed to free channel\n", __func__); | ||
1816 | spin_unlock_irqrestore(&d40c->lock, flags); | 2036 | spin_unlock_irqrestore(&d40c->lock, flags); |
1817 | } | 2037 | } |
1818 | 2038 | ||
@@ -1822,226 +2042,31 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
1822 | size_t size, | 2042 | size_t size, |
1823 | unsigned long dma_flags) | 2043 | unsigned long dma_flags) |
1824 | { | 2044 | { |
1825 | struct d40_desc *d40d; | 2045 | struct scatterlist dst_sg; |
1826 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 2046 | struct scatterlist src_sg; |
1827 | chan); | ||
1828 | unsigned long flags; | ||
1829 | int err = 0; | ||
1830 | 2047 | ||
1831 | if (d40c->phy_chan == NULL) { | 2048 | sg_init_table(&dst_sg, 1); |
1832 | dev_err(&d40c->chan.dev->device, | 2049 | sg_init_table(&src_sg, 1); |
1833 | "[%s] Channel is not allocated.\n", __func__); | ||
1834 | return ERR_PTR(-EINVAL); | ||
1835 | } | ||
1836 | 2050 | ||
1837 | spin_lock_irqsave(&d40c->lock, flags); | 2051 | sg_dma_address(&dst_sg) = dst; |
1838 | d40d = d40_desc_get(d40c); | 2052 | sg_dma_address(&src_sg) = src; |
1839 | 2053 | ||
1840 | if (d40d == NULL) { | 2054 | sg_dma_len(&dst_sg) = size; |
1841 | dev_err(&d40c->chan.dev->device, | 2055 | sg_dma_len(&src_sg) = size; |
1842 | "[%s] Descriptor is NULL\n", __func__); | ||
1843 | goto err; | ||
1844 | } | ||
1845 | |||
1846 | d40d->txd.flags = dma_flags; | ||
1847 | |||
1848 | dma_async_tx_descriptor_init(&d40d->txd, chan); | ||
1849 | |||
1850 | d40d->txd.tx_submit = d40_tx_submit; | ||
1851 | |||
1852 | if (d40c->log_num != D40_PHY_CHAN) { | ||
1853 | |||
1854 | if (d40_pool_lli_alloc(d40d, 1, true) < 0) { | ||
1855 | dev_err(&d40c->chan.dev->device, | ||
1856 | "[%s] Out of memory\n", __func__); | ||
1857 | goto err; | ||
1858 | } | ||
1859 | d40d->lli_len = 1; | ||
1860 | d40d->lli_current = 0; | ||
1861 | |||
1862 | d40_log_fill_lli(d40d->lli_log.src, | ||
1863 | src, | ||
1864 | size, | ||
1865 | d40c->log_def.lcsp1, | ||
1866 | d40c->dma_cfg.src_info.data_width, | ||
1867 | true); | ||
1868 | |||
1869 | d40_log_fill_lli(d40d->lli_log.dst, | ||
1870 | dst, | ||
1871 | size, | ||
1872 | d40c->log_def.lcsp3, | ||
1873 | d40c->dma_cfg.dst_info.data_width, | ||
1874 | true); | ||
1875 | |||
1876 | } else { | ||
1877 | |||
1878 | if (d40_pool_lli_alloc(d40d, 1, false) < 0) { | ||
1879 | dev_err(&d40c->chan.dev->device, | ||
1880 | "[%s] Out of memory\n", __func__); | ||
1881 | goto err; | ||
1882 | } | ||
1883 | |||
1884 | err = d40_phy_fill_lli(d40d->lli_phy.src, | ||
1885 | src, | ||
1886 | size, | ||
1887 | d40c->dma_cfg.src_info.psize, | ||
1888 | 0, | ||
1889 | d40c->src_def_cfg, | ||
1890 | true, | ||
1891 | d40c->dma_cfg.src_info.data_width, | ||
1892 | false); | ||
1893 | if (err) | ||
1894 | goto err_fill_lli; | ||
1895 | |||
1896 | err = d40_phy_fill_lli(d40d->lli_phy.dst, | ||
1897 | dst, | ||
1898 | size, | ||
1899 | d40c->dma_cfg.dst_info.psize, | ||
1900 | 0, | ||
1901 | d40c->dst_def_cfg, | ||
1902 | true, | ||
1903 | d40c->dma_cfg.dst_info.data_width, | ||
1904 | false); | ||
1905 | 2056 | ||
1906 | if (err) | 2057 | return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags); |
1907 | goto err_fill_lli; | ||
1908 | |||
1909 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | ||
1910 | d40d->lli_pool.size, DMA_TO_DEVICE); | ||
1911 | } | ||
1912 | |||
1913 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
1914 | return &d40d->txd; | ||
1915 | |||
1916 | err_fill_lli: | ||
1917 | dev_err(&d40c->chan.dev->device, | ||
1918 | "[%s] Failed filling in PHY LLI\n", __func__); | ||
1919 | err: | ||
1920 | if (d40d) | ||
1921 | d40_desc_free(d40c, d40d); | ||
1922 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
1923 | return NULL; | ||
1924 | } | 2058 | } |
1925 | 2059 | ||
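Editor's note: d40_prep_memcpy() is now a thin wrapper that turns a contiguous copy into a one-entry scatterlist job; DMA_NONE tells d40_prep_sg() that both addresses come from the lists (mem-to-mem). A hypothetical caller-side sketch:

/* Editor's sketch: a plain memcpy job submitted through the generic
 * API ends up in the same d40_prep_sg() path as the sg variants. */
struct dma_async_tx_descriptor *txd;

txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
					    DMA_PREP_INTERRUPT);
if (txd) {
	txd->tx_submit(txd);
	dma_async_issue_pending(chan);
}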
1926 | static struct dma_async_tx_descriptor * | 2060 | static struct dma_async_tx_descriptor * |
1927 | d40_prep_sg(struct dma_chan *chan, | 2061 | d40_prep_memcpy_sg(struct dma_chan *chan, |
1928 | struct scatterlist *dst_sg, unsigned int dst_nents, | 2062 | struct scatterlist *dst_sg, unsigned int dst_nents, |
1929 | struct scatterlist *src_sg, unsigned int src_nents, | 2063 | struct scatterlist *src_sg, unsigned int src_nents, |
1930 | unsigned long dma_flags) | 2064 | unsigned long dma_flags) |
1931 | { | 2065 | { |
1932 | if (dst_nents != src_nents) | 2066 | if (dst_nents != src_nents) |
1933 | return NULL; | 2067 | return NULL; |
1934 | 2068 | ||
1935 | return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags); | 2069 | return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags); |
1936 | } | ||
1937 | |||
1938 | static int d40_prep_slave_sg_log(struct d40_desc *d40d, | ||
1939 | struct d40_chan *d40c, | ||
1940 | struct scatterlist *sgl, | ||
1941 | unsigned int sg_len, | ||
1942 | enum dma_data_direction direction, | ||
1943 | unsigned long dma_flags) | ||
1944 | { | ||
1945 | dma_addr_t dev_addr = 0; | ||
1946 | int total_size; | ||
1947 | |||
1948 | if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { | ||
1949 | dev_err(&d40c->chan.dev->device, | ||
1950 | "[%s] Out of memory\n", __func__); | ||
1951 | return -ENOMEM; | ||
1952 | } | ||
1953 | |||
1954 | d40d->lli_len = sg_len; | ||
1955 | d40d->lli_current = 0; | ||
1956 | |||
1957 | if (direction == DMA_FROM_DEVICE) | ||
1958 | if (d40c->runtime_addr) | ||
1959 | dev_addr = d40c->runtime_addr; | ||
1960 | else | ||
1961 | dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; | ||
1962 | else if (direction == DMA_TO_DEVICE) | ||
1963 | if (d40c->runtime_addr) | ||
1964 | dev_addr = d40c->runtime_addr; | ||
1965 | else | ||
1966 | dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; | ||
1967 | |||
1968 | else | ||
1969 | return -EINVAL; | ||
1970 | |||
1971 | total_size = d40_log_sg_to_dev(sgl, sg_len, | ||
1972 | &d40d->lli_log, | ||
1973 | &d40c->log_def, | ||
1974 | d40c->dma_cfg.src_info.data_width, | ||
1975 | d40c->dma_cfg.dst_info.data_width, | ||
1976 | direction, | ||
1977 | dev_addr); | ||
1978 | |||
1979 | if (total_size < 0) | ||
1980 | return -EINVAL; | ||
1981 | |||
1982 | return 0; | ||
1983 | } | ||
1984 | |||
1985 | static int d40_prep_slave_sg_phy(struct d40_desc *d40d, | ||
1986 | struct d40_chan *d40c, | ||
1987 | struct scatterlist *sgl, | ||
1988 | unsigned int sgl_len, | ||
1989 | enum dma_data_direction direction, | ||
1990 | unsigned long dma_flags) | ||
1991 | { | ||
1992 | dma_addr_t src_dev_addr; | ||
1993 | dma_addr_t dst_dev_addr; | ||
1994 | int res; | ||
1995 | |||
1996 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { | ||
1997 | dev_err(&d40c->chan.dev->device, | ||
1998 | "[%s] Out of memory\n", __func__); | ||
1999 | return -ENOMEM; | ||
2000 | } | ||
2001 | |||
2002 | d40d->lli_len = sgl_len; | ||
2003 | d40d->lli_current = 0; | ||
2004 | |||
2005 | if (direction == DMA_FROM_DEVICE) { | ||
2006 | dst_dev_addr = 0; | ||
2007 | if (d40c->runtime_addr) | ||
2008 | src_dev_addr = d40c->runtime_addr; | ||
2009 | else | ||
2010 | src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; | ||
2011 | } else if (direction == DMA_TO_DEVICE) { | ||
2012 | if (d40c->runtime_addr) | ||
2013 | dst_dev_addr = d40c->runtime_addr; | ||
2014 | else | ||
2015 | dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; | ||
2016 | src_dev_addr = 0; | ||
2017 | } else | ||
2018 | return -EINVAL; | ||
2019 | |||
2020 | res = d40_phy_sg_to_lli(sgl, | ||
2021 | sgl_len, | ||
2022 | src_dev_addr, | ||
2023 | d40d->lli_phy.src, | ||
2024 | virt_to_phys(d40d->lli_phy.src), | ||
2025 | d40c->src_def_cfg, | ||
2026 | d40c->dma_cfg.src_info.data_width, | ||
2027 | d40c->dma_cfg.src_info.psize); | ||
2028 | if (res < 0) | ||
2029 | return res; | ||
2030 | |||
2031 | res = d40_phy_sg_to_lli(sgl, | ||
2032 | sgl_len, | ||
2033 | dst_dev_addr, | ||
2034 | d40d->lli_phy.dst, | ||
2035 | virt_to_phys(d40d->lli_phy.dst), | ||
2036 | d40c->dst_def_cfg, | ||
2037 | d40c->dma_cfg.dst_info.data_width, | ||
2038 | d40c->dma_cfg.dst_info.psize); | ||
2039 | if (res < 0) | ||
2040 | return res; | ||
2041 | |||
2042 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | ||
2043 | d40d->lli_pool.size, DMA_TO_DEVICE); | ||
2044 | return 0; | ||
2045 | } | 2070 | } |
2046 | 2071 | ||
2047 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | 2072 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, |
@@ -2050,52 +2075,40 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | |||
2050 | enum dma_data_direction direction, | 2075 | enum dma_data_direction direction, |
2051 | unsigned long dma_flags) | 2076 | unsigned long dma_flags) |
2052 | { | 2077 | { |
2053 | struct d40_desc *d40d; | 2078 | if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) |
2054 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 2079 | return NULL; |
2055 | chan); | ||
2056 | unsigned long flags; | ||
2057 | int err; | ||
2058 | |||
2059 | if (d40c->phy_chan == NULL) { | ||
2060 | dev_err(&d40c->chan.dev->device, | ||
2061 | "[%s] Cannot prepare unallocated channel\n", __func__); | ||
2062 | return ERR_PTR(-EINVAL); | ||
2063 | } | ||
2064 | 2080 | ||
2065 | spin_lock_irqsave(&d40c->lock, flags); | 2081 | return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); |
2066 | d40d = d40_desc_get(d40c); | 2082 | } |
2067 | 2083 | ||
2068 | if (d40d == NULL) | 2084 | static struct dma_async_tx_descriptor * |
2069 | goto err; | 2085 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, |
2086 | size_t buf_len, size_t period_len, | ||
2087 | enum dma_data_direction direction) | ||
2088 | { | ||
2089 | unsigned int periods = buf_len / period_len; | ||
2090 | struct dma_async_tx_descriptor *txd; | ||
2091 | struct scatterlist *sg; | ||
2092 | int i; | ||
2070 | 2093 | ||
2071 | if (d40c->log_num != D40_PHY_CHAN) | 2094 | sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL); |
2072 | err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, | 2095 | for (i = 0; i < periods; i++) { |
2073 | direction, dma_flags); | 2096 | sg_dma_address(&sg[i]) = dma_addr; |
2074 | else | 2097 | sg_dma_len(&sg[i]) = period_len; |
2075 | err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, | 2098 | dma_addr += period_len; |
2076 | direction, dma_flags); | ||
2077 | if (err) { | ||
2078 | dev_err(&d40c->chan.dev->device, | ||
2079 | "[%s] Failed to prepare %s slave sg job: %d\n", | ||
2080 | __func__, | ||
2081 | d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); | ||
2082 | goto err; | ||
2083 | } | 2099 | } |
2084 | 2100 | ||
2085 | d40d->txd.flags = dma_flags; | 2101 | sg[periods].offset = 0; |
2086 | 2102 | sg[periods].length = 0; | |
2087 | dma_async_tx_descriptor_init(&d40d->txd, chan); | 2103 | sg[periods].page_link = |
2104 | ((unsigned long)sg | 0x01) & ~0x02; | ||
2088 | 2105 | ||
2089 | d40d->txd.tx_submit = d40_tx_submit; | 2106 | txd = d40_prep_sg(chan, sg, sg, periods, direction, |
2107 | DMA_PREP_INTERRUPT); | ||
2090 | 2108 | ||
2091 | spin_unlock_irqrestore(&d40c->lock, flags); | 2109 | kfree(sg); |
2092 | return &d40d->txd; | ||
2093 | 2110 | ||
2094 | err: | 2111 | return txd; |
2095 | if (d40d) | ||
2096 | d40_desc_free(d40c, d40d); | ||
2097 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
2098 | return NULL; | ||
2099 | } | 2112 | } |
2100 | 2113 | ||
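Editor's note: the open-coded page_link assignment above turns the extra (periods + 1)th entry into a chain link back to the head of the list, so sg_next() on the last real entry wraps around -- which is exactly what d40_prep_sg() tests to set desc->cyclic. A sketch of the same construction with the stock helper, assuming the architecture supports scatterlist chaining (ARCH_HAS_SG_CHAIN; its absence may be why the patch open-codes the link):

/* Editor's sketch: sg_chain() performs the same offset/length/page_link
 * writes as the open-coded version, making entry 'periods' a link. */
sg_init_table(sg, periods + 1);
for (i = 0; i < periods; i++) {
	sg_dma_address(&sg[i]) = dma_addr + i * period_len;
	sg_dma_len(&sg[i]) = period_len;
}
sg_chain(sg, periods + 1, sg);	/* now sg_next(&sg[periods - 1]) == sg */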
2101 | static enum dma_status d40_tx_status(struct dma_chan *chan, | 2114 | static enum dma_status d40_tx_status(struct dma_chan *chan, |
@@ -2108,9 +2121,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, | |||
2108 | int ret; | 2121 | int ret; |
2109 | 2122 | ||
2110 | if (d40c->phy_chan == NULL) { | 2123 | if (d40c->phy_chan == NULL) { |
2111 | dev_err(&d40c->chan.dev->device, | 2124 | chan_err(d40c, "Cannot read status of unallocated channel\n"); |
2112 | "[%s] Cannot read status of unallocated channel\n", | ||
2113 | __func__); | ||
2114 | return -EINVAL; | 2125 | return -EINVAL; |
2115 | } | 2126 | } |
2116 | 2127 | ||
@@ -2134,8 +2145,7 @@ static void d40_issue_pending(struct dma_chan *chan) | |||
2134 | unsigned long flags; | 2145 | unsigned long flags; |
2135 | 2146 | ||
2136 | if (d40c->phy_chan == NULL) { | 2147 | if (d40c->phy_chan == NULL) { |
2137 | dev_err(&d40c->chan.dev->device, | 2148 | chan_err(d40c, "Channel is not allocated!\n"); |
2138 | "[%s] Channel is not allocated!\n", __func__); | ||
2139 | return; | 2149 | return; |
2140 | } | 2150 | } |
2141 | 2151 | ||
@@ -2228,7 +2238,7 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2228 | return; | 2238 | return; |
2229 | } | 2239 | } |
2230 | 2240 | ||
2231 | if (d40c->log_num != D40_PHY_CHAN) { | 2241 | if (chan_is_logical(d40c)) { |
2232 | if (config_maxburst >= 16) | 2242 | if (config_maxburst >= 16) |
2233 | psize = STEDMA40_PSIZE_LOG_16; | 2243 | psize = STEDMA40_PSIZE_LOG_16; |
2234 | else if (config_maxburst >= 8) | 2244 | else if (config_maxburst >= 8) |
@@ -2244,6 +2254,8 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2244 | psize = STEDMA40_PSIZE_PHY_8; | 2254 | psize = STEDMA40_PSIZE_PHY_8; |
2245 | else if (config_maxburst >= 4) | 2255 | else if (config_maxburst >= 4) |
2246 | psize = STEDMA40_PSIZE_PHY_4; | 2256 | psize = STEDMA40_PSIZE_PHY_4; |
2257 | else if (config_maxburst >= 2) | ||
2258 | psize = STEDMA40_PSIZE_PHY_2; | ||
2247 | else | 2259 | else |
2248 | psize = STEDMA40_PSIZE_PHY_1; | 2260 | psize = STEDMA40_PSIZE_PHY_1; |
2249 | } | 2261 | } |
@@ -2259,7 +2271,7 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2259 | cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; | 2271 | cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; |
2260 | 2272 | ||
2261 | /* Fill in register values */ | 2273 | /* Fill in register values */ |
2262 | if (d40c->log_num != D40_PHY_CHAN) | 2274 | if (chan_is_logical(d40c)) |
2263 | d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | 2275 | d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); |
2264 | else | 2276 | else |
2265 | d40_phy_cfg(cfg, &d40c->src_def_cfg, | 2277 | d40_phy_cfg(cfg, &d40c->src_def_cfg, |
@@ -2280,25 +2292,20 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2280 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 2292 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
2281 | unsigned long arg) | 2293 | unsigned long arg) |
2282 | { | 2294 | { |
2283 | unsigned long flags; | ||
2284 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 2295 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2285 | 2296 | ||
2286 | if (d40c->phy_chan == NULL) { | 2297 | if (d40c->phy_chan == NULL) { |
2287 | dev_err(&d40c->chan.dev->device, | 2298 | chan_err(d40c, "Channel is not allocated!\n"); |
2288 | "[%s] Channel is not allocated!\n", __func__); | ||
2289 | return -EINVAL; | 2299 | return -EINVAL; |
2290 | } | 2300 | } |
2291 | 2301 | ||
2292 | switch (cmd) { | 2302 | switch (cmd) { |
2293 | case DMA_TERMINATE_ALL: | 2303 | case DMA_TERMINATE_ALL: |
2294 | spin_lock_irqsave(&d40c->lock, flags); | 2304 | return d40_terminate_all(d40c); |
2295 | d40_term_all(d40c); | ||
2296 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
2297 | return 0; | ||
2298 | case DMA_PAUSE: | 2305 | case DMA_PAUSE: |
2299 | return d40_pause(chan); | 2306 | return d40_pause(d40c); |
2300 | case DMA_RESUME: | 2307 | case DMA_RESUME: |
2301 | return d40_resume(chan); | 2308 | return d40_resume(d40c); |
2302 | case DMA_SLAVE_CONFIG: | 2309 | case DMA_SLAVE_CONFIG: |
2303 | d40_set_runtime_config(chan, | 2310 | d40_set_runtime_config(chan, |
2304 | (struct dma_slave_config *) arg); | 2311 | (struct dma_slave_config *) arg); |
@@ -2343,6 +2350,35 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | |||
2343 | } | 2350 | } |
2344 | } | 2351 | } |
2345 | 2352 | ||
2353 | static void d40_ops_init(struct d40_base *base, struct dma_device *dev) | ||
2354 | { | ||
2355 | if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) | ||
2356 | dev->device_prep_slave_sg = d40_prep_slave_sg; | ||
2357 | |||
2358 | if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { | ||
2359 | dev->device_prep_dma_memcpy = d40_prep_memcpy; | ||
2360 | |||
2361 | /* | ||
2362 | * This controller can only access addresses at even | ||
2363 | * 32-bit boundaries, i.e. 2^2 | ||
2364 | */ | ||
2365 | dev->copy_align = 2; | ||
2366 | } | ||
2367 | |||
2368 | if (dma_has_cap(DMA_SG, dev->cap_mask)) | ||
2369 | dev->device_prep_dma_sg = d40_prep_memcpy_sg; | ||
2370 | |||
2371 | if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) | ||
2372 | dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; | ||
2373 | |||
2374 | dev->device_alloc_chan_resources = d40_alloc_chan_resources; | ||
2375 | dev->device_free_chan_resources = d40_free_chan_resources; | ||
2376 | dev->device_issue_pending = d40_issue_pending; | ||
2377 | dev->device_tx_status = d40_tx_status; | ||
2378 | dev->device_control = d40_control; | ||
2379 | dev->dev = base->dev; | ||
2380 | } | ||
2381 | |||
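Editor's note: d40_ops_init() lets the three dma_device instances registered below share one wiring function; the capability mask alone decides which prep callbacks get installed. A minimal sketch of the pattern for a hypothetical device instance:

/* Editor's sketch: capability mask first, then the shared wiring. */
dma_cap_zero(dev->cap_mask);
dma_cap_set(DMA_SLAVE, dev->cap_mask);
dma_cap_set(DMA_CYCLIC, dev->cap_mask);

d40_ops_init(base, dev);	/* installs only the SLAVE + CYCLIC preps */

err = dma_async_device_register(dev);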
2346 | static int __init d40_dmaengine_init(struct d40_base *base, | 2382 | static int __init d40_dmaengine_init(struct d40_base *base, |
2347 | int num_reserved_chans) | 2383 | int num_reserved_chans) |
2348 | { | 2384 | { |
@@ -2353,23 +2389,14 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
2353 | 2389 | ||
2354 | dma_cap_zero(base->dma_slave.cap_mask); | 2390 | dma_cap_zero(base->dma_slave.cap_mask); |
2355 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); | 2391 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); |
2392 | dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); | ||
2356 | 2393 | ||
2357 | base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; | 2394 | d40_ops_init(base, &base->dma_slave); |
2358 | base->dma_slave.device_free_chan_resources = d40_free_chan_resources; | ||
2359 | base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; | ||
2360 | base->dma_slave.device_prep_dma_sg = d40_prep_sg; | ||
2361 | base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; | ||
2362 | base->dma_slave.device_tx_status = d40_tx_status; | ||
2363 | base->dma_slave.device_issue_pending = d40_issue_pending; | ||
2364 | base->dma_slave.device_control = d40_control; | ||
2365 | base->dma_slave.dev = base->dev; | ||
2366 | 2395 | ||
2367 | err = dma_async_device_register(&base->dma_slave); | 2396 | err = dma_async_device_register(&base->dma_slave); |
2368 | 2397 | ||
2369 | if (err) { | 2398 | if (err) { |
2370 | dev_err(base->dev, | 2399 | d40_err(base->dev, "Failed to register slave channels\n"); |
2371 | "[%s] Failed to register slave channels\n", | ||
2372 | __func__); | ||
2373 | goto failure1; | 2400 | goto failure1; |
2374 | } | 2401 | } |
2375 | 2402 | ||
@@ -2378,29 +2405,15 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
2378 | 2405 | ||
2379 | dma_cap_zero(base->dma_memcpy.cap_mask); | 2406 | dma_cap_zero(base->dma_memcpy.cap_mask); |
2380 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); | 2407 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); |
2381 | dma_cap_set(DMA_SG, base->dma_slave.cap_mask); | 2408 | dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); |
2382 | 2409 | ||
2383 | base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; | 2410 | d40_ops_init(base, &base->dma_memcpy); |
2384 | base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; | ||
2385 | base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; | ||
2386 | base->dma_slave.device_prep_dma_sg = d40_prep_sg; | ||
2387 | base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; | ||
2388 | base->dma_memcpy.device_tx_status = d40_tx_status; | ||
2389 | base->dma_memcpy.device_issue_pending = d40_issue_pending; | ||
2390 | base->dma_memcpy.device_control = d40_control; | ||
2391 | base->dma_memcpy.dev = base->dev; | ||
2392 | /* | ||
2393 | * This controller can only access address at even | ||
2394 | * 32bit boundaries, i.e. 2^2 | ||
2395 | */ | ||
2396 | base->dma_memcpy.copy_align = 2; | ||
2397 | 2411 | ||
2398 | err = dma_async_device_register(&base->dma_memcpy); | 2412 | err = dma_async_device_register(&base->dma_memcpy); |
2399 | 2413 | ||
2400 | if (err) { | 2414 | if (err) { |
2401 | dev_err(base->dev, | 2415 | d40_err(base->dev, |
2402 | "[%s] Failed to regsiter memcpy only channels\n", | 2416 | "Failed to regsiter memcpy only channels\n"); |
2403 | __func__); | ||
2404 | goto failure2; | 2417 | goto failure2; |
2405 | } | 2418 | } |
2406 | 2419 | ||
@@ -2410,24 +2423,15 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
2410 | dma_cap_zero(base->dma_both.cap_mask); | 2423 | dma_cap_zero(base->dma_both.cap_mask); |
2411 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); | 2424 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); |
2412 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); | 2425 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); |
2413 | dma_cap_set(DMA_SG, base->dma_slave.cap_mask); | 2426 | dma_cap_set(DMA_SG, base->dma_both.cap_mask); |
2414 | 2427 | dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); | |
2415 | base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; | 2428 | |
2416 | base->dma_both.device_free_chan_resources = d40_free_chan_resources; | 2429 | d40_ops_init(base, &base->dma_both); |
2417 | base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; | ||
2418 | base->dma_slave.device_prep_dma_sg = d40_prep_sg; | ||
2419 | base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; | ||
2420 | base->dma_both.device_tx_status = d40_tx_status; | ||
2421 | base->dma_both.device_issue_pending = d40_issue_pending; | ||
2422 | base->dma_both.device_control = d40_control; | ||
2423 | base->dma_both.dev = base->dev; | ||
2424 | base->dma_both.copy_align = 2; | ||
2425 | err = dma_async_device_register(&base->dma_both); | 2430 | err = dma_async_device_register(&base->dma_both); |
2426 | 2431 | ||
2427 | if (err) { | 2432 | if (err) { |
2428 | dev_err(base->dev, | 2433 | d40_err(base->dev, |
2429 | "[%s] Failed to register logical and physical capable channels\n", | 2434 | "Failed to register logical and physical capable channels\n"); |
2430 | __func__); | ||
2431 | goto failure3; | 2435 | goto failure3; |
2432 | } | 2436 | } |
2433 | return 0; | 2437 | return 0; |
@@ -2503,9 +2507,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2503 | { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, | 2507 | { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, |
2504 | /* | 2508 | /* |
2505 | * D40_DREG_PERIPHID2 Depends on HW revision: | 2509 | * D40_DREG_PERIPHID2 Depends on HW revision: |
2506 | * MOP500/HREF ED has 0x0008, | 2510 | * DB8500ed has 0x0008, |
2507 | * ? has 0x0018, | 2511 | * ? has 0x0018, |
2508 | * HREF V1 has 0x0028 | 2512 | * DB8500v1 has 0x0028 |
2513 | * DB8500v2 has 0x0038 | ||
2509 | */ | 2514 | */ |
2510 | { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, | 2515 | { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, |
2511 | 2516 | ||
@@ -2529,8 +2534,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2529 | clk = clk_get(&pdev->dev, NULL); | 2534 | clk = clk_get(&pdev->dev, NULL); |
2530 | 2535 | ||
2531 | if (IS_ERR(clk)) { | 2536 | if (IS_ERR(clk)) { |
2532 | dev_err(&pdev->dev, "[%s] No matching clock found\n", | 2537 | d40_err(&pdev->dev, "No matching clock found\n"); |
2533 | __func__); | ||
2534 | goto failure; | 2538 | goto failure; |
2535 | } | 2539 | } |
2536 | 2540 | ||
@@ -2553,9 +2557,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2553 | for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { | 2557 | for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { |
2554 | if (dma_id_regs[i].val != | 2558 | if (dma_id_regs[i].val != |
2555 | readl(virtbase + dma_id_regs[i].reg)) { | 2559 | readl(virtbase + dma_id_regs[i].reg)) { |
2556 | dev_err(&pdev->dev, | 2560 | d40_err(&pdev->dev, |
2557 | "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", | 2561 | "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", |
2558 | __func__, | ||
2559 | dma_id_regs[i].val, | 2562 | dma_id_regs[i].val, |
2560 | dma_id_regs[i].reg, | 2563 | dma_id_regs[i].reg, |
2561 | readl(virtbase + dma_id_regs[i].reg)); | 2564 | readl(virtbase + dma_id_regs[i].reg)); |
@@ -2568,9 +2571,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2568 | 2571 | ||
2569 | if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != | 2572 | if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != |
2570 | D40_HW_DESIGNER) { | 2573 | D40_HW_DESIGNER) { |
2571 | dev_err(&pdev->dev, | 2574 | d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", |
2572 | "[%s] Unknown designer! Got %x wanted %x\n", | 2575 | val & D40_DREG_PERIPHID2_DESIGNER_MASK, |
2573 | __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK, | ||
2574 | D40_HW_DESIGNER); | 2576 | D40_HW_DESIGNER); |
2575 | goto failure; | 2577 | goto failure; |
2576 | } | 2578 | } |
@@ -2600,7 +2602,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2600 | sizeof(struct d40_chan), GFP_KERNEL); | 2602 | sizeof(struct d40_chan), GFP_KERNEL); |
2601 | 2603 | ||
2602 | if (base == NULL) { | 2604 | if (base == NULL) { |
2603 | dev_err(&pdev->dev, "[%s] Out of memory\n", __func__); | 2605 | d40_err(&pdev->dev, "Out of memory\n"); |
2604 | goto failure; | 2606 | goto failure; |
2605 | } | 2607 | } |
2606 | 2608 | ||
@@ -2747,6 +2749,7 @@ static void __init d40_hw_init(struct d40_base *base) | |||
2747 | 2749 | ||
2748 | static int __init d40_lcla_allocate(struct d40_base *base) | 2750 | static int __init d40_lcla_allocate(struct d40_base *base) |
2749 | { | 2751 | { |
2752 | struct d40_lcla_pool *pool = &base->lcla_pool; | ||
2750 | unsigned long *page_list; | 2753 | unsigned long *page_list; |
2751 | int i, j; | 2754 | int i, j; |
2752 | int ret = 0; | 2755 | int ret = 0; |
@@ -2772,9 +2775,8 @@ static int __init d40_lcla_allocate(struct d40_base *base) | |||
2772 | base->lcla_pool.pages); | 2775 | base->lcla_pool.pages); |
2773 | if (!page_list[i]) { | 2776 | if (!page_list[i]) { |
2774 | 2777 | ||
2775 | dev_err(base->dev, | 2778 | d40_err(base->dev, "Failed to allocate %d pages.\n", |
2776 | "[%s] Failed to allocate %d pages.\n", | 2779 | base->lcla_pool.pages); |
2777 | __func__, base->lcla_pool.pages); | ||
2778 | 2780 | ||
2779 | for (j = 0; j < i; j++) | 2781 | for (j = 0; j < i; j++) |
2780 | free_pages(page_list[j], base->lcla_pool.pages); | 2782 | free_pages(page_list[j], base->lcla_pool.pages); |
@@ -2812,6 +2814,15 @@ static int __init d40_lcla_allocate(struct d40_base *base) | |||
2812 | LCLA_ALIGNMENT); | 2814 | LCLA_ALIGNMENT); |
2813 | } | 2815 | } |
2814 | 2816 | ||
2817 | pool->dma_addr = dma_map_single(base->dev, pool->base, | ||
2818 | SZ_1K * base->num_phy_chans, | ||
2819 | DMA_TO_DEVICE); | ||
2820 | if (dma_mapping_error(base->dev, pool->dma_addr)) { | ||
2821 | pool->dma_addr = 0; | ||
2822 | ret = -ENOMEM; | ||
2823 | goto failure; | ||
2824 | } | ||
2825 | |||
2815 | writel(virt_to_phys(base->lcla_pool.base), | 2826 | writel(virt_to_phys(base->lcla_pool.base), |
2816 | base->virtbase + D40_DREG_LCLA); | 2827 | base->virtbase + D40_DREG_LCLA); |
2817 | failure: | 2828 | failure: |
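Editor's note: the new LCLA mapping follows the standard streaming-DMA contract, condensed below; the matching dma_unmap_single() is added to this patch's failure path further down.

/* Editor's sketch: map, check, use, unmap -- same size and direction. */
dma_addr_t addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, addr))
	return -ENOMEM;		/* never hand a failed mapping to hardware */
/* ... device reads from 'addr' ... */
dma_unmap_single(dev, addr, size, DMA_TO_DEVICE);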
@@ -2844,9 +2855,7 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2844 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); | 2855 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); |
2845 | if (!res) { | 2856 | if (!res) { |
2846 | ret = -ENOENT; | 2857 | ret = -ENOENT; |
2847 | dev_err(&pdev->dev, | 2858 | d40_err(&pdev->dev, "No \"lcpa\" memory resource\n"); |
2848 | "[%s] No \"lcpa\" memory resource\n", | ||
2849 | __func__); | ||
2850 | goto failure; | 2859 | goto failure; |
2851 | } | 2860 | } |
2852 | base->lcpa_size = resource_size(res); | 2861 | base->lcpa_size = resource_size(res); |
@@ -2855,9 +2864,9 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2855 | if (request_mem_region(res->start, resource_size(res), | 2864 | if (request_mem_region(res->start, resource_size(res), |
2856 | D40_NAME " I/O lcpa") == NULL) { | 2865 | D40_NAME " I/O lcpa") == NULL) { |
2857 | ret = -EBUSY; | 2866 | ret = -EBUSY; |
2858 | dev_err(&pdev->dev, | 2867 | d40_err(&pdev->dev, |
2859 | "[%s] Failed to request LCPA region 0x%x-0x%x\n", | 2868 | "Failed to request LCPA region 0x%x-0x%x\n", |
2860 | __func__, res->start, res->end); | 2869 | res->start, res->end); |
2861 | goto failure; | 2870 | goto failure; |
2862 | } | 2871 | } |
2863 | 2872 | ||
@@ -2873,16 +2882,13 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2873 | base->lcpa_base = ioremap(res->start, resource_size(res)); | 2882 | base->lcpa_base = ioremap(res->start, resource_size(res)); |
2874 | if (!base->lcpa_base) { | 2883 | if (!base->lcpa_base) { |
2875 | ret = -ENOMEM; | 2884 | ret = -ENOMEM; |
2876 | dev_err(&pdev->dev, | 2885 | d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); |
2877 | "[%s] Failed to ioremap LCPA region\n", | ||
2878 | __func__); | ||
2879 | goto failure; | 2886 | goto failure; |
2880 | } | 2887 | } |
2881 | 2888 | ||
2882 | ret = d40_lcla_allocate(base); | 2889 | ret = d40_lcla_allocate(base); |
2883 | if (ret) { | 2890 | if (ret) { |
2884 | dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n", | 2891 | d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); |
2885 | __func__); | ||
2886 | goto failure; | 2892 | goto failure; |
2887 | } | 2893 | } |
2888 | 2894 | ||
@@ -2891,9 +2897,8 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2891 | base->irq = platform_get_irq(pdev, 0); | 2897 | base->irq = platform_get_irq(pdev, 0); |
2892 | 2898 | ||
2893 | ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); | 2899 | ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); |
2894 | |||
2895 | if (ret) { | 2900 | if (ret) { |
2896 | dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__); | 2901 | d40_err(&pdev->dev, "No IRQ defined\n"); |
2897 | goto failure; | 2902 | goto failure; |
2898 | } | 2903 | } |
2899 | 2904 | ||
@@ -2912,6 +2917,12 @@ failure: | |||
2912 | kmem_cache_destroy(base->desc_slab); | 2917 | kmem_cache_destroy(base->desc_slab); |
2913 | if (base->virtbase) | 2918 | if (base->virtbase) |
2914 | iounmap(base->virtbase); | 2919 | iounmap(base->virtbase); |
2920 | |||
2921 | if (base->lcla_pool.dma_addr) | ||
2922 | dma_unmap_single(base->dev, base->lcla_pool.dma_addr, | ||
2923 | SZ_1K * base->num_phy_chans, | ||
2924 | DMA_TO_DEVICE); | ||
2925 | |||
2915 | if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) | 2926 | if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) |
2916 | free_pages((unsigned long)base->lcla_pool.base, | 2927 | free_pages((unsigned long)base->lcla_pool.base, |
2917 | base->lcla_pool.pages); | 2928 | base->lcla_pool.pages); |
@@ -2936,7 +2947,7 @@ failure: | |||
2936 | kfree(base); | 2947 | kfree(base); |
2937 | } | 2948 | } |
2938 | 2949 | ||
2939 | dev_err(&pdev->dev, "[%s] probe failed\n", __func__); | 2950 | d40_err(&pdev->dev, "probe failed\n"); |
2940 | return ret; | 2951 | return ret; |
2941 | } | 2952 | } |
2942 | 2953 | ||
@@ -2947,7 +2958,7 @@ static struct platform_driver d40_driver = { | |||
2947 | }, | 2958 | }, |
2948 | }; | 2959 | }; |
2949 | 2960 | ||
2950 | int __init stedma40_init(void) | 2961 | static int __init stedma40_init(void) |
2951 | { | 2962 | { |
2952 | return platform_driver_probe(&d40_driver, d40_probe); | 2963 | return platform_driver_probe(&d40_driver, d40_probe); |
2953 | } | 2964 | } |
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 8557cb88b25..cad9e1daedf 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) ST-Ericsson SA 2007-2010 | 2 | * Copyright (C) ST-Ericsson SA 2007-2010 |
3 | * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson | 3 | * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson |
4 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson | 4 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson |
5 | * License terms: GNU General Public License (GPL) version 2 | 5 | * License terms: GNU General Public License (GPL) version 2 |
6 | */ | 6 | */ |
@@ -122,16 +122,18 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg, | |||
122 | *dst_cfg = dst; | 122 | *dst_cfg = dst; |
123 | } | 123 | } |
124 | 124 | ||
125 | int d40_phy_fill_lli(struct d40_phy_lli *lli, | 125 | static int d40_phy_fill_lli(struct d40_phy_lli *lli, |
126 | dma_addr_t data, | 126 | dma_addr_t data, |
127 | u32 data_size, | 127 | u32 data_size, |
128 | int psize, | 128 | dma_addr_t next_lli, |
129 | dma_addr_t next_lli, | 129 | u32 reg_cfg, |
130 | u32 reg_cfg, | 130 | struct stedma40_half_channel_info *info, |
131 | bool term_int, | 131 | unsigned int flags) |
132 | u32 data_width, | ||
133 | bool is_device) | ||
134 | { | 132 | { |
133 | bool addr_inc = flags & LLI_ADDR_INC; | ||
134 | bool term_int = flags & LLI_TERM_INT; | ||
135 | unsigned int data_width = info->data_width; | ||
136 | int psize = info->psize; | ||
135 | int num_elems; | 137 | int num_elems; |
136 | 138 | ||
137 | if (psize == STEDMA40_PSIZE_PHY_1) | 139 | if (psize == STEDMA40_PSIZE_PHY_1) |
@@ -139,13 +141,6 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli, | |||
139 | else | 141 | else |
140 | num_elems = 2 << psize; | 142 | num_elems = 2 << psize; |
141 | 143 | ||
142 | /* | ||
143 | * Size is 16bit. data_width is 8, 16, 32 or 64 bit | ||
144 | * Block large than 64 KiB must be split. | ||
145 | */ | ||
146 | if (data_size > (0xffff << data_width)) | ||
147 | return -EINVAL; | ||
148 | |||
149 | /* Must be aligned */ | 144 | /* Must be aligned */ |
150 | if (!IS_ALIGNED(data, 0x1 << data_width)) | 145 | if (!IS_ALIGNED(data, 0x1 << data_width)) |
151 | return -EINVAL; | 146 | return -EINVAL; |
@@ -161,7 +156,7 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli, | |||
161 | * Distance to next element sized entry. | 156 | * Distance to next element sized entry. |
162 | * Usually the size of the element unless you want gaps. | 157 | * Usually the size of the element unless you want gaps. |
163 | */ | 158 | */ |
164 | if (!is_device) | 159 | if (addr_inc) |
165 | lli->reg_elt |= (0x1 << data_width) << | 160 | lli->reg_elt |= (0x1 << data_width) << |
166 | D40_SREG_ELEM_PHY_EIDX_POS; | 161 | D40_SREG_ELEM_PHY_EIDX_POS; |
167 | 162 | ||
@@ -187,97 +182,137 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli, | |||
187 | return 0; | 182 | return 0; |
188 | } | 183 | } |
189 | 184 | ||
185 | static int d40_seg_size(int size, int data_width1, int data_width2) | ||
186 | { | ||
187 | u32 max_w = max(data_width1, data_width2); | ||
188 | u32 min_w = min(data_width1, data_width2); | ||
189 | u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); | ||
190 | |||
191 | if (seg_max > STEDMA40_MAX_SEG_SIZE) | ||
192 | seg_max -= (1 << max_w); | ||
193 | |||
194 | if (size <= seg_max) | ||
195 | return size; | ||
196 | |||
197 | if (size <= 2 * seg_max) | ||
198 | return ALIGN(size / 2, 1 << max_w); | ||
199 | |||
200 | return seg_max; | ||
201 | } | ||
202 | |||
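Editor's note: d40_seg_size() caps each transfer piece to what the hardware's 16-bit element counter can express for the narrower side, while keeping every split point aligned for the wider side. A worked example, assuming STEDMA40_MAX_SEG_SIZE is 0xffff elements:

/*
 * Editor's worked example: 32-bit source (data_width1 = 2), 8-bit
 * destination (data_width2 = 0), so max_w = 2 and min_w = 0:
 *
 *   seg_max = ALIGN(0xffff << 0, 1 << 2) = 0x10000 bytes
 *   0x10000 > 0xffff, so seg_max -= (1 << 2)  ->  0xfffc bytes
 *
 * A 0x18000-byte buffer then satisfies size <= 2 * seg_max, so it is
 * split into two equal, 4-byte-aligned pieces of ALIGN(0x18000 / 2, 4)
 * = 0xc000 bytes each, rather than 0xfffc bytes plus a short tail.
 */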
203 | static struct d40_phy_lli * | ||
204 | d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size, | ||
205 | dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg, | ||
206 | struct stedma40_half_channel_info *info, | ||
207 | struct stedma40_half_channel_info *otherinfo, | ||
208 | unsigned long flags) | ||
209 | { | ||
210 | bool lastlink = flags & LLI_LAST_LINK; | ||
211 | bool addr_inc = flags & LLI_ADDR_INC; | ||
212 | bool term_int = flags & LLI_TERM_INT; | ||
213 | bool cyclic = flags & LLI_CYCLIC; | ||
214 | int err; | ||
215 | dma_addr_t next = lli_phys; | ||
216 | int size_rest = size; | ||
217 | int size_seg = 0; | ||
218 | |||
219 | /* | ||
220 | * This piece may be split up based on d40_seg_size(); we only want the | ||
221 | * term int on the last part. | ||
222 | */ | ||
223 | if (term_int) | ||
224 | flags &= ~LLI_TERM_INT; | ||
225 | |||
226 | do { | ||
227 | size_seg = d40_seg_size(size_rest, info->data_width, | ||
228 | otherinfo->data_width); | ||
229 | size_rest -= size_seg; | ||
230 | |||
231 | if (size_rest == 0 && term_int) | ||
232 | flags |= LLI_TERM_INT; | ||
233 | |||
234 | if (size_rest == 0 && lastlink) | ||
235 | next = cyclic ? first_phys : 0; | ||
236 | else | ||
237 | next = ALIGN(next + sizeof(struct d40_phy_lli), | ||
238 | D40_LLI_ALIGN); | ||
239 | |||
240 | err = d40_phy_fill_lli(lli, addr, size_seg, next, | ||
241 | reg_cfg, info, flags); | ||
242 | |||
243 | if (err) | ||
244 | goto err; | ||
245 | |||
246 | lli++; | ||
247 | if (addr_inc) | ||
248 | addr += size_seg; | ||
249 | } while (size_rest); | ||
250 | |||
251 | return lli; | ||
252 | |||
253 | err: | ||
254 | return NULL; | ||
255 | } | ||
256 | |||
190 | int d40_phy_sg_to_lli(struct scatterlist *sg, | 257 | int d40_phy_sg_to_lli(struct scatterlist *sg, |
191 | int sg_len, | 258 | int sg_len, |
192 | dma_addr_t target, | 259 | dma_addr_t target, |
193 | struct d40_phy_lli *lli, | 260 | struct d40_phy_lli *lli_sg, |
194 | dma_addr_t lli_phys, | 261 | dma_addr_t lli_phys, |
195 | u32 reg_cfg, | 262 | u32 reg_cfg, |
196 | u32 data_width, | 263 | struct stedma40_half_channel_info *info, |
197 | int psize) | 264 | struct stedma40_half_channel_info *otherinfo, |
265 | unsigned long flags) | ||
198 | { | 266 | { |
199 | int total_size = 0; | 267 | int total_size = 0; |
200 | int i; | 268 | int i; |
201 | struct scatterlist *current_sg = sg; | 269 | struct scatterlist *current_sg = sg; |
202 | dma_addr_t next_lli_phys; | 270 | struct d40_phy_lli *lli = lli_sg; |
203 | dma_addr_t dst; | 271 | dma_addr_t l_phys = lli_phys; |
204 | int err = 0; | 272 | |
273 | if (!target) | ||
274 | flags |= LLI_ADDR_INC; | ||
205 | 275 | ||
206 | for_each_sg(sg, current_sg, sg_len, i) { | 276 | for_each_sg(sg, current_sg, sg_len, i) { |
277 | dma_addr_t sg_addr = sg_dma_address(current_sg); | ||
278 | unsigned int len = sg_dma_len(current_sg); | ||
279 | dma_addr_t dst = target ?: sg_addr; | ||
207 | 280 | ||
208 | total_size += sg_dma_len(current_sg); | 281 | total_size += sg_dma_len(current_sg); |
209 | 282 | ||
210 | /* If this scatter list entry is the last one, no next link */ | 283 | if (i == sg_len - 1) |
211 | if (sg_len - 1 == i) | 284 | flags |= LLI_TERM_INT | LLI_LAST_LINK; |
212 | next_lli_phys = 0; | ||
213 | else | ||
214 | next_lli_phys = ALIGN(lli_phys + (i + 1) * | ||
215 | sizeof(struct d40_phy_lli), | ||
216 | D40_LLI_ALIGN); | ||
217 | 285 | ||
218 | if (target) | 286 | l_phys = ALIGN(lli_phys + (lli - lli_sg) * |
219 | dst = target; | 287 | sizeof(struct d40_phy_lli), D40_LLI_ALIGN); |
220 | else | 288 | |
221 | dst = sg_phys(current_sg); | 289 | lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys, |
222 | 290 | reg_cfg, info, otherinfo, flags); | |
223 | err = d40_phy_fill_lli(&lli[i], | 291 | |
224 | dst, | 292 | if (lli == NULL) |
225 | sg_dma_len(current_sg), | 293 | return -EINVAL; |
226 | psize, | ||
227 | next_lli_phys, | ||
228 | reg_cfg, | ||
229 | !next_lli_phys, | ||
230 | data_width, | ||
231 | target == dst); | ||
232 | if (err) | ||
233 | goto err; | ||
234 | } | 294 | } |
235 | 295 | ||
236 | return total_size; | 296 | return total_size; |
237 | err: | ||
238 | return err; | ||
239 | } | 297 | } |
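For callers, the reworked d40_phy_sg_to_lli() takes the two half-channel infos instead of a single data width, and target == 0 now means "walk the sg addresses" (LLI_ADDR_INC is set internally). A hedged mem-to-device sketch; the desc/chan/cfg field names are assumptions, not the driver's actual layout:

	/* Source side follows the sg list (target == 0). */
	ret = d40_phy_sg_to_lli(sgl, sg_len, 0,
				desc->lli_src, desc->lli_src_phys,
				chan->src_cfg,
				&cfg->src_info, &cfg->dst_info, 0);
	if (ret < 0)
		return ret;

	/* Destination side stays pinned at the device address. */
	ret = d40_phy_sg_to_lli(sgl, sg_len, dev_addr,
				desc->lli_dst, desc->lli_dst_phys,
				chan->dst_cfg,
				&cfg->dst_info, &cfg->src_info, 0);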
240 | 298 | ||
241 | 299 | ||
242 | void d40_phy_lli_write(void __iomem *virtbase, | ||
243 | u32 phy_chan_num, | ||
244 | struct d40_phy_lli *lli_dst, | ||
245 | struct d40_phy_lli *lli_src) | ||
246 | { | ||
247 | |||
248 | writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE + | ||
249 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG); | ||
250 | writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE + | ||
251 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); | ||
252 | writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE + | ||
253 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR); | ||
254 | writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE + | ||
255 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK); | ||
256 | |||
257 | writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE + | ||
258 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG); | ||
259 | writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE + | ||
260 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); | ||
261 | writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE + | ||
262 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR); | ||
263 | writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE + | ||
264 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK); | ||
265 | |||
266 | } | ||
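The removed d40_phy_lli_write() spelled out the same address computation eight times; whatever takes over this duty is outside this hunk. For reference, the per-channel register it targets is just a base plus a channel-sized stride:

	/* Address of channel register 'reg' for physical channel
	 * 'phy_chan_num': */
	void __iomem *addr = virtbase + D40_DREG_PCBASE
			   + phy_chan_num * D40_DREG_PCDELTA + reg;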
267 | |||
268 | /* DMA logical lli operations */ | 300 | /* DMA logical lli operations */ |
269 | 301 | ||
270 | static void d40_log_lli_link(struct d40_log_lli *lli_dst, | 302 | static void d40_log_lli_link(struct d40_log_lli *lli_dst, |
271 | struct d40_log_lli *lli_src, | 303 | struct d40_log_lli *lli_src, |
272 | int next) | 304 | int next, unsigned int flags) |
273 | { | 305 | { |
306 | bool interrupt = flags & LLI_TERM_INT; | ||
274 | u32 slos = 0; | 307 | u32 slos = 0; |
275 | u32 dlos = 0; | 308 | u32 dlos = 0; |
276 | 309 | ||
277 | if (next != -EINVAL) { | 310 | if (next != -EINVAL) { |
278 | slos = next * 2; | 311 | slos = next * 2; |
279 | dlos = next * 2 + 1; | 312 | dlos = next * 2 + 1; |
280 | } else { | 313 | } |
314 | |||
315 | if (interrupt) { | ||
281 | lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK; | 316 | lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK; |
282 | lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK; | 317 | lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK; |
283 | } | 318 | } |
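The behavioural change here: linking and the terminal-count interrupt are now independent, which is exactly what a cyclic descriptor needs (link back to the start and still interrupt every lap). The LCLA addressing itself is untouched; a link index selects an adjacent (src, dst) slot pair:

	/* e.g. next == 3 selects the fourth LCLA pair: */
	slos = 3 * 2;		/* slot 6, source reload      */
	dlos = 3 * 2 + 1;	/* slot 7, destination reload */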
@@ -292,9 +327,9 @@ static void d40_log_lli_link(struct d40_log_lli *lli_dst, | |||
292 | void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, | 327 | void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, |
293 | struct d40_log_lli *lli_dst, | 328 | struct d40_log_lli *lli_dst, |
294 | struct d40_log_lli *lli_src, | 329 | struct d40_log_lli *lli_src, |
295 | int next) | 330 | int next, unsigned int flags) |
296 | { | 331 | { |
297 | d40_log_lli_link(lli_dst, lli_src, next); | 332 | d40_log_lli_link(lli_dst, lli_src, next, flags); |
298 | 333 | ||
299 | writel(lli_src->lcsp02, &lcpa[0].lcsp0); | 334 | writel(lli_src->lcsp02, &lcpa[0].lcsp0); |
300 | writel(lli_src->lcsp13, &lcpa[0].lcsp1); | 335 | writel(lli_src->lcsp13, &lcpa[0].lcsp1); |
@@ -305,9 +340,9 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, | |||
305 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, | 340 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, |
306 | struct d40_log_lli *lli_dst, | 341 | struct d40_log_lli *lli_dst, |
307 | struct d40_log_lli *lli_src, | 342 | struct d40_log_lli *lli_src, |
308 | int next) | 343 | int next, unsigned int flags) |
309 | { | 344 | { |
310 | d40_log_lli_link(lli_dst, lli_src, next); | 345 | d40_log_lli_link(lli_dst, lli_src, next, flags); |
311 | 346 | ||
312 | writel(lli_src->lcsp02, &lcla[0].lcsp02); | 347 | writel(lli_src->lcsp02, &lcla[0].lcsp02); |
313 | writel(lli_src->lcsp13, &lcla[0].lcsp13); | 348 | writel(lli_src->lcsp13, &lcla[0].lcsp13); |
@@ -315,17 +350,22 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla, | |||
315 | writel(lli_dst->lcsp13, &lcla[1].lcsp13); | 350 | writel(lli_dst->lcsp13, &lcla[1].lcsp13); |
316 | } | 351 | } |
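Both writers now simply forward flags into d40_log_lli_link(). A hedged caller sketch; the descriptor bookkeeping names are assumptions:

	/* Queue the i:th logical LLI pair into LCLA, linking to the
	 * next slot and requesting an interrupt only on the last pair. */
	unsigned int flags = (i == sg_len - 1) ? LLI_TERM_INT : 0;

	d40_log_lli_lcla_write(lcla, &desc->lli_log.dst[i],
			       &desc->lli_log.src[i], next_slot, flags);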
317 | 352 | ||
318 | void d40_log_fill_lli(struct d40_log_lli *lli, | 353 | static void d40_log_fill_lli(struct d40_log_lli *lli, |
319 | dma_addr_t data, u32 data_size, | 354 | dma_addr_t data, u32 data_size, |
320 | u32 reg_cfg, | 355 | u32 reg_cfg, |
321 | u32 data_width, | 356 | u32 data_width, |
322 | bool addr_inc) | 357 | unsigned int flags) |
323 | { | 358 | { |
359 | bool addr_inc = flags & LLI_ADDR_INC; | ||
360 | |||
324 | lli->lcsp13 = reg_cfg; | 361 | lli->lcsp13 = reg_cfg; |
325 | 362 | ||
326 | /* The number of elements to transfer */ | 363 | /* The number of elements to transfer */ |
327 | lli->lcsp02 = ((data_size >> data_width) << | 364 | lli->lcsp02 = ((data_size >> data_width) << |
328 | D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK; | 365 | D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK; |
366 | |||
367 | BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE); | ||
368 | |||
329 | /* 16 LSBs address of the current element */ | 369 | /* 16 LSBs address of the current element */ |
330 | lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK; | 370 | lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK; |
331 | /* 16 MSBs address of the current element */ | 371 | /* 16 MSBs address of the current element */ |
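The new BUG_ON guards the post-shift element count, since lcsp02 only carries a 16-bit field for it alongside the low address bits. A worked example, assuming 32-bit elements (data_width == 2) and the usual 16/16 ECNT/SPTR split:

	/* 0x400 bytes >> 2 == 0x100 elements -- well inside the limit. */
	dma_addr_t buf = 0x80012340;
	u32 lcsp02 = ((0x400 >> 2) << D40_MEM_LCSP0_ECNT_POS)
		     & D40_MEM_LCSP0_ECNT_MASK;
	lcsp02 |= buf & D40_MEM_LCSP0_SPTR_MASK; /* low 16 address bits */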
@@ -336,67 +376,65 @@ void d40_log_fill_lli(struct d40_log_lli *lli, | |||
336 | 376 | ||
337 | } | 377 | } |
338 | 378 | ||
339 | int d40_log_sg_to_dev(struct scatterlist *sg, | 379 | static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, |
340 | int sg_len, | 380 | dma_addr_t addr, |
341 | struct d40_log_lli_bidir *lli, | 381 | int size, |
342 | struct d40_def_lcsp *lcsp, | 382 | u32 lcsp13, /* src or dst*/ |
343 | u32 src_data_width, | 383 | u32 data_width1, |
344 | u32 dst_data_width, | 384 | u32 data_width2, |
345 | enum dma_data_direction direction, | 385 | unsigned int flags) |
346 | dma_addr_t dev_addr) | ||
347 | { | 386 | { |
348 | int total_size = 0; | 387 | bool addr_inc = flags & LLI_ADDR_INC; |
349 | struct scatterlist *current_sg = sg; | 388 | struct d40_log_lli *lli = lli_sg; |
350 | int i; | 389 | int size_rest = size; |
351 | 390 | int size_seg = 0; | |
352 | for_each_sg(sg, current_sg, sg_len, i) { | 391 | |
353 | total_size += sg_dma_len(current_sg); | 392 | do { |
354 | 393 | size_seg = d40_seg_size(size_rest, data_width1, data_width2); | |
355 | if (direction == DMA_TO_DEVICE) { | 394 | size_rest -= size_seg; |
356 | d40_log_fill_lli(&lli->src[i], | 395 | |
357 | sg_phys(current_sg), | 396 | d40_log_fill_lli(lli, |
358 | sg_dma_len(current_sg), | 397 | addr, |
359 | lcsp->lcsp1, src_data_width, | 398 | size_seg, |
360 | true); | 399 | lcsp13, data_width1, |
361 | d40_log_fill_lli(&lli->dst[i], | 400 | flags); |
362 | dev_addr, | 401 | if (addr_inc) |
363 | sg_dma_len(current_sg), | 402 | addr += size_seg; |
364 | lcsp->lcsp3, dst_data_width, | 403 | lli++; |
365 | false); | 404 | } while (size_rest); |
366 | } else { | 405 | |
367 | d40_log_fill_lli(&lli->dst[i], | 406 | return lli; |
368 | sg_phys(current_sg), | ||
369 | sg_dma_len(current_sg), | ||
370 | lcsp->lcsp3, dst_data_width, | ||
371 | true); | ||
372 | d40_log_fill_lli(&lli->src[i], | ||
373 | dev_addr, | ||
374 | sg_dma_len(current_sg), | ||
375 | lcsp->lcsp1, src_data_width, | ||
376 | false); | ||
377 | } | ||
378 | } | ||
379 | return total_size; | ||
380 | } | 407 | } |
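Like its physical twin, d40_log_buf_to_lli() may emit several LLIs for one buffer and returns the advanced pointer, so the sg walker below never needs to know how many segments each entry produced. A hypothetical oversized buffer:

	/* 128 KiB at byte width (0) against a word-wide (2) peer
	 * exceeds the per-LLI element limit and is split; the return
	 * value points one past the last LLI written. */
	struct d40_log_lli *next_lli;

	next_lli = d40_log_buf_to_lli(lli, addr, SZ_128K, lcsp13,
				      0 /* byte */, 2 /* word */,
				      LLI_ADDR_INC);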
381 | 408 | ||
382 | int d40_log_sg_to_lli(struct scatterlist *sg, | 409 | int d40_log_sg_to_lli(struct scatterlist *sg, |
383 | int sg_len, | 410 | int sg_len, |
411 | dma_addr_t dev_addr, | ||
384 | struct d40_log_lli *lli_sg, | 412 | struct d40_log_lli *lli_sg, |
385 | u32 lcsp13, /* src or dst*/ | 413 | u32 lcsp13, /* src or dst*/ |
386 | u32 data_width) | 414 | u32 data_width1, u32 data_width2) |
387 | { | 415 | { |
388 | int total_size = 0; | 416 | int total_size = 0; |
389 | struct scatterlist *current_sg = sg; | 417 | struct scatterlist *current_sg = sg; |
390 | int i; | 418 | int i; |
419 | struct d40_log_lli *lli = lli_sg; | ||
420 | unsigned long flags = 0; | ||
421 | |||
422 | if (!dev_addr) | ||
423 | flags |= LLI_ADDR_INC; | ||
391 | 424 | ||
392 | for_each_sg(sg, current_sg, sg_len, i) { | 425 | for_each_sg(sg, current_sg, sg_len, i) { |
426 | dma_addr_t sg_addr = sg_dma_address(current_sg); | ||
427 | unsigned int len = sg_dma_len(current_sg); | ||
428 | dma_addr_t addr = dev_addr ?: sg_addr; | ||
429 | |||
393 | total_size += sg_dma_len(current_sg); | 430 | total_size += sg_dma_len(current_sg); |
394 | 431 | ||
395 | d40_log_fill_lli(&lli_sg[i], | 432 | lli = d40_log_buf_to_lli(lli, addr, len, |
396 | sg_phys(current_sg), | 433 | lcsp13, |
397 | sg_dma_len(current_sg), | 434 | data_width1, |
398 | lcsp13, data_width, | 435 | data_width2, |
399 | true); | 436 | flags); |
400 | } | 437 | } |
438 | |||
401 | return total_size; | 439 | return total_size; |
402 | } | 440 | } |
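With d40_log_sg_to_dev() gone, direction handling moves up to the caller: one d40_log_sg_to_lli() call per half-channel, with dev_addr == 0 meaning "walk the sg addresses". A hedged mem-to-device sketch; field names are assumed from the rest of the driver:

	/* Source side follows the sg list (dev_addr == 0 sets
	 * LLI_ADDR_INC internally). */
	total = d40_log_sg_to_lli(sgl, sg_len, 0, desc->lli_log.src,
				  chan->log_def.lcsp1,
				  cfg->src_info.data_width,
				  cfg->dst_info.data_width);

	/* Destination side stays pinned at the device FIFO address. */
	total = d40_log_sg_to_lli(sgl, sg_len, dev_addr, desc->lli_log.dst,
				  chan->log_def.lcsp3,
				  cfg->dst_info.data_width,
				  cfg->src_info.data_width);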
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 9e419b90754..195ee65ee7f 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h | |||
@@ -163,6 +163,22 @@ | |||
163 | #define D40_DREG_LCEIS1 0x0B4 | 163 | #define D40_DREG_LCEIS1 0x0B4 |
164 | #define D40_DREG_LCEIS2 0x0B8 | 164 | #define D40_DREG_LCEIS2 0x0B8 |
165 | #define D40_DREG_LCEIS3 0x0BC | 165 | #define D40_DREG_LCEIS3 0x0BC |
166 | #define D40_DREG_PSEG1 0x110 | ||
167 | #define D40_DREG_PSEG2 0x114 | ||
168 | #define D40_DREG_PSEG3 0x118 | ||
169 | #define D40_DREG_PSEG4 0x11C | ||
170 | #define D40_DREG_PCEG1 0x120 | ||
171 | #define D40_DREG_PCEG2 0x124 | ||
172 | #define D40_DREG_PCEG3 0x128 | ||
173 | #define D40_DREG_PCEG4 0x12C | ||
174 | #define D40_DREG_RSEG1 0x130 | ||
175 | #define D40_DREG_RSEG2 0x134 | ||
176 | #define D40_DREG_RSEG3 0x138 | ||
177 | #define D40_DREG_RSEG4 0x13C | ||
178 | #define D40_DREG_RCEG1 0x140 | ||
179 | #define D40_DREG_RCEG2 0x144 | ||
180 | #define D40_DREG_RCEG3 0x148 | ||
181 | #define D40_DREG_RCEG4 0x14C | ||
166 | #define D40_DREG_STFU 0xFC8 | 182 | #define D40_DREG_STFU 0xFC8 |
167 | #define D40_DREG_ICFG 0xFCC | 183 | #define D40_DREG_ICFG 0xFCC |
168 | #define D40_DREG_PERIPHID0 0xFE0 | 184 | #define D40_DREG_PERIPHID0 0xFE0 |
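What the new PSEG/PCEG/RSEG/RCEG banks gate is not visible in this hunk, but the defines fix the layout: four 32-bit registers per bank at stride 4, so a (hypothetical) indexed access pattern is what the driver side can build on:

	/* Register for group 'n' (0..3) of, say, the PSEG bank: */
	u32 pseg_off = D40_DREG_PSEG1 + n * sizeof(u32);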
@@ -277,6 +293,13 @@ struct d40_def_lcsp { | |||
277 | 293 | ||
278 | /* Physical channels */ | 294 | /* Physical channels */ |
279 | 295 | ||
296 | enum d40_lli_flags { | ||
297 | LLI_ADDR_INC = 1 << 0, | ||
298 | LLI_TERM_INT = 1 << 1, | ||
299 | LLI_CYCLIC = 1 << 2, | ||
300 | LLI_LAST_LINK = 1 << 3, | ||
301 | }; | ||
302 | |||
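The flags replace the old sprawl of bool parameters (term_int, addr_inc, is_device) with one bitmask. A typical combination for the final LLI of a cyclic memory-side transfer:

	/* Increment the address per element, wrap the link back to the
	 * first LLI, and raise a terminal-count interrupt. */
	unsigned long flags = LLI_ADDR_INC | LLI_TERM_INT |
			      LLI_CYCLIC | LLI_LAST_LINK;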
280 | void d40_phy_cfg(struct stedma40_chan_cfg *cfg, | 303 | void d40_phy_cfg(struct stedma40_chan_cfg *cfg, |
281 | u32 *src_cfg, | 304 | u32 *src_cfg, |
282 | u32 *dst_cfg, | 305 | u32 *dst_cfg, |
@@ -292,56 +315,27 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, | |||
292 | struct d40_phy_lli *lli, | 315 | struct d40_phy_lli *lli, |
293 | dma_addr_t lli_phys, | 316 | dma_addr_t lli_phys, |
294 | u32 reg_cfg, | 317 | u32 reg_cfg, |
295 | u32 data_width, | 318 | struct stedma40_half_channel_info *info, |
296 | int psize); | 319 | struct stedma40_half_channel_info *otherinfo, |
297 | 320 | unsigned long flags); | |
298 | int d40_phy_fill_lli(struct d40_phy_lli *lli, | ||
299 | dma_addr_t data, | ||
300 | u32 data_size, | ||
301 | int psize, | ||
302 | dma_addr_t next_lli, | ||
303 | u32 reg_cfg, | ||
304 | bool term_int, | ||
305 | u32 data_width, | ||
306 | bool is_device); | ||
307 | |||
308 | void d40_phy_lli_write(void __iomem *virtbase, | ||
309 | u32 phy_chan_num, | ||
310 | struct d40_phy_lli *lli_dst, | ||
311 | struct d40_phy_lli *lli_src); | ||
312 | 321 | ||
313 | /* Logical channels */ | 322 | /* Logical channels */ |
314 | 323 | ||
315 | void d40_log_fill_lli(struct d40_log_lli *lli, | ||
316 | dma_addr_t data, | ||
317 | u32 data_size, | ||
318 | u32 reg_cfg, | ||
319 | u32 data_width, | ||
320 | bool addr_inc); | ||
321 | |||
322 | int d40_log_sg_to_dev(struct scatterlist *sg, | ||
323 | int sg_len, | ||
324 | struct d40_log_lli_bidir *lli, | ||
325 | struct d40_def_lcsp *lcsp, | ||
326 | u32 src_data_width, | ||
327 | u32 dst_data_width, | ||
328 | enum dma_data_direction direction, | ||
329 | dma_addr_t dev_addr); | ||
330 | |||
331 | int d40_log_sg_to_lli(struct scatterlist *sg, | 324 | int d40_log_sg_to_lli(struct scatterlist *sg, |
332 | int sg_len, | 325 | int sg_len, |
326 | dma_addr_t dev_addr, | ||
333 | struct d40_log_lli *lli_sg, | 327 | struct d40_log_lli *lli_sg, |
334 | u32 lcsp13, /* src or dst*/ | 328 | u32 lcsp13, /* src or dst*/ |
335 | u32 data_width); | 329 | u32 data_width1, u32 data_width2); |
336 | 330 | ||
337 | void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, | 331 | void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, |
338 | struct d40_log_lli *lli_dst, | 332 | struct d40_log_lli *lli_dst, |
339 | struct d40_log_lli *lli_src, | 333 | struct d40_log_lli *lli_src, |
340 | int next); | 334 | int next, unsigned int flags); |
341 | 335 | ||
342 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, | 336 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, |
343 | struct d40_log_lli *lli_dst, | 337 | struct d40_log_lli *lli_dst, |
344 | struct d40_log_lli *lli_src, | 338 | struct d40_log_lli *lli_src, |
345 | int next); | 339 | int next, unsigned int flags); |
346 | 340 | ||
347 | #endif /* STE_DMA40_LLI_H */ | 341 | #endif /* STE_DMA40_LLI_H */ |