Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Makefile        |    4
-rw-r--r--  drivers/dma/amba-pl08x.c    | 1040
-rw-r--r--  drivers/dma/fsldma.c        |    4
-rw-r--r--  drivers/dma/imx-dma.c       |    2
-rw-r--r--  drivers/dma/imx-sdma.c      |    4
-rw-r--r--  drivers/dma/intel_mid_dma.c |   41
-rw-r--r--  drivers/dma/ioat/Makefile   |    2
-rw-r--r--  drivers/dma/pch_dma.c       |   15
-rw-r--r--  drivers/dma/ppc4xx/adma.c   |    5
9 files changed, 536 insertions, 581 deletions
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a8a84f4587f2..64b21f5cd740 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,8 +1,8 @@
 ifeq ($(CONFIG_DMADEVICES_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
+ccflags-y += -DDEBUG
 endif
 ifeq ($(CONFIG_DMADEVICES_VDEBUG),y)
-EXTRA_CFLAGS += -DVERBOSE_DEBUG
+ccflags-y += -DVERBOSE_DEBUG
 endif
 
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b605cc9ac3a2..bebc678ed4fc 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -19,7 +19,7 @@
  * this program; if not, write to the Free Software Foundation, Inc., 59
  * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
- * The full GNU General Public License is iin this distribution in the
+ * The full GNU General Public License is in this distribution in the
  * file called COPYING.
  *
  * Documentation: ARM DDI 0196G == PL080
@@ -53,7 +53,23 @@
  *
  * ASSUMES default (little) endianness for DMA transfers
  *
- * Only DMAC flow control is implemented
+ * The PL08x has two flow control settings:
+ *  - DMAC flow control: the transfer size defines the number of transfers
+ *    which occur for the current LLI entry, and the DMAC raises TC at the
+ *    end of every LLI entry.  Observed behaviour shows the DMAC listening
+ *    to both the BREQ and SREQ signals (contrary to documented),
+ *    transferring data if either is active.  The LBREQ and LSREQ signals
+ *    are ignored.
+ *
+ *  - Peripheral flow control: the transfer size is ignored (and should be
+ *    zero).  The data is transferred from the current LLI entry, until
+ *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
+ *    will then move to the next LLI entry.
+ *
+ * Only the former works sanely with scatter lists, so we only implement
+ * the DMAC flow control method.  However, peripherals which use the LBREQ
+ * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
+ * these hardware restrictions prevents them from using scatter DMA.
  *
  * Global TODO:
  * - Break out common code from arch/arm/mach-s3c64xx and share
@@ -61,50 +77,41 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/dmapool.h>
-#include <linux/amba/bus.h>
 #include <linux/dmaengine.h>
+#include <linux/amba/bus.h>
 #include <linux/amba/pl08x.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 
 #include <asm/hardware/pl080.h>
-#include <asm/dma.h>
-#include <asm/mach/dma.h>
-#include <asm/atomic.h>
-#include <asm/processor.h>
-#include <asm/cacheflush.h>
 
 #define DRIVER_NAME	"pl08xdmac"
 
 /**
  * struct vendor_data - vendor-specific config parameters
- * for PL08x derivates
- * @name: the name of this specific variant
+ * for PL08x derivatives
  * @channels: the number of channels available in this variant
  * @dualmaster: whether this version supports dual AHB masters
  * or not.
  */
 struct vendor_data {
-	char *name;
 	u8 channels;
 	bool dualmaster;
 };
 
 /*
  * PL08X private data structures
- * An LLI struct - see pl08x TRM
- * Note that next uses bit[0] as a bus bit,
- * start & end do not - their bus bit info
- * is in cctl
+ * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
+ * start & end do not - their bus bit info is in cctl.  Also note that these
+ * are fixed 32-bit quantities.
  */
-struct lli {
-	dma_addr_t src;
-	dma_addr_t dst;
-	dma_addr_t next;
+struct pl08x_lli {
+	u32 src;
+	u32 dst;
+	u32 lli;
 	u32 cctl;
 };
 
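Aside (not part of the patch): the new lli field chains the descriptors, which live in one contiguous array, and bit 0 of the pointer selects the AHB master used for LLI fetches. A minimal user-space sketch of that layout, with a made-up bus address:

#include <stdint.h>
#include <stdio.h>

#define PL080_LLI_LM_AHB2	(1 << 0)	/* bit 0 of the LLI register selects AHB2 */

struct pl08x_lli {
	uint32_t src;
	uint32_t dst;
	uint32_t lli;	/* bus address of next LLI, bit[0] = master select */
	uint32_t cctl;
};

int main(void)
{
	struct pl08x_lli llis[2] = {{0}};
	uint32_t llis_bus = 0x40000000;	/* hypothetical DMA address of llis[] */

	/* entry 0 points at entry 1, fetched on AHB2 in this example */
	llis[0].lli = (llis_bus + sizeof(struct pl08x_lli)) | PL080_LLI_LM_AHB2;
	llis[1].lli = 0;	/* a zero pointer terminates the chain */

	printf("lli[0].lli = 0x%08x\n", llis[0].lli);
	return 0;
}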
@@ -119,6 +126,8 @@ struct lli {
  * @phy_chans: array of data for the physical channels
  * @pool: a pool for the LLI descriptors
  * @pool_ctr: counter of LLIs in the pool
+ * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches
+ * @mem_buses: set to indicate memory transfers on AHB2.
  * @lock: a spinlock for this struct
  */
 struct pl08x_driver_data {
@@ -126,11 +135,13 @@ struct pl08x_driver_data {
 	struct dma_device memcpy;
 	void __iomem *base;
 	struct amba_device *adev;
-	struct vendor_data *vd;
+	const struct vendor_data *vd;
 	struct pl08x_platform_data *pd;
 	struct pl08x_phy_chan *phy_chans;
 	struct dma_pool *pool;
 	int pool_ctr;
+	u8 lli_buses;
+	u8 mem_buses;
 	spinlock_t lock;
 };
 
@@ -152,9 +163,9 @@ struct pl08x_driver_data {
 /* Size (bytes) of each LLI buffer allocated for one transfer */
 # define PL08X_LLI_TSFR_SIZE	0x2000
 
-/* Maximimum times we call dma_pool_alloc on this pool without freeing */
+/* Maximum times we call dma_pool_alloc on this pool without freeing */
 #define PL08X_MAX_ALLOCS	0x40
-#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct lli))
+#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
 #define PL08X_ALIGN		8
 
 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
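A quick sanity check of that constant (outside the patch): each pl08x_lli is four u32s, i.e. 16 bytes, so the 0x2000-byte buffer holds at most 512 LLI entries per transfer:

#include <assert.h>
#include <stdint.h>

struct pl08x_lli { uint32_t src, dst, lli, cctl; };

#define PL08X_LLI_TSFR_SIZE	0x2000
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE / sizeof(struct pl08x_lli))

int main(void)
{
	assert(sizeof(struct pl08x_lli) == 16);
	assert(MAX_NUM_TSFR_LLIS == 512);
	return 0;
}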
@@ -162,6 +173,11 @@ static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
 	return container_of(chan, struct pl08x_dma_chan, chan);
 }
 
+static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
+{
+	return container_of(tx, struct pl08x_txd, tx);
+}
+
 /*
  * Physical channel handling
  */
@@ -177,88 +193,47 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
 
 /*
  * Set the initial DMA register values i.e. those for the first LLI
- * The next lli pointer and the configuration interrupt bit have
- * been set when the LLIs were constructed
+ * The next LLI pointer and the configuration interrupt bit have
+ * been set when the LLIs were constructed.  Poke them into the hardware
+ * and start the transfer.
  */
-static void pl08x_set_cregs(struct pl08x_driver_data *pl08x,
-			    struct pl08x_phy_chan *ch)
-{
-	/* Wait for channel inactive */
-	while (pl08x_phy_channel_busy(ch))
-		;
-
-	dev_vdbg(&pl08x->adev->dev,
-		"WRITE channel %d: csrc=%08x, cdst=%08x, "
-		"cctl=%08x, clli=%08x, ccfg=%08x\n",
-		ch->id,
-		ch->csrc,
-		ch->cdst,
-		ch->cctl,
-		ch->clli,
-		ch->ccfg);
-
-	writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
-	writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
-	writel(ch->clli, ch->base + PL080_CH_LLI);
-	writel(ch->cctl, ch->base + PL080_CH_CONTROL);
-	writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
-}
-
-static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
+static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
+	struct pl08x_txd *txd)
 {
-	struct pl08x_channel_data *cd = plchan->cd;
+	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_phy_chan *phychan = plchan->phychan;
-	struct pl08x_txd *txd = plchan->at;
-
-	/* Copy the basic control register calculated at transfer config */
-	phychan->csrc = txd->csrc;
-	phychan->cdst = txd->cdst;
-	phychan->clli = txd->clli;
-	phychan->cctl = txd->cctl;
-
-	/* Assign the signal to the proper control registers */
-	phychan->ccfg = cd->ccfg;
-	phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
-	phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
-	/* If it wasn't set from AMBA, ignore it */
-	if (txd->direction == DMA_TO_DEVICE)
-		/* Select signal as destination */
-		phychan->ccfg |=
-			(phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
-	else if (txd->direction == DMA_FROM_DEVICE)
-		/* Select signal as source */
-		phychan->ccfg |=
-			(phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
-	/* Always enable error interrupts */
-	phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
-	/* Always enable terminal interrupts */
-	phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
-}
-
-/*
- * Enable the DMA channel
- * Assumes all other configuration bits have been set
- * as desired before this code is called
- */
-static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
-				  struct pl08x_phy_chan *ch)
-{
+	struct pl08x_lli *lli = &txd->llis_va[0];
 	u32 val;
 
-	/*
-	 * Do not access config register until channel shows as disabled
-	 */
-	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
-		;
+	plchan->at = txd;
 
-	/*
-	 * Do not access config register until channel shows as inactive
-	 */
-	val = readl(ch->base + PL080_CH_CONFIG);
+	/* Wait for channel inactive */
+	while (pl08x_phy_channel_busy(phychan))
+		cpu_relax();
+
+	dev_vdbg(&pl08x->adev->dev,
+		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
+		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
+		txd->ccfg);
+
+	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
+	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
+	writel(lli->lli, phychan->base + PL080_CH_LLI);
+	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
+	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
+
+	/* Enable the DMA channel */
+	/* Do not access config register until channel shows as disabled */
+	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
+		cpu_relax();
+
+	/* Do not access config register until channel shows as inactive */
+	val = readl(phychan->base + PL080_CH_CONFIG);
 	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
-		val = readl(ch->base + PL080_CH_CONFIG);
+		val = readl(phychan->base + PL080_CH_CONFIG);
 
-	writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG);
+	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
 }
 
 /*
@@ -282,7 +257,7 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 
 	/* Wait for channel inactive */
 	while (pl08x_phy_channel_busy(ch))
-		;
+		cpu_relax();
 }
 
 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -333,54 +308,56 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_phy_chan *ch;
-	struct pl08x_txd *txdi = NULL;
 	struct pl08x_txd *txd;
 	unsigned long flags;
-	u32 bytes = 0;
+	size_t bytes = 0;
 
 	spin_lock_irqsave(&plchan->lock, flags);
-
 	ch = plchan->phychan;
 	txd = plchan->at;
 
 	/*
-	 * Next follow the LLIs to get the number of pending bytes in the
-	 * currently active transaction.
+	 * Follow the LLIs to get the number of remaining
+	 * bytes in the currently active transaction.
 	 */
 	if (ch && txd) {
-		struct lli *llis_va = txd->llis_va;
-		struct lli *llis_bus = (struct lli *) txd->llis_bus;
-		u32 clli = readl(ch->base + PL080_CH_LLI);
+		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
 
-		/* First get the bytes in the current active LLI */
+		/* First get the remaining bytes in the active transfer */
 		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
 
 		if (clli) {
-			int i = 0;
+			struct pl08x_lli *llis_va = txd->llis_va;
+			dma_addr_t llis_bus = txd->llis_bus;
+			int index;
+
+			BUG_ON(clli < llis_bus || clli >= llis_bus +
+				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
+
+			/*
+			 * Locate the next LLI - as this is an array,
+			 * it's simple maths to find.
+			 */
+			index = (clli - llis_bus) / sizeof(struct pl08x_lli);
 
-			/* Forward to the LLI pointed to by clli */
-			while ((clli != (u32) &(llis_bus[i])) &&
-			       (i < MAX_NUM_TSFR_LLIS))
-				i++;
+			for (; index < MAX_NUM_TSFR_LLIS; index++) {
+				bytes += get_bytes_in_cctl(llis_va[index].cctl);
 
-			while (clli) {
-				bytes += get_bytes_in_cctl(llis_va[i].cctl);
 				/*
-				 * A clli of 0x00000000 will terminate the
-				 * LLI list
+				 * A LLI pointer of 0 terminates the LLI list
 				 */
-				clli = llis_va[i].next;
-				i++;
+				if (!llis_va[index].lli)
+					break;
 			}
 		}
 	}
 
 	/* Sum up all queued transactions */
-	if (!list_empty(&plchan->desc_list)) {
-		list_for_each_entry(txdi, &plchan->desc_list, node) {
+	if (!list_empty(&plchan->pend_list)) {
+		struct pl08x_txd *txdi;
+		list_for_each_entry(txdi, &plchan->pend_list, node) {
 			bytes += txdi->len;
 		}
-
 	}
 
 	spin_unlock_irqrestore(&plchan->lock, flags);
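The replacement loop above relies on the LLIs living in one contiguous array, so the in-flight entry can be found by pointer arithmetic instead of walking the chain. A standalone sketch of just that index calculation (the addresses are invented):

#include <assert.h>
#include <stdint.h>

struct pl08x_lli { uint32_t src, dst, lli, cctl; };

int main(void)
{
	uint32_t llis_bus = 0x40000000;	/* hypothetical base DMA address */
	uint32_t clli = llis_bus + 3 * sizeof(struct pl08x_lli);

	/* same maths as the driver: offset / element size */
	int index = (clli - llis_bus) / sizeof(struct pl08x_lli);

	assert(index == 3);
	return 0;
}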
@@ -465,11 +442,11 @@ static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
 }
 
 static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
-				  u32 tsize)
+				  size_t tsize)
 {
 	u32 retbits = cctl;
 
-	/* Remove all src, dst and transfersize bits */
+	/* Remove all src, dst and transfer size bits */
 	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
 	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
 	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
@@ -509,38 +486,45 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
 	return retbits;
 }
 
+struct pl08x_lli_build_data {
+	struct pl08x_txd *txd;
+	struct pl08x_driver_data *pl08x;
+	struct pl08x_bus_data srcbus;
+	struct pl08x_bus_data dstbus;
+	size_t remainder;
+};
+
 /*
  * Autoselect a master bus to use for the transfer
  * this prefers the destination bus if both available
  * if fixed address on one bus the other will be chosen
  */
-void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
-	struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
-	struct pl08x_bus_data **sbus, u32 cctl)
+static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
+	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
 {
 	if (!(cctl & PL080_CONTROL_DST_INCR)) {
-		*mbus = src_bus;
-		*sbus = dst_bus;
+		*mbus = &bd->srcbus;
+		*sbus = &bd->dstbus;
 	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
-		*mbus = dst_bus;
-		*sbus = src_bus;
+		*mbus = &bd->dstbus;
+		*sbus = &bd->srcbus;
 	} else {
-		if (dst_bus->buswidth == 4) {
-			*mbus = dst_bus;
-			*sbus = src_bus;
-		} else if (src_bus->buswidth == 4) {
-			*mbus = src_bus;
-			*sbus = dst_bus;
-		} else if (dst_bus->buswidth == 2) {
-			*mbus = dst_bus;
-			*sbus = src_bus;
-		} else if (src_bus->buswidth == 2) {
-			*mbus = src_bus;
-			*sbus = dst_bus;
+		if (bd->dstbus.buswidth == 4) {
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
+		} else if (bd->srcbus.buswidth == 4) {
+			*mbus = &bd->srcbus;
+			*sbus = &bd->dstbus;
+		} else if (bd->dstbus.buswidth == 2) {
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
+		} else if (bd->srcbus.buswidth == 2) {
+			*mbus = &bd->srcbus;
+			*sbus = &bd->dstbus;
 		} else {
-			/* src_bus->buswidth == 1 */
-			*mbus = dst_bus;
-			*sbus = src_bus;
+			/* bd->srcbus.buswidth == 1 */
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
 		}
 	}
 }
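The selection logic above reduces to a simple rule: a bus with a fixed (non-incrementing) address must be the slave; otherwise the wider bus becomes the master, with the destination winning ties. A reduced model of that rule (not the driver code), checkable in isolation:

#include <assert.h>
#include <stdbool.h>

/* Returns true if the destination bus should be the master */
static bool dst_is_master(bool src_incr, bool dst_incr,
			  int src_width, int dst_width)
{
	if (!dst_incr)
		return false;	/* fixed destination: master is the source */
	if (!src_incr)
		return true;	/* fixed source: master is the destination */
	/* Both increment: wider bus wins, destination on a tie */
	return dst_width >= src_width;
}

int main(void)
{
	assert(!dst_is_master(true, false, 4, 4));	/* peripheral dst fixed */
	assert(dst_is_master(true, true, 2, 4));	/* wider dst preferred */
	assert(dst_is_master(true, true, 4, 4));	/* tie: prefer dst */
	return 0;
}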
@@ -549,55 +533,41 @@ void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
  * Fills in one LLI for a certain transfer descriptor
  * and advance the counter
  */
-int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
-		struct pl08x_txd *txd, int num_llis, int len,
-		u32 cctl, u32 *remainder)
+static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
+	int num_llis, int len, u32 cctl)
 {
-	struct lli *llis_va = txd->llis_va;
-	struct lli *llis_bus = (struct lli *) txd->llis_bus;
+	struct pl08x_lli *llis_va = bd->txd->llis_va;
+	dma_addr_t llis_bus = bd->txd->llis_bus;
 
 	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
 
 	llis_va[num_llis].cctl = cctl;
-	llis_va[num_llis].src = txd->srcbus.addr;
-	llis_va[num_llis].dst = txd->dstbus.addr;
-
-	/*
-	 * On versions with dual masters, you can optionally AND on
-	 * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
-	 * in new LLIs with that controller, but we always try to
-	 * choose AHB1 to point into memory. The idea is to have AHB2
-	 * fixed on the peripheral and AHB1 messing around in the
-	 * memory. So we don't manipulate this bit currently.
-	 */
-
-	llis_va[num_llis].next =
-		(dma_addr_t)((u32) &(llis_bus[num_llis + 1]));
+	llis_va[num_llis].src = bd->srcbus.addr;
+	llis_va[num_llis].dst = bd->dstbus.addr;
+	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
+	if (bd->pl08x->lli_buses & PL08X_AHB2)
+		llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
 
 	if (cctl & PL080_CONTROL_SRC_INCR)
-		txd->srcbus.addr += len;
+		bd->srcbus.addr += len;
 	if (cctl & PL080_CONTROL_DST_INCR)
-		txd->dstbus.addr += len;
+		bd->dstbus.addr += len;
 
-	*remainder -= len;
+	BUG_ON(bd->remainder < len);
 
-	return num_llis + 1;
+	bd->remainder -= len;
 }
 
 /*
- * Return number of bytes to fill to boundary, or len
+ * Return number of bytes to fill to boundary, or len.
+ * This calculation works for any value of addr.
  */
-static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
+static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
 {
-	u32 boundary;
+	size_t boundary_len = PL08X_BOUNDARY_SIZE -
+		(addr & (PL08X_BOUNDARY_SIZE - 1));
 
-	boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
-		<< PL08X_BOUNDARY_SHIFT;
-
-	if (boundary < addr + len)
-		return boundary - addr;
-	else
-		return len;
+	return min(boundary_len, len);
 }
 
 /*
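The rewritten pl08x_pre_boundary() collapses the old shift-and-compare into a single mask. A worked check of the new form, assuming PL08X_BOUNDARY_SIZE is 1 << PL08X_BOUNDARY_SHIFT = 0x400 as in this driver:

#include <assert.h>
#include <stddef.h>

#define PL08X_BOUNDARY_SIZE	0x400
#define MIN(a, b)		((a) < (b) ? (a) : (b))

static size_t pl08x_pre_boundary(unsigned int addr, size_t len)
{
	/* bytes left before the next boundary; a full window if aligned */
	size_t boundary_len = PL08X_BOUNDARY_SIZE -
		(addr & (PL08X_BOUNDARY_SIZE - 1));

	return MIN(boundary_len, len);
}

int main(void)
{
	assert(pl08x_pre_boundary(0x3F0, 0x100) == 0x10);	/* stops at 0x400 */
	assert(pl08x_pre_boundary(0x400, 0x100) == 0x100);	/* already aligned */
	return 0;
}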
@@ -608,20 +578,13 @@ static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
 static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 			      struct pl08x_txd *txd)
 {
-	struct pl08x_channel_data *cd = txd->cd;
 	struct pl08x_bus_data *mbus, *sbus;
-	u32 remainder;
+	struct pl08x_lli_build_data bd;
 	int num_llis = 0;
 	u32 cctl;
-	int max_bytes_per_lli;
-	int total_bytes = 0;
-	struct lli *llis_va;
-	struct lli *llis_bus;
-
-	if (!txd) {
-		dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
-		return 0;
-	}
+	size_t max_bytes_per_lli;
+	size_t total_bytes = 0;
+	struct pl08x_lli *llis_va;
 
 	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
 				      &txd->llis_bus);
632 | 595 | ||
633 | pl08x->pool_ctr++; | 596 | pl08x->pool_ctr++; |
634 | 597 | ||
635 | /* | 598 | /* Get the default CCTL */ |
636 | * Initialize bus values for this transfer | 599 | cctl = txd->cctl; |
637 | * from the passed optimal values | ||
638 | */ | ||
639 | if (!cd) { | ||
640 | dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__); | ||
641 | return 0; | ||
642 | } | ||
643 | 600 | ||
644 | /* Get the default CCTL from the platform data */ | 601 | bd.txd = txd; |
645 | cctl = cd->cctl; | 602 | bd.pl08x = pl08x; |
646 | 603 | bd.srcbus.addr = txd->src_addr; | |
647 | /* | 604 | bd.dstbus.addr = txd->dst_addr; |
648 | * On the PL080 we have two bus masters and we | ||
649 | * should select one for source and one for | ||
650 | * destination. We try to use AHB2 for the | ||
651 | * bus which does not increment (typically the | ||
652 | * peripheral) else we just choose something. | ||
653 | */ | ||
654 | cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); | ||
655 | if (pl08x->vd->dualmaster) { | ||
656 | if (cctl & PL080_CONTROL_SRC_INCR) | ||
657 | /* Source increments, use AHB2 for destination */ | ||
658 | cctl |= PL080_CONTROL_DST_AHB2; | ||
659 | else if (cctl & PL080_CONTROL_DST_INCR) | ||
660 | /* Destination increments, use AHB2 for source */ | ||
661 | cctl |= PL080_CONTROL_SRC_AHB2; | ||
662 | else | ||
663 | /* Just pick something, source AHB1 dest AHB2 */ | ||
664 | cctl |= PL080_CONTROL_DST_AHB2; | ||
665 | } | ||
666 | 605 | ||
667 | /* Find maximum width of the source bus */ | 606 | /* Find maximum width of the source bus */ |
668 | txd->srcbus.maxwidth = | 607 | bd.srcbus.maxwidth = |
669 | pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> | 608 | pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> |
670 | PL080_CONTROL_SWIDTH_SHIFT); | 609 | PL080_CONTROL_SWIDTH_SHIFT); |
671 | 610 | ||
672 | /* Find maximum width of the destination bus */ | 611 | /* Find maximum width of the destination bus */ |
673 | txd->dstbus.maxwidth = | 612 | bd.dstbus.maxwidth = |
674 | pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> | 613 | pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> |
675 | PL080_CONTROL_DWIDTH_SHIFT); | 614 | PL080_CONTROL_DWIDTH_SHIFT); |
676 | 615 | ||
677 | /* Set up the bus widths to the maximum */ | 616 | /* Set up the bus widths to the maximum */ |
678 | txd->srcbus.buswidth = txd->srcbus.maxwidth; | 617 | bd.srcbus.buswidth = bd.srcbus.maxwidth; |
679 | txd->dstbus.buswidth = txd->dstbus.maxwidth; | 618 | bd.dstbus.buswidth = bd.dstbus.maxwidth; |
680 | dev_vdbg(&pl08x->adev->dev, | 619 | dev_vdbg(&pl08x->adev->dev, |
681 | "%s source bus is %d bytes wide, dest bus is %d bytes wide\n", | 620 | "%s source bus is %d bytes wide, dest bus is %d bytes wide\n", |
682 | __func__, txd->srcbus.buswidth, txd->dstbus.buswidth); | 621 | __func__, bd.srcbus.buswidth, bd.dstbus.buswidth); |
683 | 622 | ||
684 | 623 | ||
685 | /* | 624 | /* |
686 | * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) | 625 | * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) |
687 | */ | 626 | */ |
688 | max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) * | 627 | max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * |
689 | PL080_CONTROL_TRANSFER_SIZE_MASK; | 628 | PL080_CONTROL_TRANSFER_SIZE_MASK; |
690 | dev_vdbg(&pl08x->adev->dev, | 629 | dev_vdbg(&pl08x->adev->dev, |
691 | "%s max bytes per lli = %d\n", | 630 | "%s max bytes per lli = %zu\n", |
692 | __func__, max_bytes_per_lli); | 631 | __func__, max_bytes_per_lli); |
693 | 632 | ||
694 | /* We need to count this down to zero */ | 633 | /* We need to count this down to zero */ |
695 | remainder = txd->len; | 634 | bd.remainder = txd->len; |
696 | dev_vdbg(&pl08x->adev->dev, | 635 | dev_vdbg(&pl08x->adev->dev, |
697 | "%s remainder = %d\n", | 636 | "%s remainder = %zu\n", |
698 | __func__, remainder); | 637 | __func__, bd.remainder); |
699 | 638 | ||
700 | /* | 639 | /* |
701 | * Choose bus to align to | 640 | * Choose bus to align to |
702 | * - prefers destination bus if both available | 641 | * - prefers destination bus if both available |
703 | * - if fixed address on one bus chooses other | 642 | * - if fixed address on one bus chooses other |
704 | * - modifies cctl to choose an apropriate master | 643 | * - modifies cctl to choose an appropriate master |
705 | */ | ||
706 | pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus, | ||
707 | &mbus, &sbus, cctl); | ||
708 | |||
709 | |||
710 | /* | ||
711 | * The lowest bit of the LLI register | ||
712 | * is also used to indicate which master to | ||
713 | * use for reading the LLIs. | ||
714 | */ | 644 | */ |
645 | pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); | ||
715 | 646 | ||
716 | if (txd->len < mbus->buswidth) { | 647 | if (txd->len < mbus->buswidth) { |
717 | /* | 648 | /* |
718 | * Less than a bus width available | 649 | * Less than a bus width available |
719 | * - send as single bytes | 650 | * - send as single bytes |
720 | */ | 651 | */ |
721 | while (remainder) { | 652 | while (bd.remainder) { |
722 | dev_vdbg(&pl08x->adev->dev, | 653 | dev_vdbg(&pl08x->adev->dev, |
723 | "%s single byte LLIs for a transfer of " | 654 | "%s single byte LLIs for a transfer of " |
724 | "less than a bus width (remain %08x)\n", | 655 | "less than a bus width (remain 0x%08x)\n", |
725 | __func__, remainder); | 656 | __func__, bd.remainder); |
726 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | 657 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); |
727 | num_llis = | 658 | pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); |
728 | pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1, | ||
729 | cctl, &remainder); | ||
730 | total_bytes++; | 659 | total_bytes++; |
731 | } | 660 | } |
732 | } else { | 661 | } else { |
@@ -737,11 +666,10 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		while ((mbus->addr) % (mbus->buswidth)) {
 			dev_vdbg(&pl08x->adev->dev,
 				"%s adjustment lli for less than bus width "
-				 "(remain %08x)\n",
-				 __func__, remainder);
+				 "(remain 0x%08x)\n",
+				 __func__, bd.remainder);
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			num_llis = pl08x_fill_lli_for_desc
-				(pl08x, txd, num_llis, 1, cctl, &remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 
@@ -761,53 +689,43 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		 * Make largest possible LLIs until less than one bus
 		 * width left
 		 */
-		while (remainder > (mbus->buswidth - 1)) {
-			int lli_len, target_len;
-			int tsize;
-			int odd_bytes;
+		while (bd.remainder > (mbus->buswidth - 1)) {
+			size_t lli_len, target_len, tsize, odd_bytes;
 
 			/*
 			 * If enough left try to send max possible,
 			 * otherwise try to send the remainder
 			 */
-			target_len = remainder;
-			if (remainder > max_bytes_per_lli)
-				target_len = max_bytes_per_lli;
+			target_len = min(bd.remainder, max_bytes_per_lli);
 
 			/*
-			 * Set bus lengths for incrementing busses
-			 * to number of bytes which fill to next memory
-			 * boundary
+			 * Set bus lengths for incrementing buses to the
+			 * number of bytes which fill to next memory boundary,
+			 * limiting on the target length calculated above.
 			 */
 			if (cctl & PL080_CONTROL_SRC_INCR)
-				txd->srcbus.fill_bytes =
-					pl08x_pre_boundary(
-						txd->srcbus.addr,
-						remainder);
+				bd.srcbus.fill_bytes =
+					pl08x_pre_boundary(bd.srcbus.addr,
+						target_len);
 			else
-				txd->srcbus.fill_bytes =
-					max_bytes_per_lli;
+				bd.srcbus.fill_bytes = target_len;
 
 			if (cctl & PL080_CONTROL_DST_INCR)
-				txd->dstbus.fill_bytes =
-					pl08x_pre_boundary(
-						txd->dstbus.addr,
-						remainder);
+				bd.dstbus.fill_bytes =
+					pl08x_pre_boundary(bd.dstbus.addr,
+						target_len);
 			else
-				txd->dstbus.fill_bytes =
-					max_bytes_per_lli;
+				bd.dstbus.fill_bytes = target_len;
 
-			/*
-			 * Find the nearest
-			 */
-			lli_len	= min(txd->srcbus.fill_bytes,
-				txd->dstbus.fill_bytes);
+			/* Find the nearest */
+			lli_len	= min(bd.srcbus.fill_bytes,
+				      bd.dstbus.fill_bytes);
 
-			BUG_ON(lli_len > remainder);
+			BUG_ON(lli_len > bd.remainder);
 
 			if (lli_len <= 0) {
 				dev_err(&pl08x->adev->dev,
-					"%s lli_len is %d, <= 0\n",
+					"%s lli_len is %zu, <= 0\n",
 					__func__, lli_len);
 				return 0;
 			}
@@ -826,7 +744,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 			/*
 			 * So now we know how many bytes to transfer
 			 * to get to the nearest boundary
-			 * The next lli will past the boundary
+			 * The next LLI will past the boundary
 			 * - however we may be working to a boundary
 			 * on the slave bus
 			 * We need to ensure the master stays aligned
@@ -855,21 +773,20 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
 			if (target_len != lli_len) {
 				dev_vdbg(&pl08x->adev->dev,
-				"%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n",
+				"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
 					__func__, target_len, lli_len, txd->len);
 			}
 
 			cctl = pl08x_cctl_bits(cctl,
-					       txd->srcbus.buswidth,
-					       txd->dstbus.buswidth,
+					       bd.srcbus.buswidth,
+					       bd.dstbus.buswidth,
 					       tsize);
 
 			dev_vdbg(&pl08x->adev->dev,
-				"%s fill lli with single lli chunk of size %08x (remainder %08x)\n",
-				__func__, lli_len, remainder);
-			num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
-					num_llis, lli_len, cctl,
-					&remainder);
+				"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
+				__func__, lli_len, bd.remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++,
+					lli_len, cctl);
 			total_bytes += lli_len;
 		}
 
@@ -881,15 +798,13 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 			 */
 			int j;
 			for (j = 0; (j < mbus->buswidth)
-					&& (remainder); j++) {
+					&& (bd.remainder); j++) {
 				cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
 				dev_vdbg(&pl08x->adev->dev,
-					"%s align with boundardy, single byte (remain %08x)\n",
-					__func__, remainder);
-				num_llis =
-					pl08x_fill_lli_for_desc(pl08x,
-						txd, num_llis, 1,
-						cctl, &remainder);
+					"%s align with boundary, single byte (remain 0x%08zx)\n",
+					__func__, bd.remainder);
+				pl08x_fill_lli_for_desc(&bd,
+					num_llis++, 1, cctl);
 				total_bytes++;
 			}
 		}
@@ -898,25 +813,18 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		/*
 		 * Send any odd bytes
 		 */
-		if (remainder < 0) {
-			dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
-				__func__, remainder);
-			return 0;
-		}
-
-		while (remainder) {
+		while (bd.remainder) {
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
 			dev_vdbg(&pl08x->adev->dev,
-				"%s align with boundardy, single odd byte (remain %d)\n",
-				__func__, remainder);
-			num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
-					1, cctl, &remainder);
+				"%s align with boundary, single odd byte (remain %zu)\n",
+				__func__, bd.remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 	}
 	if (total_bytes != txd->len) {
 		dev_err(&pl08x->adev->dev,
-			"%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n",
+			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
 			__func__, total_bytes, txd->len);
 		return 0;
 	}
927 | __func__, (u32) MAX_NUM_TSFR_LLIS); | 835 | __func__, (u32) MAX_NUM_TSFR_LLIS); |
928 | return 0; | 836 | return 0; |
929 | } | 837 | } |
838 | |||
839 | llis_va = txd->llis_va; | ||
930 | /* | 840 | /* |
931 | * Decide whether this is a loop or a terminated transfer | 841 | * The final LLI terminates the LLI. |
932 | */ | 842 | */ |
933 | llis_va = txd->llis_va; | 843 | llis_va[num_llis - 1].lli = 0; |
934 | llis_bus = (struct lli *) txd->llis_bus; | 844 | /* |
935 | 845 | * The final LLI element shall also fire an interrupt | |
936 | if (cd->circular_buffer) { | 846 | */ |
937 | /* | 847 | llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN; |
938 | * Loop the circular buffer so that the next element | ||
939 | * points back to the beginning of the LLI. | ||
940 | */ | ||
941 | llis_va[num_llis - 1].next = | ||
942 | (dma_addr_t)((unsigned int)&(llis_bus[0])); | ||
943 | } else { | ||
944 | /* | ||
945 | * On non-circular buffers, the final LLI terminates | ||
946 | * the LLI. | ||
947 | */ | ||
948 | llis_va[num_llis - 1].next = 0; | ||
949 | /* | ||
950 | * The final LLI element shall also fire an interrupt | ||
951 | */ | ||
952 | llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN; | ||
953 | } | ||
954 | |||
955 | /* Now store the channel register values */ | ||
956 | txd->csrc = llis_va[0].src; | ||
957 | txd->cdst = llis_va[0].dst; | ||
958 | if (num_llis > 1) | ||
959 | txd->clli = llis_va[0].next; | ||
960 | else | ||
961 | txd->clli = 0; | ||
962 | |||
963 | txd->cctl = llis_va[0].cctl; | ||
964 | /* ccfg will be set at physical channel allocation time */ | ||
965 | 848 | ||
966 | #ifdef VERBOSE_DEBUG | 849 | #ifdef VERBOSE_DEBUG |
967 | { | 850 | { |
@@ -969,13 +852,13 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
969 | 852 | ||
970 | for (i = 0; i < num_llis; i++) { | 853 | for (i = 0; i < num_llis; i++) { |
971 | dev_vdbg(&pl08x->adev->dev, | 854 | dev_vdbg(&pl08x->adev->dev, |
972 | "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n", | 855 | "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n", |
973 | i, | 856 | i, |
974 | &llis_va[i], | 857 | &llis_va[i], |
975 | llis_va[i].src, | 858 | llis_va[i].src, |
976 | llis_va[i].dst, | 859 | llis_va[i].dst, |
977 | llis_va[i].cctl, | 860 | llis_va[i].cctl, |
978 | llis_va[i].next | 861 | llis_va[i].lli |
979 | ); | 862 | ); |
980 | } | 863 | } |
981 | } | 864 | } |
@@ -988,14 +871,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 			   struct pl08x_txd *txd)
 {
-	if (!txd)
-		dev_err(&pl08x->adev->dev,
-			"%s no descriptor to free\n",
-			__func__);
-
 	/* Free the LLI */
-	dma_pool_free(pl08x->pool, txd->llis_va,
-		      txd->llis_bus);
+	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
 
 	pl08x->pool_ctr--;
 
@@ -1008,9 +885,9 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 	struct pl08x_txd *txdi = NULL;
 	struct pl08x_txd *next;
 
-	if (!list_empty(&plchan->desc_list)) {
+	if (!list_empty(&plchan->pend_list)) {
 		list_for_each_entry_safe(txdi,
-					 next, &plchan->desc_list, node) {
+					 next, &plchan->pend_list, node) {
 			list_del(&txdi->node);
 			pl08x_free_txd(pl08x, txdi);
 		}
@@ -1069,6 +946,12 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 			return -EBUSY;
 		}
 		ch->signal = ret;
+
+		/* Assign the flow control signal to this channel */
+		if (txd->direction == DMA_TO_DEVICE)
+			txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+		else if (txd->direction == DMA_FROM_DEVICE)
+			txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
 	}
 
 	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
@@ -1076,19 +959,54 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 		 ch->signal,
 		 plchan->name);
 
+	plchan->phychan_hold++;
 	plchan->phychan = ch;
 
 	return 0;
 }
 
+static void release_phy_channel(struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+
+	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
+		pl08x->pd->put_signal(plchan);
+		plchan->phychan->signal = -1;
+	}
+	pl08x_put_phy_channel(pl08x, plchan->phychan);
+	plchan->phychan = NULL;
+}
+
 static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
+	struct pl08x_txd *txd = to_pl08x_txd(tx);
+	unsigned long flags;
+
+	spin_lock_irqsave(&plchan->lock, flags);
+
+	plchan->chan.cookie += 1;
+	if (plchan->chan.cookie < 0)
+		plchan->chan.cookie = 1;
+	tx->cookie = plchan->chan.cookie;
+
+	/* Put this onto the pending list */
+	list_add_tail(&txd->node, &plchan->pend_list);
 
-	atomic_inc(&plchan->last_issued);
-	tx->cookie = atomic_read(&plchan->last_issued);
-	/* This unlock follows the lock in the prep() function */
-	spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+	/*
+	 * If there was no physical channel available for this memcpy,
+	 * stack the request up and indicate that the channel is waiting
+	 * for a free physical channel.
+	 */
+	if (!plchan->slave && !plchan->phychan) {
+		/* Do this memcpy whenever there is a channel ready */
+		plchan->state = PL08X_CHAN_WAITING;
+		plchan->waiting = txd;
+	} else {
+		plchan->phychan_hold--;
+	}
+
+	spin_unlock_irqrestore(&plchan->lock, flags);
 
 	return tx->cookie;
 }
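The open-coded cookie assignment above replaces the old atomic counter: the signed cookie is bumped and wrapped back to 1, never 0 or negative, since those values carry special meaning in dmaengine. A user-space model of just the wrap behaviour (the kernel builds with -fno-strict-overflow; the cast below keeps this model well-defined):

#include <assert.h>

typedef int dma_cookie_t;

static dma_cookie_t assign_cookie(dma_cookie_t *chan_cookie)
{
	/* increment with explicit wraparound instead of signed overflow */
	*chan_cookie = (dma_cookie_t)((unsigned int)*chan_cookie + 1);
	if (*chan_cookie < 0)	/* wrapped past INT_MAX */
		*chan_cookie = 1;
	return *chan_cookie;
}

int main(void)
{
	dma_cookie_t c = 0x7fffffff;	/* INT_MAX */
	assert(assign_cookie(&c) == 1);
	assert(assign_cookie(&c) == 2);
	return 0;
}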
@@ -1118,7 +1036,7 @@ pl08x_dma_tx_status(struct dma_chan *chan,
 	enum dma_status ret;
 	u32 bytesleft = 0;
 
-	last_used = atomic_read(&plchan->last_issued);
+	last_used = plchan->chan.cookie;
 	last_complete = plchan->lc;
 
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -1134,7 +1052,7 @@ pl08x_dma_tx_status(struct dma_chan *chan,
 	/*
 	 * This cookie not complete yet
 	 */
-	last_used = atomic_read(&plchan->last_issued);
+	last_used = plchan->chan.cookie;
 	last_complete = plchan->lc;
 
 	/* Get number of bytes left in the active transactions and queue */
@@ -1199,37 +1117,35 @@ static const struct burst_table burst_sizes[] = {
 	},
 };
 
-static void dma_set_runtime_config(struct dma_chan *chan,
-			    struct dma_slave_config *config)
+static int dma_set_runtime_config(struct dma_chan *chan,
+				  struct dma_slave_config *config)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_channel_data *cd = plchan->cd;
 	enum dma_slave_buswidth addr_width;
+	dma_addr_t addr;
 	u32 maxburst;
 	u32 cctl = 0;
-	/* Mask out all except src and dst channel */
-	u32 ccfg = cd->ccfg & 0x000003DEU;
-	int i = 0;
+	int i;
+
+	if (!plchan->slave)
+		return -EINVAL;
 
 	/* Transfer direction */
 	plchan->runtime_direction = config->direction;
 	if (config->direction == DMA_TO_DEVICE) {
-		plchan->runtime_addr = config->dst_addr;
-		cctl |= PL080_CONTROL_SRC_INCR;
-		ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+		addr = config->dst_addr;
 		addr_width = config->dst_addr_width;
 		maxburst = config->dst_maxburst;
 	} else if (config->direction == DMA_FROM_DEVICE) {
-		plchan->runtime_addr = config->src_addr;
-		cctl |= PL080_CONTROL_DST_INCR;
-		ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+		addr = config->src_addr;
 		addr_width = config->src_addr_width;
 		maxburst = config->src_maxburst;
 	} else {
 		dev_err(&pl08x->adev->dev,
 			"bad runtime_config: alien transfer direction\n");
-		return;
+		return -EINVAL;
 	}
 
 	switch (addr_width) {
@@ -1248,42 +1164,40 @@ static void dma_set_runtime_config(struct dma_chan *chan,
 	default:
 		dev_err(&pl08x->adev->dev,
 			"bad runtime_config: alien address width\n");
-		return;
+		return -EINVAL;
 	}
 
 	/*
 	 * Now decide on a maxburst:
-	 * If this channel will only request single transfers, set
-	 * this down to ONE element.
+	 * If this channel will only request single transfers, set this
+	 * down to ONE element.  Also select one element if no maxburst
+	 * is specified.
 	 */
-	if (plchan->cd->single) {
+	if (plchan->cd->single || maxburst == 0) {
 		cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
 			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
 	} else {
-		while (i < ARRAY_SIZE(burst_sizes)) {
+		for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
 			if (burst_sizes[i].burstwords <= maxburst)
 				break;
-			i++;
-		}
 		cctl |= burst_sizes[i].reg;
 	}
 
-	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
-	cctl &= ~PL080_CONTROL_PROT_MASK;
-	cctl |= PL080_CONTROL_PROT_SYS;
+	plchan->runtime_addr = addr;
 
 	/* Modify the default channel data to fit PrimeCell request */
 	cd->cctl = cctl;
-	cd->ccfg = ccfg;
 
 	dev_dbg(&pl08x->adev->dev,
 		"configured channel %s (%s) for %s, data width %d, "
-		"maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n",
+		"maxburst %d words, LE, CCTL=0x%08x\n",
 		dma_chan_name(chan), plchan->name,
 		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
 		addr_width,
 		maxburst,
-		cctl, ccfg);
+		cctl);
+
+	return 0;
 }
 
 /*
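The for-loop rewrite above scans burst_sizes[] from the largest entry downwards and stops at the first burst not exceeding maxburst. A reduced model of that scan with invented register encodings (the real table uses the PL080_BSIZE_* fields):

#include <stdio.h>

struct burst_table { unsigned burstwords; unsigned reg; };

/* assumed ordered from largest to smallest, as in the driver */
static const struct burst_table burst_sizes[] = {
	{ 256, 0x6 }, { 128, 0x5 }, { 64, 0x4 }, { 32, 0x3 },
	{ 16, 0x2 }, { 8, 0x1 }, { 4, 0x0 }, { 1, 0x0 },
};

int main(void)
{
	unsigned maxburst = 48, i;

	for (i = 0; i < sizeof(burst_sizes) / sizeof(burst_sizes[0]); i++)
		if (burst_sizes[i].burstwords <= maxburst)
			break;
	/* prints 32: the largest burst that fits under maxburst */
	printf("selected burst of %u words\n", burst_sizes[i].burstwords);
	return 0;
}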
@@ -1293,35 +1207,26 @@ static void dma_set_runtime_config(struct dma_chan *chan,
 static void pl08x_issue_pending(struct dma_chan *chan)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
-	struct pl08x_driver_data *pl08x = plchan->host;
 	unsigned long flags;
 
 	spin_lock_irqsave(&plchan->lock, flags);
-	/* Something is already active */
-	if (plchan->at) {
+	/* Something is already active, or we're waiting for a channel... */
+	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
 		spin_unlock_irqrestore(&plchan->lock, flags);
-		return;
-	}
-
-	/* Didn't get a physical channel so waiting for it ... */
-	if (plchan->state == PL08X_CHAN_WAITING)
 		return;
+	}
 
 	/* Take the first element in the queue and execute it */
-	if (!list_empty(&plchan->desc_list)) {
+	if (!list_empty(&plchan->pend_list)) {
 		struct pl08x_txd *next;
 
-		next = list_first_entry(&plchan->desc_list,
+		next = list_first_entry(&plchan->pend_list,
 					struct pl08x_txd,
 					node);
 		list_del(&next->node);
-		plchan->at = next;
 		plchan->state = PL08X_CHAN_RUNNING;
 
-		/* Configure the physical channel for the active txd */
-		pl08x_config_phychan_for_txd(plchan);
-		pl08x_set_cregs(pl08x, plchan->phychan);
-		pl08x_enable_phy_chan(pl08x, plchan->phychan);
+		pl08x_start_txd(plchan, next);
 	}
 
 	spin_unlock_irqrestore(&plchan->lock, flags);
1330 | static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, | 1235 | static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, |
1331 | struct pl08x_txd *txd) | 1236 | struct pl08x_txd *txd) |
1332 | { | 1237 | { |
1333 | int num_llis; | ||
1334 | struct pl08x_driver_data *pl08x = plchan->host; | 1238 | struct pl08x_driver_data *pl08x = plchan->host; |
1335 | int ret; | 1239 | unsigned long flags; |
1240 | int num_llis, ret; | ||
1336 | 1241 | ||
1337 | num_llis = pl08x_fill_llis_for_desc(pl08x, txd); | 1242 | num_llis = pl08x_fill_llis_for_desc(pl08x, txd); |
1338 | 1243 | if (!num_llis) { | |
1339 | if (!num_llis) | 1244 | kfree(txd); |
1340 | return -EINVAL; | 1245 | return -EINVAL; |
1246 | } | ||
1341 | 1247 | ||
1342 | spin_lock_irqsave(&plchan->lock, plchan->lockflags); | 1248 | spin_lock_irqsave(&plchan->lock, flags); |
1343 | |||
1344 | /* | ||
1345 | * If this device is not using a circular buffer then | ||
1346 | * queue this new descriptor for transfer. | ||
1347 | * The descriptor for a circular buffer continues | ||
1348 | * to be used until the channel is freed. | ||
1349 | */ | ||
1350 | if (txd->cd->circular_buffer) | ||
1351 | dev_err(&pl08x->adev->dev, | ||
1352 | "%s attempting to queue a circular buffer\n", | ||
1353 | __func__); | ||
1354 | else | ||
1355 | list_add_tail(&txd->node, | ||
1356 | &plchan->desc_list); | ||
1357 | 1249 | ||
1358 | /* | 1250 | /* |
1359 | * See if we already have a physical channel allocated, | 1251 | * See if we already have a physical channel allocated, |
@@ -1362,24 +1254,23 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, | |||
1362 | ret = prep_phy_channel(plchan, txd); | 1254 | ret = prep_phy_channel(plchan, txd); |
1363 | if (ret) { | 1255 | if (ret) { |
1364 | /* | 1256 | /* |
1365 | * No physical channel available, we will | 1257 | * No physical channel was available. |
1366 | * stack up the memcpy channels until there is a channel | 1258 | * |
1367 | * available to handle it whereas slave transfers may | 1259 | * memcpy transfers can be sorted out at submission time. |
1368 | * have been denied due to platform channel muxing restrictions | 1260 | * |
1369 | * and since there is no guarantee that this will ever be | 1261 | * Slave transfers may have been denied due to platform |
1370 | * resolved, and since the signal must be aquired AFTER | 1262 | * channel muxing restrictions. Since there is no guarantee |
1371 | * aquiring the physical channel, we will let them be NACK:ed | 1263 | * that this will ever be resolved, and the signal must be |
1372 | * with -EBUSY here. The drivers can alway retry the prep() | 1264 | * acquired AFTER acquiring the physical channel, we will let |
1373 | * call if they are eager on doing this using DMA. | 1265 | * them be NACK:ed with -EBUSY here. The drivers can retry |
1266 | * the prep() call if they are eager on doing this using DMA. | ||
1374 | */ | 1267 | */ |
1375 | if (plchan->slave) { | 1268 | if (plchan->slave) { |
1376 | pl08x_free_txd_list(pl08x, plchan); | 1269 | pl08x_free_txd_list(pl08x, plchan); |
1377 | spin_unlock_irqrestore(&plchan->lock, plchan->lockflags); | 1270 | pl08x_free_txd(pl08x, txd); |
1271 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1378 | return -EBUSY; | 1272 | return -EBUSY; |
1379 | } | 1273 | } |
1380 | /* Do this memcpy whenever there is a channel ready */ | ||
1381 | plchan->state = PL08X_CHAN_WAITING; | ||
1382 | plchan->waiting = txd; | ||
1383 | } else | 1274 | } else |
1384 | /* | 1275 | /* |
1385 | * Else we're all set, paused and ready to roll, | 1276 | * Else we're all set, paused and ready to roll, |
@@ -1391,16 +1282,47 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, | |||
1391 | if (plchan->state == PL08X_CHAN_IDLE) | 1282 | if (plchan->state == PL08X_CHAN_IDLE) |
1392 | plchan->state = PL08X_CHAN_PAUSED; | 1283 | plchan->state = PL08X_CHAN_PAUSED; |
1393 | 1284 | ||
1394 | /* | 1285 | spin_unlock_irqrestore(&plchan->lock, flags); |
1395 | * Notice that we leave plchan->lock locked on purpose: | ||
1396 | * it will be unlocked in the subsequent tx_submit() | ||
1397 | * call. This is a consequence of the current API. | ||
1398 | */ | ||
1399 | 1286 | ||
1400 | return 0; | 1287 | return 0; |
1401 | } | 1288 | } |
1402 | 1289 | ||
1403 | /* | 1290 | /*
1291 | * Given the source and destination available bus masks, select which | ||
1292 | * will be routed to each port. We try to have source and destination | ||
1293 | * on separate ports, but always respect the allowable settings. | ||
1294 | */ | ||
1295 | static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst) | ||
1296 | { | ||
1297 | u32 cctl = 0; | ||
1298 | |||
1299 | if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) | ||
1300 | cctl |= PL080_CONTROL_DST_AHB2; | ||
1301 | if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) | ||
1302 | cctl |= PL080_CONTROL_SRC_AHB2; | ||
1303 | |||
1304 | return cctl; | ||
1305 | } | ||
1306 | |||
1307 | static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, | ||
1308 | unsigned long flags) | ||
1309 | { | ||
1310 | struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); | ||
1311 | |||
1312 | if (txd) { | ||
1313 | dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); | ||
1314 | txd->tx.flags = flags; | ||
1315 | txd->tx.tx_submit = pl08x_tx_submit; | ||
1316 | INIT_LIST_HEAD(&txd->node); | ||
1317 | |||
1318 | /* Always enable error and terminal interrupts */ | ||
1319 | txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | | ||
1320 | PL080_CONFIG_TC_IRQ_MASK; | ||
1321 | } | ||
1322 | return txd; | ||
1323 | } | ||
1324 | |||
1325 | /* | ||
1404 | * Initialize a descriptor to be used by memcpy submit | 1326 | * Initialize a descriptor to be used by memcpy submit |
1405 | */ | 1327 | */ |
1406 | static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( | 1328 | static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( |
@@ -1412,40 +1334,38 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( | |||
1412 | struct pl08x_txd *txd; | 1334 | struct pl08x_txd *txd; |
1413 | int ret; | 1335 | int ret; |
1414 | 1336 | ||
1415 | txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); | 1337 | txd = pl08x_get_txd(plchan, flags); |
1416 | if (!txd) { | 1338 | if (!txd) { |
1417 | dev_err(&pl08x->adev->dev, | 1339 | dev_err(&pl08x->adev->dev, |
1418 | "%s no memory for descriptor\n", __func__); | 1340 | "%s no memory for descriptor\n", __func__); |
1419 | return NULL; | 1341 | return NULL; |
1420 | } | 1342 | } |
1421 | 1343 | ||
1422 | dma_async_tx_descriptor_init(&txd->tx, chan); | ||
1423 | txd->direction = DMA_NONE; | 1344 | txd->direction = DMA_NONE; |
1424 | txd->srcbus.addr = src; | 1345 | txd->src_addr = src; |
1425 | txd->dstbus.addr = dest; | 1346 | txd->dst_addr = dest; |
1347 | txd->len = len; | ||
1426 | 1348 | ||
1427 | /* Set platform data for m2m */ | 1349 | /* Set platform data for m2m */ |
1428 | txd->cd = &pl08x->pd->memcpy_channel; | 1350 | txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; |
1351 | txd->cctl = pl08x->pd->memcpy_channel.cctl & | ||
1352 | ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); | ||
1353 | |||
1429 | /* Both to be incremented or the code will break */ | 1354 | /* Both to be incremented or the code will break */ |
1430 | txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; | 1355 | txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; |
1431 | txd->tx.tx_submit = pl08x_tx_submit; | 1356 | |
1432 | txd->tx.callback = NULL; | 1357 | if (pl08x->vd->dualmaster) |
1433 | txd->tx.callback_param = NULL; | 1358 | txd->cctl |= pl08x_select_bus(pl08x, |
1434 | txd->len = len; | 1359 | pl08x->mem_buses, pl08x->mem_buses); |
1435 | 1360 | ||
1436 | INIT_LIST_HEAD(&txd->node); | ||
1437 | ret = pl08x_prep_channel_resources(plchan, txd); | 1361 | ret = pl08x_prep_channel_resources(plchan, txd); |
1438 | if (ret) | 1362 | if (ret) |
1439 | return NULL; | 1363 | return NULL; |
1440 | /* | ||
1441 | * NB: the channel lock is held at this point so tx_submit() | ||
1442 | * must be called in direct succession. | ||
1443 | */ | ||
1444 | 1364 | ||
1445 | return &txd->tx; | 1365 | return &txd->tx; |
1446 | } | 1366 | } |
1447 | 1367 | ||
1448 | struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | 1368 | static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( |
1449 | struct dma_chan *chan, struct scatterlist *sgl, | 1369 | struct dma_chan *chan, struct scatterlist *sgl, |
1450 | unsigned int sg_len, enum dma_data_direction direction, | 1370 | unsigned int sg_len, enum dma_data_direction direction, |
1451 | unsigned long flags) | 1371 | unsigned long flags) |
@@ -1453,6 +1373,7 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1453 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1373 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1454 | struct pl08x_driver_data *pl08x = plchan->host; | 1374 | struct pl08x_driver_data *pl08x = plchan->host; |
1455 | struct pl08x_txd *txd; | 1375 | struct pl08x_txd *txd; |
1376 | u8 src_buses, dst_buses; | ||
1456 | int ret; | 1377 | int ret; |
1457 | 1378 | ||
1458 | /* | 1379 | /* |
@@ -1467,14 +1388,12 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1467 | dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", | 1388 | dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", |
1468 | __func__, sgl->length, plchan->name); | 1389 | __func__, sgl->length, plchan->name); |
1469 | 1390 | ||
1470 | txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); | 1391 | txd = pl08x_get_txd(plchan, flags); |
1471 | if (!txd) { | 1392 | if (!txd) { |
1472 | dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); | 1393 | dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); |
1473 | return NULL; | 1394 | return NULL; |
1474 | } | 1395 | } |
1475 | 1396 | ||
1476 | dma_async_tx_descriptor_init(&txd->tx, chan); | ||
1477 | |||
1478 | if (direction != plchan->runtime_direction) | 1397 | if (direction != plchan->runtime_direction) |
1479 | dev_err(&pl08x->adev->dev, "%s DMA setup does not match " | 1398 | dev_err(&pl08x->adev->dev, "%s DMA setup does not match " |
1480 | "the direction configured for the PrimeCell\n", | 1399 | "the direction configured for the PrimeCell\n", |
@@ -1486,37 +1405,47 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1486 | * channel target address dynamically at runtime. | 1405 | * channel target address dynamically at runtime. |
1487 | */ | 1406 | */ |
1488 | txd->direction = direction; | 1407 | txd->direction = direction; |
1408 | txd->len = sgl->length; | ||
1409 | |||
1410 | txd->cctl = plchan->cd->cctl & | ||
1411 | ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | | ||
1412 | PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | | ||
1413 | PL080_CONTROL_PROT_MASK); | ||
1414 | |||
1415 | /* Access the cell in privileged mode, non-bufferable, non-cacheable */ | ||
1416 | txd->cctl |= PL080_CONTROL_PROT_SYS; | ||
1417 | |||
1489 | if (direction == DMA_TO_DEVICE) { | 1418 | if (direction == DMA_TO_DEVICE) { |
1490 | txd->srcbus.addr = sgl->dma_address; | 1419 | txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; |
1420 | txd->cctl |= PL080_CONTROL_SRC_INCR; | ||
1421 | txd->src_addr = sgl->dma_address; | ||
1491 | if (plchan->runtime_addr) | 1422 | if (plchan->runtime_addr) |
1492 | txd->dstbus.addr = plchan->runtime_addr; | 1423 | txd->dst_addr = plchan->runtime_addr; |
1493 | else | 1424 | else |
1494 | txd->dstbus.addr = plchan->cd->addr; | 1425 | txd->dst_addr = plchan->cd->addr; |
1426 | src_buses = pl08x->mem_buses; | ||
1427 | dst_buses = plchan->cd->periph_buses; | ||
1495 | } else if (direction == DMA_FROM_DEVICE) { | 1428 | } else if (direction == DMA_FROM_DEVICE) { |
1429 | txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; | ||
1430 | txd->cctl |= PL080_CONTROL_DST_INCR; | ||
1496 | if (plchan->runtime_addr) | 1431 | if (plchan->runtime_addr) |
1497 | txd->srcbus.addr = plchan->runtime_addr; | 1432 | txd->src_addr = plchan->runtime_addr; |
1498 | else | 1433 | else |
1499 | txd->srcbus.addr = plchan->cd->addr; | 1434 | txd->src_addr = plchan->cd->addr; |
1500 | txd->dstbus.addr = sgl->dma_address; | 1435 | txd->dst_addr = sgl->dma_address; |
1436 | src_buses = plchan->cd->periph_buses; | ||
1437 | dst_buses = pl08x->mem_buses; | ||
1501 | } else { | 1438 | } else { |
1502 | dev_err(&pl08x->adev->dev, | 1439 | dev_err(&pl08x->adev->dev, |
1503 | "%s direction unsupported\n", __func__); | 1440 | "%s direction unsupported\n", __func__); |
1504 | return NULL; | 1441 | return NULL; |
1505 | } | 1442 | } |
1506 | txd->cd = plchan->cd; | 1443 | |
1507 | txd->tx.tx_submit = pl08x_tx_submit; | 1444 | txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses); |
1508 | txd->tx.callback = NULL; | ||
1509 | txd->tx.callback_param = NULL; | ||
1510 | txd->len = sgl->length; | ||
1511 | INIT_LIST_HEAD(&txd->node); | ||
1512 | 1445 | ||
1513 | ret = pl08x_prep_channel_resources(plchan, txd); | 1446 | ret = pl08x_prep_channel_resources(plchan, txd); |
1514 | if (ret) | 1447 | if (ret) |
1515 | return NULL; | 1448 | return NULL; |
1516 | /* | ||
1517 | * NB: the channel lock is held at this point so tx_submit() | ||
1518 | * must be called in direct succession. | ||
1519 | */ | ||
1520 | 1449 | ||
1521 | return &txd->tx; | 1450 | return &txd->tx; |
1522 | } | 1451 | } |
@@ -1531,10 +1460,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1531 | 1460 | ||
1532 | /* Controls applicable to inactive channels */ | 1461 | /* Controls applicable to inactive channels */ |
1533 | if (cmd == DMA_SLAVE_CONFIG) { | 1462 | if (cmd == DMA_SLAVE_CONFIG) { |
1534 | dma_set_runtime_config(chan, | 1463 | return dma_set_runtime_config(chan, |
1535 | (struct dma_slave_config *) | 1464 | (struct dma_slave_config *)arg); |
1536 | arg); | ||
1537 | return 0; | ||
1538 | } | 1465 | } |
1539 | 1466 | ||
1540 | /* | 1467 | /* |
@@ -1558,16 +1485,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1558 | * Mark physical channel as free and free any slave | 1485 | * Mark physical channel as free and free any slave |
1559 | * signal | 1486 | * signal |
1560 | */ | 1487 | */ |
1561 | if ((plchan->phychan->signal >= 0) && | 1488 | release_phy_channel(plchan); |
1562 | pl08x->pd->put_signal) { | ||
1563 | pl08x->pd->put_signal(plchan); | ||
1564 | plchan->phychan->signal = -1; | ||
1565 | } | ||
1566 | pl08x_put_phy_channel(pl08x, plchan->phychan); | ||
1567 | plchan->phychan = NULL; | ||
1568 | } | 1489 | } |
1569 | /* Stop any pending tasklet */ | ||
1570 | tasklet_disable(&plchan->tasklet); | ||
1571 | /* Dequeue jobs and free LLIs */ | 1490 | /* Dequeue jobs and free LLIs */ |
1572 | if (plchan->at) { | 1491 | if (plchan->at) { |
1573 | pl08x_free_txd(pl08x, plchan->at); | 1492 | pl08x_free_txd(pl08x, plchan->at); |
@@ -1620,78 +1539,71 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) | |||
1620 | 1539 | ||
1621 | val = readl(pl08x->base + PL080_CONFIG); | 1540 | val = readl(pl08x->base + PL080_CONFIG); |
1622 | val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); | 1541 | val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); |
1623 | /* We implictly clear bit 1 and that means little-endian mode */ | 1542 | /* We implicitly clear bit 1 and that means little-endian mode */ |
1624 | val |= PL080_CONFIG_ENABLE; | 1543 | val |= PL080_CONFIG_ENABLE; |
1625 | writel(val, pl08x->base + PL080_CONFIG); | 1544 | writel(val, pl08x->base + PL080_CONFIG); |
1626 | } | 1545 | } |
1627 | 1546 | ||
1547 | static void pl08x_unmap_buffers(struct pl08x_txd *txd) | ||
1548 | { | ||
1549 | struct device *dev = txd->tx.chan->device->dev; | ||
1550 | |||
1551 | if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
1552 | if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
1553 | dma_unmap_single(dev, txd->src_addr, txd->len, | ||
1554 | DMA_TO_DEVICE); | ||
1555 | else | ||
1556 | dma_unmap_page(dev, txd->src_addr, txd->len, | ||
1557 | DMA_TO_DEVICE); | ||
1558 | } | ||
1559 | if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
1560 | if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
1561 | dma_unmap_single(dev, txd->dst_addr, txd->len, | ||
1562 | DMA_FROM_DEVICE); | ||
1563 | else | ||
1564 | dma_unmap_page(dev, txd->dst_addr, txd->len, | ||
1565 | DMA_FROM_DEVICE); | ||
1566 | } | ||
1567 | } | ||
1568 | |||
1628 | static void pl08x_tasklet(unsigned long data) | 1569 | static void pl08x_tasklet(unsigned long data) |
1629 | { | 1570 | { |
1630 | struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; | 1571 | struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; |
1631 | struct pl08x_phy_chan *phychan = plchan->phychan; | ||
1632 | struct pl08x_driver_data *pl08x = plchan->host; | 1572 | struct pl08x_driver_data *pl08x = plchan->host; |
1573 | struct pl08x_txd *txd; | ||
1574 | unsigned long flags; | ||
1633 | 1575 | ||
1634 | if (!plchan) | 1576 | spin_lock_irqsave(&plchan->lock, flags); |
1635 | BUG(); | ||
1636 | |||
1637 | spin_lock(&plchan->lock); | ||
1638 | 1577 | ||
1639 | if (plchan->at) { | 1578 | txd = plchan->at; |
1640 | dma_async_tx_callback callback = | 1579 | plchan->at = NULL; |
1641 | plchan->at->tx.callback; | ||
1642 | void *callback_param = | ||
1643 | plchan->at->tx.callback_param; | ||
1644 | 1580 | ||
1581 | if (txd) { | ||
1645 | /* | 1582 | /* |
1646 | * Update last completed | 1583 | * Update last completed |
1647 | */ | 1584 | */ |
1648 | plchan->lc = | 1585 | plchan->lc = txd->tx.cookie; |
1649 | (plchan->at->tx.cookie); | ||
1650 | |||
1651 | /* | ||
1652 | * Callback to signal completion | ||
1653 | */ | ||
1654 | if (callback) | ||
1655 | callback(callback_param); | ||
1656 | |||
1657 | /* | ||
1658 | * Device callbacks should NOT clear | ||
1659 | * the current transaction on the channel | ||
1660 | * Linus: sometimes they should? | ||
1661 | */ | ||
1662 | if (!plchan->at) | ||
1663 | BUG(); | ||
1664 | |||
1665 | /* | ||
1666 | * Free the descriptor if it's not for a device | ||
1667 | * using a circular buffer | ||
1668 | */ | ||
1669 | if (!plchan->at->cd->circular_buffer) { | ||
1670 | pl08x_free_txd(pl08x, plchan->at); | ||
1671 | plchan->at = NULL; | ||
1672 | } | ||
1673 | /* | ||
1674 | * else descriptor for circular | ||
1675 | * buffers only freed when | ||
1676 | * client has disabled dma | ||
1677 | */ | ||
1678 | } | 1586 | } |
1587 | |||
1679 | /* | 1588 | /* |
1680 | * If a new descriptor is queued, set it up | 1589 | * If a new descriptor is queued, set it up |
1681 | * plchan->at is NULL here | 1590 | * plchan->at is NULL here |
1682 | */ | 1591 | */ |
1683 | if (!list_empty(&plchan->desc_list)) { | 1592 | if (!list_empty(&plchan->pend_list)) { |
1684 | struct pl08x_txd *next; | 1593 | struct pl08x_txd *next; |
1685 | 1594 | ||
1686 | next = list_first_entry(&plchan->desc_list, | 1595 | next = list_first_entry(&plchan->pend_list, |
1687 | struct pl08x_txd, | 1596 | struct pl08x_txd, |
1688 | node); | 1597 | node); |
1689 | list_del(&next->node); | 1598 | list_del(&next->node); |
1690 | plchan->at = next; | 1599 | |
1691 | /* Configure the physical channel for the next txd */ | 1600 | pl08x_start_txd(plchan, next); |
1692 | pl08x_config_phychan_for_txd(plchan); | 1601 | } else if (plchan->phychan_hold) { |
1693 | pl08x_set_cregs(pl08x, plchan->phychan); | 1602 | /* |
1694 | pl08x_enable_phy_chan(pl08x, plchan->phychan); | 1603 | * This channel is still in use - we have a new txd being |
1604 | * prepared and will soon be queued. Don't give up the | ||
1605 | * physical channel. | ||
1606 | */ | ||
1695 | } else { | 1607 | } else { |
1696 | struct pl08x_dma_chan *waiting = NULL; | 1608 | struct pl08x_dma_chan *waiting = NULL; |
1697 | 1609 | ||
@@ -1699,12 +1611,7 @@ static void pl08x_tasklet(unsigned long data) | |||
1699 | * No more jobs, so free up the physical channel | 1611 | * No more jobs, so free up the physical channel |
1700 | * Free any allocated signal on slave transfers too | 1612 | * Free any allocated signal on slave transfers too |
1701 | */ | 1613 | */ |
1702 | if ((phychan->signal >= 0) && pl08x->pd->put_signal) { | 1614 | release_phy_channel(plchan); |
1703 | pl08x->pd->put_signal(plchan); | ||
1704 | phychan->signal = -1; | ||
1705 | } | ||
1706 | pl08x_put_phy_channel(pl08x, phychan); | ||
1707 | plchan->phychan = NULL; | ||
1708 | plchan->state = PL08X_CHAN_IDLE; | 1615 | plchan->state = PL08X_CHAN_IDLE; |
1709 | 1616 | ||
1710 | /* | 1617 | /* |
@@ -1724,6 +1631,7 @@ static void pl08x_tasklet(unsigned long data) | |||
1724 | ret = prep_phy_channel(waiting, | 1631 | ret = prep_phy_channel(waiting, |
1725 | waiting->waiting); | 1632 | waiting->waiting); |
1726 | BUG_ON(ret); | 1633 | BUG_ON(ret); |
1634 | waiting->phychan_hold--; | ||
1727 | waiting->state = PL08X_CHAN_RUNNING; | 1635 | waiting->state = PL08X_CHAN_RUNNING; |
1728 | waiting->waiting = NULL; | 1636 | waiting->waiting = NULL; |
1729 | pl08x_issue_pending(&waiting->chan); | 1637 | pl08x_issue_pending(&waiting->chan); |
@@ -1732,7 +1640,25 @@ static void pl08x_tasklet(unsigned long data) | |||
1732 | } | 1640 | } |
1733 | } | 1641 | } |
1734 | 1642 | ||
1735 | spin_unlock(&plchan->lock); | 1643 | spin_unlock_irqrestore(&plchan->lock, flags); |
1644 | |||
1645 | if (txd) { | ||
1646 | dma_async_tx_callback callback = txd->tx.callback; | ||
1647 | void *callback_param = txd->tx.callback_param; | ||
1648 | |||
1649 | /* Don't try to unmap buffers on slave channels */ | ||
1650 | if (!plchan->slave) | ||
1651 | pl08x_unmap_buffers(txd); | ||
1652 | |||
1653 | /* Free the descriptor */ | ||
1654 | spin_lock_irqsave(&plchan->lock, flags); | ||
1655 | pl08x_free_txd(pl08x, txd); | ||
1656 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1657 | |||
1658 | /* Callback to signal completion */ | ||
1659 | if (callback) | ||
1660 | callback(callback_param); | ||
1661 | } | ||
1736 | } | 1662 | }
1737 | 1663 | ||
1738 | static irqreturn_t pl08x_irq(int irq, void *dev) | 1664 | static irqreturn_t pl08x_irq(int irq, void *dev)
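Worth noting in the rewritten tasklet: the finished txd is detached and the next transfer started under plchan->lock, but the client callback only runs after the lock is dropped, so a callback that immediately preps and submits more work cannot deadlock on the channel lock. In outline (a sketch of the pattern, not additional driver code):

	spin_lock_irqsave(&plchan->lock, flags);
	txd = plchan->at;			/* detach the finished txd */
	plchan->at = NULL;
	/* ... start the next pending txd or release the phy channel ... */
	spin_unlock_irqrestore(&plchan->lock, flags);

	if (txd && txd->tx.callback)
		txd->tx.callback(txd->tx.callback_param);	/* lock not held */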
@@ -1819,16 +1745,23 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1819 | return -ENOMEM; | 1745 | return -ENOMEM; |
1820 | } | 1746 | } |
1821 | } | 1747 | } |
1748 | if (chan->cd->circular_buffer) { | ||
1749 | dev_err(&pl08x->adev->dev, | ||
1750 | "channel %s: circular buffers not supported\n", | ||
1751 | chan->name); | ||
1752 | kfree(chan); | ||
1753 | continue; | ||
1754 | } | ||
1822 | dev_info(&pl08x->adev->dev, | 1755 | dev_info(&pl08x->adev->dev, |
1823 | "initialize virtual channel \"%s\"\n", | 1756 | "initialize virtual channel \"%s\"\n", |
1824 | chan->name); | 1757 | chan->name); |
1825 | 1758 | ||
1826 | chan->chan.device = dmadev; | 1759 | chan->chan.device = dmadev; |
1827 | atomic_set(&chan->last_issued, 0); | 1760 | chan->chan.cookie = 0; |
1828 | chan->lc = atomic_read(&chan->last_issued); | 1761 | chan->lc = 0; |
1829 | 1762 | ||
1830 | spin_lock_init(&chan->lock); | 1763 | spin_lock_init(&chan->lock); |
1831 | INIT_LIST_HEAD(&chan->desc_list); | 1764 | INIT_LIST_HEAD(&chan->pend_list); |
1832 | tasklet_init(&chan->tasklet, pl08x_tasklet, | 1765 | tasklet_init(&chan->tasklet, pl08x_tasklet, |
1833 | (unsigned long) chan); | 1766 | (unsigned long) chan); |
1834 | 1767 | ||
@@ -1898,7 +1831,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) | |||
1898 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | 1831 | seq_printf(s, "CHANNEL:\tSTATE:\n"); |
1899 | seq_printf(s, "--------\t------\n"); | 1832 | seq_printf(s, "--------\t------\n"); |
1900 | list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { | 1833 | list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { |
1901 | seq_printf(s, "%s\t\t\%s\n", chan->name, | 1834 | seq_printf(s, "%s\t\t%s\n", chan->name, |
1902 | pl08x_state_str(chan->state)); | 1835 | pl08x_state_str(chan->state)); |
1903 | } | 1836 | } |
1904 | 1837 | ||
@@ -1906,7 +1839,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) | |||
1906 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | 1839 | seq_printf(s, "CHANNEL:\tSTATE:\n"); |
1907 | seq_printf(s, "--------\t------\n"); | 1840 | seq_printf(s, "--------\t------\n"); |
1908 | list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { | 1841 | list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { |
1909 | seq_printf(s, "%s\t\t\%s\n", chan->name, | 1842 | seq_printf(s, "%s\t\t%s\n", chan->name, |
1910 | pl08x_state_str(chan->state)); | 1843 | pl08x_state_str(chan->state)); |
1911 | } | 1844 | } |
1912 | 1845 | ||
@@ -1942,7 +1875,7 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) | |||
1942 | static int pl08x_probe(struct amba_device *adev, struct amba_id *id) | 1875 | static int pl08x_probe(struct amba_device *adev, struct amba_id *id) |
1943 | { | 1876 | { |
1944 | struct pl08x_driver_data *pl08x; | 1877 | struct pl08x_driver_data *pl08x; |
1945 | struct vendor_data *vd = id->data; | 1878 | const struct vendor_data *vd = id->data; |
1946 | int ret = 0; | 1879 | int ret = 0; |
1947 | int i; | 1880 | int i; |
1948 | 1881 | ||
@@ -1990,6 +1923,14 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id) | |||
1990 | pl08x->adev = adev; | 1923 | pl08x->adev = adev; |
1991 | pl08x->vd = vd; | 1924 | pl08x->vd = vd; |
1992 | 1925 | ||
1926 | /* By default, AHB1 only. If dualmaster, from platform */ | ||
1927 | pl08x->lli_buses = PL08X_AHB1; | ||
1928 | pl08x->mem_buses = PL08X_AHB1; | ||
1929 | if (pl08x->vd->dualmaster) { | ||
1930 | pl08x->lli_buses = pl08x->pd->lli_buses; | ||
1931 | pl08x->mem_buses = pl08x->pd->mem_buses; | ||
1932 | } | ||
1933 | |||
1993 | /* A DMA memory pool for LLIs, align on 1-byte boundary */ | 1934 | /* A DMA memory pool for LLIs, align on 1-byte boundary */ |
1994 | pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, | 1935 | pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, |
1995 | PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); | 1936 | PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); |
@@ -2016,7 +1957,7 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id) | |||
2016 | writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); | 1957 | writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); |
2017 | 1958 | ||
2018 | ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, | 1959 | ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, |
2019 | vd->name, pl08x); | 1960 | DRIVER_NAME, pl08x); |
2020 | if (ret) { | 1961 | if (ret) { |
2021 | dev_err(&adev->dev, "%s failed to request interrupt %d\n", | 1962 | dev_err(&adev->dev, "%s failed to request interrupt %d\n", |
2022 | __func__, adev->irq[0]); | 1963 | __func__, adev->irq[0]); |
@@ -2087,8 +2028,9 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id) | |||
2087 | 2028 | ||
2088 | amba_set_drvdata(adev, pl08x); | 2029 | amba_set_drvdata(adev, pl08x); |
2089 | init_pl08x_debugfs(pl08x); | 2030 | init_pl08x_debugfs(pl08x); |
2090 | dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n", | 2031 | dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", |
2091 | vd->name, adev->res.start); | 2032 | amba_part(adev), amba_rev(adev), |
2033 | (unsigned long long)adev->res.start, adev->irq[0]); | ||
2092 | return 0; | 2034 | return 0; |
2093 | 2035 | ||
2094 | out_no_slave_reg: | 2036 | out_no_slave_reg: |
@@ -2115,13 +2057,11 @@ out_no_pl08x: | |||
2115 | 2057 | ||
2116 | /* PL080 has 8 channels and the PL081 has just 2 */ | 2058 | /* PL080 has 8 channels and the PL081 has just 2 */
2117 | static struct vendor_data vendor_pl080 = { | 2059 | static struct vendor_data vendor_pl080 = {
2118 | .name = "PL080", | ||
2119 | .channels = 8, | 2060 | .channels = 8, |
2120 | .dualmaster = true, | 2061 | .dualmaster = true, |
2121 | }; | 2062 | }; |
2122 | 2063 | ||
2123 | static struct vendor_data vendor_pl081 = { | 2064 | static struct vendor_data vendor_pl081 = {
2124 | .name = "PL081", | ||
2125 | .channels = 2, | 2065 | .channels = 2, |
2126 | .dualmaster = false, | 2066 | .dualmaster = false, |
2127 | }; | 2067 | }; |
@@ -2160,7 +2100,7 @@ static int __init pl08x_init(void) | |||
2160 | retval = amba_driver_register(&pl08x_amba_driver); | 2100 | retval = amba_driver_register(&pl08x_amba_driver); |
2161 | if (retval) | 2101 | if (retval) |
2162 | printk(KERN_WARNING DRIVER_NAME | 2102 | printk(KERN_WARNING DRIVER_NAME |
2163 | "failed to register as an amba device (%d)\n", | 2103 | "failed to register as an AMBA device (%d)\n", |
2164 | retval); | 2104 | retval); |
2165 | return retval; | 2105 | return retval; |
2166 | } | 2106 | } |
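As the comment in pl08x_prep_channel_resources() notes, slave prep may be NACK:ed with -EBUSY when no physical channel or mux signal is free, and the client "can retry the prep() call". One possible retry shape (a sketch; the bound, the backoff, and pio_fallback() are assumptions, not driver API):

	struct dma_async_tx_descriptor *tx = NULL;
	int tries = 3;

	while (tries-- && !tx) {
		tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
							DMA_TO_DEVICE,
							DMA_CTRL_ACK);
		if (!tx)
			usleep_range(100, 200);	/* arbitrary, sleepable context */
	}
	if (!tx)
		pio_fallback();			/* hypothetical PIO path */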
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 286c3ac6bdcc..531230b87976 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Freescale MPC85xx, MPC83xx DMA Engine support | 2 | * Freescale MPC85xx, MPC83xx DMA Engine support |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | 4 | * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * Author: | 6 | * Author: |
7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | 7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 |
@@ -1322,6 +1322,8 @@ static int __devinit fsldma_of_probe(struct platform_device *op, | |||
1322 | fdev->common.device_control = fsl_dma_device_control; | 1322 | fdev->common.device_control = fsl_dma_device_control; |
1323 | fdev->common.dev = &op->dev; | 1323 | fdev->common.dev = &op->dev; |
1324 | 1324 | ||
1325 | dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); | ||
1326 | |||
1325 | dev_set_drvdata(&op->dev, fdev); | 1327 | dev_set_drvdata(&op->dev, fdev); |
1326 | 1328 | ||
1327 | /* | 1329 | /* |
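One nit on the fsldma hunk: dma_set_mask() can fail when the platform cannot satisfy the requested width, and the return value is ignored here. A more defensive shape would be (a sketch; the 32-bit fallback policy is an assumption):

	if (dma_set_mask(&op->dev, DMA_BIT_MASK(36))) {
		/* Fall back rather than hand the device 36-bit addresses */
		if (dma_set_mask(&op->dev, DMA_BIT_MASK(32)))
			dev_warn(&op->dev, "no usable DMA mask\n");
	}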
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index f629e4961af5..e53d438142bb 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -379,7 +379,7 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
379 | return 0; | 379 | return 0; |
380 | 380 | ||
381 | err_init: | 381 | err_init: |
382 | while (i-- >= 0) { | 382 | while (--i >= 0) { |
383 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; | 383 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; |
384 | imx_dma_free(imxdmac->imxdma_channel); | 384 | imx_dma_free(imxdmac->imxdma_channel); |
385 | } | 385 | } |
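The imx-dma change is the classic unwind-loop off-by-one: the post-decrement form runs one extra iteration with i already -1 and touches channel[-1]. A two-loop demonstration of the difference:

	int i = 2;

	while (i-- >= 0)
		pr_info("post: %d\n", i);	/* prints 1, 0, -1: underflow */

	i = 2;
	while (--i >= 0)
		pr_info("pre:  %d\n", i);	/* prints 1, 0: safe unwind */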
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 0834323a0599..d0602dd5d1b2 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -951,7 +951,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
951 | struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; | 951 | struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; |
952 | int param; | 952 | int param; |
953 | 953 | ||
954 | bd->buffer_addr = sgl->dma_address; | 954 | bd->buffer_addr = sg->dma_address; |
955 | 955 | ||
956 | count = sg->length; | 956 | count = sg->length; |
957 | 957 | ||
@@ -1385,7 +1385,7 @@ static int __init sdma_module_init(void) | |||
1385 | { | 1385 | { |
1386 | return platform_driver_probe(&sdma_driver, sdma_probe); | 1386 | return platform_driver_probe(&sdma_driver, sdma_probe); |
1387 | } | 1387 | } |
1388 | subsys_initcall(sdma_module_init); | 1388 | module_init(sdma_module_init); |
1389 | 1389 | ||
1390 | MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); | 1390 | MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); |
1391 | MODULE_DESCRIPTION("i.MX SDMA driver"); | 1391 | MODULE_DESCRIPTION("i.MX SDMA driver"); |
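The sgl -> sg fix matters because sdma_prep_slave_sg() walks the scatterlist with a per-segment cursor; reading from sgl (the list head) programs every buffer descriptor with the first segment's DMA address. The assumed shape of the surrounding loop, for context:

	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, sg_len, i) {
		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];

		bd->buffer_addr = sg->dma_address;	/* per segment, not sgl-> */
		bd->mode.count = sg->length;
	}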
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index 338bc4eed1f3..5397d8535e0a 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
@@ -664,11 +664,20 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
664 | /*calculate CTL_LO*/ | 664 | /*calculate CTL_LO*/ |
665 | ctl_lo.ctl_lo = 0; | 665 | ctl_lo.ctl_lo = 0; |
666 | ctl_lo.ctlx.int_en = 1; | 666 | ctl_lo.ctlx.int_en = 1; |
667 | ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width; | ||
668 | ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width; | ||
669 | ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; | 667 | ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; |
670 | ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; | 668 | ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; |
671 | 669 | ||
670 | /* | ||
671 | * Here we need some translation from "enum dma_slave_buswidth" | ||
672 | * to the format for our dma controller | ||
673 | * standard intel_mid_dmac's format | ||
674 | * 1 Byte 0b000 | ||
675 | * 2 Bytes 0b001 | ||
676 | * 4 Bytes 0b010 | ||
677 | */ | ||
678 | ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2; | ||
679 | ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2; | ||
680 | |||
672 | if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { | 681 | if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { |
673 | ctl_lo.ctlx.tt_fc = 0; | 682 | ctl_lo.ctlx.tt_fc = 0; |
674 | ctl_lo.ctlx.sinc = 0; | 683 | ctl_lo.ctlx.sinc = 0; |
@@ -746,8 +755,18 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | |||
746 | BUG_ON(!mids); | 755 | BUG_ON(!mids); |
747 | 756 | ||
748 | if (!midc->dma->pimr_mask) { | 757 | if (!midc->dma->pimr_mask) { |
749 | pr_debug("MDMA: SG list is not supported by this controller\n"); | 758 | /* We can still handle an sg list with only one item */
750 | return NULL; | 759 | if (sg_len == 1) { |
760 | txd = intel_mid_dma_prep_memcpy(chan, | ||
761 | mids->dma_slave.dst_addr, | ||
762 | mids->dma_slave.src_addr, | ||
763 | sgl->length, | ||
764 | flags); | ||
765 | return txd; | ||
766 | } else { | ||
767 | pr_warn("MDMA: SG list is not supported by this controller\n"); | ||
768 | return NULL; | ||
769 | } | ||
751 | } | 770 | } |
752 | 771 | ||
753 | pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", | 772 | pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", |
@@ -758,6 +777,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | |||
758 | pr_err("MDMA: Prep memcpy failed\n"); | 777 | pr_err("MDMA: Prep memcpy failed\n"); |
759 | return NULL; | 778 | return NULL; |
760 | } | 779 | } |
780 | |||
761 | desc = to_intel_mid_dma_desc(txd); | 781 | desc = to_intel_mid_dma_desc(txd); |
762 | desc->dirn = direction; | 782 | desc->dirn = direction; |
763 | ctl_lo.ctl_lo = desc->ctl_lo; | 783 | ctl_lo.ctl_lo = desc->ctl_lo; |
@@ -1021,11 +1041,6 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) | |||
1021 | 1041 | ||
1022 | /*DMA Interrupt*/ | 1042 | /*DMA Interrupt*/ |
1023 | pr_debug("MDMA:Got an interrupt on irq %d\n", irq); | 1043 | pr_debug("MDMA:Got an interrupt on irq %d\n", irq); |
1024 | if (!mid) { | ||
1025 | pr_err("ERR_MDMA:null pointer mid\n"); | ||
1026 | return -EINVAL; | ||
1027 | } | ||
1028 | |||
1029 | pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); | 1044 | pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); |
1030 | tfr_status &= mid->intr_mask; | 1045 | tfr_status &= mid->intr_mask; |
1031 | if (tfr_status) { | 1046 | if (tfr_status) { |
@@ -1075,7 +1090,6 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
1075 | if (NULL == dma->dma_pool) { | 1090 | if (NULL == dma->dma_pool) { |
1076 | pr_err("ERR_MDMA:pci_pool_create failed\n"); | 1091 | pr_err("ERR_MDMA:pci_pool_create failed\n"); |
1077 | err = -ENOMEM; | 1092 | err = -ENOMEM; |
1078 | kfree(dma); | ||
1079 | goto err_dma_pool; | 1093 | goto err_dma_pool; |
1080 | } | 1094 | } |
1081 | 1095 | ||
@@ -1186,7 +1200,6 @@ err_engine: | |||
1186 | free_irq(pdev->irq, dma); | 1200 | free_irq(pdev->irq, dma); |
1187 | err_irq: | 1201 | err_irq: |
1188 | pci_pool_destroy(dma->dma_pool); | 1202 | pci_pool_destroy(dma->dma_pool); |
1189 | kfree(dma); | ||
1190 | err_dma_pool: | 1203 | err_dma_pool: |
1191 | pr_err("ERR_MDMA:setup_dma failed: %d\n", err); | 1204 | pr_err("ERR_MDMA:setup_dma failed: %d\n", err); |
1192 | return err; | 1205 | return err; |
@@ -1413,7 +1426,7 @@ static const struct dev_pm_ops intel_mid_dma_pm = { | |||
1413 | .runtime_idle = dma_runtime_idle, | 1426 | .runtime_idle = dma_runtime_idle, |
1414 | }; | 1427 | }; |
1415 | 1428 | ||
1416 | static struct pci_driver intel_mid_dma_pci = { | 1429 | static struct pci_driver intel_mid_dma_pci_driver = { |
1417 | .name = "Intel MID DMA", | 1430 | .name = "Intel MID DMA", |
1418 | .id_table = intel_mid_dma_ids, | 1431 | .id_table = intel_mid_dma_ids, |
1419 | .probe = intel_mid_dma_probe, | 1432 | .probe = intel_mid_dma_probe, |
@@ -1431,13 +1444,13 @@ static int __init intel_mid_dma_init(void) | |||
1431 | { | 1444 | { |
1432 | pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", | 1445 | pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", |
1433 | INTEL_MID_DMA_DRIVER_VERSION); | 1446 | INTEL_MID_DMA_DRIVER_VERSION); |
1434 | return pci_register_driver(&intel_mid_dma_pci); | 1447 | return pci_register_driver(&intel_mid_dma_pci_driver); |
1435 | } | 1448 | } |
1436 | fs_initcall(intel_mid_dma_init); | 1449 | fs_initcall(intel_mid_dma_init); |
1437 | 1450 | ||
1438 | static void __exit intel_mid_dma_exit(void) | 1451 | static void __exit intel_mid_dma_exit(void) |
1439 | { | 1452 | { |
1440 | pci_unregister_driver(&intel_mid_dma_pci); | 1453 | pci_unregister_driver(&intel_mid_dma_pci_driver); |
1441 | } | 1454 | } |
1442 | module_exit(intel_mid_dma_exit); | 1455 | module_exit(intel_mid_dma_exit); |
1443 | 1456 | ||
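The "/ 2" in the intel_mid_dma hunk works because enum dma_slave_buswidth values are byte counts (1, 2 and 4) and integer division maps them onto the controller's 0b000/0b001/0b010 encoding. A base-2 log states the same intent more explicitly (a sketch, not what the patch uses):

	#include <linux/log2.h>

	/* 1 byte -> 0b000, 2 bytes -> 0b001, 4 bytes -> 0b010 */
	static inline u32 buswidth_to_tr_width(enum dma_slave_buswidth w)
	{
		return ilog2(w);	/* equals w / 2 for w in {1, 2, 4} */
	}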
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile index 8997d3fb9051..0ff7270af25b 100644 --- a/drivers/dma/ioat/Makefile +++ b/drivers/dma/ioat/Makefile | |||
@@ -1,2 +1,2 @@ | |||
1 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | 1 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o |
2 | ioatdma-objs := pci.o dma.o dma_v2.o dma_v3.o dca.o | 2 | ioatdma-y := pci.o dma.o dma_v2.o dma_v3.o dca.o |
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 92b679024fed..c064c89420d0 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -259,11 +259,6 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) | |||
259 | return; | 259 | return; |
260 | } | 260 | } |
261 | 261 | ||
262 | channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr); | ||
263 | channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr); | ||
264 | channel_writel(pd_chan, SIZE, desc->regs.size); | ||
265 | channel_writel(pd_chan, NEXT, desc->regs.next); | ||
266 | |||
267 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n", | 262 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n", |
268 | pd_chan->chan.chan_id, desc->regs.dev_addr); | 263 | pd_chan->chan.chan_id, desc->regs.dev_addr); |
269 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n", | 264 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n", |
@@ -273,10 +268,16 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) | |||
273 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n", | 268 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n", |
274 | pd_chan->chan.chan_id, desc->regs.next); | 269 | pd_chan->chan.chan_id, desc->regs.next); |
275 | 270 | ||
276 | if (list_empty(&desc->tx_list)) | 271 | if (list_empty(&desc->tx_list)) { |
272 | channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr); | ||
273 | channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr); | ||
274 | channel_writel(pd_chan, SIZE, desc->regs.size); | ||
275 | channel_writel(pd_chan, NEXT, desc->regs.next); | ||
277 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT); | 276 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT); |
278 | else | 277 | } else { |
278 | channel_writel(pd_chan, NEXT, desc->txd.phys); | ||
279 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG); | 279 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG); |
280 | } | ||
280 | 281 | ||
281 | val = dma_readl(pd, CTL2); | 282 | val = dma_readl(pd, CTL2); |
282 | val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id); | 283 | val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id); |
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 0d58a4a4487f..cef584533ee8 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
@@ -4449,9 +4449,8 @@ static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev, | |||
4449 | 4449 | ||
4450 | if (!request_mem_region(res.start, resource_size(&res), | 4450 | if (!request_mem_region(res.start, resource_size(&res), |
4451 | dev_driver_string(&ofdev->dev))) { | 4451 | dev_driver_string(&ofdev->dev))) { |
4452 | dev_err(&ofdev->dev, "failed to request memory region " | 4452 | dev_err(&ofdev->dev, "failed to request memory region %pR\n", |
4453 | "(0x%016llx-0x%016llx)\n", | 4453 | &res); |
4454 | (u64)res.start, (u64)res.end); | ||
4455 | initcode = PPC_ADMA_INIT_MEMREG; | 4454 | initcode = PPC_ADMA_INIT_MEMREG; |
4456 | ret = -EBUSY; | 4455 | ret = -EBUSY; |
4457 | goto out; | 4456 | goto out; |
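%pR is the kernel's printf extension for struct resource pointers: it decodes the resource type and prints the range, which is why the hand-rolled 0x%016llx pair can go. Roughly (addresses invented; exact output formatting varies by kernel version):

	struct resource res = {
		.start = 0x400100200ULL,
		.end   = 0x4001002ffULL,
		.flags = IORESOURCE_MEM,
	};

	dev_err(&ofdev->dev, "failed to request memory region %pR\n", &res);
	/* emits something like: [mem 0x400100200-0x4001002ff] */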