Diffstat (limited to 'drivers/dma')
 drivers/dma/Kconfig         |  11
 drivers/dma/Makefile        |   2
 drivers/dma/amba-pl08x.c    | 941
 drivers/dma/dw_dmac.c       |   2
 drivers/dma/ipu/ipu_idmac.c |   8
 drivers/dma/ipu/ipu_irq.c   |  14
 drivers/dma/omap-dma.c      | 669
 drivers/dma/sa11x0-dma.c    | 388
 drivers/dma/sh/shdma-base.c |   9
 drivers/dma/sh/shdma.c      |  12
 drivers/dma/virt-dma.c      | 123
 drivers/dma/virt-dma.h      | 152
 12 files changed, 1656 insertions(+), 675 deletions(-)
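The common thread in this diffstat is the new shared virtual-channel support: virt-dma.c and virt-dma.h arrive under a new DMA_VIRTUAL_CHANNELS symbol, and the PL08x, OMAP and SA-11x0 drivers are converted to it. Every converted driver follows the same embedding pattern: struct virt_dma_chan wraps the driver's channel and struct virt_dma_desc wraps its descriptor, so the core owns the channel lock, the descriptor lists and the completion tasklet. A minimal sketch of that pattern, using only the vchan_* helpers visible in the hunks below (the foo_* names are illustrative, not from the patch):

	#include <linux/kernel.h>
	#include <linux/slab.h>

	#include "virt-dma.h"

	struct foo_desc {
		struct virt_dma_desc vd;	/* embeds dma_async_tx_descriptor */
		/* driver-specific transfer state goes here */
	};

	struct foo_chan {
		struct virt_dma_chan vc;	/* embeds dma_chan, lock and lists */
	};

	static void foo_desc_free(struct virt_dma_desc *vd)
	{
		kfree(container_of(vd, struct foo_desc, vd));
	}

	static void foo_chan_init(struct foo_chan *fc, struct dma_device *dmadev)
	{
		/* The core invokes desc_free from its tasklet once a
		 * descriptor has completed and its callback has run. */
		fc->vc.desc_free = foo_desc_free;
		vchan_init(&fc->vc, dmadev);
	}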
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d45cf1bcbde5..d06ea2950dd9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -53,6 +53,7 @@ config AMBA_PL08X
 	bool "ARM PrimeCell PL080 or PL081 support"
 	depends on ARM_AMBA && EXPERIMENTAL
 	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
 	help
 	  Platform has a PL08x DMAC device
 	  which can provide DMA engine support
@@ -269,6 +270,7 @@ config DMA_SA11X0
 	tristate "SA-11x0 DMA support"
 	depends on ARCH_SA1100
 	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
 	help
 	  Support the DMA engine found on Intel StrongARM SA-1100 and
 	  SA-1110 SoCs.  This DMA engine can only be used with on-chip
@@ -284,9 +286,18 @@ config MMP_TDMA
 
 	  Say Y here if you enabled MMP ADMA, otherwise say N.
 
+config DMA_OMAP
+	tristate "OMAP DMA support"
+	depends on ARCH_OMAP
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
 config DMA_ENGINE
 	bool
 
+config DMA_VIRTUAL_CHANNELS
+	tristate
+
 comment "DMA Clients"
 	depends on DMA_ENGINE
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 640356add0a3..4cf6b128ab9a 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -2,6 +2,7 @@ ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
 ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
 
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
+obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
 obj-$(CONFIG_DMATEST) += dmatest.o
@@ -30,3 +31,4 @@ obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
+obj-$(CONFIG_DMA_OMAP) += omap-dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 49ecbbb8932d..6fbeebb9486f 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -86,10 +86,12 @@
 #include <asm/hardware/pl080.h>
 
 #include "dmaengine.h"
+#include "virt-dma.h"
 
 #define DRIVER_NAME	"pl08xdmac"
 
 static struct amba_driver pl08x_amba_driver;
+struct pl08x_driver_data;
 
 /**
  * struct vendor_data - vendor-specific config parameters for PL08x derivatives
@@ -119,6 +121,123 @@ struct pl08x_lli
 };
 
 /**
+ * struct pl08x_bus_data - information of source or destination
+ * busses for a transfer
+ * @addr: current address
+ * @maxwidth: the maximum width of a transfer on this bus
+ * @buswidth: the width of this bus in bytes: 1, 2 or 4
+ */
+struct pl08x_bus_data {
+	dma_addr_t addr;
+	u8 maxwidth;
+	u8 buswidth;
+};
+
+/**
+ * struct pl08x_phy_chan - holder for the physical channels
+ * @id: physical index to this channel
+ * @lock: a lock to use when altering an instance of this struct
+ * @serving: the virtual channel currently being served by this physical
+ * channel
+ * @locked: channel unavailable for the system, e.g. dedicated to secure
+ * world
+ */
+struct pl08x_phy_chan {
+	unsigned int id;
+	void __iomem *base;
+	spinlock_t lock;
+	struct pl08x_dma_chan *serving;
+	bool locked;
+};
+
+/**
+ * struct pl08x_sg - structure containing data per sg
+ * @src_addr: src address of sg
+ * @dst_addr: dst address of sg
+ * @len: transfer len in bytes
+ * @node: node for txd's dsg_list
+ */
+struct pl08x_sg {
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	size_t len;
+	struct list_head node;
+};
+
+/**
+ * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
+ * @vd: virtual DMA descriptor
+ * @dsg_list: list of children sg's
+ * @llis_bus: DMA memory address (physical) start for the LLIs
+ * @llis_va: virtual memory address start for the LLIs
+ * @cctl: control reg values for current txd
+ * @ccfg: config reg values for current txd
+ * @done: this marks completed descriptors, which should not have their
+ * mux released.
+ */
+struct pl08x_txd {
+	struct virt_dma_desc vd;
+	struct list_head dsg_list;
+	dma_addr_t llis_bus;
+	struct pl08x_lli *llis_va;
+	/* Default cctl value for LLIs */
+	u32 cctl;
+	/*
+	 * Settings to be put into the physical channel when we
+	 * trigger this txd.  Other registers are in llis_va[0].
+	 */
+	u32 ccfg;
+	bool done;
+};
+
+/**
+ * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
+ * states
+ * @PL08X_CHAN_IDLE: the channel is idle
+ * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
+ * channel and is running a transfer on it
+ * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
+ * channel, but the transfer is currently paused
+ * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
+ * channel to become available (only pertains to memcpy channels)
+ */
+enum pl08x_dma_chan_state {
+	PL08X_CHAN_IDLE,
+	PL08X_CHAN_RUNNING,
+	PL08X_CHAN_PAUSED,
+	PL08X_CHAN_WAITING,
+};
+
+/**
+ * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
+ * @vc: wrapped virtual channel
+ * @phychan: the physical channel utilized by this channel, if there is one
+ * @name: name of channel
+ * @cd: channel platform data
+ * @runtime_addr: address for RX/TX according to the runtime config
+ * @at: active transaction on this channel
+ * @lock: a lock for this channel data
+ * @host: a pointer to the host (internal use)
+ * @state: whether the channel is idle, paused, running etc
+ * @slave: whether this channel is a device (slave) or for memcpy
+ * @signal: the physical DMA request signal which this channel is using
+ * @mux_use: count of descriptors using this DMA request signal setting
+ */
+struct pl08x_dma_chan {
+	struct virt_dma_chan vc;
+	struct pl08x_phy_chan *phychan;
+	const char *name;
+	const struct pl08x_channel_data *cd;
+	struct dma_slave_config cfg;
+	struct pl08x_txd *at;
+	struct pl08x_driver_data *host;
+	enum pl08x_dma_chan_state state;
+	bool slave;
+	int signal;
+	unsigned mux_use;
+};
+
+/**
  * struct pl08x_driver_data - the local state holder for the PL08x
  * @slave: slave engine for this instance
  * @memcpy: memcpy engine for this instance
@@ -128,7 +247,6 @@ struct pl08x_lli
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
- * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
@@ -143,10 +261,8 @@ struct pl08x_driver_data
 	struct pl08x_platform_data *pd;
 	struct pl08x_phy_chan *phy_chans;
 	struct dma_pool *pool;
-	int pool_ctr;
 	u8 lli_buses;
 	u8 mem_buses;
-	spinlock_t lock;
 };
 
 /*
@@ -162,12 +278,51 @@ struct pl08x_driver_data
 
 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
 {
-	return container_of(chan, struct pl08x_dma_chan, chan);
+	return container_of(chan, struct pl08x_dma_chan, vc.chan);
 }
 
 static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
 {
-	return container_of(tx, struct pl08x_txd, tx);
+	return container_of(tx, struct pl08x_txd, vd.tx);
+}
+
+/*
+ * Mux handling.
+ *
+ * This gives us the DMA request input to the PL08x primecell which the
+ * peripheral described by the channel data will be routed to, possibly
+ * via a board/SoC specific external MUX.  One important point to note
+ * here is that this does not depend on the physical channel.
+ */
+static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
+{
+	const struct pl08x_platform_data *pd = plchan->host->pd;
+	int ret;
+
+	if (plchan->mux_use++ == 0 && pd->get_signal) {
+		ret = pd->get_signal(plchan->cd);
+		if (ret < 0) {
+			plchan->mux_use = 0;
+			return ret;
+		}
+
+		plchan->signal = ret;
+	}
+	return 0;
+}
+
+static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
+{
+	const struct pl08x_platform_data *pd = plchan->host->pd;
+
+	if (plchan->signal >= 0) {
+		WARN_ON(plchan->mux_use == 0);
+
+		if (--plchan->mux_use == 0 && pd->put_signal) {
+			pd->put_signal(plchan->cd, plchan->signal);
+			plchan->signal = -1;
+		}
+	}
 }
 
 /*
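The comment above is the key design point of the new mux handling: the DMA request signal is a property of the peripheral (the channel data), not of whichever physical channel happens to serve it, and mux_use counts how many live descriptors depend on the routing. A hedged sketch of the resulting lifecycle — the real call sites are pl08x_prep_slave_sg() and the completion paths later in this diff; this fragment only illustrates the pairing:

	/* prep path: the first user routes the peripheral onto a DMA
	 * request line via pd->get_signal() and caches it in ->signal */
	if (pl08x_request_mux(plchan) < 0)
		return NULL;		/* platform refused a signal */

	txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;

	/* ...the transfer runs, possibly across several descriptors
	 * that share the same signal... */

	/* completion path: the last user returns the signal via
	 * pd->put_signal() and ->signal drops back to -1 */
	pl08x_release_mux(plchan);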
@@ -189,20 +344,25 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
-static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
-	struct pl08x_txd *txd)
+static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_phy_chan *phychan = plchan->phychan;
-	struct pl08x_lli *lli = &txd->llis_va[0];
+	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
+	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+	struct pl08x_lli *lli;
 	u32 val;
 
+	list_del(&txd->vd.node);
+
 	plchan->at = txd;
 
 	/* Wait for channel inactive */
 	while (pl08x_phy_channel_busy(phychan))
 		cpu_relax();
 
+	lli = &txd->llis_va[0];
+
 	dev_vdbg(&pl08x->adev->dev,
 		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
 		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
@@ -311,10 +471,8 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_phy_chan *ch;
 	struct pl08x_txd *txd;
-	unsigned long flags;
 	size_t bytes = 0;
 
-	spin_lock_irqsave(&plchan->lock, flags);
 	ch = plchan->phychan;
 	txd = plchan->at;
 
@@ -354,18 +512,6 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 		}
 	}
 
-	/* Sum up all queued transactions */
-	if (!list_empty(&plchan->pend_list)) {
-		struct pl08x_txd *txdi;
-		list_for_each_entry(txdi, &plchan->pend_list, node) {
-			struct pl08x_sg *dsg;
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				bytes += dsg->len;
-		}
-	}
-
-	spin_unlock_irqrestore(&plchan->lock, flags);
-
 	return bytes;
 }
 
@@ -391,7 +537,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 
 		if (!ch->locked && !ch->serving) {
 			ch->serving = virt_chan;
-			ch->signal = -1;
 			spin_unlock_irqrestore(&ch->lock, flags);
 			break;
 		}
@@ -404,25 +549,114 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 	return NULL;
 }
 
-	pm_runtime_get_sync(&pl08x->adev->dev);
 	return ch;
 }
 
+/* Mark the physical channel as free.  Note, this write is atomic. */
 static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
 					 struct pl08x_phy_chan *ch)
 {
-	unsigned long flags;
+	ch->serving = NULL;
+}
+
+/*
+ * Try to allocate a physical channel.  When successful, assign it to
+ * this virtual channel, and initiate the next descriptor.  The
+ * virtual channel lock must be held at this point.
+ */
+static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+	struct pl08x_phy_chan *ch;
 
-	spin_lock_irqsave(&ch->lock, flags);
+	ch = pl08x_get_phy_channel(pl08x, plchan);
+	if (!ch) {
+		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
+		plchan->state = PL08X_CHAN_WAITING;
+		return;
+	}
 
-	/* Stop the channel and clear its interrupts */
-	pl08x_terminate_phy_chan(pl08x, ch);
+	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
+		ch->id, plchan->name);
 
-	pm_runtime_put(&pl08x->adev->dev);
+	plchan->phychan = ch;
+	plchan->state = PL08X_CHAN_RUNNING;
+	pl08x_start_next_txd(plchan);
+}
 
-	/* Mark it as free */
-	ch->serving = NULL;
-	spin_unlock_irqrestore(&ch->lock, flags);
+static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
+	struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+
+	dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
+		ch->id, plchan->name);
+
+	/*
+	 * We do this without taking the lock; we're really only concerned
+	 * about whether this pointer is NULL or not, and we're guaranteed
+	 * that this will only be called when it _already_ is non-NULL.
+	 */
+	ch->serving = plchan;
+	plchan->phychan = ch;
+	plchan->state = PL08X_CHAN_RUNNING;
+	pl08x_start_next_txd(plchan);
+}
+
+/*
+ * Free a physical DMA channel, potentially reallocating it to another
+ * virtual channel if we have any pending.
+ */
+static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+	struct pl08x_dma_chan *p, *next;
+
+ retry:
+	next = NULL;
+
+	/* Find a waiting virtual channel for the next transfer. */
+	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
+		if (p->state == PL08X_CHAN_WAITING) {
+			next = p;
+			break;
+		}
+
+	if (!next) {
+		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
+			if (p->state == PL08X_CHAN_WAITING) {
+				next = p;
+				break;
+			}
+	}
+
+	/* Ensure that the physical channel is stopped */
+	pl08x_terminate_phy_chan(pl08x, plchan->phychan);
+
+	if (next) {
+		bool success;
+
+		/*
+		 * Eww.  We know this isn't going to deadlock
+		 * but lockdep probably doesn't.
+		 */
+		spin_lock(&next->vc.lock);
+		/* Re-check the state now that we have the lock */
+		success = next->state == PL08X_CHAN_WAITING;
+		if (success)
+			pl08x_phy_reassign_start(plchan->phychan, next);
+		spin_unlock(&next->vc.lock);
+
+		/* If the state changed, try to find another channel */
+		if (!success)
+			goto retry;
+	} else {
+		/* No more jobs, so free up the physical channel */
+		pl08x_put_phy_channel(pl08x, plchan->phychan);
+	}
+
+	plchan->phychan = NULL;
+	plchan->state = PL08X_CHAN_IDLE;
 }
 
 /*
@@ -585,8 +819,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		return 0;
 	}
 
-	pl08x->pool_ctr++;
-
 	bd.txd = txd;
 	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
 	cctl = txd->cctl;
@@ -802,18 +1034,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 	return num_llis;
 }
 
-/* You should call this with the struct pl08x lock held */
 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 			   struct pl08x_txd *txd)
 {
 	struct pl08x_sg *dsg, *_dsg;
 
-	/* Free the LLI */
 	if (txd->llis_va)
 		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
 
-	pl08x->pool_ctr--;
-
 	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
 		list_del(&dsg->node);
 		kfree(dsg);
@@ -822,133 +1050,75 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 	kfree(txd);
 }
 
-static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
-				struct pl08x_dma_chan *plchan)
+static void pl08x_unmap_buffers(struct pl08x_txd *txd)
 {
-	struct pl08x_txd *txdi = NULL;
-	struct pl08x_txd *next;
+	struct device *dev = txd->vd.tx.chan->device->dev;
+	struct pl08x_sg *dsg;
 
-	if (!list_empty(&plchan->pend_list)) {
-		list_for_each_entry_safe(txdi,
-					 next, &plchan->pend_list, node) {
-			list_del(&txdi->node);
-			pl08x_free_txd(pl08x, txdi);
+	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		else {
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
 		}
 	}
+	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
+		else
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
+	}
 }
 
-/*
- * The DMA ENGINE API
- */
-static int pl08x_alloc_chan_resources(struct dma_chan *chan)
+static void pl08x_desc_free(struct virt_dma_desc *vd)
 {
-	return 0;
-}
+	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
 
-static void pl08x_free_chan_resources(struct dma_chan *chan)
-{
+	if (!plchan->slave)
+		pl08x_unmap_buffers(txd);
+
+	if (!txd->done)
+		pl08x_release_mux(plchan);
+
+	pl08x_free_txd(plchan->host, txd);
 }
 
-/*
- * This should be called with the channel plchan->lock held
- */
-static int prep_phy_channel(struct pl08x_dma_chan *plchan,
-	struct pl08x_txd *txd)
+static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
+				struct pl08x_dma_chan *plchan)
 {
-	struct pl08x_driver_data *pl08x = plchan->host;
-	struct pl08x_phy_chan *ch;
-	int ret;
-
-	/* Check if we already have a channel */
-	if (plchan->phychan) {
-		ch = plchan->phychan;
-		goto got_channel;
-	}
+	LIST_HEAD(head);
+	struct pl08x_txd *txd;
 
-	ch = pl08x_get_phy_channel(pl08x, plchan);
-	if (!ch) {
-		/* No physical channel available, cope with it */
-		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
-		return -EBUSY;
-	}
+	vchan_get_all_descriptors(&plchan->vc, &head);
 
-	/*
-	 * OK we have a physical channel: for memcpy() this is all we
-	 * need, but for slaves the physical signals may be muxed!
-	 * Can the platform allow us to use this channel?
-	 */
-	if (plchan->slave && pl08x->pd->get_signal) {
-		ret = pl08x->pd->get_signal(plchan);
-		if (ret < 0) {
-			dev_dbg(&pl08x->adev->dev,
-				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
-				ch->id, plchan->name);
-			/* Release physical channel & return */
-			pl08x_put_phy_channel(pl08x, ch);
-			return -EBUSY;
-		}
-		ch->signal = ret;
+	while (!list_empty(&head)) {
+		txd = list_first_entry(&head, struct pl08x_txd, vd.node);
+		list_del(&txd->vd.node);
+		pl08x_desc_free(&txd->vd);
 	}
-
-	plchan->phychan = ch;
-	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
-		 ch->id,
-		 ch->signal,
-		 plchan->name);
-
-got_channel:
-	/* Assign the flow control signal to this channel */
-	if (txd->direction == DMA_MEM_TO_DEV)
-		txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
-	else if (txd->direction == DMA_DEV_TO_MEM)
-		txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
-
-	plchan->phychan_hold++;
-
-	return 0;
 }
 
-static void release_phy_channel(struct pl08x_dma_chan *plchan)
+/*
+ * The DMA ENGINE API
+ */
+static int pl08x_alloc_chan_resources(struct dma_chan *chan)
 {
-	struct pl08x_driver_data *pl08x = plchan->host;
-
-	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
-		pl08x->pd->put_signal(plchan);
-		plchan->phychan->signal = -1;
-	}
-	pl08x_put_phy_channel(pl08x, plchan->phychan);
-	plchan->phychan = NULL;
+	return 0;
 }
 
-static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
+static void pl08x_free_chan_resources(struct dma_chan *chan)
 {
-	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
-	struct pl08x_txd *txd = to_pl08x_txd(tx);
-	unsigned long flags;
-	dma_cookie_t cookie;
-
-	spin_lock_irqsave(&plchan->lock, flags);
-	cookie = dma_cookie_assign(tx);
-
-	/* Put this onto the pending list */
-	list_add_tail(&txd->node, &plchan->pend_list);
-
-	/*
-	 * If there was no physical channel available for this memcpy,
-	 * stack the request up and indicate that the channel is waiting
-	 * for a free physical channel.
-	 */
-	if (!plchan->slave && !plchan->phychan) {
-		/* Do this memcpy whenever there is a channel ready */
-		plchan->state = PL08X_CHAN_WAITING;
-		plchan->waiting = txd;
-	} else {
-		plchan->phychan_hold--;
-	}
-
-	spin_unlock_irqrestore(&plchan->lock, flags);
-
-	return cookie;
+	/* Ensure all queued descriptors are freed */
+	vchan_free_chan_resources(to_virt_chan(chan));
 }
 
 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
@@ -968,23 +1138,53 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
 		dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	struct virt_dma_desc *vd;
+	unsigned long flags;
 	enum dma_status ret;
+	size_t bytes = 0;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_SUCCESS)
 		return ret;
 
 	/*
+	 * There's no point calculating the residue if there's
+	 * no txstate to store the value.
+	 */
+	if (!txstate) {
+		if (plchan->state == PL08X_CHAN_PAUSED)
+			ret = DMA_PAUSED;
+		return ret;
+	}
+
+	spin_lock_irqsave(&plchan->vc.lock, flags);
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret != DMA_SUCCESS) {
+		vd = vchan_find_desc(&plchan->vc, cookie);
+		if (vd) {
+			/* On the issued list, so hasn't been processed yet */
+			struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+			struct pl08x_sg *dsg;
+
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				bytes += dsg->len;
+		} else {
+			bytes = pl08x_getbytes_chan(plchan);
+		}
+	}
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+	/*
 	 * This cookie not complete yet
 	 * Get number of bytes left in the active transactions and queue
 	 */
-	dma_set_residue(txstate, pl08x_getbytes_chan(plchan));
+	dma_set_residue(txstate, bytes);
 
-	if (plchan->state == PL08X_CHAN_PAUSED)
-		return DMA_PAUSED;
+	if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
+		ret = DMA_PAUSED;
 
 	/* Whether waiting or running, we're in progress */
-	return DMA_IN_PROGRESS;
+	return ret;
 }
 
@@ -1080,38 +1280,14 @@ static u32 pl08x_burst(u32 maxburst)
 	return burst_sizes[i].reg;
 }
 
-static int dma_set_runtime_config(struct dma_chan *chan,
-				  struct dma_slave_config *config)
+static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
+	enum dma_slave_buswidth addr_width, u32 maxburst)
 {
-	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
-	struct pl08x_driver_data *pl08x = plchan->host;
-	enum dma_slave_buswidth addr_width;
-	u32 width, burst, maxburst;
-	u32 cctl = 0;
-
-	if (!plchan->slave)
-		return -EINVAL;
-
-	/* Transfer direction */
-	plchan->runtime_direction = config->direction;
-	if (config->direction == DMA_MEM_TO_DEV) {
-		addr_width = config->dst_addr_width;
-		maxburst = config->dst_maxburst;
-	} else if (config->direction == DMA_DEV_TO_MEM) {
-		addr_width = config->src_addr_width;
-		maxburst = config->src_maxburst;
-	} else {
-		dev_err(&pl08x->adev->dev,
-			"bad runtime_config: alien transfer direction\n");
-		return -EINVAL;
-	}
+	u32 width, burst, cctl = 0;
 
 	width = pl08x_width(addr_width);
-	if (width == ~0) {
-		dev_err(&pl08x->adev->dev,
-			"bad runtime_config: alien address width\n");
-		return -EINVAL;
-	}
+	if (width == ~0)
+		return ~0;
 
 	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
 	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
@@ -1128,28 +1304,23 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
 	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
 
-	plchan->device_fc = config->device_fc;
+	return pl08x_cctl(cctl);
+}
 
-	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
-		plchan->src_addr = config->src_addr;
-		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
-			pl08x_select_bus(plchan->cd->periph_buses,
-					 pl08x->mem_buses);
-	} else {
-		plchan->dst_addr = config->dst_addr;
-		plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
-			pl08x_select_bus(pl08x->mem_buses,
-					 plchan->cd->periph_buses);
-	}
+static int dma_set_runtime_config(struct dma_chan *chan,
+				  struct dma_slave_config *config)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 
-	dev_dbg(&pl08x->adev->dev,
-		"configured channel %s (%s) for %s, data width %d, "
-		"maxburst %d words, LE, CCTL=0x%08x\n",
-		dma_chan_name(chan), plchan->name,
-		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
-		addr_width,
-		maxburst,
-		cctl);
+	if (!plchan->slave)
+		return -EINVAL;
+
+	/* Reject definitely invalid configurations */
+	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+		return -EINVAL;
+
+	plchan->cfg = *config;
 
 	return 0;
 }
@@ -1163,95 +1334,19 @@ static void pl08x_issue_pending(struct dma_chan *chan)
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	unsigned long flags;
 
-	spin_lock_irqsave(&plchan->lock, flags);
-	/* Something is already active, or we're waiting for a channel... */
-	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
-		spin_unlock_irqrestore(&plchan->lock, flags);
-		return;
-	}
-
-	/* Take the first element in the queue and execute it */
-	if (!list_empty(&plchan->pend_list)) {
-		struct pl08x_txd *next;
-
-		next = list_first_entry(&plchan->pend_list,
-					struct pl08x_txd,
-					node);
-		list_del(&next->node);
-		plchan->state = PL08X_CHAN_RUNNING;
-
-		pl08x_start_txd(plchan, next);
+	spin_lock_irqsave(&plchan->vc.lock, flags);
+	if (vchan_issue_pending(&plchan->vc)) {
+		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
+			pl08x_phy_alloc_and_start(plchan);
 	}
-
-	spin_unlock_irqrestore(&plchan->lock, flags);
-}
-
-static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
-	struct pl08x_txd *txd)
-{
-	struct pl08x_driver_data *pl08x = plchan->host;
-	unsigned long flags;
-	int num_llis, ret;
-
-	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
-	if (!num_llis) {
-		spin_lock_irqsave(&plchan->lock, flags);
-		pl08x_free_txd(pl08x, txd);
-		spin_unlock_irqrestore(&plchan->lock, flags);
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&plchan->lock, flags);
-
-	/*
-	 * See if we already have a physical channel allocated,
-	 * else this is the time to try to get one.
-	 */
-	ret = prep_phy_channel(plchan, txd);
-	if (ret) {
-		/*
-		 * No physical channel was available.
-		 *
-		 * memcpy transfers can be sorted out at submission time.
-		 *
-		 * Slave transfers may have been denied due to platform
-		 * channel muxing restrictions.  Since there is no guarantee
-		 * that this will ever be resolved, and the signal must be
-		 * acquired AFTER acquiring the physical channel, we will let
-		 * them be NACK:ed with -EBUSY here.  The drivers can retry
-		 * the prep() call if they are eager on doing this using DMA.
-		 */
-		if (plchan->slave) {
-			pl08x_free_txd_list(pl08x, plchan);
-			pl08x_free_txd(pl08x, txd);
-			spin_unlock_irqrestore(&plchan->lock, flags);
-			return -EBUSY;
-		}
-	} else
-		/*
-		 * Else we're all set, paused and ready to roll, status
-		 * will switch to PL08X_CHAN_RUNNING when we call
-		 * issue_pending().  If there is something running on the
-		 * channel already we don't change its state.
-		 */
-		if (plchan->state == PL08X_CHAN_IDLE)
-			plchan->state = PL08X_CHAN_PAUSED;
-
-	spin_unlock_irqrestore(&plchan->lock, flags);
-
-	return 0;
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 }
 
-static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
-	unsigned long flags)
+static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
 
 	if (txd) {
-		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
-		txd->tx.flags = flags;
-		txd->tx.tx_submit = pl08x_tx_submit;
-		INIT_LIST_HEAD(&txd->node);
 		INIT_LIST_HEAD(&txd->dsg_list);
 
 		/* Always enable error and terminal interrupts */
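With the conversion, issue_pending reduces to the canonical virt-dma form seen above: vchan_issue_pending() splices the submitted descriptors onto the issued list and returns true if there is work, so the driver only needs to kick the hardware when the channel is idle. The generic shape for any virt-dma driver, as a sketch (foo_* names, including the `busy` flag, are hypothetical):

	static void foo_issue_pending(struct dma_chan *chan)
	{
		struct foo_chan *fc = to_foo_chan(chan);
		unsigned long flags;

		spin_lock_irqsave(&fc->vc.lock, flags);
		/* Splices desc_submitted onto desc_issued; true if work exists */
		if (vchan_issue_pending(&fc->vc) && !fc->busy)
			foo_start_next(fc);	/* kick hardware only when idle */
		spin_unlock_irqrestore(&fc->vc.lock, flags);
	}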
@@ -1274,7 +1369,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	struct pl08x_sg *dsg;
 	int ret;
 
-	txd = pl08x_get_txd(plchan, flags);
+	txd = pl08x_get_txd(plchan);
 	if (!txd) {
 		dev_err(&pl08x->adev->dev,
 			"%s no memory for descriptor\n", __func__);
@@ -1290,14 +1385,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	}
 	list_add_tail(&dsg->node, &txd->dsg_list);
 
-	txd->direction = DMA_NONE;
 	dsg->src_addr = src;
 	dsg->dst_addr = dest;
 	dsg->len = len;
 
 	/* Set platform data for m2m */
 	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
-	txd->cctl = pl08x->pd->memcpy_channel.cctl &
+	txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
 			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
 
 	/* Both to be incremented or the code will break */
@@ -1307,11 +1401,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
 				      pl08x->mem_buses);
 
-	ret = pl08x_prep_channel_resources(plchan, txd);
-	if (ret)
+	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+	if (!ret) {
+		pl08x_free_txd(pl08x, txd);
 		return NULL;
+	}
 
-	return &txd->tx;
+	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }
 
 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
@@ -1324,36 +1420,40 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	struct pl08x_txd *txd;
 	struct pl08x_sg *dsg;
 	struct scatterlist *sg;
+	enum dma_slave_buswidth addr_width;
 	dma_addr_t slave_addr;
 	int ret, tmp;
+	u8 src_buses, dst_buses;
+	u32 maxburst, cctl;
 
 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
 			__func__, sg_dma_len(sgl), plchan->name);
 
-	txd = pl08x_get_txd(plchan, flags);
+	txd = pl08x_get_txd(plchan);
 	if (!txd) {
 		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
 		return NULL;
 	}
 
-	if (direction != plchan->runtime_direction)
-		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
-			"the direction configured for the PrimeCell\n",
-			__func__);
-
 	/*
 	 * Set up addresses, the PrimeCell configured address
 	 * will take precedence since this may configure the
 	 * channel target address dynamically at runtime.
 	 */
-	txd->direction = direction;
-
 	if (direction == DMA_MEM_TO_DEV) {
-		txd->cctl = plchan->dst_cctl;
-		slave_addr = plchan->dst_addr;
+		cctl = PL080_CONTROL_SRC_INCR;
+		slave_addr = plchan->cfg.dst_addr;
+		addr_width = plchan->cfg.dst_addr_width;
+		maxburst = plchan->cfg.dst_maxburst;
+		src_buses = pl08x->mem_buses;
+		dst_buses = plchan->cd->periph_buses;
 	} else if (direction == DMA_DEV_TO_MEM) {
-		txd->cctl = plchan->src_cctl;
-		slave_addr = plchan->src_addr;
+		cctl = PL080_CONTROL_DST_INCR;
+		slave_addr = plchan->cfg.src_addr;
+		addr_width = plchan->cfg.src_addr_width;
+		maxburst = plchan->cfg.src_maxburst;
+		src_buses = plchan->cd->periph_buses;
+		dst_buses = pl08x->mem_buses;
 	} else {
 		pl08x_free_txd(pl08x, txd);
 		dev_err(&pl08x->adev->dev,
@@ -1361,7 +1461,17 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		return NULL;
 	}
 
-	if (plchan->device_fc)
+	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
+	if (cctl == ~0) {
+		pl08x_free_txd(pl08x, txd);
+		dev_err(&pl08x->adev->dev,
+			"DMA slave configuration botched?\n");
+		return NULL;
+	}
+
+	txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);
+
+	if (plchan->cfg.device_fc)
 		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
 			PL080_FLOW_PER2MEM_PER;
 	else
@@ -1370,9 +1480,28 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 
 	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 
+	ret = pl08x_request_mux(plchan);
+	if (ret < 0) {
+		pl08x_free_txd(pl08x, txd);
+		dev_dbg(&pl08x->adev->dev,
+			"unable to mux for transfer on %s due to platform restrictions\n",
+			plchan->name);
+		return NULL;
+	}
+
+	dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
+		plchan->signal, plchan->name);
+
+	/* Assign the flow control signal to this channel */
+	if (direction == DMA_MEM_TO_DEV)
+		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
+	else
+		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+
 	for_each_sg(sgl, sg, sg_len, tmp) {
 		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
 		if (!dsg) {
+			pl08x_release_mux(plchan);
 			pl08x_free_txd(pl08x, txd);
 			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
 					__func__);
@@ -1390,11 +1519,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		}
 	}
 
-	ret = pl08x_prep_channel_resources(plchan, txd);
-	if (ret)
+	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+	if (!ret) {
+		pl08x_release_mux(plchan);
+		pl08x_free_txd(pl08x, txd);
 		return NULL;
+	}
 
-	return &txd->tx;
+	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }
 
 static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
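Both prep paths now end in vchan_tx_prep() instead of hand-rolled submission: the work that pl08x_tx_submit() used to do (cookie assignment, queueing on the pending list) is handled generically, with the descriptor placed on the channel's submitted list under vc.lock at submit time. A sketch of a prep function in that style (foo_* names are hypothetical; vchan_tx_prep() is the helper used above):

	static struct dma_async_tx_descriptor *foo_prep_dma_memcpy(
			struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
			size_t len, unsigned long flags)
	{
		struct foo_chan *fc = to_foo_chan(chan);
		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

		if (!d)
			return NULL;
		/* ...fill in the hardware descriptor for dst/src/len... */

		/* vchan_tx_prep() initialises d->vd.tx and points its
		 * tx_submit at the generic path, which assigns the cookie
		 * and queues d->vd on desc_submitted under vc.lock. */
		return vchan_tx_prep(&fc->vc, &d->vd, flags);
	}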
@@ -1415,9 +1547,9 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	 * Anything succeeds on channels with no physical allocation and
 	 * no queued transfers.
 	 */
-	spin_lock_irqsave(&plchan->lock, flags);
+	spin_lock_irqsave(&plchan->vc.lock, flags);
 	if (!plchan->phychan && !plchan->at) {
-		spin_unlock_irqrestore(&plchan->lock, flags);
+		spin_unlock_irqrestore(&plchan->vc.lock, flags);
 		return 0;
 	}
 
@@ -1426,18 +1558,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		plchan->state = PL08X_CHAN_IDLE;
 
 		if (plchan->phychan) {
-			pl08x_terminate_phy_chan(pl08x, plchan->phychan);
-
 			/*
 			 * Mark physical channel as free and free any slave
 			 * signal
 			 */
-			release_phy_channel(plchan);
-			plchan->phychan_hold = 0;
+			pl08x_phy_free(plchan);
 		}
 		/* Dequeue jobs and free LLIs */
 		if (plchan->at) {
-			pl08x_free_txd(pl08x, plchan->at);
+			pl08x_desc_free(&plchan->at->vd);
 			plchan->at = NULL;
 		}
 		/* Dequeue jobs not yet fired as well */
@@ -1457,7 +1586,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		break;
 	}
 
-	spin_unlock_irqrestore(&plchan->lock, flags);
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 
 	return ret;
 }
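The terminate path above pairs pl08x_phy_free() with the pl08x_free_txd_list()/vchan_get_all_descriptors() combination shown earlier: everything is unlinked under vc.lock and then freed through the channel's desc_free hook. The generic terminate-all shape that implies is roughly the following sketch; vchan_dma_desc_free_list() is provided by the new virt-dma core but does not appear in this section, so treat that name, like the foo_* helpers, as an assumption:

	static int foo_terminate_all(struct dma_chan *chan)
	{
		struct foo_chan *fc = to_foo_chan(chan);
		unsigned long flags;
		LIST_HEAD(head);

		spin_lock_irqsave(&fc->vc.lock, flags);
		foo_hw_stop(fc);		/* quiesce the physical channel */
		fc->at = NULL;
		/* Move both submitted and issued descriptors onto our list */
		vchan_get_all_descriptors(&fc->vc, &head);
		spin_unlock_irqrestore(&fc->vc.lock, flags);

		/* Free them outside the lock via the channel's desc_free hook */
		vchan_dma_desc_free_list(&fc->vc, &head);
		return 0;
	}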
@@ -1494,123 +1623,6 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
 }
 
-static void pl08x_unmap_buffers(struct pl08x_txd *txd)
-{
-	struct device *dev = txd->tx.chan->device->dev;
-	struct pl08x_sg *dsg;
-
-	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_single(dev, dsg->src_addr, dsg->len,
-						DMA_TO_DEVICE);
-		else {
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_page(dev, dsg->src_addr, dsg->len,
-						DMA_TO_DEVICE);
-		}
-	}
-	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
-						DMA_FROM_DEVICE);
-		else
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
-						DMA_FROM_DEVICE);
-	}
-}
-
-static void pl08x_tasklet(unsigned long data)
-{
-	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
-	struct pl08x_driver_data *pl08x = plchan->host;
-	struct pl08x_txd *txd;
-	unsigned long flags;
-
-	spin_lock_irqsave(&plchan->lock, flags);
-
-	txd = plchan->at;
-	plchan->at = NULL;
-
-	if (txd) {
-		/* Update last completed */
-		dma_cookie_complete(&txd->tx);
-	}
-
-	/* If a new descriptor is queued, set it up plchan->at is NULL here */
-	if (!list_empty(&plchan->pend_list)) {
-		struct pl08x_txd *next;
-
-		next = list_first_entry(&plchan->pend_list,
-					struct pl08x_txd,
-					node);
-		list_del(&next->node);
-
-		pl08x_start_txd(plchan, next);
-	} else if (plchan->phychan_hold) {
-		/*
-		 * This channel is still in use - we have a new txd being
-		 * prepared and will soon be queued.  Don't give up the
-		 * physical channel.
-		 */
-	} else {
-		struct pl08x_dma_chan *waiting = NULL;
-
-		/*
-		 * No more jobs, so free up the physical channel
-		 * Free any allocated signal on slave transfers too
-		 */
-		release_phy_channel(plchan);
-		plchan->state = PL08X_CHAN_IDLE;
-
-		/*
-		 * And NOW before anyone else can grab that free:d up
-		 * physical channel, see if there is some memcpy pending
-		 * that seriously needs to start because of being stacked
-		 * up while we were choking the physical channels with data.
-		 */
-		list_for_each_entry(waiting, &pl08x->memcpy.channels,
-				    chan.device_node) {
-			if (waiting->state == PL08X_CHAN_WAITING &&
-			    waiting->waiting != NULL) {
-				int ret;
-
-				/* This should REALLY not fail now */
-				ret = prep_phy_channel(waiting,
-						       waiting->waiting);
-				BUG_ON(ret);
-				waiting->phychan_hold--;
-				waiting->state = PL08X_CHAN_RUNNING;
-				waiting->waiting = NULL;
-				pl08x_issue_pending(&waiting->chan);
-				break;
-			}
-		}
-	}
-
-	spin_unlock_irqrestore(&plchan->lock, flags);
-
-	if (txd) {
-		dma_async_tx_callback callback = txd->tx.callback;
-		void *callback_param = txd->tx.callback_param;
-
-		/* Don't try to unmap buffers on slave channels */
-		if (!plchan->slave)
-			pl08x_unmap_buffers(txd);
-
-		/* Free the descriptor */
-		spin_lock_irqsave(&plchan->lock, flags);
-		pl08x_free_txd(pl08x, txd);
-		spin_unlock_irqrestore(&plchan->lock, flags);
-
-		/* Callback to signal completion */
-		if (callback)
-			callback(callback_param);
-	}
-}
-
 static irqreturn_t pl08x_irq(int irq, void *dev)
 {
 	struct pl08x_driver_data *pl08x = dev;
@@ -1635,6 +1647,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 			/* Locate physical channel */
 			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
 			struct pl08x_dma_chan *plchan = phychan->serving;
+			struct pl08x_txd *tx;
 
 			if (!plchan) {
 				dev_err(&pl08x->adev->dev,
@@ -1643,8 +1656,29 @@ static irqreturn_t pl08x_irq(int irq, void *dev) | |||
1643 | continue; | 1656 | continue; |
1644 | } | 1657 | } |
1645 | 1658 | ||
1646 | /* Schedule tasklet on this channel */ | 1659 | spin_lock(&plchan->vc.lock); |
1647 | tasklet_schedule(&plchan->tasklet); | 1660 | tx = plchan->at; |
1661 | if (tx) { | ||
1662 | plchan->at = NULL; | ||
1663 | /* | ||
1664 | * This descriptor is done, release its mux | ||
1665 | * reservation. | ||
1666 | */ | ||
1667 | pl08x_release_mux(plchan); | ||
1668 | tx->done = true; | ||
1669 | vchan_cookie_complete(&tx->vd); | ||
1670 | |||
1671 | /* | ||
1672 | * And start the next descriptor (if any), | ||
1673 | * otherwise free this channel. | ||
1674 | */ | ||
1675 | if (vchan_next_desc(&plchan->vc)) | ||
1676 | pl08x_start_next_txd(plchan); | ||
1677 | else | ||
1678 | pl08x_phy_free(plchan); | ||
1679 | } | ||
1680 | spin_unlock(&plchan->vc.lock); | ||
1681 | |||
1648 | mask |= (1 << i); | 1682 | mask |= (1 << i); |
1649 | } | 1683 | } |
1650 | } | 1684 | } |
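
With the virt-dma conversion, completion moves out of the per-channel tasklet and into the interrupt handler itself, under the vchan lock. A minimal sketch of the pattern, assuming driver-specific hooks for programming the next transfer and releasing the physical channel (start_next() and release_physical() are illustrative names, not pl08x functions):

	/* Sketch: descriptor completion from a virt-dma IRQ handler */
	spin_lock(&chan->vc.lock);
	txd = chan->at;			/* descriptor the hardware just finished */
	if (txd) {
		chan->at = NULL;
		vchan_cookie_complete(&txd->vd);	/* mark cookie done; callback runs later */
		if (vchan_next_desc(&chan->vc))		/* more issued work on this vchan? */
			start_next(chan);		/* assumed hook: program next descriptor */
		else
			release_physical(chan);		/* assumed hook: give up the pchan */
	}
	spin_unlock(&chan->vc.lock);

The client callback itself still runs from the virt-dma tasklet, so only the hardware handoff happens in IRQ context.
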
@@ -1654,16 +1688,10 @@ static irqreturn_t pl08x_irq(int irq, void *dev) | |||
1654 | 1688 | ||
1655 | static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) | 1689 | static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) |
1656 | { | 1690 | { |
1657 | u32 cctl = pl08x_cctl(chan->cd->cctl); | ||
1658 | |||
1659 | chan->slave = true; | 1691 | chan->slave = true; |
1660 | chan->name = chan->cd->bus_id; | 1692 | chan->name = chan->cd->bus_id; |
1661 | chan->src_addr = chan->cd->addr; | 1693 | chan->cfg.src_addr = chan->cd->addr; |
1662 | chan->dst_addr = chan->cd->addr; | 1694 | chan->cfg.dst_addr = chan->cd->addr; |
1663 | chan->src_cctl = cctl | PL080_CONTROL_DST_INCR | | ||
1664 | pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses); | ||
1665 | chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR | | ||
1666 | pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses); | ||
1667 | } | 1695 | } |
1668 | 1696 | ||
1669 | /* | 1697 | /* |
@@ -1693,6 +1721,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1693 | 1721 | ||
1694 | chan->host = pl08x; | 1722 | chan->host = pl08x; |
1695 | chan->state = PL08X_CHAN_IDLE; | 1723 | chan->state = PL08X_CHAN_IDLE; |
1724 | chan->signal = -1; | ||
1696 | 1725 | ||
1697 | if (slave) { | 1726 | if (slave) { |
1698 | chan->cd = &pl08x->pd->slave_channels[i]; | 1727 | chan->cd = &pl08x->pd->slave_channels[i]; |
@@ -1705,26 +1734,12 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1705 | return -ENOMEM; | 1734 | return -ENOMEM; |
1706 | } | 1735 | } |
1707 | } | 1736 | } |
1708 | if (chan->cd->circular_buffer) { | ||
1709 | dev_err(&pl08x->adev->dev, | ||
1710 | "channel %s: circular buffers not supported\n", | ||
1711 | chan->name); | ||
1712 | kfree(chan); | ||
1713 | continue; | ||
1714 | } | ||
1715 | dev_dbg(&pl08x->adev->dev, | 1737 | dev_dbg(&pl08x->adev->dev, |
1716 | "initialize virtual channel \"%s\"\n", | 1738 | "initialize virtual channel \"%s\"\n", |
1717 | chan->name); | 1739 | chan->name); |
1718 | 1740 | ||
1719 | chan->chan.device = dmadev; | 1741 | chan->vc.desc_free = pl08x_desc_free; |
1720 | dma_cookie_init(&chan->chan); | 1742 | vchan_init(&chan->vc, dmadev); |
1721 | |||
1722 | spin_lock_init(&chan->lock); | ||
1723 | INIT_LIST_HEAD(&chan->pend_list); | ||
1724 | tasklet_init(&chan->tasklet, pl08x_tasklet, | ||
1725 | (unsigned long) chan); | ||
1726 | |||
1727 | list_add_tail(&chan->chan.device_node, &dmadev->channels); | ||
1728 | } | 1743 | } |
1729 | dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", | 1744 | dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", |
1730 | i, slave ? "slave" : "memcpy"); | 1745 | i, slave ? "slave" : "memcpy"); |
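
vchan_init() absorbs the boilerplate deleted above: it initialises the channel lock, the descriptor lists and the completion tasklet, and links the channel onto the dma_device's channel list. A converted driver is left with only this per-channel setup (my_desc_free is an assumed driver callback):

	/* Sketch: per-channel initialisation after conversion to virt-dma */
	chan->vc.desc_free = my_desc_free;	/* invoked by virt-dma to free a descriptor */
	vchan_init(&chan->vc, dmadev);		/* lock, lists, tasklet, device_node linkage */
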
@@ -1737,8 +1752,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev) | |||
1737 | struct pl08x_dma_chan *next; | 1752 | struct pl08x_dma_chan *next; |
1738 | 1753 | ||
1739 | list_for_each_entry_safe(chan, | 1754 | list_for_each_entry_safe(chan, |
1740 | next, &dmadev->channels, chan.device_node) { | 1755 | next, &dmadev->channels, vc.chan.device_node) { |
1741 | list_del(&chan->chan.device_node); | 1756 | list_del(&chan->vc.chan.device_node); |
1742 | kfree(chan); | 1757 | kfree(chan); |
1743 | } | 1758 | } |
1744 | } | 1759 | } |
@@ -1791,7 +1806,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) | |||
1791 | seq_printf(s, "\nPL08x virtual memcpy channels:\n"); | 1806 | seq_printf(s, "\nPL08x virtual memcpy channels:\n"); |
1792 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | 1807 | seq_printf(s, "CHANNEL:\tSTATE:\n"); |
1793 | seq_printf(s, "--------\t------\n"); | 1808 | seq_printf(s, "--------\t------\n"); |
1794 | list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { | 1809 | list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) { |
1795 | seq_printf(s, "%s\t\t%s\n", chan->name, | 1810 | seq_printf(s, "%s\t\t%s\n", chan->name, |
1796 | pl08x_state_str(chan->state)); | 1811 | pl08x_state_str(chan->state)); |
1797 | } | 1812 | } |
@@ -1799,7 +1814,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) | |||
1799 | seq_printf(s, "\nPL08x virtual slave channels:\n"); | 1814 | seq_printf(s, "\nPL08x virtual slave channels:\n"); |
1800 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | 1815 | seq_printf(s, "CHANNEL:\tSTATE:\n"); |
1801 | seq_printf(s, "--------\t------\n"); | 1816 | seq_printf(s, "--------\t------\n"); |
1802 | list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { | 1817 | list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { |
1803 | seq_printf(s, "%s\t\t%s\n", chan->name, | 1818 | seq_printf(s, "%s\t\t%s\n", chan->name, |
1804 | pl08x_state_str(chan->state)); | 1819 | pl08x_state_str(chan->state)); |
1805 | } | 1820 | } |
@@ -1851,9 +1866,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
1851 | goto out_no_pl08x; | 1866 | goto out_no_pl08x; |
1852 | } | 1867 | } |
1853 | 1868 | ||
1854 | pm_runtime_set_active(&adev->dev); | ||
1855 | pm_runtime_enable(&adev->dev); | ||
1856 | |||
1857 | /* Initialize memcpy engine */ | 1869 | /* Initialize memcpy engine */ |
1858 | dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); | 1870 | dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); |
1859 | pl08x->memcpy.dev = &adev->dev; | 1871 | pl08x->memcpy.dev = &adev->dev; |
@@ -1903,8 +1915,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
1903 | goto out_no_lli_pool; | 1915 | goto out_no_lli_pool; |
1904 | } | 1916 | } |
1905 | 1917 | ||
1906 | spin_lock_init(&pl08x->lock); | ||
1907 | |||
1908 | pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); | 1918 | pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); |
1909 | if (!pl08x->base) { | 1919 | if (!pl08x->base) { |
1910 | ret = -ENOMEM; | 1920 | ret = -ENOMEM; |
@@ -1942,7 +1952,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
1942 | ch->id = i; | 1952 | ch->id = i; |
1943 | ch->base = pl08x->base + PL080_Cx_BASE(i); | 1953 | ch->base = pl08x->base + PL080_Cx_BASE(i); |
1944 | spin_lock_init(&ch->lock); | 1954 | spin_lock_init(&ch->lock); |
1945 | ch->signal = -1; | ||
1946 | 1955 | ||
1947 | /* | 1956 | /* |
1948 | * Nomadik variants can have channels that are locked | 1957 | * Nomadik variants can have channels that are locked |
@@ -2007,7 +2016,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
2007 | amba_part(adev), amba_rev(adev), | 2016 | amba_part(adev), amba_rev(adev), |
2008 | (unsigned long long)adev->res.start, adev->irq[0]); | 2017 | (unsigned long long)adev->res.start, adev->irq[0]); |
2009 | 2018 | ||
2010 | pm_runtime_put(&adev->dev); | ||
2011 | return 0; | 2019 | return 0; |
2012 | 2020 | ||
2013 | out_no_slave_reg: | 2021 | out_no_slave_reg: |
@@ -2026,9 +2034,6 @@ out_no_ioremap: | |||
2026 | dma_pool_destroy(pl08x->pool); | 2034 | dma_pool_destroy(pl08x->pool); |
2027 | out_no_lli_pool: | 2035 | out_no_lli_pool: |
2028 | out_no_platdata: | 2036 | out_no_platdata: |
2029 | pm_runtime_put(&adev->dev); | ||
2030 | pm_runtime_disable(&adev->dev); | ||
2031 | |||
2032 | kfree(pl08x); | 2037 | kfree(pl08x); |
2033 | out_no_pl08x: | 2038 | out_no_pl08x: |
2034 | amba_release_regions(adev); | 2039 | amba_release_regions(adev); |
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index ed2c9499d3ea..4f4ff1337cac 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c | |||
@@ -1575,4 +1575,4 @@ module_exit(dw_exit); | |||
1575 | MODULE_LICENSE("GPL v2"); | 1575 | MODULE_LICENSE("GPL v2"); |
1576 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); | 1576 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); |
1577 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); | 1577 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
1578 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); | 1578 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); |
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 5ec72044ea4c..c7573e50aa14 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -1663,7 +1663,6 @@ static void __exit ipu_idmac_exit(struct ipu *ipu) | |||
1663 | 1663 | ||
1664 | static int __init ipu_probe(struct platform_device *pdev) | 1664 | static int __init ipu_probe(struct platform_device *pdev) |
1665 | { | 1665 | { |
1666 | struct ipu_platform_data *pdata = pdev->dev.platform_data; | ||
1667 | struct resource *mem_ipu, *mem_ic; | 1666 | struct resource *mem_ipu, *mem_ic; |
1668 | int ret; | 1667 | int ret; |
1669 | 1668 | ||
@@ -1671,7 +1670,7 @@ static int __init ipu_probe(struct platform_device *pdev) | |||
1671 | 1670 | ||
1672 | mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1671 | mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1673 | mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 1672 | mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
1674 | if (!pdata || !mem_ipu || !mem_ic) | 1673 | if (!mem_ipu || !mem_ic) |
1675 | return -EINVAL; | 1674 | return -EINVAL; |
1676 | 1675 | ||
1677 | ipu_data.dev = &pdev->dev; | 1676 | ipu_data.dev = &pdev->dev; |
@@ -1688,10 +1687,9 @@ static int __init ipu_probe(struct platform_device *pdev) | |||
1688 | goto err_noirq; | 1687 | goto err_noirq; |
1689 | 1688 | ||
1690 | ipu_data.irq_err = ret; | 1689 | ipu_data.irq_err = ret; |
1691 | ipu_data.irq_base = pdata->irq_base; | ||
1692 | 1690 | ||
1693 | dev_dbg(&pdev->dev, "fn irq %u, err irq %u, irq-base %u\n", | 1691 | dev_dbg(&pdev->dev, "fn irq %u, err irq %u\n", |
1694 | ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); | 1692 | ipu_data.irq_fn, ipu_data.irq_err); |
1695 | 1693 | ||
1696 | /* Remap IPU common registers */ | 1694 | /* Remap IPU common registers */ |
1697 | ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu)); | 1695 | ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu)); |
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index a71f55e72be9..fa95bcc3de1f 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/clk.h> | 14 | #include <linux/clk.h> |
15 | #include <linux/irq.h> | 15 | #include <linux/irq.h> |
16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
17 | #include <linux/module.h> | ||
17 | 18 | ||
18 | #include <mach/ipu.h> | 19 | #include <mach/ipu.h> |
19 | 20 | ||
@@ -354,10 +355,12 @@ static struct irq_chip ipu_irq_chip = { | |||
354 | /* Install the IRQ handler */ | 355 | /* Install the IRQ handler */ |
355 | int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) | 356 | int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) |
356 | { | 357 | { |
357 | struct ipu_platform_data *pdata = dev->dev.platform_data; | 358 | unsigned int irq, i; |
358 | unsigned int irq, irq_base, i; | 359 | int irq_base = irq_alloc_descs(-1, 0, CONFIG_MX3_IPU_IRQS, |
360 | numa_node_id()); | ||
359 | 361 | ||
360 | irq_base = pdata->irq_base; | 362 | if (irq_base < 0) |
363 | return irq_base; | ||
361 | 364 | ||
362 | for (i = 0; i < IPU_IRQ_NR_BANKS; i++) | 365 | for (i = 0; i < IPU_IRQ_NR_BANKS; i++) |
363 | irq_bank[i].ipu = ipu; | 366 | irq_bank[i].ipu = ipu; |
@@ -387,15 +390,16 @@ int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) | |||
387 | irq_set_handler_data(ipu->irq_err, ipu); | 390 | irq_set_handler_data(ipu->irq_err, ipu); |
388 | irq_set_chained_handler(ipu->irq_err, ipu_irq_err); | 391 | irq_set_chained_handler(ipu->irq_err, ipu_irq_err); |
389 | 392 | ||
393 | ipu->irq_base = irq_base; | ||
394 | |||
390 | return 0; | 395 | return 0; |
391 | } | 396 | } |
392 | 397 | ||
393 | void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev) | 398 | void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev) |
394 | { | 399 | { |
395 | struct ipu_platform_data *pdata = dev->dev.platform_data; | ||
396 | unsigned int irq, irq_base; | 400 | unsigned int irq, irq_base; |
397 | 401 | ||
398 | irq_base = pdata->irq_base; | 402 | irq_base = ipu->irq_base; |
399 | 403 | ||
400 | irq_set_chained_handler(ipu->irq_fn, NULL); | 404 | irq_set_chained_handler(ipu->irq_fn, NULL); |
401 | irq_set_handler_data(ipu->irq_fn, NULL); | 405 | irq_set_handler_data(ipu->irq_fn, NULL); |
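
Rather than trusting platform data for a fixed interrupt base, the driver now asks the IRQ core for a contiguous block of descriptors and remembers the result in the ipu structure for later teardown. A sketch of the call's contract as used above (passing -1 requests any free range, 0 is where the search starts):

	/* Sketch: dynamic IRQ descriptor allocation */
	int irq_base = irq_alloc_descs(-1, 0, CONFIG_MX3_IPU_IRQS, numa_node_id());
	if (irq_base < 0)
		return irq_base;	/* no free range; propagate the error */
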
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c new file mode 100644 index 000000000000..ae0561826137 --- /dev/null +++ b/drivers/dma/omap-dma.c | |||
@@ -0,0 +1,669 @@ | |||
1 | /* | ||
2 | * OMAP DMAengine support | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #include <linux/dmaengine.h> | ||
9 | #include <linux/dma-mapping.h> | ||
10 | #include <linux/err.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/list.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/omap-dma.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | |||
20 | #include "virt-dma.h" | ||
21 | #include <plat/dma.h> | ||
22 | |||
23 | struct omap_dmadev { | ||
24 | struct dma_device ddev; | ||
25 | spinlock_t lock; | ||
26 | struct tasklet_struct task; | ||
27 | struct list_head pending; | ||
28 | }; | ||
29 | |||
30 | struct omap_chan { | ||
31 | struct virt_dma_chan vc; | ||
32 | struct list_head node; | ||
33 | |||
34 | struct dma_slave_config cfg; | ||
35 | unsigned dma_sig; | ||
36 | bool cyclic; | ||
37 | |||
38 | int dma_ch; | ||
39 | struct omap_desc *desc; | ||
40 | unsigned sgidx; | ||
41 | }; | ||
42 | |||
43 | struct omap_sg { | ||
44 | dma_addr_t addr; | ||
45 | uint32_t en; /* number of elements (24-bit) */ | ||
46 | uint32_t fn; /* number of frames (16-bit) */ | ||
47 | }; | ||
48 | |||
49 | struct omap_desc { | ||
50 | struct virt_dma_desc vd; | ||
51 | enum dma_transfer_direction dir; | ||
52 | dma_addr_t dev_addr; | ||
53 | |||
54 | int16_t fi; /* for OMAP_DMA_SYNC_PACKET */ | ||
55 | uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */ | ||
56 | uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */ | ||
57 | uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */ | ||
58 | uint8_t periph_port; /* Peripheral port */ | ||
59 | |||
60 | unsigned sglen; | ||
61 | struct omap_sg sg[0]; | ||
62 | }; | ||
63 | |||
64 | static const unsigned es_bytes[] = { | ||
65 | [OMAP_DMA_DATA_TYPE_S8] = 1, | ||
66 | [OMAP_DMA_DATA_TYPE_S16] = 2, | ||
67 | [OMAP_DMA_DATA_TYPE_S32] = 4, | ||
68 | }; | ||
69 | |||
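
Element size (ES), elements per frame (EN) and frame count (FN) multiply out to the byte count of a transfer: bytes = es_bytes[es] * en * fn. For example, with 32-bit elements:

	/* Example: S32 elements, 8 elements per frame, 128 frames */
	size_t bytes = es_bytes[OMAP_DMA_DATA_TYPE_S32] * 8 * 128;	/* 4 * 8 * 128 = 4096 */
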
70 | static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) | ||
71 | { | ||
72 | return container_of(d, struct omap_dmadev, ddev); | ||
73 | } | ||
74 | |||
75 | static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c) | ||
76 | { | ||
77 | return container_of(c, struct omap_chan, vc.chan); | ||
78 | } | ||
79 | |||
80 | static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t) | ||
81 | { | ||
82 | return container_of(t, struct omap_desc, vd.tx); | ||
83 | } | ||
84 | |||
85 | static void omap_dma_desc_free(struct virt_dma_desc *vd) | ||
86 | { | ||
87 | kfree(container_of(vd, struct omap_desc, vd)); | ||
88 | } | ||
89 | |||
90 | static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, | ||
91 | unsigned idx) | ||
92 | { | ||
93 | struct omap_sg *sg = d->sg + idx; | ||
94 | |||
95 | if (d->dir == DMA_DEV_TO_MEM) | ||
96 | omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, | ||
97 | OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); | ||
98 | else | ||
99 | omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, | ||
100 | OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); | ||
101 | |||
102 | omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn, | ||
103 | d->sync_mode, c->dma_sig, d->sync_type); | ||
104 | |||
105 | omap_start_dma(c->dma_ch); | ||
106 | } | ||
107 | |||
108 | static void omap_dma_start_desc(struct omap_chan *c) | ||
109 | { | ||
110 | struct virt_dma_desc *vd = vchan_next_desc(&c->vc); | ||
111 | struct omap_desc *d; | ||
112 | |||
113 | if (!vd) { | ||
114 | c->desc = NULL; | ||
115 | return; | ||
116 | } | ||
117 | |||
118 | list_del(&vd->node); | ||
119 | |||
120 | c->desc = d = to_omap_dma_desc(&vd->tx); | ||
121 | c->sgidx = 0; | ||
122 | |||
123 | if (d->dir == DMA_DEV_TO_MEM) | ||
124 | omap_set_dma_src_params(c->dma_ch, d->periph_port, | ||
125 | OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); | ||
126 | else | ||
127 | omap_set_dma_dest_params(c->dma_ch, d->periph_port, | ||
128 | OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); | ||
129 | |||
130 | omap_dma_start_sg(c, d, 0); | ||
131 | } | ||
132 | |||
133 | static void omap_dma_callback(int ch, u16 status, void *data) | ||
134 | { | ||
135 | struct omap_chan *c = data; | ||
136 | struct omap_desc *d; | ||
137 | unsigned long flags; | ||
138 | |||
139 | spin_lock_irqsave(&c->vc.lock, flags); | ||
140 | d = c->desc; | ||
141 | if (d) { | ||
142 | if (!c->cyclic) { | ||
143 | if (++c->sgidx < d->sglen) { | ||
144 | omap_dma_start_sg(c, d, c->sgidx); | ||
145 | } else { | ||
146 | omap_dma_start_desc(c); | ||
147 | vchan_cookie_complete(&d->vd); | ||
148 | } | ||
149 | } else { | ||
150 | vchan_cyclic_callback(&d->vd); | ||
151 | } | ||
152 | } | ||
153 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * This callback schedules all pending channels. We could be more | ||
158 | * clever here by postponing allocation of the real DMA channels to | ||
159 | * this point, and freeing them when our virtual channel becomes idle. | ||
160 | * | ||
161 | * We would then need to deal with the 'all channels in-use' case. | ||
162 | */ | ||
163 | static void omap_dma_sched(unsigned long data) | ||
164 | { | ||
165 | struct omap_dmadev *d = (struct omap_dmadev *)data; | ||
166 | LIST_HEAD(head); | ||
167 | |||
168 | spin_lock_irq(&d->lock); | ||
169 | list_splice_tail_init(&d->pending, &head); | ||
170 | spin_unlock_irq(&d->lock); | ||
171 | |||
172 | while (!list_empty(&head)) { | ||
173 | struct omap_chan *c = list_first_entry(&head, | ||
174 | struct omap_chan, node); | ||
175 | |||
176 | spin_lock_irq(&c->vc.lock); | ||
177 | list_del_init(&c->node); | ||
178 | omap_dma_start_desc(c); | ||
179 | spin_unlock_irq(&c->vc.lock); | ||
180 | } | ||
181 | } | ||
182 | |||
183 | static int omap_dma_alloc_chan_resources(struct dma_chan *chan) | ||
184 | { | ||
185 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
186 | |||
187 | dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); | ||
188 | |||
189 | return omap_request_dma(c->dma_sig, "DMA engine", | ||
190 | omap_dma_callback, c, &c->dma_ch); | ||
191 | } | ||
192 | |||
193 | static void omap_dma_free_chan_resources(struct dma_chan *chan) | ||
194 | { | ||
195 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
196 | |||
197 | vchan_free_chan_resources(&c->vc); | ||
198 | omap_free_dma(c->dma_ch); | ||
199 | |||
200 | dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); | ||
201 | } | ||
202 | |||
203 | static size_t omap_dma_sg_size(struct omap_sg *sg) | ||
204 | { | ||
205 | return sg->en * sg->fn; | ||
206 | } | ||
207 | |||
208 | static size_t omap_dma_desc_size(struct omap_desc *d) | ||
209 | { | ||
210 | unsigned i; | ||
211 | size_t size; | ||
212 | |||
213 | for (size = i = 0; i < d->sglen; i++) | ||
214 | size += omap_dma_sg_size(&d->sg[i]); | ||
215 | |||
216 | return size * es_bytes[d->es]; | ||
217 | } | ||
218 | |||
219 | static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr) | ||
220 | { | ||
221 | unsigned i; | ||
222 | size_t size, es_size = es_bytes[d->es]; | ||
223 | |||
224 | for (size = i = 0; i < d->sglen; i++) { | ||
225 | size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size; | ||
226 | |||
227 | if (size) | ||
228 | size += this_size; | ||
229 | else if (addr >= d->sg[i].addr && | ||
230 | addr < d->sg[i].addr + this_size) | ||
231 | size += d->sg[i].addr + this_size - addr; | ||
232 | } | ||
233 | return size; | ||
234 | } | ||
235 | |||
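
omap_dma_desc_size_pos() computes the residue of an in-flight descriptor: entries before the one containing the current hardware position contribute nothing, the containing entry contributes from pos to its end, and every later entry contributes in full. A worked example with two 0x200-byte entries at 0x1000 and 0x2000 and the hardware at pos = 0x1100:

	residue = (0x1000 + 0x200 - 0x1100)	/* 0x100 still to go in sg[0] */
	        + 0x200				/* all of sg[1] */
	        = 0x300 bytes
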
236 | static enum dma_status omap_dma_tx_status(struct dma_chan *chan, | ||
237 | dma_cookie_t cookie, struct dma_tx_state *txstate) | ||
238 | { | ||
239 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
240 | struct virt_dma_desc *vd; | ||
241 | enum dma_status ret; | ||
242 | unsigned long flags; | ||
243 | |||
244 | ret = dma_cookie_status(chan, cookie, txstate); | ||
245 | if (ret == DMA_SUCCESS || !txstate) | ||
246 | return ret; | ||
247 | |||
248 | spin_lock_irqsave(&c->vc.lock, flags); | ||
249 | vd = vchan_find_desc(&c->vc, cookie); | ||
250 | if (vd) { | ||
251 | txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx)); | ||
252 | } else if (c->desc && c->desc->vd.tx.cookie == cookie) { | ||
253 | struct omap_desc *d = c->desc; | ||
254 | dma_addr_t pos; | ||
255 | |||
256 | if (d->dir == DMA_MEM_TO_DEV) | ||
257 | pos = omap_get_dma_src_pos(c->dma_ch); | ||
258 | else if (d->dir == DMA_DEV_TO_MEM) | ||
259 | pos = omap_get_dma_dst_pos(c->dma_ch); | ||
260 | else | ||
261 | pos = 0; | ||
262 | |||
263 | txstate->residue = omap_dma_desc_size_pos(d, pos); | ||
264 | } else { | ||
265 | txstate->residue = 0; | ||
266 | } | ||
267 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
268 | |||
269 | return ret; | ||
270 | } | ||
271 | |||
272 | static void omap_dma_issue_pending(struct dma_chan *chan) | ||
273 | { | ||
274 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
275 | unsigned long flags; | ||
276 | |||
277 | spin_lock_irqsave(&c->vc.lock, flags); | ||
278 | if (vchan_issue_pending(&c->vc) && !c->desc) { | ||
279 | struct omap_dmadev *d = to_omap_dma_dev(chan->device); | ||
280 | spin_lock(&d->lock); | ||
281 | if (list_empty(&c->node)) | ||
282 | list_add_tail(&c->node, &d->pending); | ||
283 | spin_unlock(&d->lock); | ||
284 | tasklet_schedule(&d->task); | ||
285 | } | ||
286 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
287 | } | ||
288 | |||
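
vchan_issue_pending() moves submitted descriptors onto the issued list; the channel is only queued for the scheduling tasklet when it is idle (!c->desc), so an already-running channel just keeps draining its queue. From a client's side this is the usual dmaengine two-step, sketched here with an assumed, previously prepared descriptor:

	/* Sketch: standard dmaengine submission from a client driver */
	dma_cookie_t cookie = dmaengine_submit(desc);	/* queue on the virtual channel */
	dma_async_issue_pending(chan);			/* ends up in omap_dma_issue_pending() */
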
289 | static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( | ||
290 | struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen, | ||
291 | enum dma_transfer_direction dir, unsigned long tx_flags, void *context) | ||
292 | { | ||
293 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
294 | enum dma_slave_buswidth dev_width; | ||
295 | struct scatterlist *sgent; | ||
296 | struct omap_desc *d; | ||
297 | dma_addr_t dev_addr; | ||
298 | unsigned i, j = 0, es, en, frame_bytes, sync_type; | ||
299 | u32 burst; | ||
300 | |||
301 | if (dir == DMA_DEV_TO_MEM) { | ||
302 | dev_addr = c->cfg.src_addr; | ||
303 | dev_width = c->cfg.src_addr_width; | ||
304 | burst = c->cfg.src_maxburst; | ||
305 | sync_type = OMAP_DMA_SRC_SYNC; | ||
306 | } else if (dir == DMA_MEM_TO_DEV) { | ||
307 | dev_addr = c->cfg.dst_addr; | ||
308 | dev_width = c->cfg.dst_addr_width; | ||
309 | burst = c->cfg.dst_maxburst; | ||
310 | sync_type = OMAP_DMA_DST_SYNC; | ||
311 | } else { | ||
312 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); | ||
313 | return NULL; | ||
314 | } | ||
315 | |||
316 | /* Bus width translates to the element size (ES) */ | ||
317 | switch (dev_width) { | ||
318 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
319 | es = OMAP_DMA_DATA_TYPE_S8; | ||
320 | break; | ||
321 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
322 | es = OMAP_DMA_DATA_TYPE_S16; | ||
323 | break; | ||
324 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
325 | es = OMAP_DMA_DATA_TYPE_S32; | ||
326 | break; | ||
327 | default: /* not reached */ | ||
328 | return NULL; | ||
329 | } | ||
330 | |||
331 | /* Now allocate and setup the descriptor. */ | ||
332 | d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC); | ||
333 | if (!d) | ||
334 | return NULL; | ||
335 | |||
336 | d->dir = dir; | ||
337 | d->dev_addr = dev_addr; | ||
338 | d->es = es; | ||
339 | d->sync_mode = OMAP_DMA_SYNC_FRAME; | ||
340 | d->sync_type = sync_type; | ||
341 | d->periph_port = OMAP_DMA_PORT_TIPB; | ||
342 | |||
343 | /* | ||
344 | * Build our scatterlist entries: each contains the address, | ||
345 | * the number of elements (EN) in each frame, and the number of | ||
346 | * frames (FN). Number of bytes for this entry = ES * EN * FN. | ||
347 | * | ||
348 | * Burst size translates to number of elements with frame sync. | ||
349 | * Note: DMA engine defines burst to be the number of dev-width | ||
350 | * transfers. | ||
351 | */ | ||
352 | en = burst; | ||
353 | frame_bytes = es_bytes[es] * en; | ||
354 | for_each_sg(sgl, sgent, sglen, i) { | ||
355 | d->sg[j].addr = sg_dma_address(sgent); | ||
356 | d->sg[j].en = en; | ||
357 | d->sg[j].fn = sg_dma_len(sgent) / frame_bytes; | ||
358 | j++; | ||
359 | } | ||
360 | |||
361 | d->sglen = j; | ||
362 | |||
363 | return vchan_tx_prep(&c->vc, &d->vd, tx_flags); | ||
364 | } | ||
365 | |||
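
Under frame sync one frame moves per device request, so the burst becomes the element count (EN) and each sg entry is carved into FN frames. A worked example assuming a 4-byte bus width and maxburst of 8:

	frame_bytes = es_bytes[S32] * 8 = 32 bytes
	4096-byte sg entry  =>  en = 8, fn = 4096 / 32 = 128 frames
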
366 | static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( | ||
367 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | ||
368 | size_t period_len, enum dma_transfer_direction dir, void *context) | ||
369 | { | ||
370 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
371 | enum dma_slave_buswidth dev_width; | ||
372 | struct omap_desc *d; | ||
373 | dma_addr_t dev_addr; | ||
374 | unsigned es, sync_type; | ||
375 | u32 burst; | ||
376 | |||
377 | if (dir == DMA_DEV_TO_MEM) { | ||
378 | dev_addr = c->cfg.src_addr; | ||
379 | dev_width = c->cfg.src_addr_width; | ||
380 | burst = c->cfg.src_maxburst; | ||
381 | sync_type = OMAP_DMA_SRC_SYNC; | ||
382 | } else if (dir == DMA_MEM_TO_DEV) { | ||
383 | dev_addr = c->cfg.dst_addr; | ||
384 | dev_width = c->cfg.dst_addr_width; | ||
385 | burst = c->cfg.dst_maxburst; | ||
386 | sync_type = OMAP_DMA_DST_SYNC; | ||
387 | } else { | ||
388 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); | ||
389 | return NULL; | ||
390 | } | ||
391 | |||
392 | /* Bus width translates to the element size (ES) */ | ||
393 | switch (dev_width) { | ||
394 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
395 | es = OMAP_DMA_DATA_TYPE_S8; | ||
396 | break; | ||
397 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
398 | es = OMAP_DMA_DATA_TYPE_S16; | ||
399 | break; | ||
400 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
401 | es = OMAP_DMA_DATA_TYPE_S32; | ||
402 | break; | ||
403 | default: /* not reached */ | ||
404 | return NULL; | ||
405 | } | ||
406 | |||
407 | /* Now allocate and setup the descriptor. */ | ||
408 | d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); | ||
409 | if (!d) | ||
410 | return NULL; | ||
411 | |||
412 | d->dir = dir; | ||
413 | d->dev_addr = dev_addr; | ||
414 | d->fi = burst; | ||
415 | d->es = es; | ||
416 | d->sync_mode = OMAP_DMA_SYNC_PACKET; | ||
417 | d->sync_type = sync_type; | ||
418 | d->periph_port = OMAP_DMA_PORT_MPUI; | ||
419 | d->sg[0].addr = buf_addr; | ||
420 | d->sg[0].en = period_len / es_bytes[es]; | ||
421 | d->sg[0].fn = buf_len / period_len; | ||
422 | d->sglen = 1; | ||
423 | |||
424 | if (!c->cyclic) { | ||
425 | c->cyclic = true; | ||
426 | omap_dma_link_lch(c->dma_ch, c->dma_ch); | ||
427 | omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ); | ||
428 | omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ); | ||
429 | } | ||
430 | |||
431 | if (!cpu_class_is_omap1()) { | ||
432 | omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); | ||
433 | omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); | ||
434 | } | ||
435 | |||
436 | return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); | ||
437 | } | ||
438 | |||
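
Cyclic transfers collapse to a single sg entry (EN spans one period, FN counts periods) with the channel linked to itself so the hardware loops. A client would reach this through the generic wrapper; a minimal sketch, assuming buf, buf_len and period_len describe something like an audio ring buffer:

	/* Sketch: preparing a cyclic transfer (e.g. an audio ring buffer) */
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV);
	if (desc)
		dmaengine_submit(desc);
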
439 | static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) | ||
440 | { | ||
441 | if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || | ||
442 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) | ||
443 | return -EINVAL; | ||
444 | |||
445 | memcpy(&c->cfg, cfg, sizeof(c->cfg)); | ||
446 | |||
447 | return 0; | ||
448 | } | ||
449 | |||
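
Only 1-, 2- and 4-byte bus widths are accepted; 8-byte widths are refused because the controller has no matching element size. A sketch of the configuration a peripheral driver would hand in (the FIFO address is an illustrative placeholder):

	/* Sketch: slave configuration from a peripheral driver */
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= port_fifo_phys,		/* assumed FIFO physical address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* maps to S32 elements */
		.dst_maxburst	= 8,				/* becomes EN under frame sync */
	};

	dmaengine_slave_config(chan, &cfg);
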
450 | static int omap_dma_terminate_all(struct omap_chan *c) | ||
451 | { | ||
452 | struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); | ||
453 | unsigned long flags; | ||
454 | LIST_HEAD(head); | ||
455 | |||
456 | spin_lock_irqsave(&c->vc.lock, flags); | ||
457 | |||
458 | /* Prevent this channel being scheduled */ | ||
459 | spin_lock(&d->lock); | ||
460 | list_del_init(&c->node); | ||
461 | spin_unlock(&d->lock); | ||
462 | |||
463 | /* | ||
464 | * Stop DMA activity: we assume the callback will not be called | ||
465 | * after omap_stop_dma() returns (even if it does, it will see | ||
466 | * c->desc is NULL and exit). | ||
467 | */ | ||
468 | if (c->desc) { | ||
469 | c->desc = NULL; | ||
470 | omap_stop_dma(c->dma_ch); | ||
471 | } | ||
472 | |||
473 | if (c->cyclic) { | ||
474 | c->cyclic = false; | ||
475 | omap_dma_unlink_lch(c->dma_ch, c->dma_ch); | ||
476 | } | ||
477 | |||
478 | vchan_get_all_descriptors(&c->vc, &head); | ||
479 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
480 | vchan_dma_desc_free_list(&c->vc, &head); | ||
481 | |||
482 | return 0; | ||
483 | } | ||
484 | |||
485 | static int omap_dma_pause(struct omap_chan *c) | ||
486 | { | ||
487 | /* FIXME: not supported by platform private API */ | ||
488 | return -EINVAL; | ||
489 | } | ||
490 | |||
491 | static int omap_dma_resume(struct omap_chan *c) | ||
492 | { | ||
493 | /* FIXME: not supported by platform private API */ | ||
494 | return -EINVAL; | ||
495 | } | ||
496 | |||
497 | static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
498 | unsigned long arg) | ||
499 | { | ||
500 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
501 | int ret; | ||
502 | |||
503 | switch (cmd) { | ||
504 | case DMA_SLAVE_CONFIG: | ||
505 | ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg); | ||
506 | break; | ||
507 | |||
508 | case DMA_TERMINATE_ALL: | ||
509 | ret = omap_dma_terminate_all(c); | ||
510 | break; | ||
511 | |||
512 | case DMA_PAUSE: | ||
513 | ret = omap_dma_pause(c); | ||
514 | break; | ||
515 | |||
516 | case DMA_RESUME: | ||
517 | ret = omap_dma_resume(c); | ||
518 | break; | ||
519 | |||
520 | default: | ||
521 | ret = -ENXIO; | ||
522 | break; | ||
523 | } | ||
524 | |||
525 | return ret; | ||
526 | } | ||
527 | |||
528 | static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) | ||
529 | { | ||
530 | struct omap_chan *c; | ||
531 | |||
532 | c = kzalloc(sizeof(*c), GFP_KERNEL); | ||
533 | if (!c) | ||
534 | return -ENOMEM; | ||
535 | |||
536 | c->dma_sig = dma_sig; | ||
537 | c->vc.desc_free = omap_dma_desc_free; | ||
538 | vchan_init(&c->vc, &od->ddev); | ||
539 | INIT_LIST_HEAD(&c->node); | ||
540 | |||
541 | od->ddev.chancnt++; | ||
542 | |||
543 | return 0; | ||
544 | } | ||
545 | |||
546 | static void omap_dma_free(struct omap_dmadev *od) | ||
547 | { | ||
548 | tasklet_kill(&od->task); | ||
549 | while (!list_empty(&od->ddev.channels)) { | ||
550 | struct omap_chan *c = list_first_entry(&od->ddev.channels, | ||
551 | struct omap_chan, vc.chan.device_node); | ||
552 | |||
553 | list_del(&c->vc.chan.device_node); | ||
554 | tasklet_kill(&c->vc.task); | ||
555 | kfree(c); | ||
556 | } | ||
557 | kfree(od); | ||
558 | } | ||
559 | |||
560 | static int omap_dma_probe(struct platform_device *pdev) | ||
561 | { | ||
562 | struct omap_dmadev *od; | ||
563 | int rc, i; | ||
564 | |||
565 | od = kzalloc(sizeof(*od), GFP_KERNEL); | ||
566 | if (!od) | ||
567 | return -ENOMEM; | ||
568 | |||
569 | dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); | ||
570 | dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); | ||
571 | od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; | ||
572 | od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; | ||
573 | od->ddev.device_tx_status = omap_dma_tx_status; | ||
574 | od->ddev.device_issue_pending = omap_dma_issue_pending; | ||
575 | od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; | ||
576 | od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; | ||
577 | od->ddev.device_control = omap_dma_control; | ||
578 | od->ddev.dev = &pdev->dev; | ||
579 | INIT_LIST_HEAD(&od->ddev.channels); | ||
580 | INIT_LIST_HEAD(&od->pending); | ||
581 | spin_lock_init(&od->lock); | ||
582 | |||
583 | tasklet_init(&od->task, omap_dma_sched, (unsigned long)od); | ||
584 | |||
585 | for (i = 0; i < 127; i++) { | ||
586 | rc = omap_dma_chan_init(od, i); | ||
587 | if (rc) { | ||
588 | omap_dma_free(od); | ||
589 | return rc; | ||
590 | } | ||
591 | } | ||
592 | |||
593 | rc = dma_async_device_register(&od->ddev); | ||
594 | if (rc) { | ||
595 | pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", | ||
596 | rc); | ||
597 | omap_dma_free(od); | ||
598 | } else { | ||
599 | platform_set_drvdata(pdev, od); | ||
600 | } | ||
601 | |||
602 | dev_info(&pdev->dev, "OMAP DMA engine driver\n"); | ||
603 | |||
604 | return rc; | ||
605 | } | ||
606 | |||
607 | static int omap_dma_remove(struct platform_device *pdev) | ||
608 | { | ||
609 | struct omap_dmadev *od = platform_get_drvdata(pdev); | ||
610 | |||
611 | dma_async_device_unregister(&od->ddev); | ||
612 | omap_dma_free(od); | ||
613 | |||
614 | return 0; | ||
615 | } | ||
616 | |||
617 | static struct platform_driver omap_dma_driver = { | ||
618 | .probe = omap_dma_probe, | ||
619 | .remove = omap_dma_remove, | ||
620 | .driver = { | ||
621 | .name = "omap-dma-engine", | ||
622 | .owner = THIS_MODULE, | ||
623 | }, | ||
624 | }; | ||
625 | |||
626 | bool omap_dma_filter_fn(struct dma_chan *chan, void *param) | ||
627 | { | ||
628 | if (chan->device->dev->driver == &omap_dma_driver.driver) { | ||
629 | struct omap_chan *c = to_omap_dma_chan(chan); | ||
630 | unsigned req = *(unsigned *)param; | ||
631 | |||
632 | return req == c->dma_sig; | ||
633 | } | ||
634 | return false; | ||
635 | } | ||
636 | EXPORT_SYMBOL_GPL(omap_dma_filter_fn); | ||
637 | |||
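
The filter function lets a client claim the virtual channel bound to its DMA request line. A typical lookup, where MY_DMA_REQUEST is an assumed request line number:

	/* Sketch: requesting a channel by OMAP DMA request line */
	dma_cap_mask_t mask;
	unsigned int sig = MY_DMA_REQUEST;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
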
638 | static struct platform_device *pdev; | ||
639 | |||
640 | static const struct platform_device_info omap_dma_dev_info = { | ||
641 | .name = "omap-dma-engine", | ||
642 | .id = -1, | ||
643 | .dma_mask = DMA_BIT_MASK(32), | ||
644 | }; | ||
645 | |||
646 | static int omap_dma_init(void) | ||
647 | { | ||
648 | int rc = platform_driver_register(&omap_dma_driver); | ||
649 | |||
650 | if (rc == 0) { | ||
651 | pdev = platform_device_register_full(&omap_dma_dev_info); | ||
652 | if (IS_ERR(pdev)) { | ||
653 | platform_driver_unregister(&omap_dma_driver); | ||
654 | rc = PTR_ERR(pdev); | ||
655 | } | ||
656 | } | ||
657 | return rc; | ||
658 | } | ||
659 | subsys_initcall(omap_dma_init); | ||
660 | |||
661 | static void __exit omap_dma_exit(void) | ||
662 | { | ||
663 | platform_device_unregister(pdev); | ||
664 | platform_driver_unregister(&omap_dma_driver); | ||
665 | } | ||
666 | module_exit(omap_dma_exit); | ||
667 | |||
668 | MODULE_AUTHOR("Russell King"); | ||
669 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index ec78ccef9132..f5a73606217e 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c | |||
@@ -21,6 +21,8 @@ | |||
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
23 | 23 | ||
24 | #include "virt-dma.h" | ||
25 | |||
24 | #define NR_PHY_CHAN 6 | 26 | #define NR_PHY_CHAN 6 |
25 | #define DMA_ALIGN 3 | 27 | #define DMA_ALIGN 3 |
26 | #define DMA_MAX_SIZE 0x1fff | 28 | #define DMA_MAX_SIZE 0x1fff |
@@ -72,12 +74,13 @@ struct sa11x0_dma_sg { | |||
72 | }; | 74 | }; |
73 | 75 | ||
74 | struct sa11x0_dma_desc { | 76 | struct sa11x0_dma_desc { |
75 | struct dma_async_tx_descriptor tx; | 77 | struct virt_dma_desc vd; |
78 | |||
76 | u32 ddar; | 79 | u32 ddar; |
77 | size_t size; | 80 | size_t size; |
81 | unsigned period; | ||
82 | bool cyclic; | ||
78 | 83 | ||
79 | /* maybe protected by c->lock */ | ||
80 | struct list_head node; | ||
81 | unsigned sglen; | 84 | unsigned sglen; |
82 | struct sa11x0_dma_sg sg[0]; | 85 | struct sa11x0_dma_sg sg[0]; |
83 | }; | 86 | }; |
@@ -85,15 +88,11 @@ struct sa11x0_dma_desc { | |||
85 | struct sa11x0_dma_phy; | 88 | struct sa11x0_dma_phy; |
86 | 89 | ||
87 | struct sa11x0_dma_chan { | 90 | struct sa11x0_dma_chan { |
88 | struct dma_chan chan; | 91 | struct virt_dma_chan vc; |
89 | spinlock_t lock; | ||
90 | dma_cookie_t lc; | ||
91 | 92 | ||
92 | /* protected by c->lock */ | 93 | /* protected by c->vc.lock */ |
93 | struct sa11x0_dma_phy *phy; | 94 | struct sa11x0_dma_phy *phy; |
94 | enum dma_status status; | 95 | enum dma_status status; |
95 | struct list_head desc_submitted; | ||
96 | struct list_head desc_issued; | ||
97 | 96 | ||
98 | /* protected by d->lock */ | 97 | /* protected by d->lock */ |
99 | struct list_head node; | 98 | struct list_head node; |
@@ -109,7 +108,7 @@ struct sa11x0_dma_phy { | |||
109 | 108 | ||
110 | struct sa11x0_dma_chan *vchan; | 109 | struct sa11x0_dma_chan *vchan; |
111 | 110 | ||
112 | /* Protected by c->lock */ | 111 | /* Protected by c->vc.lock */ |
113 | unsigned sg_load; | 112 | unsigned sg_load; |
114 | struct sa11x0_dma_desc *txd_load; | 113 | struct sa11x0_dma_desc *txd_load; |
115 | unsigned sg_done; | 114 | unsigned sg_done; |
@@ -127,13 +126,12 @@ struct sa11x0_dma_dev { | |||
127 | spinlock_t lock; | 126 | spinlock_t lock; |
128 | struct tasklet_struct task; | 127 | struct tasklet_struct task; |
129 | struct list_head chan_pending; | 128 | struct list_head chan_pending; |
130 | struct list_head desc_complete; | ||
131 | struct sa11x0_dma_phy phy[NR_PHY_CHAN]; | 129 | struct sa11x0_dma_phy phy[NR_PHY_CHAN]; |
132 | }; | 130 | }; |
133 | 131 | ||
134 | static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan) | 132 | static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan) |
135 | { | 133 | { |
136 | return container_of(chan, struct sa11x0_dma_chan, chan); | 134 | return container_of(chan, struct sa11x0_dma_chan, vc.chan); |
137 | } | 135 | } |
138 | 136 | ||
139 | static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) | 137 | static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) |
@@ -141,27 +139,26 @@ static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) | |||
141 | return container_of(dmadev, struct sa11x0_dma_dev, slave); | 139 | return container_of(dmadev, struct sa11x0_dma_dev, slave); |
142 | } | 140 | } |
143 | 141 | ||
144 | static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx) | 142 | static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) |
145 | { | 143 | { |
146 | return container_of(tx, struct sa11x0_dma_desc, tx); | 144 | struct virt_dma_desc *vd = vchan_next_desc(&c->vc); |
145 | |||
146 | return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL; | ||
147 | } | 147 | } |
148 | 148 | ||
149 | static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) | 149 | static void sa11x0_dma_free_desc(struct virt_dma_desc *vd) |
150 | { | 150 | { |
151 | if (list_empty(&c->desc_issued)) | 151 | kfree(container_of(vd, struct sa11x0_dma_desc, vd)); |
152 | return NULL; | ||
153 | |||
154 | return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node); | ||
155 | } | 152 | } |
156 | 153 | ||
157 | static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd) | 154 | static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd) |
158 | { | 155 | { |
159 | list_del(&txd->node); | 156 | list_del(&txd->vd.node); |
160 | p->txd_load = txd; | 157 | p->txd_load = txd; |
161 | p->sg_load = 0; | 158 | p->sg_load = 0; |
162 | 159 | ||
163 | dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n", | 160 | dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n", |
164 | p->num, txd, txd->tx.cookie, txd->ddar); | 161 | p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar); |
165 | } | 162 | } |
166 | 163 | ||
167 | static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, | 164 | static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, |
@@ -183,19 +180,24 @@ static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, | |||
183 | return; | 180 | return; |
184 | 181 | ||
185 | if (p->sg_load == txd->sglen) { | 182 | if (p->sg_load == txd->sglen) { |
186 | struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); | 183 | if (!txd->cyclic) { |
184 | struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); | ||
187 | 185 | ||
188 | /* | 186 | /* |
189 | * We have reached the end of the current descriptor. | 187 | * We have reached the end of the current descriptor. |
190 | * Peek at the next descriptor, and if compatible with | 188 | * Peek at the next descriptor, and if compatible with |
191 | * the current, start processing it. | 189 | * the current, start processing it. |
192 | */ | 190 | */ |
193 | if (txn && txn->ddar == txd->ddar) { | 191 | if (txn && txn->ddar == txd->ddar) { |
194 | txd = txn; | 192 | txd = txn; |
195 | sa11x0_dma_start_desc(p, txn); | 193 | sa11x0_dma_start_desc(p, txn); |
194 | } else { | ||
195 | p->txd_load = NULL; | ||
196 | return; | ||
197 | } | ||
196 | } else { | 198 | } else { |
197 | p->txd_load = NULL; | 199 | /* Cyclic: reset back to beginning */ |
198 | return; | 200 | p->sg_load = 0; |
199 | } | 201 | } |
200 | } | 202 | } |
201 | 203 | ||
@@ -229,21 +231,21 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p, | |||
229 | struct sa11x0_dma_desc *txd = p->txd_done; | 231 | struct sa11x0_dma_desc *txd = p->txd_done; |
230 | 232 | ||
231 | if (++p->sg_done == txd->sglen) { | 233 | if (++p->sg_done == txd->sglen) { |
232 | struct sa11x0_dma_dev *d = p->dev; | 234 | if (!txd->cyclic) { |
233 | 235 | vchan_cookie_complete(&txd->vd); | |
234 | dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n", | ||
235 | p->num, p->txd_done, p->txd_done->tx.cookie); | ||
236 | |||
237 | c->lc = txd->tx.cookie; | ||
238 | 236 | ||
239 | spin_lock(&d->lock); | 237 | p->sg_done = 0; |
240 | list_add_tail(&txd->node, &d->desc_complete); | 238 | p->txd_done = p->txd_load; |
241 | spin_unlock(&d->lock); | ||
242 | 239 | ||
243 | p->sg_done = 0; | 240 | if (!p->txd_done) |
244 | p->txd_done = p->txd_load; | 241 | tasklet_schedule(&p->dev->task); |
242 | } else { | ||
243 | if ((p->sg_done % txd->period) == 0) | ||
244 | vchan_cyclic_callback(&txd->vd); | ||
245 | 245 | ||
246 | tasklet_schedule(&d->task); | 246 | /* Cyclic: reset back to beginning */ |
247 | p->sg_done = 0; | ||
248 | } | ||
247 | } | 249 | } |
248 | 250 | ||
249 | sa11x0_dma_start_sg(p, c); | 251 | sa11x0_dma_start_sg(p, c); |
@@ -280,7 +282,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) | |||
280 | if (c) { | 282 | if (c) { |
281 | unsigned long flags; | 283 | unsigned long flags; |
282 | 284 | ||
283 | spin_lock_irqsave(&c->lock, flags); | 285 | spin_lock_irqsave(&c->vc.lock, flags); |
284 | /* | 286 | /* |
285 | * Now that we're holding the lock, check that the vchan | 287 | * Now that we're holding the lock, check that the vchan |
286 | * really is associated with this pchan before touching the | 288 | * really is associated with this pchan before touching the |
@@ -294,7 +296,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) | |||
294 | if (dcsr & DCSR_DONEB) | 296 | if (dcsr & DCSR_DONEB) |
295 | sa11x0_dma_complete(p, c); | 297 | sa11x0_dma_complete(p, c); |
296 | } | 298 | } |
297 | spin_unlock_irqrestore(&c->lock, flags); | 299 | spin_unlock_irqrestore(&c->vc.lock, flags); |
298 | } | 300 | } |
299 | 301 | ||
300 | return IRQ_HANDLED; | 302 | return IRQ_HANDLED; |
@@ -332,28 +334,15 @@ static void sa11x0_dma_tasklet(unsigned long arg) | |||
332 | struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg; | 334 | struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg; |
333 | struct sa11x0_dma_phy *p; | 335 | struct sa11x0_dma_phy *p; |
334 | struct sa11x0_dma_chan *c; | 336 | struct sa11x0_dma_chan *c; |
335 | struct sa11x0_dma_desc *txd, *txn; | ||
336 | LIST_HEAD(head); | ||
337 | unsigned pch, pch_alloc = 0; | 337 | unsigned pch, pch_alloc = 0; |
338 | 338 | ||
339 | dev_dbg(d->slave.dev, "tasklet enter\n"); | 339 | dev_dbg(d->slave.dev, "tasklet enter\n"); |
340 | 340 | ||
341 | /* Get the completed tx descriptors */ | 341 | list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) { |
342 | spin_lock_irq(&d->lock); | 342 | spin_lock_irq(&c->vc.lock); |
343 | list_splice_init(&d->desc_complete, &head); | ||
344 | spin_unlock_irq(&d->lock); | ||
345 | |||
346 | list_for_each_entry(txd, &head, node) { | ||
347 | c = to_sa11x0_dma_chan(txd->tx.chan); | ||
348 | |||
349 | dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n", | ||
350 | c, txd, txd->tx.cookie); | ||
351 | |||
352 | spin_lock_irq(&c->lock); | ||
353 | p = c->phy; | 343 | p = c->phy; |
354 | if (p) { | 344 | if (p && !p->txd_done) { |
355 | if (!p->txd_done) | 345 | sa11x0_dma_start_txd(c); |
356 | sa11x0_dma_start_txd(c); | ||
357 | if (!p->txd_done) { | 346 | if (!p->txd_done) { |
358 | /* No current txd associated with this channel */ | 347 | /* No current txd associated with this channel */ |
359 | dev_dbg(d->slave.dev, "pchan %u: free\n", p->num); | 348 | dev_dbg(d->slave.dev, "pchan %u: free\n", p->num); |
@@ -363,7 +352,7 @@ static void sa11x0_dma_tasklet(unsigned long arg) | |||
363 | p->vchan = NULL; | 352 | p->vchan = NULL; |
364 | } | 353 | } |
365 | } | 354 | } |
366 | spin_unlock_irq(&c->lock); | 355 | spin_unlock_irq(&c->vc.lock); |
367 | } | 356 | } |
368 | 357 | ||
369 | spin_lock_irq(&d->lock); | 358 | spin_lock_irq(&d->lock); |
@@ -380,7 +369,7 @@ static void sa11x0_dma_tasklet(unsigned long arg) | |||
380 | /* Mark this channel allocated */ | 369 | /* Mark this channel allocated */ |
381 | p->vchan = c; | 370 | p->vchan = c; |
382 | 371 | ||
383 | dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c); | 372 | dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); |
384 | } | 373 | } |
385 | } | 374 | } |
386 | spin_unlock_irq(&d->lock); | 375 | spin_unlock_irq(&d->lock); |
@@ -390,42 +379,18 @@ static void sa11x0_dma_tasklet(unsigned long arg) | |||
390 | p = &d->phy[pch]; | 379 | p = &d->phy[pch]; |
391 | c = p->vchan; | 380 | c = p->vchan; |
392 | 381 | ||
393 | spin_lock_irq(&c->lock); | 382 | spin_lock_irq(&c->vc.lock); |
394 | c->phy = p; | 383 | c->phy = p; |
395 | 384 | ||
396 | sa11x0_dma_start_txd(c); | 385 | sa11x0_dma_start_txd(c); |
397 | spin_unlock_irq(&c->lock); | 386 | spin_unlock_irq(&c->vc.lock); |
398 | } | 387 | } |
399 | } | 388 | } |
400 | 389 | ||
401 | /* Now free the completed tx descriptor, and call their callbacks */ | ||
402 | list_for_each_entry_safe(txd, txn, &head, node) { | ||
403 | dma_async_tx_callback callback = txd->tx.callback; | ||
404 | void *callback_param = txd->tx.callback_param; | ||
405 | |||
406 | dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n", | ||
407 | txd, txd->tx.cookie); | ||
408 | |||
409 | kfree(txd); | ||
410 | |||
411 | if (callback) | ||
412 | callback(callback_param); | ||
413 | } | ||
414 | |||
415 | dev_dbg(d->slave.dev, "tasklet exit\n"); | 390 | dev_dbg(d->slave.dev, "tasklet exit\n"); |
416 | } | 391 | } |
417 | 392 | ||
418 | 393 | ||
419 | static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head) | ||
420 | { | ||
421 | struct sa11x0_dma_desc *txd, *txn; | ||
422 | |||
423 | list_for_each_entry_safe(txd, txn, head, node) { | ||
424 | dev_dbg(d->slave.dev, "txd %p: freeing\n", txd); | ||
425 | kfree(txd); | ||
426 | } | ||
427 | } | ||
428 | |||
429 | static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan) | 394 | static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan) |
430 | { | 395 | { |
431 | return 0; | 396 | return 0; |
@@ -436,18 +401,12 @@ static void sa11x0_dma_free_chan_resources(struct dma_chan *chan) | |||
436 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | 401 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
437 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); | 402 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); |
438 | unsigned long flags; | 403 | unsigned long flags; |
439 | LIST_HEAD(head); | ||
440 | 404 | ||
441 | spin_lock_irqsave(&c->lock, flags); | 405 | spin_lock_irqsave(&d->lock, flags); |
442 | spin_lock(&d->lock); | ||
443 | list_del_init(&c->node); | 406 | list_del_init(&c->node); |
444 | spin_unlock(&d->lock); | 407 | spin_unlock_irqrestore(&d->lock, flags); |
445 | |||
446 | list_splice_tail_init(&c->desc_submitted, &head); | ||
447 | list_splice_tail_init(&c->desc_issued, &head); | ||
448 | spin_unlock_irqrestore(&c->lock, flags); | ||
449 | 408 | ||
450 | sa11x0_dma_desc_free(d, &head); | 409 | vchan_free_chan_resources(&c->vc); |
451 | } | 410 | } |
452 | 411 | ||
453 | static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p) | 412 | static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p) |
@@ -472,33 +431,47 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, | |||
472 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | 431 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
473 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); | 432 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); |
474 | struct sa11x0_dma_phy *p; | 433 | struct sa11x0_dma_phy *p; |
475 | struct sa11x0_dma_desc *txd; | 434 | struct virt_dma_desc *vd; |
476 | dma_cookie_t last_used, last_complete; | ||
477 | unsigned long flags; | 435 | unsigned long flags; |
478 | enum dma_status ret; | 436 | enum dma_status ret; |
479 | size_t bytes = 0; | ||
480 | |||
481 | last_used = c->chan.cookie; | ||
482 | last_complete = c->lc; | ||
483 | 437 | ||
484 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 438 | ret = dma_cookie_status(&c->vc.chan, cookie, state); |
485 | if (ret == DMA_SUCCESS) { | 439 | if (ret == DMA_SUCCESS) |
486 | dma_set_tx_state(state, last_complete, last_used, 0); | ||
487 | return ret; | 440 | return ret; |
488 | } | ||
489 | 441 | ||
490 | spin_lock_irqsave(&c->lock, flags); | 442 | if (!state) |
443 | return c->status; | ||
444 | |||
445 | spin_lock_irqsave(&c->vc.lock, flags); | ||
491 | p = c->phy; | 446 | p = c->phy; |
492 | ret = c->status; | ||
493 | if (p) { | ||
494 | dma_addr_t addr = sa11x0_dma_pos(p); | ||
495 | 447 | ||
496 | dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); | 448 | /* |
449 | * If the cookie is on our issue queue, then the residue is | ||
450 | * its total size. | ||
451 | */ | ||
452 | vd = vchan_find_desc(&c->vc, cookie); | ||
453 | if (vd) { | ||
454 | state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size; | ||
455 | } else if (!p) { | ||
456 | state->residue = 0; | ||
457 | } else { | ||
458 | struct sa11x0_dma_desc *txd; | ||
459 | size_t bytes = 0; | ||
497 | 460 | ||
498 | txd = p->txd_done; | 461 | if (p->txd_done && p->txd_done->vd.tx.cookie == cookie) |
462 | txd = p->txd_done; | ||
463 | else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie) | ||
464 | txd = p->txd_load; | ||
465 | else | ||
466 | txd = NULL; | ||
467 | |||
468 | ret = c->status; | ||
499 | if (txd) { | 469 | if (txd) { |
470 | dma_addr_t addr = sa11x0_dma_pos(p); | ||
500 | unsigned i; | 471 | unsigned i; |
501 | 472 | ||
473 | dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); | ||
474 | |||
502 | for (i = 0; i < txd->sglen; i++) { | 475 | for (i = 0; i < txd->sglen; i++) { |
503 | dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n", | 476 | dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n", |
504 | i, txd->sg[i].addr, txd->sg[i].len); | 477 | i, txd->sg[i].addr, txd->sg[i].len); |
@@ -521,17 +494,11 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, | |||
521 | bytes += txd->sg[i].len; | 494 | bytes += txd->sg[i].len; |
522 | } | 495 | } |
523 | } | 496 | } |
524 | if (txd != p->txd_load && p->txd_load) | 497 | state->residue = bytes; |
525 | bytes += p->txd_load->size; | ||
526 | } | ||
527 | list_for_each_entry(txd, &c->desc_issued, node) { | ||
528 | bytes += txd->size; | ||
529 | } | 498 | } |
530 | spin_unlock_irqrestore(&c->lock, flags); | 499 | spin_unlock_irqrestore(&c->vc.lock, flags); |
531 | |||
532 | dma_set_tx_state(state, last_complete, last_used, bytes); | ||
533 | 500 | ||
534 | dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes); | 501 | dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue); |
535 | 502 | ||
536 | return ret; | 503 | return ret; |
537 | } | 504 | } |
@@ -547,40 +514,20 @@ static void sa11x0_dma_issue_pending(struct dma_chan *chan) | |||
547 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); | 514 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); |
548 | unsigned long flags; | 515 | unsigned long flags; |
549 | 516 | ||
550 | spin_lock_irqsave(&c->lock, flags); | 517 | spin_lock_irqsave(&c->vc.lock, flags); |
551 | list_splice_tail_init(&c->desc_submitted, &c->desc_issued); | 518 | if (vchan_issue_pending(&c->vc)) { |
552 | if (!list_empty(&c->desc_issued)) { | 519 | if (!c->phy) { |
553 | spin_lock(&d->lock); | 520 | spin_lock(&d->lock); |
554 | if (!c->phy && list_empty(&c->node)) { | 521 | if (list_empty(&c->node)) { |
555 | list_add_tail(&c->node, &d->chan_pending); | 522 | list_add_tail(&c->node, &d->chan_pending); |
556 | tasklet_schedule(&d->task); | 523 | tasklet_schedule(&d->task); |
557 | dev_dbg(d->slave.dev, "vchan %p: issued\n", c); | 524 | dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); |
525 | } | ||
526 | spin_unlock(&d->lock); | ||
558 | } | 527 | } |
559 | spin_unlock(&d->lock); | ||
560 | } else | 528 | } else |
561 | dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c); | 529 | dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); |
562 | spin_unlock_irqrestore(&c->lock, flags); | 530 | spin_unlock_irqrestore(&c->vc.lock, flags); |
563 | } | ||
564 | |||
565 | static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
566 | { | ||
567 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan); | ||
568 | struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx); | ||
569 | unsigned long flags; | ||
570 | |||
571 | spin_lock_irqsave(&c->lock, flags); | ||
572 | c->chan.cookie += 1; | ||
573 | if (c->chan.cookie < 0) | ||
574 | c->chan.cookie = 1; | ||
575 | txd->tx.cookie = c->chan.cookie; | ||
576 | |||
577 | list_add_tail(&txd->node, &c->desc_submitted); | ||
578 | spin_unlock_irqrestore(&c->lock, flags); | ||
579 | |||
580 | dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n", | ||
581 | c, txd, txd->tx.cookie); | ||
582 | |||
583 | return txd->tx.cookie; | ||
584 | } | 531 | } |
585 | 532 | ||
586 | static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( | 533 | static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( |
@@ -596,7 +543,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( | |||
596 | /* SA11x0 channels can only operate in their native direction */ | 543 | /* SA11x0 channels can only operate in their native direction */ |
597 | if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { | 544 | if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { |
598 | dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", | 545 | dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", |
599 | c, c->ddar, dir); | 546 | &c->vc, c->ddar, dir); |
600 | return NULL; | 547 | return NULL; |
601 | } | 548 | } |
602 | 549 | ||
@@ -612,14 +559,14 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( | |||
612 | j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; | 559 | j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; |
613 | if (addr & DMA_ALIGN) { | 560 | if (addr & DMA_ALIGN) { |
614 | dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n", | 561 | dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n", |
615 | c, addr); | 562 | &c->vc, addr); |
616 | return NULL; | 563 | return NULL; |
617 | } | 564 | } |
618 | } | 565 | } |
619 | 566 | ||
620 | txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC); | 567 | txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC); |
621 | if (!txd) { | 568 | if (!txd) { |
622 | dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c); | 569 | dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); |
623 | return NULL; | 570 | return NULL; |
624 | } | 571 | } |
625 | 572 | ||
@@ -655,17 +602,73 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( | |||
655 | } while (len); | 602 | } while (len); |
656 | } | 603 | } |
657 | 604 | ||
658 | dma_async_tx_descriptor_init(&txd->tx, &c->chan); | ||
659 | txd->tx.flags = flags; | ||
660 | txd->tx.tx_submit = sa11x0_dma_tx_submit; | ||
661 | txd->ddar = c->ddar; | 605 | txd->ddar = c->ddar; |
662 | txd->size = size; | 606 | txd->size = size; |
663 | txd->sglen = j; | 607 | txd->sglen = j; |
664 | 608 | ||
665 | dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n", | 609 | dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n", |
666 | c, txd, txd->size, txd->sglen); | 610 | &c->vc, &txd->vd, txd->size, txd->sglen); |
667 | 611 | ||
668 | return &txd->tx; | 612 | return vchan_tx_prep(&c->vc, &txd->vd, flags); |
613 | } | ||
614 | |||
615 | static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic( | ||
616 | struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, | ||
617 | enum dma_transfer_direction dir, void *context) | ||
618 | { | ||
619 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | ||
620 | struct sa11x0_dma_desc *txd; | ||
621 | unsigned i, j, k, sglen, sgperiod; | ||
622 | |||
623 | /* SA11x0 channels can only operate in their native direction */ | ||
624 | if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { | ||
625 | dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", | ||
626 | &c->vc, c->ddar, dir); | ||
627 | return NULL; | ||
628 | } | ||
629 | |||
630 | sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN); | ||
631 | sglen = size * sgperiod / period; | ||
632 | |||
633 | /* Do not allow zero-sized txds */ | ||
634 | if (sglen == 0) | ||
635 | return NULL; | ||
636 | |||
637 | txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC); | ||
638 | if (!txd) { | ||
639 | dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); | ||
640 | return NULL; | ||
641 | } | ||
642 | |||
643 | for (i = k = 0; i < size / period; i++) { | ||
644 | size_t tlen, len = period; | ||
645 | |||
646 | for (j = 0; j < sgperiod; j++, k++) { | ||
647 | tlen = len; | ||
648 | |||
649 | if (tlen > DMA_MAX_SIZE) { | ||
650 | unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN); | ||
651 | tlen = (tlen / mult) & ~DMA_ALIGN; | ||
652 | } | ||
653 | |||
654 | txd->sg[k].addr = addr; | ||
655 | txd->sg[k].len = tlen; | ||
656 | addr += tlen; | ||
657 | len -= tlen; | ||
658 | } | ||
659 | |||
660 | WARN_ON(len != 0); | ||
661 | } | ||
662 | |||
663 | WARN_ON(k != sglen); | ||
664 | |||
665 | txd->ddar = c->ddar; | ||
666 | txd->size = size; | ||
667 | txd->sglen = sglen; | ||
668 | txd->cyclic = 1; | ||
669 | txd->period = sgperiod; | ||
670 | |||
671 | return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
669 | } | 672 | } |
670 | 673 | ||
671 | static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) | 674 | static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) |
@@ -695,8 +698,8 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c | |||
695 | if (maxburst == 8) | 698 | if (maxburst == 8) |
696 | ddar |= DDAR_BS; | 699 | ddar |= DDAR_BS; |
697 | 700 | ||
698 | dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", | 701 | dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", |
699 | c, addr, width, maxburst); | 702 | &c->vc, addr, width, maxburst); |
700 | 703 | ||
701 | c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; | 704 | c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; |
702 | 705 | ||
@@ -718,16 +721,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
718 | return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); | 721 | return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); |
719 | 722 | ||
720 | case DMA_TERMINATE_ALL: | 723 | case DMA_TERMINATE_ALL: |
721 | dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c); | 724 | dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); |
722 | /* Clear the tx descriptor lists */ | 725 | /* Clear the tx descriptor lists */ |
723 | spin_lock_irqsave(&c->lock, flags); | 726 | spin_lock_irqsave(&c->vc.lock, flags); |
724 | list_splice_tail_init(&c->desc_submitted, &head); | 727 | vchan_get_all_descriptors(&c->vc, &head); |
725 | list_splice_tail_init(&c->desc_issued, &head); | ||
726 | 728 | ||
727 | p = c->phy; | 729 | p = c->phy; |
728 | if (p) { | 730 | if (p) { |
729 | struct sa11x0_dma_desc *txd, *txn; | ||
730 | |||
731 | dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); | 731 | dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); |
732 | /* vchan is assigned to a pchan - stop the channel */ | 732 | /* vchan is assigned to a pchan - stop the channel */ |
733 | writel(DCSR_RUN | DCSR_IE | | 733 | writel(DCSR_RUN | DCSR_IE | |
@@ -735,17 +735,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
735 | DCSR_STRTB | DCSR_DONEB, | 735 | DCSR_STRTB | DCSR_DONEB, |
736 | p->base + DMA_DCSR_C); | 736 | p->base + DMA_DCSR_C); |
737 | 737 | ||
738 | list_for_each_entry_safe(txd, txn, &d->desc_complete, node) | ||
739 | if (txd->tx.chan == &c->chan) | ||
740 | list_move(&txd->node, &head); | ||
741 | |||
742 | if (p->txd_load) { | 738 | if (p->txd_load) { |
743 | if (p->txd_load != p->txd_done) | 739 | if (p->txd_load != p->txd_done) |
744 | list_add_tail(&p->txd_load->node, &head); | 740 | list_add_tail(&p->txd_load->vd.node, &head); |
745 | p->txd_load = NULL; | 741 | p->txd_load = NULL; |
746 | } | 742 | } |
747 | if (p->txd_done) { | 743 | if (p->txd_done) { |
748 | list_add_tail(&p->txd_done->node, &head); | 744 | list_add_tail(&p->txd_done->vd.node, &head); |
749 | p->txd_done = NULL; | 745 | p->txd_done = NULL; |
750 | } | 746 | } |
751 | c->phy = NULL; | 747 | c->phy = NULL; |
@@ -754,14 +750,14 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
754 | spin_unlock(&d->lock); | 750 | spin_unlock(&d->lock); |
755 | tasklet_schedule(&d->task); | 751 | tasklet_schedule(&d->task); |
756 | } | 752 | } |
757 | spin_unlock_irqrestore(&c->lock, flags); | 753 | spin_unlock_irqrestore(&c->vc.lock, flags); |
758 | sa11x0_dma_desc_free(d, &head); | 754 | vchan_dma_desc_free_list(&c->vc, &head); |
759 | ret = 0; | 755 | ret = 0; |
760 | break; | 756 | break; |
761 | 757 | ||
762 | case DMA_PAUSE: | 758 | case DMA_PAUSE: |
763 | dev_dbg(d->slave.dev, "vchan %p: pause\n", c); | 759 | dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); |
764 | spin_lock_irqsave(&c->lock, flags); | 760 | spin_lock_irqsave(&c->vc.lock, flags); |
765 | if (c->status == DMA_IN_PROGRESS) { | 761 | if (c->status == DMA_IN_PROGRESS) { |
766 | c->status = DMA_PAUSED; | 762 | c->status = DMA_PAUSED; |
767 | 763 | ||
@@ -774,26 +770,26 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
774 | spin_unlock(&d->lock); | 770 | spin_unlock(&d->lock); |
775 | } | 771 | } |
776 | } | 772 | } |
777 | spin_unlock_irqrestore(&c->lock, flags); | 773 | spin_unlock_irqrestore(&c->vc.lock, flags); |
778 | ret = 0; | 774 | ret = 0; |
779 | break; | 775 | break; |
780 | 776 | ||
781 | case DMA_RESUME: | 777 | case DMA_RESUME: |
782 | dev_dbg(d->slave.dev, "vchan %p: resume\n", c); | 778 | dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); |
783 | spin_lock_irqsave(&c->lock, flags); | 779 | spin_lock_irqsave(&c->vc.lock, flags); |
784 | if (c->status == DMA_PAUSED) { | 780 | if (c->status == DMA_PAUSED) { |
785 | c->status = DMA_IN_PROGRESS; | 781 | c->status = DMA_IN_PROGRESS; |
786 | 782 | ||
787 | p = c->phy; | 783 | p = c->phy; |
788 | if (p) { | 784 | if (p) { |
789 | writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); | 785 | writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); |
790 | } else if (!list_empty(&c->desc_issued)) { | 786 | } else if (!list_empty(&c->vc.desc_issued)) { |
791 | spin_lock(&d->lock); | 787 | spin_lock(&d->lock); |
792 | list_add_tail(&c->node, &d->chan_pending); | 788 | list_add_tail(&c->node, &d->chan_pending); |
793 | spin_unlock(&d->lock); | 789 | spin_unlock(&d->lock); |
794 | } | 790 | } |
795 | } | 791 | } |
796 | spin_unlock_irqrestore(&c->lock, flags); | 792 | spin_unlock_irqrestore(&c->vc.lock, flags); |
797 | ret = 0; | 793 | ret = 0; |
798 | break; | 794 | break; |
799 | 795 | ||
@@ -853,15 +849,13 @@ static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev, | |||
853 | return -ENOMEM; | 849 | return -ENOMEM; |
854 | } | 850 | } |
855 | 851 | ||
856 | c->chan.device = dmadev; | ||
857 | c->status = DMA_IN_PROGRESS; | 852 | c->status = DMA_IN_PROGRESS; |
858 | c->ddar = chan_desc[i].ddar; | 853 | c->ddar = chan_desc[i].ddar; |
859 | c->name = chan_desc[i].name; | 854 | c->name = chan_desc[i].name; |
860 | spin_lock_init(&c->lock); | ||
861 | INIT_LIST_HEAD(&c->desc_submitted); | ||
862 | INIT_LIST_HEAD(&c->desc_issued); | ||
863 | INIT_LIST_HEAD(&c->node); | 855 | INIT_LIST_HEAD(&c->node); |
864 | list_add_tail(&c->chan.device_node, &dmadev->channels); | 856 | |
857 | c->vc.desc_free = sa11x0_dma_free_desc; | ||
858 | vchan_init(&c->vc, dmadev); | ||
865 | } | 859 | } |
866 | 860 | ||
867 | return dma_async_device_register(dmadev); | 861 | return dma_async_device_register(dmadev); |
@@ -890,8 +884,9 @@ static void sa11x0_dma_free_channels(struct dma_device *dmadev) | |||
890 | { | 884 | { |
891 | struct sa11x0_dma_chan *c, *cn; | 885 | struct sa11x0_dma_chan *c, *cn; |
892 | 886 | ||
893 | list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) { | 887 | list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) { |
894 | list_del(&c->chan.device_node); | 888 | list_del(&c->vc.chan.device_node); |
889 | tasklet_kill(&c->vc.task); | ||
895 | kfree(c); | 890 | kfree(c); |
896 | } | 891 | } |
897 | } | 892 | } |
@@ -915,7 +910,6 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev) | |||
915 | 910 | ||
916 | spin_lock_init(&d->lock); | 911 | spin_lock_init(&d->lock); |
917 | INIT_LIST_HEAD(&d->chan_pending); | 912 | INIT_LIST_HEAD(&d->chan_pending); |
918 | INIT_LIST_HEAD(&d->desc_complete); | ||
919 | 913 | ||
920 | d->base = ioremap(res->start, resource_size(res)); | 914 | d->base = ioremap(res->start, resource_size(res)); |
921 | if (!d->base) { | 915 | if (!d->base) { |
@@ -947,7 +941,9 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev) | |||
947 | } | 941 | } |
948 | 942 | ||
949 | dma_cap_set(DMA_SLAVE, d->slave.cap_mask); | 943 | dma_cap_set(DMA_SLAVE, d->slave.cap_mask); |
944 | dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); | ||
950 | d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg; | 945 | d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg; |
946 | d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic; | ||
951 | ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev); | 947 | ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev); |
952 | if (ret) { | 948 | if (ret) { |
953 | dev_warn(d->slave.dev, "failed to register slave async device: %d\n", | 949 | dev_warn(d->slave.dev, "failed to register slave async device: %d\n", |
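With the virt-dma conversion done, the sa11x0 driver also gains DMA_CYCLIC capability through sa11x0_dma_prep_dma_cyclic() above. For orientation, this is roughly how a slave client would drive such a channel; a hedged sketch, not part of the patch, and the example_* names, buffer address and sizes are assumptions:

#include <linux/dmaengine.h>

static void example_period_done(void *param)
{
	/* hypothetical handler: consume one completed period */
}

static void example_start_cyclic(struct dma_chan *chan, dma_addr_t buf)
{
	struct dma_async_tx_descriptor *txd;

	/* 4096-byte ring, callback every 1024-byte period (values invented) */
	txd = chan->device->device_prep_dma_cyclic(chan, buf, 4096, 1024,
						   DMA_MEM_TO_DEV, NULL);
	if (!txd)
		return;

	txd->callback = example_period_done;
	txd->callback_param = NULL;
	dmaengine_submit(txd);		/* routes to vchan_tx_submit() */
	dma_async_issue_pending(chan);	/* kicks the issue_pending path above */
}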
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 27f5c781fd73..f4cd946d259d 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c | |||
@@ -483,6 +483,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, | |||
483 | new->mark = DESC_PREPARED; | 483 | new->mark = DESC_PREPARED; |
484 | new->async_tx.flags = flags; | 484 | new->async_tx.flags = flags; |
485 | new->direction = direction; | 485 | new->direction = direction; |
486 | new->partial = 0; | ||
486 | 487 | ||
487 | *len -= copy_size; | 488 | *len -= copy_size; |
488 | if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) | 489 | if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) |
@@ -644,6 +645,14 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
644 | case DMA_TERMINATE_ALL: | 645 | case DMA_TERMINATE_ALL: |
645 | spin_lock_irqsave(&schan->chan_lock, flags); | 646 | spin_lock_irqsave(&schan->chan_lock, flags); |
646 | ops->halt_channel(schan); | 647 | ops->halt_channel(schan); |
648 | |||
649 | if (ops->get_partial && !list_empty(&schan->ld_queue)) { | ||
650 | /* Record partial transfer */ | ||
651 | struct shdma_desc *desc = list_first_entry(&schan->ld_queue, | ||
652 | struct shdma_desc, node); | ||
653 | desc->partial = ops->get_partial(schan, desc); | ||
654 | } | ||
655 | |||
647 | spin_unlock_irqrestore(&schan->chan_lock, flags); | 656 | spin_unlock_irqrestore(&schan->chan_lock, flags); |
648 | 657 | ||
649 | shdma_chan_ld_cleanup(schan, true); | 658 | shdma_chan_ld_cleanup(schan, true); |
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c index 027c9be97654..f41bcc5267fd 100644 --- a/drivers/dma/sh/shdma.c +++ b/drivers/dma/sh/shdma.c | |||
@@ -381,6 +381,17 @@ static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq) | |||
381 | return true; | 381 | return true; |
382 | } | 382 | } |
383 | 383 | ||
384 | static size_t sh_dmae_get_partial(struct shdma_chan *schan, | ||
385 | struct shdma_desc *sdesc) | ||
386 | { | ||
387 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | ||
388 | shdma_chan); | ||
389 | struct sh_dmae_desc *sh_desc = container_of(sdesc, | ||
390 | struct sh_dmae_desc, shdma_desc); | ||
391 | return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << | ||
392 | sh_chan->xmit_shift; | ||
393 | } | ||
394 | |||
384 | /* Called from error IRQ or NMI */ | 395 | /* Called from error IRQ or NMI */ |
385 | static bool sh_dmae_reset(struct sh_dmae_device *shdev) | 396 | static bool sh_dmae_reset(struct sh_dmae_device *shdev) |
386 | { | 397 | { |
@@ -632,6 +643,7 @@ static const struct shdma_ops sh_dmae_shdma_ops = { | |||
632 | .start_xfer = sh_dmae_start_xfer, | 643 | .start_xfer = sh_dmae_start_xfer, |
633 | .embedded_desc = sh_dmae_embedded_desc, | 644 | .embedded_desc = sh_dmae_embedded_desc, |
634 | .chan_irq = sh_dmae_chan_irq, | 645 | .chan_irq = sh_dmae_chan_irq, |
646 | .get_partial = sh_dmae_get_partial, | ||
635 | }; | 647 | }; |
636 | 648 | ||
637 | static int __devinit sh_dmae_probe(struct platform_device *pdev) | 649 | static int __devinit sh_dmae_probe(struct platform_device *pdev) |
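The new get_partial hook lets DMA_TERMINATE_ALL in shdma-base.c record how much of the halted descriptor actually completed: TCR counts outstanding transfer units, decrementing from the value programmed in hw.tcr, and xmit_shift is log2 of the per-unit width, so the subtraction-and-shift converts completed units to bytes. A worked example of that arithmetic, with invented register values:

/* Illustration only; all values below are made up. */
unsigned int xmit_shift = 2;		/* 4-byte transfer units */
unsigned int tcr_programmed = 256;	/* units requested (sh_desc->hw.tcr) */
unsigned int tcr_now = 100;		/* units still outstanding (TCR) */

/* (256 - 100) << 2 = 624 bytes were transferred before the halt */
size_t partial = (size_t)(tcr_programmed - tcr_now) << xmit_shift;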
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c new file mode 100644 index 000000000000..6f80432a3f0a --- /dev/null +++ b/drivers/dma/virt-dma.c | |||
@@ -0,0 +1,123 @@ | |||
1 | /* | ||
2 | * Virtual DMA channel support for DMAengine | ||
3 | * | ||
4 | * Copyright (C) 2012 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/device.h> | ||
11 | #include <linux/dmaengine.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/spinlock.h> | ||
14 | |||
15 | #include "virt-dma.h" | ||
16 | |||
17 | static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx) | ||
18 | { | ||
19 | return container_of(tx, struct virt_dma_desc, tx); | ||
20 | } | ||
21 | |||
22 | dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) | ||
23 | { | ||
24 | struct virt_dma_chan *vc = to_virt_chan(tx->chan); | ||
25 | struct virt_dma_desc *vd = to_virt_desc(tx); | ||
26 | unsigned long flags; | ||
27 | dma_cookie_t cookie; | ||
28 | |||
29 | spin_lock_irqsave(&vc->lock, flags); | ||
30 | cookie = dma_cookie_assign(tx); | ||
31 | |||
32 | list_add_tail(&vd->node, &vc->desc_submitted); | ||
33 | spin_unlock_irqrestore(&vc->lock, flags); | ||
34 | |||
35 | dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", | ||
36 | vc, vd, cookie); | ||
37 | |||
38 | return cookie; | ||
39 | } | ||
40 | EXPORT_SYMBOL_GPL(vchan_tx_submit); | ||
41 | |||
42 | struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, | ||
43 | dma_cookie_t cookie) | ||
44 | { | ||
45 | struct virt_dma_desc *vd; | ||
46 | |||
47 | list_for_each_entry(vd, &vc->desc_issued, node) | ||
48 | if (vd->tx.cookie == cookie) | ||
49 | return vd; | ||
50 | |||
51 | return NULL; | ||
52 | } | ||
53 | EXPORT_SYMBOL_GPL(vchan_find_desc); | ||
54 | |||
55 | /* | ||
56 | * This tasklet handles the completion of a DMA descriptor by | ||
57 | * calling its callback and freeing it. | ||
58 | */ | ||
59 | static void vchan_complete(unsigned long arg) | ||
60 | { | ||
61 | struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; | ||
62 | struct virt_dma_desc *vd; | ||
63 | dma_async_tx_callback cb = NULL; | ||
64 | void *cb_data = NULL; | ||
65 | LIST_HEAD(head); | ||
66 | |||
67 | spin_lock_irq(&vc->lock); | ||
68 | list_splice_tail_init(&vc->desc_completed, &head); | ||
69 | vd = vc->cyclic; | ||
70 | if (vd) { | ||
71 | vc->cyclic = NULL; | ||
72 | cb = vd->tx.callback; | ||
73 | cb_data = vd->tx.callback_param; | ||
74 | } | ||
75 | spin_unlock_irq(&vc->lock); | ||
76 | |||
77 | if (cb) | ||
78 | cb(cb_data); | ||
79 | |||
80 | while (!list_empty(&head)) { | ||
81 | vd = list_first_entry(&head, struct virt_dma_desc, node); | ||
82 | cb = vd->tx.callback; | ||
83 | cb_data = vd->tx.callback_param; | ||
84 | |||
85 | list_del(&vd->node); | ||
86 | |||
87 | vc->desc_free(vd); | ||
88 | |||
89 | if (cb) | ||
90 | cb(cb_data); | ||
91 | } | ||
92 | } | ||
93 | |||
94 | void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) | ||
95 | { | ||
96 | while (!list_empty(head)) { | ||
97 | struct virt_dma_desc *vd = list_first_entry(head, | ||
98 | struct virt_dma_desc, node); | ||
99 | list_del(&vd->node); | ||
100 | dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); | ||
101 | vc->desc_free(vd); | ||
102 | } | ||
103 | } | ||
104 | EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); | ||
105 | |||
106 | void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) | ||
107 | { | ||
108 | dma_cookie_init(&vc->chan); | ||
109 | |||
110 | spin_lock_init(&vc->lock); | ||
111 | INIT_LIST_HEAD(&vc->desc_submitted); | ||
112 | INIT_LIST_HEAD(&vc->desc_issued); | ||
113 | INIT_LIST_HEAD(&vc->desc_completed); | ||
114 | |||
115 | tasklet_init(&vc->task, vchan_complete, (unsigned long)vc); | ||
116 | |||
117 | vc->chan.device = dmadev; | ||
118 | list_add_tail(&vc->chan.device_node, &dmadev->channels); | ||
119 | } | ||
120 | EXPORT_SYMBOL_GPL(vchan_init); | ||
121 | |||
122 | MODULE_AUTHOR("Russell King"); | ||
123 | MODULE_LICENSE("GPL"); | ||
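Note the two completion paths vchan_complete() services: descriptors on desc_completed get their callback and are then freed through desc_free, while vc->cyclic only triggers a callback and stays owned by the driver. A driver's interrupt path dispatches between them; modelled on the sa11x0 conversion above, with hypothetical example_* wrappers embedding the virt-dma structs:

struct example_desc {
	struct virt_dma_desc vd;
	bool cyclic;
};

/* Called from the IRQ handler with the channel's vc.lock held. */
static void example_dma_complete(struct example_desc *txd)
{
	if (txd->cyclic)
		vchan_cyclic_callback(&txd->vd); /* per-period callback, txd reused */
	else
		vchan_cookie_complete(&txd->vd); /* cookie done, freed from tasklet */
}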
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h new file mode 100644 index 000000000000..85c19d63f9fb --- /dev/null +++ b/drivers/dma/virt-dma.h | |||
@@ -0,0 +1,152 @@ | |||
1 | /* | ||
2 | * Virtual DMA channel support for DMAengine | ||
3 | * | ||
4 | * Copyright (C) 2012 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #ifndef VIRT_DMA_H | ||
11 | #define VIRT_DMA_H | ||
12 | |||
13 | #include <linux/dmaengine.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | |||
16 | #include "dmaengine.h" | ||
17 | |||
18 | struct virt_dma_desc { | ||
19 | struct dma_async_tx_descriptor tx; | ||
20 | /* protected by vc.lock */ | ||
21 | struct list_head node; | ||
22 | }; | ||
23 | |||
24 | struct virt_dma_chan { | ||
25 | struct dma_chan chan; | ||
26 | struct tasklet_struct task; | ||
27 | void (*desc_free)(struct virt_dma_desc *); | ||
28 | |||
29 | spinlock_t lock; | ||
30 | |||
31 | /* protected by vc.lock */ | ||
32 | struct list_head desc_submitted; | ||
33 | struct list_head desc_issued; | ||
34 | struct list_head desc_completed; | ||
35 | |||
36 | struct virt_dma_desc *cyclic; | ||
37 | }; | ||
38 | |||
39 | static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) | ||
40 | { | ||
41 | return container_of(chan, struct virt_dma_chan, chan); | ||
42 | } | ||
43 | |||
44 | void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); | ||
45 | void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev); | ||
46 | struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t); | ||
47 | |||
48 | /** | ||
49 | * vchan_tx_prep - prepare a descriptor | ||
50 | * @vc: virtual channel allocating this descriptor | ||
51 | * @vd: virtual descriptor to prepare | ||
52 | * @tx_flags: flags argument passed in to prepare function | ||
53 | */ | ||
54 | static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc, | ||
55 | struct virt_dma_desc *vd, unsigned long tx_flags) | ||
56 | { | ||
57 | extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); | ||
58 | |||
59 | dma_async_tx_descriptor_init(&vd->tx, &vc->chan); | ||
60 | vd->tx.flags = tx_flags; | ||
61 | vd->tx.tx_submit = vchan_tx_submit; | ||
62 | |||
63 | return &vd->tx; | ||
64 | } | ||
65 | |||
66 | /** | ||
67 | * vchan_issue_pending - move submitted descriptors to issued list | ||
68 | * @vc: virtual channel to update | ||
69 | * | ||
70 | * vc.lock must be held by caller | ||
71 | */ | ||
72 | static inline bool vchan_issue_pending(struct virt_dma_chan *vc) | ||
73 | { | ||
74 | list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued); | ||
75 | return !list_empty(&vc->desc_issued); | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * vchan_cookie_complete - report completion of a descriptor | ||
80 | * @vd: virtual descriptor to update | ||
81 | * | ||
82 | * vc.lock must be held by caller | ||
83 | */ | ||
84 | static inline void vchan_cookie_complete(struct virt_dma_desc *vd) | ||
85 | { | ||
86 | struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); | ||
87 | |||
88 | dma_cookie_complete(&vd->tx); | ||
89 | dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n", | ||
90 | vd, vd->tx.cookie); | ||
91 | list_add_tail(&vd->node, &vc->desc_completed); | ||
92 | |||
93 | tasklet_schedule(&vc->task); | ||
94 | } | ||
95 | |||
96 | /** | ||
97 | * vchan_cyclic_callback - report the completion of a period | ||
98 | * @vd: virtual descriptor | ||
99 | */ | ||
100 | static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) | ||
101 | { | ||
102 | struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); | ||
103 | |||
104 | vc->cyclic = vd; | ||
105 | tasklet_schedule(&vc->task); | ||
106 | } | ||
107 | |||
108 | /** | ||
109 | * vchan_next_desc - peek at the next descriptor to be processed | ||
110 | * @vc: virtual channel to obtain descriptor from | ||
111 | * | ||
112 | * vc.lock must be held by caller | ||
113 | */ | ||
114 | static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) | ||
115 | { | ||
116 | if (list_empty(&vc->desc_issued)) | ||
117 | return NULL; | ||
118 | |||
119 | return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node); | ||
120 | } | ||
121 | |||
122 | /** | ||
123 | * vchan_get_all_descriptors - obtain all submitted and issued descriptors | ||
124 | * @vc: virtual channel to get descriptors from | ||
125 | * @head: list of descriptors found | ||
126 | * | ||
127 | * vc.lock must be held by caller | ||
128 | * | ||
129 | * Removes all submitted and issued descriptors from internal lists, and | ||
130 | * provides a list of all descriptors found | ||
131 | */ | ||
132 | static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, | ||
133 | struct list_head *head) | ||
134 | { | ||
135 | list_splice_tail_init(&vc->desc_submitted, head); | ||
136 | list_splice_tail_init(&vc->desc_issued, head); | ||
137 | list_splice_tail_init(&vc->desc_completed, head); | ||
138 | } | ||
139 | |||
140 | static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) | ||
141 | { | ||
142 | unsigned long flags; | ||
143 | LIST_HEAD(head); | ||
144 | |||
145 | spin_lock_irqsave(&vc->lock, flags); | ||
146 | vchan_get_all_descriptors(vc, &head); | ||
147 | spin_unlock_irqrestore(&vc->lock, flags); | ||
148 | |||
149 | vchan_dma_desc_free_list(vc, &head); | ||
150 | } | ||
151 | |||
152 | #endif | ||
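Taken together, virt-dma.h replaces the per-driver cookie, list and lock boilerplate that the sa11x0 hunks above delete. A minimal adoption skeleton, distilled from that conversion; all example_* names are hypothetical:

#include <linux/slab.h>
#include "virt-dma.h"

struct example_chan {
	struct virt_dma_chan vc;	/* embeds struct dma_chan */
};

struct example_desc {
	struct virt_dma_desc vd;
	/* hardware-specific fields follow */
};

static void example_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct example_desc, vd));
}

static void example_chan_setup(struct example_chan *c, struct dma_device *dmadev)
{
	/* replaces open-coded cookie init, list heads and channel lock */
	c->vc.desc_free = example_desc_free;
	vchan_init(&c->vc, dmadev);
}

static int example_terminate_all(struct example_chan *c)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head); /* submitted + issued + completed */
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);  /* free outside the lock */
	return 0;
}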