Diffstat (limited to 'include/linux/dmaengine.h')
 include/linux/dmaengine.h | 127 ++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 120 insertions(+), 7 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 20ea12c86fd0..5204f018931b 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -40,11 +40,13 @@ typedef s32 dma_cookie_t;
  * enum dma_status - DMA transaction status
  * @DMA_SUCCESS: transaction completed successfully
  * @DMA_IN_PROGRESS: transaction not yet processed
+ * @DMA_PAUSED: transaction is paused
  * @DMA_ERROR: transaction failed
  */
 enum dma_status {
 	DMA_SUCCESS,
 	DMA_IN_PROGRESS,
+	DMA_PAUSED,
 	DMA_ERROR,
 };
 
@@ -107,6 +109,19 @@ enum dma_ctrl_flags {
 };
 
 /**
+ * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
+ * on a running channel.
+ * @DMA_TERMINATE_ALL: terminate all ongoing transfers
+ * @DMA_PAUSE: pause ongoing transfers
+ * @DMA_RESUME: resume paused transfer
+ */
+enum dma_ctrl_cmd {
+	DMA_TERMINATE_ALL,
+	DMA_PAUSE,
+	DMA_RESUME,
+};
+
+/**
  * enum sum_check_bits - bit position of pq_check_flags
  */
 enum sum_check_bits {
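For illustration, a driver adopting this interface would dispatch on the command in its control hook. A minimal sketch, not part of this patch; the mydrv_* type and helpers are hypothetical:

static int mydrv_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	/* to_mydrv_chan() and the helpers below are hypothetical */
	struct mydrv_chan *mc = to_mydrv_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		mydrv_stop_channel_and_free_descs(mc);
		return 0;
	case DMA_PAUSE:
		mydrv_halt_channel(mc);
		return 0;
	case DMA_RESUME:
		mydrv_restart_channel(mc);
		return 0;
	default:
		/* commands the hardware cannot honour get an error back */
		return -ENXIO;
	}
}

Since the commands are optional, clients must check the return code rather than assume the channel actually paused or terminated.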
@@ -230,9 +245,84 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
+#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
 	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
 	spinlock_t lock;
+#endif
+};
+
+#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+	BUG();
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+	return NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+	return NULL;
+}
+
+#else
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+	spin_lock_bh(&txd->lock);
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+	spin_unlock_bh(&txd->lock);
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+	txd->next = next;
+	next->parent = txd;
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+	txd->parent = NULL;
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+	txd->next = NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+	return txd->parent;
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+	return txd->next;
+}
+#endif
+
+/**
+ * struct dma_tx_state - filled in to report the status of
+ * a transfer.
+ * @last: last completed DMA cookie
+ * @used: last issued DMA cookie (i.e. the one in progress)
+ * @residue: the remaining number of bytes left to transmit
+ *	on the selected transfer for states DMA_IN_PROGRESS and
+ *	DMA_PAUSED if this is implemented in the driver, else 0
+ */
+struct dma_tx_state {
+	dma_cookie_t last;
+	dma_cookie_t used;
+	u32 residue;
 };
 
 /**
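To see why the accessors pay off, consider a dependency walker loosely modelled on what the async_tx core does (a sketch, not code from this patch). With CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH set, txd_next() is constant-NULL and txd_lock()/txd_unlock() are empty, so the whole loop compiles away:

static void run_dependents(struct dma_async_tx_descriptor *txd)
{
	struct dma_async_tx_descriptor *dep, *next;

	/* detach the chain from the finished descriptor */
	txd_lock(txd);
	dep = txd_next(txd);	/* always NULL when switching is disabled */
	txd_clear_next(txd);
	txd_unlock(txd);

	while (dep) {
		txd_lock(dep);
		txd_clear_parent(dep);	/* parent is done, drop the back-pointer */
		next = txd_next(dep);
		txd_clear_next(dep);
		txd_unlock(dep);
		/* a real walker would submit 'dep' to its channel here */
		dep = next;
	}
}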
@@ -261,8 +351,12 @@ struct dma_async_tx_descriptor {
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
- * @device_terminate_all: terminate all pending operations
- * @device_is_tx_complete: poll for transaction completion
+ * @device_control: manipulate all pending operations on a channel, returns
+ *	zero or error code
+ * @device_tx_status: poll for transaction completion, the optional
+ *	txstate parameter can be supplied with a pointer to get a
+ *	struct with auxiliary transfer status information, otherwise the call
+ *	will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
@@ -313,11 +407,12 @@ struct dma_device {
 			struct dma_chan *chan, struct scatterlist *sgl,
 			unsigned int sg_len, enum dma_data_direction direction,
 			unsigned long flags);
-	void (*device_terminate_all)(struct dma_chan *chan);
+	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			unsigned long arg);
 
-	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
-			dma_cookie_t cookie, dma_cookie_t *last,
-			dma_cookie_t *used);
+	enum dma_status (*device_tx_status)(struct dma_chan *chan,
+			dma_cookie_t cookie,
+			struct dma_tx_state *txstate);
 	void (*device_issue_pending)(struct dma_chan *chan);
 };
 
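From the client side, the two new hooks are reached directly through the channel's device. A usage sketch, assuming the driver implements both:

static void pause_and_inspect(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;
	int ret;

	/* optional command: a driver may legitimately refuse it */
	ret = chan->device->device_control(chan, DMA_PAUSE, 0);
	if (ret)
		return;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (status == DMA_PAUSED)
		pr_info("transfer paused, %u bytes left\n", state.residue);

	chan->device->device_control(chan, DMA_RESUME, 0);
}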
@@ -558,7 +653,15 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
 static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
 	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
 {
-	return chan->device->device_is_tx_complete(chan, cookie, last, used);
+	struct dma_tx_state state;
+	enum dma_status status;
+
+	status = chan->device->device_tx_status(chan, cookie, &state);
+	if (last)
+		*last = state.last;
+	if (used)
+		*used = state.used;
+	return status;
 }
 
 #define dma_async_memcpy_complete(chan, cookie, last, used)\
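Existing callers of this wrapper are therefore unaffected by the conversion. A typical polling loop over it, essentially what dma_sync_wait() does minus the timeout handling, would look like:

static enum dma_status poll_until_done(struct dma_chan *chan,
		dma_cookie_t cookie)
{
	dma_cookie_t last, used;
	enum dma_status status;

	do {
		status = dma_async_is_tx_complete(chan, cookie, &last, &used);
		if (status == DMA_IN_PROGRESS)
			cpu_relax();	/* real code should bound this loop */
	} while (status == DMA_IN_PROGRESS);

	return status;
}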
@@ -586,6 +689,16 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 	return DMA_IN_PROGRESS;
 }
 
+static inline void
+dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
+{
+	if (st) {
+		st->last = last;
+		st->used = used;
+		st->residue = residue;
+	}
+}
+
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 #ifdef CONFIG_DMA_ENGINE
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
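With dma_set_tx_state() a driver's status hook reduces to a cookie comparison plus one helper call. A sketch; the mydrv_* names and the completed_cookie bookkeeping are hypothetical, though common in dmaengine drivers:

static enum dma_status mydrv_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mydrv_chan *mc = to_mydrv_chan(chan);	/* hypothetical */
	enum dma_status ret;

	ret = dma_async_is_complete(cookie, mc->completed_cookie, mc->cookie);
	/* txstate may be NULL; dma_set_tx_state() checks that for us */
	dma_set_tx_state(txstate, mc->completed_cookie, mc->cookie,
			 mydrv_residue(mc, cookie));	/* hypothetical helper */
	return ret;
}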