Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r-- | include/linux/dmaengine.h | 76
1 file changed, 56 insertions(+), 20 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 0bc727534108..41cf0c399288 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -45,13 +45,13 @@ static inline int dma_submit_error(dma_cookie_t cookie)
 
 /**
  * enum dma_status - DMA transaction status
- * @DMA_SUCCESS: transaction completed successfully
+ * @DMA_COMPLETE: transaction completed
  * @DMA_IN_PROGRESS: transaction not yet processed
  * @DMA_PAUSED: transaction is paused
  * @DMA_ERROR: transaction failed
  */
 enum dma_status {
-        DMA_SUCCESS,
+        DMA_COMPLETE,
         DMA_IN_PROGRESS,
         DMA_PAUSED,
         DMA_ERROR,
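With DMA_SUCCESS renamed to DMA_COMPLETE, any caller comparing a returned dma_status against the old name must be updated. A minimal polling sketch of the usual pattern, assuming chan and cookie come from an earlier prep/submit; dma_async_is_tx_complete() is the existing helper further down in this header:

    dma_cookie_t last, used;
    enum dma_status status;

    /* Poll the channel; DMA_COMPLETE replaces DMA_SUCCESS here. */
    status = dma_async_is_tx_complete(chan, cookie, &last, &used);
    if (status == DMA_COMPLETE)
            /* transfer finished */;
    else if (status == DMA_ERROR)
            /* transfer failed */;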
@@ -171,12 +171,6 @@ struct dma_interleaved_template {
  * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
  *  acknowledges receipt, i.e. has had a chance to establish any dependency
  *  chains
- * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
- * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
- * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
- *  (if not set, do the source dma-unmapping as page)
- * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
- *  (if not set, do the destination dma-unmapping as page)
  * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
  * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
  * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
@@ -188,14 +182,10 @@ struct dma_interleaved_template {
 enum dma_ctrl_flags {
         DMA_PREP_INTERRUPT = (1 << 0),
         DMA_CTRL_ACK = (1 << 1),
-        DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
-        DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
-        DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
-        DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
-        DMA_PREP_PQ_DISABLE_P = (1 << 6),
-        DMA_PREP_PQ_DISABLE_Q = (1 << 7),
-        DMA_PREP_CONTINUE = (1 << 8),
-        DMA_PREP_FENCE = (1 << 9),
+        DMA_PREP_PQ_DISABLE_P = (1 << 2),
+        DMA_PREP_PQ_DISABLE_Q = (1 << 3),
+        DMA_PREP_CONTINUE = (1 << 4),
+        DMA_PREP_FENCE = (1 << 5),
 };
 
 /**
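Because the four DMA_COMPL_* bits are gone, the surviving prep flags move down from bits 6-9 to bits 2-5. Out-of-tree code must be recompiled rather than relying on the old numeric values; the names themselves keep working, as in this sketch (chan, dst, src and len are placeholders):

    unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
    struct dma_async_tx_descriptor *tx;

    /* Flag names are stable even though their bit positions changed. */
    tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);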
@@ -413,6 +403,17 @@ void dma_chan_cleanup(struct kref *kref);
 typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+struct dmaengine_unmap_data {
+        u8 to_cnt;
+        u8 from_cnt;
+        u8 bidi_cnt;
+        struct device *dev;
+        struct kref kref;
+        size_t len;
+        dma_addr_t addr[0];
+};
+
 /**
  * struct dma_async_tx_descriptor - async transaction descriptor
  * ---dma generic offload fields---
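The new struct dmaengine_unmap_data replaces the per-flag unmap bookkeeping: addr[] is a flexible array sized by the nr argument of dmaengine_get_unmap_data(), and the counts partition it by direction. A sketch of filling one in, assuming dev, src_page, dst_page and len exist, and assuming the core walks addr[] in to/from/bidi order:

    struct dmaengine_unmap_data *unmap;

    unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
    if (!unmap)
            return -ENOMEM;

    unmap->len = len;
    /* Source entries first (DMA_TO_DEVICE)... */
    unmap->addr[0] = dma_map_page(dev, src_page, 0, len, DMA_TO_DEVICE);
    unmap->to_cnt = 1;
    /* ...then destination entries (DMA_FROM_DEVICE). */
    unmap->addr[1] = dma_map_page(dev, dst_page, 0, len, DMA_FROM_DEVICE);
    unmap->from_cnt = 1;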
@@ -438,6 +439,7 @@ struct dma_async_tx_descriptor {
         dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
         dma_async_tx_callback callback;
         void *callback_param;
+        struct dmaengine_unmap_data *unmap;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
         struct dma_async_tx_descriptor *next;
         struct dma_async_tx_descriptor *parent;
@@ -445,6 +447,40 @@ struct dma_async_tx_descriptor {
 #endif
 };
 
+#ifdef CONFIG_DMA_ENGINE
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+                                 struct dmaengine_unmap_data *unmap)
+{
+        kref_get(&unmap->kref);
+        tx->unmap = unmap;
+}
+
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
+#else
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+                                 struct dmaengine_unmap_data *unmap)
+{
+}
+static inline struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+        return NULL;
+}
+static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+}
+#endif
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+        if (tx->unmap) {
+                dmaengine_unmap_put(tx->unmap);
+                tx->unmap = NULL;
+        }
+}
+
 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
 {
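The intended lifetime is reference counted: dma_set_unmap() takes an extra kref so the descriptor owns the unmap data, the client then drops its own reference with dmaengine_unmap_put(), and the driver's cleanup path calls dma_descriptor_unmap() once the transfer completes, so the final put performs the actual unmapping. Continuing the sketch above (desc->txd is a hypothetical driver descriptor field):

    tx = chan->device->device_prep_dma_memcpy(chan, unmap->addr[1],
                                              unmap->addr[0], len,
                                              DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (tx)
            dma_set_unmap(tx, unmap);   /* descriptor now holds a kref */
    dmaengine_unmap_put(unmap);         /* drop the client's reference */

    /* Later, in the driver's completion path: */
    dma_descriptor_unmap(&desc->txd);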
@@ -979,10 +1015,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 {
         if (last_complete <= last_used) {
                 if ((cookie <= last_complete) || (cookie > last_used))
-                        return DMA_SUCCESS;
+                        return DMA_COMPLETE;
         } else {
                 if ((cookie <= last_complete) && (cookie > last_used))
-                        return DMA_SUCCESS;
+                        return DMA_COMPLETE;
         }
         return DMA_IN_PROGRESS;
 }
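A quick worked example of the unchanged cookie arithmetic: with last_complete == 10 and last_used == 15, cookie 8 yields DMA_COMPLETE and cookie 12 DMA_IN_PROGRESS; once last_used wraps around the cookie space below last_complete, the else branch treats only cookies in the (last_used, last_complete] window as complete, which is exactly the set not still in flight.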
@@ -1013,11 +1049,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 }
 static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 {
-        return DMA_SUCCESS;
+        return DMA_COMPLETE;
 }
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
-        return DMA_SUCCESS;
+        return DMA_COMPLETE;
 }
 static inline void dma_issue_pending_all(void)
 {