author     Linus Torvalds <torvalds@linux-foundation.org>   2013-11-20 16:20:24 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-11-20 16:20:24 -0500
commit     e6d69a60b77a6ea8d5f9d41765c7571bb8d45531 (patch)
tree       4ea3fe7c49a864da2ce7ffb51a703661826dc15d /include/linux
parent     5a1efc6e68a095917277459091fafba6a6baef17 (diff)
parent     df12a3178d340319b1955be6b973a4eb84aff754 (diff)
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine changes from Vinod Koul:

 "This brings for slave dmaengine:

   - Change the dma notification flag to DMA_COMPLETE from DMA_SUCCESS,
     as dmaengine can only transfer and cannot verify the validity of
     dma transfers (a status-polling sketch follows this list)

   - Bunch of fixes across drivers:

      - cppi41 driver fixes from Daniel

      - 8 channel freescale dma engine support and updated bindings
        from Hongbo

      - mxs-dma fixes and cleanup by Markus

   - DMAengine updates from Dan:

      - Bartlomiej and Dan finalized a rework of the dma address unmap
        implementation (a usage sketch follows the dmaengine.h diff
        below).

      - In the course of testing 1/, a collection of enhancements to
        dmatest fell out: notably basic performance statistics, and
        fixed / enhanced test control through the new module parameters
        'run', 'wait', 'noverify', and 'verbose'. Thanks to Andriy and
        Linus [Walleij] for their review.

      - Testing the raid-related corner cases of 1/ triggered bugs in
        the recently added 16-source operation support in the ioatdma
        driver.

      - Some minor fixes / cleanups to mv_xor and ioatdma"
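To make the rename concrete, here is a minimal sketch (not from this series) of how a client might poll a submitted transaction under the new naming; `my_wait_for_copy` and its arguments are illustrative stand-ins for a real driver's channel and cookie:

```c
#include <linux/dmaengine.h>
#include <linux/errno.h>

/*
 * Hypothetical helper: busy-wait until a previously submitted
 * transaction finishes. Callers that used to compare against
 * DMA_SUCCESS now compare against DMA_COMPLETE; the enum only says
 * the transfer ran to completion, not that the data is valid.
 */
static int my_wait_for_copy(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;

	dma_async_issue_pending(chan);	/* flush the submitted descriptor */

	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	} while (status == DMA_IN_PROGRESS);

	return status == DMA_COMPLETE ? 0 : -EIO;
}
```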
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (99 commits)
dma: mv_xor: Fix mis-usage of mmio 'base' and 'high_base' registers
dma: mv_xor: Remove unneeded NULL address check
ioat: fix ioat3_irq_reinit
ioat: kill msix_single_vector support
raid6test: add new corner case for ioatdma driver
ioatdma: clean up sed pool kmem_cache
ioatdma: fix selection of 16 vs 8 source path
ioatdma: fix sed pool selection
ioatdma: Fix bug in selftest after removal of DMA_MEMSET.
dmatest: verbose mode
dmatest: convert to dmaengine_unmap_data
dmatest: add a 'wait' parameter
dmatest: add basic performance metrics
dmatest: add support for skipping verification and random data setup
dmatest: use pseudo random numbers
dmatest: support xor-only, or pq-only channels in tests
dmatest: restore ability to start test at module load and init
dmatest: cleanup redundant "dmatest: " prefixes
dmatest: replace stored results mechanism, with uniform messages
Revert "dmatest: append verify result to results"
...
Diffstat (limited to 'include/linux')

 include/linux/dmaengine.h          | 76 ++++++++++++++++++---------
 include/linux/platform_data/edma.h |  8 ++--
 2 files changed, 60 insertions(+), 24 deletions(-)
```diff
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 0bc727534108..41cf0c399288 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -45,13 +45,13 @@ static inline int dma_submit_error(dma_cookie_t cookie)
 
 /**
  * enum dma_status - DMA transaction status
- * @DMA_SUCCESS: transaction completed successfully
+ * @DMA_COMPLETE: transaction completed
  * @DMA_IN_PROGRESS: transaction not yet processed
  * @DMA_PAUSED: transaction is paused
  * @DMA_ERROR: transaction failed
  */
 enum dma_status {
-	DMA_SUCCESS,
+	DMA_COMPLETE,
 	DMA_IN_PROGRESS,
 	DMA_PAUSED,
 	DMA_ERROR,
@@ -171,12 +171,6 @@ struct dma_interleaved_template {
  * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
  * acknowledges receipt, i.e. has has a chance to establish any dependency
  * chains
- * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
- * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
- * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
- *	(if not set, do the source dma-unmapping as page)
- * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
- *	(if not set, do the destination dma-unmapping as page)
  * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
  * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
  * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
@@ -188,14 +182,10 @@ struct dma_interleaved_template {
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
 	DMA_CTRL_ACK = (1 << 1),
-	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
-	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
-	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
-	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
-	DMA_PREP_PQ_DISABLE_P = (1 << 6),
-	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
-	DMA_PREP_CONTINUE = (1 << 8),
-	DMA_PREP_FENCE = (1 << 9),
+	DMA_PREP_PQ_DISABLE_P = (1 << 2),
+	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
+	DMA_PREP_CONTINUE = (1 << 4),
+	DMA_PREP_FENCE = (1 << 5),
 };
 
 /**
@@ -413,6 +403,17 @@ void dma_chan_cleanup(struct kref *kref);
 typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+struct dmaengine_unmap_data {
+	u8 to_cnt;
+	u8 from_cnt;
+	u8 bidi_cnt;
+	struct device *dev;
+	struct kref kref;
+	size_t len;
+	dma_addr_t addr[0];
+};
+
 /**
  * struct dma_async_tx_descriptor - async transaction descriptor
  * ---dma generic offload fields---
@@ -438,6 +439,7 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
+	struct dmaengine_unmap_data *unmap;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
@@ -445,6 +447,40 @@ struct dma_async_tx_descriptor {
 #endif
 };
 
+#ifdef CONFIG_DMA_ENGINE
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+				 struct dmaengine_unmap_data *unmap)
+{
+	kref_get(&unmap->kref);
+	tx->unmap = unmap;
+}
+
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
+#else
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+				 struct dmaengine_unmap_data *unmap)
+{
+}
+static inline struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+	return NULL;
+}
+static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+}
+#endif
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+	if (tx->unmap) {
+		dmaengine_unmap_put(tx->unmap);
+		tx->unmap = NULL;
+	}
+}
+
 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
 {
@@ -979,10 +1015,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 {
 	if (last_complete <= last_used) {
 		if ((cookie <= last_complete) || (cookie > last_used))
-			return DMA_SUCCESS;
+			return DMA_COMPLETE;
 	} else {
 		if ((cookie <= last_complete) && (cookie > last_used))
-			return DMA_SUCCESS;
+			return DMA_COMPLETE;
 	}
 	return DMA_IN_PROGRESS;
 }
@@ -1013,11 +1049,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_typ
 }
 static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 {
-	return DMA_SUCCESS;
+	return DMA_COMPLETE;
 }
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
-	return DMA_SUCCESS;
+	return DMA_COMPLETE;
 }
 static inline void dma_issue_pending_all(void)
 {
```
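The struct and helpers added above replace the old DMA_COMPL_* flags: instead of each driver open-coding unmaps at completion time, the mapped addresses travel with the descriptor and are released once via dma_descriptor_unmap(). A hedged sketch of the intended usage pattern follows; `my_prep_copy` is a hypothetical prep helper, and error checks on dma_map_page() are omitted for brevity:

```c
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical client: map one source and one destination, attach the
 * unmap bookkeeping to the descriptor, then drop the local reference.
 * dma_set_unmap() takes its own kref, so after dmaengine_unmap_put()
 * the descriptor holds the only reference; the pages are unmapped when
 * the driver calls dma_descriptor_unmap() on completion.
 */
static struct dma_async_tx_descriptor *
my_prep_copy(struct dma_chan *chan, struct page *dst, struct page *src,
	     size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
	if (!unmap)
		return NULL;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(dev->dev, src, 0, len, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(dev->dev, dst, 0, len, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;

	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
					 len, DMA_CTRL_ACK);
	if (tx)
		dma_set_unmap(tx, unmap);	/* tx now holds a kref */

	dmaengine_unmap_put(unmap);		/* drop our local reference */
	return tx;
}
```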
```diff
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index 179fb91bb5f2..f50821cb64be 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -67,10 +67,10 @@ struct edmacc_param {
 #define ITCCHEN		BIT(23)
 
 /*ch_status paramater of callback function possible values*/
-#define DMA_COMPLETE	1
-#define DMA_CC_ERROR	2
-#define DMA_TC1_ERROR	3
-#define DMA_TC2_ERROR	4
+#define EDMA_DMA_COMPLETE	1
+#define EDMA_DMA_CC_ERROR	2
+#define EDMA_DMA_TC1_ERROR	3
+#define EDMA_DMA_TC2_ERROR	4
 
 enum address_mode {
 	INCR = 0,
```
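The EDMA_ prefix exists because the generic enum dma_status above now defines DMA_COMPLETE, which would otherwise collide with the edma macro of the same name. A hypothetical davinci-style channel callback after the rename (a sketch only, not code from this commit; the callback signature is assumed from the davinci EDMA API):

```c
#include <linux/completion.h>
#include <linux/platform_data/edma.h>
#include <linux/printk.h>

/*
 * Hypothetical EDMA client callback: the ch_status codes now carry an
 * EDMA_ prefix, so they no longer clash with enum dma_status's
 * DMA_COMPLETE.
 */
static void my_edma_callback(unsigned channel, u16 ch_status, void *data)
{
	struct completion *done = data;	/* assumed to be set up at alloc time */

	switch (ch_status) {
	case EDMA_DMA_COMPLETE:		/* was DMA_COMPLETE */
		complete(done);
		break;
	case EDMA_DMA_CC_ERROR:		/* was DMA_CC_ERROR, etc. */
	case EDMA_DMA_TC1_ERROR:
	case EDMA_DMA_TC2_ERROR:
		pr_err("edma: channel %u error, status %u\n",
		       channel, ch_status);
		break;
	}
}
```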
