| author | Vinod Koul <vinod.koul@intel.com> | 2013-11-16 01:24:17 -0500 |
|---|---|---|
| committer | Vinod Koul <vinod.koul@intel.com> | 2013-11-16 01:32:36 -0500 |
| commit | df12a3178d340319b1955be6b973a4eb84aff754 (patch) | |
| tree | 2b9c68f8a6c299d1e5a4026c60117b5c00d46008 /include/linux | |
| parent | 2f986ec6fa57a5dcf77f19f5f0d44b1f680a100f (diff) | |
| parent | 82a1402eaee5dab1f3ab2d5aa4c316451374c5af (diff) | |
Merge commit 'dmaengine-3.13-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine changes from Dan
1/ Bartlomiej and Dan finalized a rework of the dma address unmap
   implementation (the old and new client-side patterns are sketched below).
2/ In the course of testing 1/, a collection of enhancements to dmatest
   fell out, notably basic performance statistics and fixed/enhanced
   test control through the new module parameters 'run', 'wait', 'noverify',
   and 'verbose'. Thanks to Andriy and Linus for their review.
3/ Testing the raid related corner cases of 1/ triggered bugs in the
recently added 16-source operation support in the ioatdma driver.
4/ Some minor fixes / cleanups to mv_xor and ioatdma.
Conflicts:
drivers/dma/dmatest.c
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
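
To make item 1/ concrete: before this rework, a client that managed its own DMA mappings opted out of the core's automatic unmap by passing per-descriptor flags to the prep routine. A minimal sketch of that pre-3.13 pattern follows; the helper name and its arguments (chan, dma_dest, dma_src, len) are placeholders, not code from this merge.

```c
#include <linux/dmaengine.h>

/*
 * Pre-3.13 client pattern (removed by this merge): the caller owns the DMA
 * mappings, so it suppresses the core's automatic unmap by passing the
 * DMA_COMPL_SKIP_* flags to the prep routine.  Hypothetical helper; the
 * channel and already-mapped addresses come from the (assumed) caller.
 */
static int issue_memcpy_old_style(struct dma_chan *chan, dma_addr_t dma_dest,
                                  dma_addr_t dma_src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        unsigned long flags = DMA_CTRL_ACK |
                              DMA_COMPL_SKIP_SRC_UNMAP |
                              DMA_COMPL_SKIP_DEST_UNMAP;

        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
        if (!tx)
                return -ENOMEM;

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        return 0;
}
```

The diff below removes these flags from enum dma_ctrl_flags and replaces them with a reference-counted unmap object attached to the descriptor.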
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/dmaengine.h | 64 |
1 file changed, 50 insertions(+), 14 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 4b460a683968..41cf0c399288 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -171,12 +171,6 @@ struct dma_interleaved_template {
  * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
  *  acknowledges receipt, i.e. has has a chance to establish any dependency
  *  chains
- * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
- * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
- * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
- *  (if not set, do the source dma-unmapping as page)
- * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
- *  (if not set, do the destination dma-unmapping as page)
  * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
  * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
  * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
@@ -188,14 +182,10 @@ struct dma_interleaved_template {
 enum dma_ctrl_flags {
        DMA_PREP_INTERRUPT = (1 << 0),
        DMA_CTRL_ACK = (1 << 1),
-       DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
-       DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
-       DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
-       DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
-       DMA_PREP_PQ_DISABLE_P = (1 << 6),
-       DMA_PREP_PQ_DISABLE_Q = (1 << 7),
-       DMA_PREP_CONTINUE = (1 << 8),
-       DMA_PREP_FENCE = (1 << 9),
+       DMA_PREP_PQ_DISABLE_P = (1 << 2),
+       DMA_PREP_PQ_DISABLE_Q = (1 << 3),
+       DMA_PREP_CONTINUE = (1 << 4),
+       DMA_PREP_FENCE = (1 << 5),
 };
 
 /**
@@ -413,6 +403,17 @@ void dma_chan_cleanup(struct kref *kref);
 typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+struct dmaengine_unmap_data {
+       u8 to_cnt;
+       u8 from_cnt;
+       u8 bidi_cnt;
+       struct device *dev;
+       struct kref kref;
+       size_t len;
+       dma_addr_t addr[0];
+};
+
 /**
  * struct dma_async_tx_descriptor - async transaction descriptor
  * ---dma generic offload fields---
@@ -438,6 +439,7 @@ struct dma_async_tx_descriptor {
        dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
        dma_async_tx_callback callback;
        void *callback_param;
+       struct dmaengine_unmap_data *unmap;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        struct dma_async_tx_descriptor *next;
        struct dma_async_tx_descriptor *parent;
@@ -445,6 +447,40 @@ struct dma_async_tx_descriptor {
 #endif
 };
 
+#ifdef CONFIG_DMA_ENGINE
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+                                struct dmaengine_unmap_data *unmap)
+{
+       kref_get(&unmap->kref);
+       tx->unmap = unmap;
+}
+
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
+#else
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+                                struct dmaengine_unmap_data *unmap)
+{
+}
+static inline struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+       return NULL;
+}
+static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+}
+#endif
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+       if (tx->unmap) {
+               dmaengine_unmap_put(tx->unmap);
+               tx->unmap = NULL;
+       }
+}
+
 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
 {
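
The new interface moves unmap bookkeeping out of the flag space and into a reference-counted dmaengine_unmap_data attached to the descriptor via dma_set_unmap(). A rough client-side sketch of how a memcpy offload might use it, modeled on the in-kernel async_tx helpers; the helper name, page/offset parameters, and the minimal error handling are assumptions, not code from this diff.

```c
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/*
 * Sketch of the new unmap handling: map the buffers, record them in a
 * dmaengine_unmap_data, and attach it to the descriptor with dma_set_unmap().
 * The unmap object is reference counted; the buffers are unmapped when the
 * last reference is dropped.  Hypothetical helper; chan, pages, offsets and
 * len are caller-provided.
 */
static int issue_memcpy_new_style(struct dma_chan *chan,
                                  struct page *dst_page, unsigned int dst_off,
                                  struct page *src_page, unsigned int src_off,
                                  size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        struct dmaengine_unmap_data *unmap;

        unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
        if (!unmap)
                return -ENOMEM;

        unmap->to_cnt = 1;              /* addr[0] is mapped DMA_TO_DEVICE */
        unmap->addr[0] = dma_map_page(dev->dev, src_page, src_off, len,
                                      DMA_TO_DEVICE);
        unmap->from_cnt = 1;            /* addr[1] is mapped DMA_FROM_DEVICE */
        unmap->addr[1] = dma_map_page(dev->dev, dst_page, dst_off, len,
                                      DMA_FROM_DEVICE);
        unmap->len = len;

        tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
                                         len, DMA_CTRL_ACK);
        if (!tx) {
                dmaengine_unmap_put(unmap);
                return -ENOMEM;
        }

        dma_set_unmap(tx, unmap);       /* descriptor takes its own reference */
        dmaengine_unmap_put(unmap);     /* drop the caller's reference */

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        return 0;
}
```

On the driver side, dma_descriptor_unmap() from the hunk above drops the descriptor's reference in the completion path, which is what finally unmaps the source and destination pages.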
