path: root/include/linux/dmaengine.h
author     Dan Williams <dan.j.williams@intel.com>    2013-10-18 13:35:23 -0400
committer  Dan Williams <dan.j.williams@intel.com>    2013-11-13 19:25:06 -0500
commit     d38a8c622a1b382336c3e152c6caf4e11d1f1b2a (patch)
tree       ef6adeb9eed556a62a153a296234945e2301a5df /include/linux/dmaengine.h
parent     56ea27fd61f546117a35236113be72c8aaec382d (diff)
dmaengine: prepare for generic 'unmap' data
Add a hook for a common dma unmap implementation to enable removal of
the per driver custom unmap code.  (A reworked version of Bartlomiej
Zolnierkiewicz's patches to remove the custom callbacks and the size
increase of dma_async_tx_descriptor for drivers that don't care about
raid).

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
[bzolnier: prepare pl330 driver for adding missing unmap while at it]
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
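To make the intended usage concrete, below is a minimal sketch (not part of the
patch) of how a client might fill in the new dmaengine_unmap_data and attach it
to a descriptor via dma_set_unmap().  The my_unmap_release() and my_prep_copy()
helpers and the open-coded allocation are hypothetical; this commit only adds
the structure and the hooks, and later patches in the series provide the real
allocation/release helpers.

/*
 * Illustrative sketch only -- not part of this patch.  my_unmap_release(),
 * my_prep_copy() and the open-coded allocation are hypothetical; the patch
 * itself only introduces struct dmaengine_unmap_data, dma_set_unmap() and
 * dma_descriptor_unmap().
 */
#include <linux/dmaengine.h>
#include <linux/kref.h>
#include <linux/slab.h>

static void my_unmap_release(struct kref *kref)
{
        struct dmaengine_unmap_data *unmap =
                container_of(kref, struct dmaengine_unmap_data, kref);

        kfree(unmap);
}

static int my_prep_copy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
                        size_t len)
{
        struct dma_async_tx_descriptor *tx;
        struct dmaengine_unmap_data *unmap;

        /* room for two entries in the trailing addr[] array: src and dst */
        unmap = kzalloc(sizeof(*unmap) + 2 * sizeof(dma_addr_t), GFP_NOWAIT);
        if (!unmap)
                return -ENOMEM;

        kref_init(&unmap->kref);        /* refcount = 1, held by us */
        unmap->dev = chan->device->dev;
        unmap->len = len;
        unmap->to_cnt = 1;              /* addr[0]: mapped to the device */
        unmap->from_cnt = 1;            /* addr[1]: mapped from the device */
        unmap->addr[0] = src;
        unmap->addr[1] = dst;

        /* assumes the channel advertises DMA_MEMCPY capability */
        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
        if (!tx) {
                kref_put(&unmap->kref, my_unmap_release);
                return -ENOMEM;
        }

        /* dma_set_unmap() takes its own reference and records it in tx */
        dma_set_unmap(tx, unmap);

        dmaengine_submit(tx);

        /* drop the local reference; the descriptor's reference remains */
        kref_put(&unmap->kref, my_unmap_release);
        return 0;
}

As the diff below shows, in this preparation patch dma_descriptor_unmap() only
clears tx->unmap; the common unmap logic itself is left to follow-up patches,
which is why the sketch above releases its own reference explicitly.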
Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r--  include/linux/dmaengine.h   26
1 file changed, 26 insertions, 0 deletions
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 0bc727534108..9070050fbcd8 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -413,6 +413,17 @@ void dma_chan_cleanup(struct kref *kref);
 typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+struct dmaengine_unmap_data {
+        u8 to_cnt;
+        u8 from_cnt;
+        u8 bidi_cnt;
+        struct device *dev;
+        struct kref kref;
+        size_t len;
+        dma_addr_t addr[0];
+};
+
 /**
  * struct dma_async_tx_descriptor - async transaction descriptor
  * ---dma generic offload fields---
@@ -438,6 +449,7 @@ struct dma_async_tx_descriptor {
         dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
         dma_async_tx_callback callback;
         void *callback_param;
+        struct dmaengine_unmap_data *unmap;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
         struct dma_async_tx_descriptor *next;
         struct dma_async_tx_descriptor *parent;
@@ -445,6 +457,20 @@ struct dma_async_tx_descriptor {
 #endif
 };
 
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+                                 struct dmaengine_unmap_data *unmap)
+{
+        kref_get(&unmap->kref);
+        tx->unmap = unmap;
+}
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+        if (tx->unmap) {
+                tx->unmap = NULL;
+        }
+}
+
 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
 {