aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2013-10-18 13:35:23 -0400
committerDan Williams <dan.j.williams@intel.com>2013-11-13 19:25:06 -0500
commitd38a8c622a1b382336c3e152c6caf4e11d1f1b2a (patch)
treeef6adeb9eed556a62a153a296234945e2301a5df
parent56ea27fd61f546117a35236113be72c8aaec382d (diff)
dmaengine: prepare for generic 'unmap' data
Add a hook for a common dma unmap implementation to enable removal of
the per driver custom unmap code.  (A reworked version of Bartlomiej
Zolnierkiewicz's patches to remove the custom callbacks and the size
increase of dma_async_tx_descriptor for drivers that don't care about
raid).

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
[bzolnier: prepare pl330 driver for adding missing unmap while at it]
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--drivers/dma/amba-pl08x.c1
-rw-r--r--drivers/dma/at_hdmac.c1
-rw-r--r--drivers/dma/dw/core.c1
-rw-r--r--drivers/dma/ep93xx_dma.c1
-rw-r--r--drivers/dma/fsldma.c1
-rw-r--r--drivers/dma/ioat/dma.c1
-rw-r--r--drivers/dma/ioat/dma_v2.c1
-rw-r--r--drivers/dma/ioat/dma_v3.c1
-rw-r--r--drivers/dma/iop-adma.c1
-rw-r--r--drivers/dma/mv_xor.c1
-rw-r--r--drivers/dma/pl330.c2
-rw-r--r--drivers/dma/ppc4xx/adma.c1
-rw-r--r--drivers/dma/timb_dma.c1
-rw-r--r--drivers/dma/txx9dmac.c1
-rw-r--r--include/linux/dmaengine.h26
15 files changed, 41 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index fce46c5bf1c7..7f9846464b77 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1197,6 +1197,7 @@ static void pl08x_desc_free(struct virt_dma_desc *vd)
 	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
 
+	dma_descriptor_unmap(txd);
 	if (!plchan->slave)
 		pl08x_unmap_buffers(txd);
 
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index c787f38a186a..cc7098ddf9d4 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -345,6 +345,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	list_move(&desc->desc_node, &atchan->free_list);
 
 	/* unmap dma addresses (not on slave channels) */
+	dma_descriptor_unmap(txd);
 	if (!atchan->chan_common.private) {
 		struct device *parent = chan2parent(&atchan->chan_common);
 		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 89eb89f22284..e3fe1b1a73b1 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -311,6 +311,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 	list_splice_init(&desc->tx_list, &dwc->free_list);
 	list_move(&desc->desc_node, &dwc->free_list);
 
+	dma_descriptor_unmap(txd);
 	if (!is_slave_direction(dwc->direction)) {
 		struct device *parent = chan2parent(&dwc->chan);
 		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 591cd8c63abb..dcd6bf5d3091 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -791,6 +791,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
 		 * For the memcpy channels the API requires us to unmap the
 		 * buffers unless requested otherwise.
 		 */
+		dma_descriptor_unmap(&desc->txd);
 		if (!edmac->chan.private)
 			ep93xx_dma_unmap_buffers(desc);
 
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index b3f3e90054f2..66c4052a1f34 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -868,6 +868,7 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
 	/* Run any dependencies */
 	dma_run_dependencies(txd);
 
+	dma_descriptor_unmap(txd);
 	/* Unmap the dst buffer, if requested */
 	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 5ff6fc1819dc..26f8cfd6bc3f 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -602,6 +602,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
 		dump_desc_dbg(ioat, desc);
 		if (tx->cookie) {
 			dma_cookie_complete(tx);
+			dma_descriptor_unmap(tx);
 			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
 			ioat->active -= desc->hw->tx_cnt;
 			if (tx->callback) {
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index b925e1b1d139..fc7b50a813cc 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -148,6 +148,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 		tx = &desc->txd;
 		dump_desc_dbg(ioat, desc);
 		if (tx->cookie) {
+			dma_descriptor_unmap(tx);
 			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
 			dma_cookie_complete(tx);
 			if (tx->callback) {
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index d8ececaf1b57..57a2901b917a 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -577,6 +577,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 		tx = &desc->txd;
 		if (tx->cookie) {
 			dma_cookie_complete(tx);
+			dma_descriptor_unmap(tx);
 			ioat3_dma_unmap(ioat, desc, idx + i);
 			if (tx->callback) {
 				tx->callback(tx->callback_param);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index dd8b44a56e5d..8f6e426590eb 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -152,6 +152,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 		if (tx->callback)
 			tx->callback(tx->callback_param);
 
+		dma_descriptor_unmap(tx);
 		/* unmap dma addresses
 		 * (unmap_single vs unmap_page?)
 		 */
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 536dcb8ba5fd..ed1ab1d0875e 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -278,6 +278,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 			desc->async_tx.callback(
 				desc->async_tx.callback_param);
 
+		dma_descriptor_unmap(&desc->async_tx);
 		/* unmap dma addresses
 		 * (unmap_single vs unmap_page?)
 		 */
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index a562d24d20bf..ab25e52cd43b 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2268,6 +2268,8 @@ static void pl330_tasklet(unsigned long data)
 			list_move_tail(&desc->node, &pch->dmac->desc_pool);
 		}
 
+		dma_descriptor_unmap(&desc->txd);
+
 		if (callback) {
 			spin_unlock_irqrestore(&pch->lock, flags);
 			callback(callback_param);
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 370ff8265630..442492da7415 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -1765,6 +1765,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
 			desc->async_tx.callback(
 				desc->async_tx.callback_param);
 
+		dma_descriptor_unmap(&desc->async_tx);
 		/* unmap dma addresses
 		 * (unmap_single vs unmap_page?)
 		 *
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 28af214fce04..1d0c98839087 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -293,6 +293,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
 
 	list_move(&td_desc->desc_node, &td_chan->free_list);
 
+	dma_descriptor_unmap(txd);
 	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
 		__td_unmap_descs(td_desc,
 			txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 71e8e775189e..22a0b6c78c77 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -419,6 +419,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
 	list_splice_init(&desc->tx_list, &dc->free_list);
 	list_move(&desc->desc_node, &dc->free_list);
 
+	dma_descriptor_unmap(txd);
 	if (!ds) {
 		dma_addr_t dmaaddr;
 		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 0bc727534108..9070050fbcd8 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -413,6 +413,17 @@ void dma_chan_cleanup(struct kref *kref);
 typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+struct dmaengine_unmap_data {
+	u8 to_cnt;
+	u8 from_cnt;
+	u8 bidi_cnt;
+	struct device *dev;
+	struct kref kref;
+	size_t len;
+	dma_addr_t addr[0];
+};
+
 /**
  * struct dma_async_tx_descriptor - async transaction descriptor
  * ---dma generic offload fields---
@@ -438,6 +449,7 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
+	struct dmaengine_unmap_data *unmap;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
@@ -445,6 +457,20 @@ struct dma_async_tx_descriptor {
 #endif
 };
 
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+				 struct dmaengine_unmap_data *unmap)
+{
+	kref_get(&unmap->kref);
+	tx->unmap = unmap;
+}
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+	if (tx->unmap) {
+		tx->unmap = NULL;
+	}
+}
+
 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
 {