diff options
 drivers/dma/sh/shdma-base.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 10fcabad80f3..12fa48e380cf 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -330,10 +330,11 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 	bool head_acked = false;
 	dma_cookie_t cookie = 0;
 	dma_async_tx_callback callback = NULL;
-	void *param = NULL;
+	struct dmaengine_desc_callback cb;
 	unsigned long flags;
 	LIST_HEAD(cyclic_list);
 
+	memset(&cb, 0, sizeof(cb));
 	spin_lock_irqsave(&schan->chan_lock, flags);
 	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
 		struct dma_async_tx_descriptor *tx = &desc->async_tx;
@@ -367,8 +368,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 		/* Call callback on the last chunk */
 		if (desc->mark == DESC_COMPLETED && tx->callback) {
 			desc->mark = DESC_WAITING;
+			dmaengine_desc_get_callback(tx, &cb);
 			callback = tx->callback;
-			param = tx->callback_param;
 			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
 				tx->cookie, tx, schan->id);
 			BUG_ON(desc->chunks != 1);
@@ -430,8 +431,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 
 	spin_unlock_irqrestore(&schan->chan_lock, flags);
 
-	if (callback)
-		callback(param);
+	dmaengine_desc_callback_invoke(&cb, NULL);
 
 	return callback;
 }
@@ -885,9 +885,9 @@ bool shdma_reset(struct shdma_dev *sdev)
 		/* Complete all */
 		list_for_each_entry(sdesc, &dl, node) {
 			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
+
 			sdesc->mark = DESC_IDLE;
-			if (tx->callback)
-				tx->callback(tx->callback_param);
+			dmaengine_desc_get_callback_invoke(tx, NULL);
 		}
 
 		spin_lock(&schan->chan_lock);