Diffstat (limited to 'drivers/dma/dmaengine.c')
 drivers/dma/dmaengine.c | 37 +++++++++++++++++++++++--------------
 1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 0cb259c59916..8c9f45fd55fc 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -289,7 +289,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 	do {
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
-			pr_err("%s: timeout!\n", __func__);
+			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
 			return DMA_ERROR;
 		}
 		if (status != DMA_IN_PROGRESS)
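
The dev_err() conversion above ties the timeout message to a specific DMA
controller: dev_err() prefixes the output with the driver and device name,
which the bare pr_err() could not do. A minimal sketch of the resulting
pattern; the helper name and the sample output in the comment are
illustrative only, not part of this patch:

	#include <linux/dmaengine.h>

	/* Illustrative helper (hypothetical): with the struct device taken
	 * from chan->device->dev, the log line reads like
	 * "ioatdma 0000:00:04.0: dma_sync_wait: timeout!" rather than the
	 * unattributed "dma_sync_wait: timeout!" that pr_err() produced.
	 */
	static void report_sync_timeout(struct dma_chan *chan)
	{
		dev_err(chan->device->dev, "%s: timeout!\n", __func__);
	}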
@@ -482,7 +482,8 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 	device = chan->device;
 
 	/* check if the channel supports slave transactions */
-	if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
+	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
+	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
 		return -ENXIO;
 
 	/*
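
This hunk widens the capability test so a channel that advertises only
DMA_CYCLIC (cyclic transfers, with audio as the usual user) can still
report its slave capabilities instead of failing with -ENXIO. A hedged
client-side sketch; query_caps is a hypothetical helper:

	#include <linux/dmaengine.h>

	/* Hypothetical client: before this change, dma_get_slave_caps()
	 * returned -ENXIO for a channel whose device sets DMA_CYCLIC but
	 * not DMA_SLAVE; with it, the caps structure is filled in as usual.
	 */
	static int query_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
	{
		int ret = dma_get_slave_caps(chan, caps);

		if (ret)
			return ret;
		if (!caps->cmd_pause)
			dev_dbg(chan->device->dev, "pause not supported\n");
		return 0;
	}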
@@ -518,7 +519,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 	struct dma_chan *chan;
 
 	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
-		pr_debug("%s: wrong capabilities\n", __func__);
+		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
 		return NULL;
 	}
 	/* devices with multiple channels need special handling as we need to
@@ -533,12 +534,12 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 
 	list_for_each_entry(chan, &dev->channels, device_node) {
 		if (chan->client_count) {
-			pr_debug("%s: %s busy\n",
+			dev_dbg(dev->dev, "%s: %s busy\n",
 				 __func__, dma_chan_name(chan));
 			continue;
 		}
 		if (fn && !fn(chan, fn_param)) {
-			pr_debug("%s: %s filter said false\n",
+			dev_dbg(dev->dev, "%s: %s filter said false\n",
 				 __func__, dma_chan_name(chan));
 			continue;
 		}
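
The pr_debug() to dev_dbg() conversions here, and the matching ones in
find_candidate(), dma_get_slave_channel() and dmaengine_get() below, pay
off with dynamic debug: each dev_dbg() site can be enabled per device at
runtime, and its output carries the controller's name. A sketch of the
pattern; trace_busy_chan is a hypothetical helper:

	#include <linux/dmaengine.h>

	/* Hypothetical helper: dev_dbg() compiles away unless DEBUG or
	 * CONFIG_DYNAMIC_DEBUG is set; when enabled, the message is tagged
	 * with the DMA controller's name, so two controllers logging the
	 * same format string can still be told apart.
	 */
	static void trace_busy_chan(struct dma_device *dev, struct dma_chan *chan)
	{
		dev_dbg(dev->dev, "%s: %s busy\n", __func__, dma_chan_name(chan));
	}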
@@ -567,11 +568,12 @@ static struct dma_chan *find_candidate(struct dma_device *device,
 
 	if (err) {
 		if (err == -ENODEV) {
-			pr_debug("%s: %s module removed\n", __func__,
-				 dma_chan_name(chan));
+			dev_dbg(device->dev, "%s: %s module removed\n",
+				__func__, dma_chan_name(chan));
 			list_del_rcu(&device->global_node);
 		} else
-			pr_debug("%s: failed to get %s: (%d)\n",
+			dev_dbg(device->dev,
+				"%s: failed to get %s: (%d)\n",
 				__func__, dma_chan_name(chan), err);
 
 		if (--device->privatecnt == 0)
@@ -602,7 +604,8 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 		device->privatecnt++;
 		err = dma_chan_get(chan);
 		if (err) {
-			pr_debug("%s: failed to get %s: (%d)\n",
+			dev_dbg(chan->device->dev,
+				"%s: failed to get %s: (%d)\n",
 				__func__, dma_chan_name(chan), err);
 			chan = NULL;
 			if (--device->privatecnt == 0)
@@ -814,8 +817,9 @@ void dmaengine_get(void)
 			list_del_rcu(&device->global_node);
 			break;
 		} else if (err)
-			pr_debug("%s: failed to get %s: (%d)\n",
-				__func__, dma_chan_name(chan), err);
+			dev_dbg(chan->device->dev,
+				"%s: failed to get %s: (%d)\n",
+				__func__, dma_chan_name(chan), err);
 	}
 }
 
@@ -862,12 +866,12 @@ static bool device_has_all_tx_types(struct dma_device *device)
 		return false;
 	#endif
 
-	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
+	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
 	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
 		return false;
 	#endif
 
-	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
+	#if IS_ENABLED(CONFIG_ASYNC_XOR)
 	if (!dma_has_cap(DMA_XOR, device->cap_mask))
 		return false;
 
@@ -877,7 +881,7 @@ static bool device_has_all_tx_types(struct dma_device *device)
 	#endif
 	#endif
 
-	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
+	#if IS_ENABLED(CONFIG_ASYNC_PQ)
 	if (!dma_has_cap(DMA_PQ, device->cap_mask))
 		return false;
 
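
IS_ENABLED(CONFIG_FOO), from <linux/kconfig.h>, evaluates to 1 when the
option is built in (=y) or built as a module (=m), so it is an exact,
shorter replacement for the two-clause defined() test. A standalone sketch
of the equivalence:

	#include <linux/kconfig.h>

	/* The two tests below accept exactly the same configurations:
	 * CONFIG_ASYNC_MEMCPY=y defines CONFIG_ASYNC_MEMCPY, and
	 * CONFIG_ASYNC_MEMCPY=m defines CONFIG_ASYNC_MEMCPY_MODULE.
	 */
	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	/* old spelling: true for =y or =m */
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
	/* new spelling: also true for =y or =m */
	#endif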
@@ -1222,8 +1226,9 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 
 	while (tx->cookie == -EBUSY) {
 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
-			pr_err("%s timeout waiting for descriptor submission\n",
-			       __func__);
+			dev_err(tx->chan->device->dev,
+				"%s timeout waiting for descriptor submission\n",
+				__func__);
 			return DMA_ERROR;
 		}
 		cpu_relax();
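
For context, dma_wait_for_async_tx() and dma_sync_wait() both poll with
cpu_relax() until completion or until dma_sync_wait_timeout expires, and
the new dev_err() identifies which controller stalled. A hedged sketch of
the client pattern that ends up in these wait paths; copy_and_wait is a
hypothetical helper:

	#include <linux/dmaengine.h>
	#include <linux/errno.h>

	/* Hypothetical client: submit a prepared descriptor, kick the
	 * engine, then block in dma_sync_wait(), which spins with
	 * cpu_relax() until the transfer completes or its timeout hits.
	 */
	static int copy_and_wait(struct dma_chan *chan,
				 struct dma_async_tx_descriptor *tx)
	{
		dma_cookie_t cookie = dmaengine_submit(tx);

		dma_async_issue_pending(chan);
		if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
		return 0;
	}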