aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPeter Ujfalusi <peter.ujfalusi@ti.com>2015-12-14 15:47:39 -0500
committerVinod Koul <vinod.koul@intel.com>2015-12-18 00:47:26 -0500
commit7bd903c5ca47fde5ad52370a47776491813c772e (patch)
treeb0d2d1a9be3a6504a6762938ba10839e02fb2b2b
parent26b64256e0c4573f3668ac8329a1266ebb9d6120 (diff)
dmaengine: core: Move and merge the code paths using private_candidate
Channel matching with private_candidate() is used in two paths; the error checking is slightly different in them, and they also duplicate code. Move the code under find_candidate() to provide consistent execution, and this will allow us to reuse this mode of channel lookup later. Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com> Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com> Reviewed-by: Arnd Bergmann <arnd@arndb.de> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--drivers/dma/dmaengine.c81
1 file changed, 42 insertions, 39 deletions
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index f2cbff95b56e..81a36fc445a7 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -542,6 +542,42 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
542 return NULL; 542 return NULL;
543} 543}
544 544
545static struct dma_chan *find_candidate(struct dma_device *device,
546 const dma_cap_mask_t *mask,
547 dma_filter_fn fn, void *fn_param)
548{
549 struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
550 int err;
551
552 if (chan) {
553 /* Found a suitable channel, try to grab, prep, and return it.
554 * We first set DMA_PRIVATE to disable balance_ref_count as this
555 * channel will not be published in the general-purpose
556 * allocator
557 */
558 dma_cap_set(DMA_PRIVATE, device->cap_mask);
559 device->privatecnt++;
560 err = dma_chan_get(chan);
561
562 if (err) {
563 if (err == -ENODEV) {
564 pr_debug("%s: %s module removed\n", __func__,
565 dma_chan_name(chan));
566 list_del_rcu(&device->global_node);
567 } else
568 pr_debug("%s: failed to get %s: (%d)\n",
569 __func__, dma_chan_name(chan), err);
570
571 if (--device->privatecnt == 0)
572 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
573
574 chan = ERR_PTR(err);
575 }
576 }
577
578 return chan ? chan : ERR_PTR(-EPROBE_DEFER);
579}
580
545/** 581/**
546 * dma_get_slave_channel - try to get specific channel exclusively 582 * dma_get_slave_channel - try to get specific channel exclusively
547 * @chan: target channel 583 * @chan: target channel
@@ -580,7 +616,6 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
580{ 616{
581 dma_cap_mask_t mask; 617 dma_cap_mask_t mask;
582 struct dma_chan *chan; 618 struct dma_chan *chan;
583 int err;
584 619
585 dma_cap_zero(mask); 620 dma_cap_zero(mask);
586 dma_cap_set(DMA_SLAVE, mask); 621 dma_cap_set(DMA_SLAVE, mask);
@@ -588,23 +623,11 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
588 /* lock against __dma_request_channel */ 623 /* lock against __dma_request_channel */
589 mutex_lock(&dma_list_mutex); 624 mutex_lock(&dma_list_mutex);
590 625
591 chan = private_candidate(&mask, device, NULL, NULL); 626 chan = find_candidate(device, &mask, NULL, NULL);
592 if (chan) {
593 dma_cap_set(DMA_PRIVATE, device->cap_mask);
594 device->privatecnt++;
595 err = dma_chan_get(chan);
596 if (err) {
597 pr_debug("%s: failed to get %s: (%d)\n",
598 __func__, dma_chan_name(chan), err);
599 chan = NULL;
600 if (--device->privatecnt == 0)
601 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
602 }
603 }
604 627
605 mutex_unlock(&dma_list_mutex); 628 mutex_unlock(&dma_list_mutex);
606 629
607 return chan; 630 return IS_ERR(chan) ? NULL : chan;
608} 631}
609EXPORT_SYMBOL_GPL(dma_get_any_slave_channel); 632EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
610 633
@@ -621,35 +644,15 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
621{ 644{
622 struct dma_device *device, *_d; 645 struct dma_device *device, *_d;
623 struct dma_chan *chan = NULL; 646 struct dma_chan *chan = NULL;
624 int err;
625 647
626 /* Find a channel */ 648 /* Find a channel */
627 mutex_lock(&dma_list_mutex); 649 mutex_lock(&dma_list_mutex);
628 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { 650 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
629 chan = private_candidate(mask, device, fn, fn_param); 651 chan = find_candidate(device, mask, fn, fn_param);
630 if (chan) { 652 if (!IS_ERR(chan))
631 /* Found a suitable channel, try to grab, prep, and 653 break;
632 * return it. We first set DMA_PRIVATE to disable
633 * balance_ref_count as this channel will not be
634 * published in the general-purpose allocator
635 */
636 dma_cap_set(DMA_PRIVATE, device->cap_mask);
637 device->privatecnt++;
638 err = dma_chan_get(chan);
639 654
640 if (err == -ENODEV) { 655 chan = NULL;
641 pr_debug("%s: %s module removed\n",
642 __func__, dma_chan_name(chan));
643 list_del_rcu(&device->global_node);
644 } else if (err)
645 pr_debug("%s: failed to get %s: (%d)\n",
646 __func__, dma_chan_name(chan), err);
647 else
648 break;
649 if (--device->privatecnt == 0)
650 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
651 chan = NULL;
652 }
653 } 656 }
654 mutex_unlock(&dma_list_mutex); 657 mutex_unlock(&dma_list_mutex);
655 658