author    Dan Williams <dan.j.williams@intel.com>  2009-01-06 13:38:21 -0500
committer Dan Williams <dan.j.williams@intel.com>  2009-01-06 13:38:21 -0500
commit    e2346677af86150c6083974585c131e8a2c3ddcc (patch)
tree      2205b224c4e961ad59dc684fb4db835aaf9515a0 /drivers/dma/dmaengine.c
parent    864498aaa9fef69ee166da023d12413a7776342d (diff)
dmaengine: advertise all channels on a device to dma_filter_fn
Allow dma_filter_fn routines to disambiguate multiple channels on a
device rather than assuming that all channels on a device are equal.

Cc: Maciej Sosnowski <maciej.sosnowski@intel.com>
Reported-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
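For filter writers, the practical effect is that the callback is now invoked for every capable channel on a device, so it can pick a specific channel (for example by channel index) instead of only accepting or rejecting the first free channel the device offers. A minimal sketch of such a filter follows; the my_chan_match structure, the my_filter and my_get_chan names, and the DMA_SLAVE capability choice are illustrative assumptions, not part of this patch:

#include <linux/dmaengine.h>

/* Illustrative match criteria -- not defined by this patch. */
struct my_chan_match {
	struct dma_device *dev;	/* optional: restrict to one DMA device */
	int chan_id;		/* desired channel index on that device */
};

/* private_candidate() now calls the filter for each capable channel, so
 * returning false moves the search to the next channel on the same device
 * instead of skipping the rest of that device's channels.
 */
static bool my_filter(struct dma_chan *chan, void *param)
{
	struct my_chan_match *match = param;

	if (match->dev && chan->device != match->dev)
		return false;
	return chan->chan_id == match->chan_id;
}

/* Usage sketch: request any DMA_SLAVE-capable channel the filter accepts. */
static struct dma_chan *my_get_chan(struct my_chan_match *match)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, my_filter, match);
}

With the previous code the filter only ever saw one candidate per device; with this change it can, for example, reject channel 0 and still be offered channel 2 of the same device.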
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--  drivers/dma/dmaengine.c | 33
1 file changed, 13 insertions(+), 20 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index dd43410c1019..9d3594cf17e0 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -454,10 +454,10 @@ static void dma_channel_rebalance(void)
 	}
 }
 
-static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev)
+static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
+					  dma_filter_fn fn, void *fn_param)
 {
 	struct dma_chan *chan;
-	struct dma_chan *ret = NULL;
 
 	if (!__dma_device_satisfies_mask(dev, mask)) {
 		pr_debug("%s: wrong capabilities\n", __func__);
@@ -479,11 +479,15 @@ static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_devic
 				 __func__, dma_chan_name(chan));
 			continue;
 		}
-		ret = chan;
-		break;
+		if (fn && !fn(chan, fn_param)) {
+			pr_debug("%s: %s filter said false\n",
+				 __func__, dma_chan_name(chan));
+			continue;
+		}
+		return chan;
 	}
 
-	return ret;
+	return NULL;
 }
 
 /**
@@ -496,22 +500,13 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 {
 	struct dma_device *device, *_d;
 	struct dma_chan *chan = NULL;
-	bool ack;
 	int err;
 
 	/* Find a channel */
 	mutex_lock(&dma_list_mutex);
 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
-		chan = private_candidate(mask, device);
-		if (!chan)
-			continue;
-
-		if (fn)
-			ack = fn(chan, fn_param);
-		else
-			ack = true;
-
-		if (ack) {
+		chan = private_candidate(mask, device, fn, fn_param);
+		if (chan) {
 			/* Found a suitable channel, try to grab, prep, and
 			 * return it.  We first set DMA_PRIVATE to disable
 			 * balance_ref_count as this channel will not be
@@ -529,10 +524,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 				       dma_chan_name(chan), err);
 			else
 				break;
-		} else
-			pr_debug("%s: %s filter said false\n",
-				 __func__, dma_chan_name(chan));
-		chan = NULL;
+			chan = NULL;
+		}
 	}
 	mutex_unlock(&dma_list_mutex);
 