author     Dan Williams <dan.j.williams@intel.com>   2009-01-06 13:38:15 -0500
committer  Dan Williams <dan.j.williams@intel.com>   2009-01-06 13:38:15 -0500
commit     59b5ec21446b9239d706ab237fb261d525b75e81
tree       a437a354e84d311104829ca0e8b00b0ec8cb05c4
parent     f67b45999205164958de4ec0658d51fa4bee066d
dmaengine: introduce dma_request_channel and private channels
This interface is primarily for device-to-memory clients which need to
search for dma channels with platform-specific characteristics.  The
prototype is:

    struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
                                         dma_filter_fn filter_fn,
                                         void *filter_param);

When the optional 'filter_fn' parameter is set to NULL
dma_request_channel simply returns the first channel that satisfies the
capability mask.  Otherwise, when the mask parameter is insufficient for
specifying the necessary channel, the filter_fn routine can be used to
disposition the available channels in the system.  The filter_fn routine
is called once for each free channel in the system.  Upon seeing a
suitable channel filter_fn returns DMA_ACK which flags that channel to
be the return value from dma_request_channel.  A channel allocated via
this interface is exclusive to the caller, until dma_release_channel()
is called.

To ensure that all channels are not consumed by the general-purpose
allocator the DMA_PRIVATE capability is provided to exclude a dma_device
from general-purpose (memory-to-memory) consideration.

Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
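[Illustrative client-side usage, not part of this patch. The names my_filter,
my_slave_pdata, my_acquire_channel and the dma_dev field are invented for this
sketch; in this version of the API the filter answers with the dma_state_client
codes: DMA_ACK takes the channel, DMA_DUP skips it and keeps searching, DMA_NAK
aborts the search.]

#include <linux/dmaengine.h>

struct my_slave_pdata {			/* hypothetical platform data */
	struct device *dma_dev;		/* the DMA controller this device must use */
};

static enum dma_state_client my_filter(struct dma_chan *chan, void *param)
{
	struct my_slave_pdata *pdata = param;

	/* only accept a channel provided by the expected DMA controller */
	if (chan->device->dev == pdata->dma_dev)
		return DMA_ACK;
	return DMA_DUP;			/* not suitable, keep searching other devices */
}

static int my_acquire_channel(struct my_slave_pdata *pdata, struct dma_chan **out)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* exclusive to this caller until dma_release_channel() */
	chan = dma_request_channel(mask, my_filter, pdata);
	if (!chan)
		return -ENODEV;
	*out = chan;
	return 0;
}

[When the client is finished it calls dma_release_channel(chan), which drops
the single reference taken on its behalf.]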
-rw-r--r--  drivers/dma/dmaengine.c   | 155
-rw-r--r--  include/linux/dmaengine.h |  16
2 files changed, 155 insertions, 16 deletions
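[On the provider side, again only a sketch with my_dma_dev standing in for a
real driver's struct dma_device: a controller that should be reachable only
through dma_request_channel() sets the new DMA_PRIVATE capability before
registering, which keeps its channels out of the memory-to-memory channel
table.]

	/* hypothetical controller probe path */
	dma_cap_set(DMA_SLAVE, my_dma_dev->cap_mask);
	dma_cap_set(DMA_PRIVATE, my_dma_dev->cap_mask);	/* hide from mem-to-mem clients */
	err = dma_async_device_register(my_dma_dev);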
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 418eca28d472..7a0594f24a3f 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -134,14 +134,14 @@ static struct class dma_devclass = {
 
 /* --- client and device registration --- */
 
-#define dma_chan_satisfies_mask(chan, mask) \
-	__dma_chan_satisfies_mask((chan), &(mask))
+#define dma_device_satisfies_mask(device, mask) \
+	__dma_device_satisfies_mask((device), &(mask))
 static int
-__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
+__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
 {
 	dma_cap_mask_t has;
 
-	bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
+	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
 		DMA_TX_TYPE_END);
 	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 }
@@ -195,7 +195,7 @@ static int dma_chan_get(struct dma_chan *chan)
 			err = desc_cnt;
 			chan->client_count = 0;
 			module_put(owner);
-		} else
+		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
 			balance_ref_count(chan);
 	}
 
@@ -232,14 +232,16 @@ static void dma_client_chan_alloc(struct dma_client *client)
 
 	/* Find a channel */
 	list_for_each_entry(device, &dma_device_list, global_node) {
+		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+			continue;
 		/* Does the client require a specific DMA controller? */
 		if (client->slave && client->slave->dma_dev
 				&& client->slave->dma_dev != device->dev)
 			continue;
+		if (!dma_device_satisfies_mask(device, client->cap_mask))
+			continue;
 
 		list_for_each_entry(chan, &device->channels, device_node) {
-			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
-				continue;
 			if (!chan->client_count)
 				continue;
 			ack = client->event_callback(client, chan,
@@ -320,11 +322,12 @@ static int __init dma_channel_table_init(void)
 
 	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
 
-	/* 'interrupt' and 'slave' are channel capabilities, but are not
-	 * associated with an operation so they do not need an entry in the
-	 * channel_table
+	/* 'interrupt', 'private', and 'slave' are channel capabilities,
+	 * but are not associated with an operation so they do not need
+	 * an entry in the channel_table
 	 */
 	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
+	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
 	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
 
 	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
@@ -378,10 +381,13 @@ void dma_issue_pending_all(void)
378 "client called %s without a reference", __func__); 381 "client called %s without a reference", __func__);
379 382
380 rcu_read_lock(); 383 rcu_read_lock();
381 list_for_each_entry_rcu(device, &dma_device_list, global_node) 384 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
385 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
386 continue;
382 list_for_each_entry(chan, &device->channels, device_node) 387 list_for_each_entry(chan, &device->channels, device_node)
383 if (chan->client_count) 388 if (chan->client_count)
384 device->device_issue_pending(chan); 389 device->device_issue_pending(chan);
390 }
385 rcu_read_unlock(); 391 rcu_read_unlock();
386} 392}
387EXPORT_SYMBOL(dma_issue_pending_all); 393EXPORT_SYMBOL(dma_issue_pending_all);
@@ -403,7 +409,8 @@ static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
 	struct dma_chan *min = NULL;
 
 	list_for_each_entry(device, &dma_device_list, global_node) {
-		if (!dma_has_cap(cap, device->cap_mask))
+		if (!dma_has_cap(cap, device->cap_mask) ||
+		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
 			continue;
 		list_for_each_entry(chan, &device->channels, device_node) {
 			if (!chan->client_count)
@@ -452,9 +459,12 @@ static void dma_channel_rebalance(void)
 		for_each_possible_cpu(cpu)
 			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
 
-	list_for_each_entry(device, &dma_device_list, global_node)
+	list_for_each_entry(device, &dma_device_list, global_node) {
+		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+			continue;
 		list_for_each_entry(chan, &device->channels, device_node)
 			chan->table_count = 0;
+	}
 
 	/* don't populate the channel_table if no clients are available */
 	if (!dmaengine_ref_count)
@@ -473,6 +483,111 @@ static void dma_channel_rebalance(void)
 	}
 }
 
+static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev)
+{
+	struct dma_chan *chan;
+	struct dma_chan *ret = NULL;
+
+	if (!__dma_device_satisfies_mask(dev, mask)) {
+		pr_debug("%s: wrong capabilities\n", __func__);
+		return NULL;
+	}
+	/* devices with multiple channels need special handling as we need to
+	 * ensure that all channels are either private or public.
+	 */
+	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
+		list_for_each_entry(chan, &dev->channels, device_node) {
+			/* some channels are already publicly allocated */
+			if (chan->client_count)
+				return NULL;
+		}
+
+	list_for_each_entry(chan, &dev->channels, device_node) {
+		if (chan->client_count) {
+			pr_debug("%s: %s busy\n",
+				 __func__, dev_name(&chan->dev));
+			continue;
+		}
+		ret = chan;
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * dma_request_channel - try to allocate an exclusive channel
+ * @mask: capabilities that the channel must satisfy
+ * @fn: optional callback to disposition available channels
+ * @fn_param: opaque parameter to pass to dma_filter_fn
+ */
+struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
+{
+	struct dma_device *device, *_d;
+	struct dma_chan *chan = NULL;
+	enum dma_state_client ack;
+	int err;
+
+	/* Find a channel */
+	mutex_lock(&dma_list_mutex);
+	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+		chan = private_candidate(mask, device);
+		if (!chan)
+			continue;
+
+		if (fn)
+			ack = fn(chan, fn_param);
+		else
+			ack = DMA_ACK;
+
+		if (ack == DMA_ACK) {
+			/* Found a suitable channel, try to grab, prep, and
+			 * return it.  We first set DMA_PRIVATE to disable
+			 * balance_ref_count as this channel will not be
+			 * published in the general-purpose allocator
+			 */
+			dma_cap_set(DMA_PRIVATE, device->cap_mask);
+			err = dma_chan_get(chan);
+
+			if (err == -ENODEV) {
+				pr_debug("%s: %s module removed\n", __func__,
+					 dev_name(&chan->dev));
+				list_del_rcu(&device->global_node);
+			} else if (err)
+				pr_err("dmaengine: failed to get %s: (%d)\n",
+				       dev_name(&chan->dev), err);
+			else
+				break;
+		} else if (ack == DMA_DUP) {
+			pr_debug("%s: %s filter said DMA_DUP\n",
+				 __func__, dev_name(&chan->dev));
+		} else if (ack == DMA_NAK) {
+			pr_debug("%s: %s filter said DMA_NAK\n",
+				 __func__, dev_name(&chan->dev));
+			break;
+		} else
+			WARN_ONCE(1, "filter_fn: unknown response?\n");
+		chan = NULL;
+	}
+	mutex_unlock(&dma_list_mutex);
+
+	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
+		 chan ? dev_name(&chan->dev) : NULL);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(__dma_request_channel);
+
+void dma_release_channel(struct dma_chan *chan)
+{
+	mutex_lock(&dma_list_mutex);
+	WARN_ONCE(chan->client_count != 1,
+		  "chan reference count %d != 1\n", chan->client_count);
+	dma_chan_put(chan);
+	mutex_unlock(&dma_list_mutex);
+}
+EXPORT_SYMBOL_GPL(dma_release_channel);
+
 /**
  * dma_chans_notify_available - broadcast available channels to the clients
  */
@@ -506,7 +621,9 @@ void dma_async_client_register(struct dma_client *client)
 	dmaengine_ref_count++;
 
 	/* try to grab channels */
-	list_for_each_entry_safe(device, _d, &dma_device_list, global_node)
+	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+			continue;
 		list_for_each_entry(chan, &device->channels, device_node) {
 			err = dma_chan_get(chan);
 			if (err == -ENODEV) {
@@ -517,6 +634,7 @@ void dma_async_client_register(struct dma_client *client)
517 pr_err("dmaengine: failed to get %s: (%d)\n", 634 pr_err("dmaengine: failed to get %s: (%d)\n",
518 dev_name(&chan->dev), err); 635 dev_name(&chan->dev), err);
519 } 636 }
637 }
520 638
521 /* if this is the first reference and there were channels 639 /* if this is the first reference and there were channels
522 * waiting we need to rebalance to get those channels 640 * waiting we need to rebalance to get those channels
@@ -547,9 +665,12 @@ void dma_async_client_unregister(struct dma_client *client)
 	dmaengine_ref_count--;
 	BUG_ON(dmaengine_ref_count < 0);
 	/* drop channel references */
-	list_for_each_entry(device, &dma_device_list, global_node)
+	list_for_each_entry(device, &dma_device_list, global_node) {
+		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+			continue;
 		list_for_each_entry(chan, &device->channels, device_node)
 			dma_chan_put(chan);
+	}
 
 	list_del(&client->global_node);
 	mutex_unlock(&dma_list_mutex);
@@ -639,9 +760,11 @@ int dma_async_device_register(struct dma_device *device)
 		chan->slow_ref = 0;
 		INIT_RCU_HEAD(&chan->rcu);
 	}
+	device->chancnt = chancnt;
 
 	mutex_lock(&dma_list_mutex);
-	if (dmaengine_ref_count)
+	/* take references on public channels */
+	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
 		list_for_each_entry(chan, &device->channels, device_node) {
 			/* if clients are already waiting for channels we need
 			 * to take references on their behalf
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 57a43adfc39e..fe40bc020af6 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -89,6 +89,7 @@ enum dma_transaction_type {
 	DMA_MEMSET,
 	DMA_MEMCPY_CRC32C,
 	DMA_INTERRUPT,
+	DMA_PRIVATE,
 	DMA_SLAVE,
 };
 
@@ -224,6 +225,18 @@ typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
 			struct dma_chan *chan, enum dma_state state);
 
 /**
+ * typedef dma_filter_fn - callback filter for dma_request_channel
+ * @chan: channel to be reviewed
+ * @filter_param: opaque parameter passed through dma_request_channel
+ *
+ * When this optional parameter is specified in a call to dma_request_channel a
+ * suitable channel is passed to this routine for further dispositioning before
+ * being returned.  Where 'suitable' indicates a non-busy channel that
+ * satisfies the given capability mask.
+ */
+typedef enum dma_state_client (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
+
+/**
  * struct dma_client - info on the entity making use of DMA services
  * @event_callback: func ptr to call when something happens
  * @cap_mask: only return channels that satisfy the requested capabilities
@@ -472,6 +485,9 @@ void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
 void dma_issue_pending_all(void);
+#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
+struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
+void dma_release_channel(struct dma_chan *chan);
 
 /* --- Helper iov-locking functions --- */
 