diff options
| author | Dan Williams <dan.j.williams@intel.com> | 2009-01-06 13:38:17 -0500 |
|---|---|---|
| committer | Dan Williams <dan.j.williams@intel.com> | 2009-01-06 13:38:17 -0500 |
| commit | 209b84a88fe81341b4d8d465acc4a67cb7c3feb3 (patch) | |
| tree | 134632ed8c914f0ee497e7a22bc616d84e068119 /drivers | |
| parent | 74465b4ff9ac1da503025c0a0042e023bfa6505c (diff) | |
dmaengine: replace dma_async_client_register with dmaengine_get
Now that clients no longer need to be notified of channel arrival,
dma_async_client_register can simply increment the dmaengine_ref_count.
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers')
| -rw-r--r-- | drivers/dma/dmaengine.c | 22 |
1 file changed, 6 insertions, 16 deletions
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 90aca505a1df..3f1849b7f5ef 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
| @@ -600,10 +600,9 @@ static void dma_clients_notify_available(void) | |||
| 600 | } | 600 | } |
| 601 | 601 | ||
| 602 | /** | 602 | /** |
| 603 | * dma_async_client_register - register a &dma_client | 603 | * dmaengine_get - register interest in dma_channels |
| 604 | * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask' | ||
| 605 | */ | 604 | */ |
| 606 | void dma_async_client_register(struct dma_client *client) | 605 | void dmaengine_get(void) |
| 607 | { | 606 | { |
| 608 | struct dma_device *device, *_d; | 607 | struct dma_device *device, *_d; |
| 609 | struct dma_chan *chan; | 608 | struct dma_chan *chan; |
| @@ -634,25 +633,18 @@ void dma_async_client_register(struct dma_client *client) | |||
| 634 | */ | 633 | */ |
| 635 | if (dmaengine_ref_count == 1) | 634 | if (dmaengine_ref_count == 1) |
| 636 | dma_channel_rebalance(); | 635 | dma_channel_rebalance(); |
| 637 | list_add_tail(&client->global_node, &dma_client_list); | ||
| 638 | mutex_unlock(&dma_list_mutex); | 636 | mutex_unlock(&dma_list_mutex); |
| 639 | } | 637 | } |
| 640 | EXPORT_SYMBOL(dma_async_client_register); | 638 | EXPORT_SYMBOL(dmaengine_get); |
| 641 | 639 | ||
| 642 | /** | 640 | /** |
| 643 | * dma_async_client_unregister - unregister a client and free the &dma_client | 641 | * dmaengine_put - let dma drivers be removed when ref_count == 0 |
| 644 | * @client: &dma_client to free | ||
| 645 | * | ||
| 646 | * Force frees any allocated DMA channels, frees the &dma_client memory | ||
| 647 | */ | 642 | */ |
| 648 | void dma_async_client_unregister(struct dma_client *client) | 643 | void dmaengine_put(void) |
| 649 | { | 644 | { |
| 650 | struct dma_device *device; | 645 | struct dma_device *device; |
| 651 | struct dma_chan *chan; | 646 | struct dma_chan *chan; |
| 652 | 647 | ||
| 653 | if (!client) | ||
| 654 | return; | ||
| 655 | |||
| 656 | mutex_lock(&dma_list_mutex); | 648 | mutex_lock(&dma_list_mutex); |
| 657 | dmaengine_ref_count--; | 649 | dmaengine_ref_count--; |
| 658 | BUG_ON(dmaengine_ref_count < 0); | 650 | BUG_ON(dmaengine_ref_count < 0); |
| @@ -663,11 +655,9 @@ void dma_async_client_unregister(struct dma_client *client) | |||
| 663 | list_for_each_entry(chan, &device->channels, device_node) | 655 | list_for_each_entry(chan, &device->channels, device_node) |
| 664 | dma_chan_put(chan); | 656 | dma_chan_put(chan); |
| 665 | } | 657 | } |
| 666 | |||
| 667 | list_del(&client->global_node); | ||
| 668 | mutex_unlock(&dma_list_mutex); | 658 | mutex_unlock(&dma_list_mutex); |
| 669 | } | 659 | } |
| 670 | EXPORT_SYMBOL(dma_async_client_unregister); | 660 | EXPORT_SYMBOL(dmaengine_put); |
| 671 | 661 | ||
| 672 | /** | 662 | /** |
| 673 | * dma_async_client_chan_request - send all available channels to the | 663 | * dma_async_client_chan_request - send all available channels to the |
