aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma/dmaengine.c
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2009-01-06 13:38:18 -0500
committerDan Williams <dan.j.williams@intel.com>2009-01-06 13:38:18 -0500
commitf27c580c3628d79b17f38976d842a6d7f3616e2e (patch)
treef1a1a96c1130e7e1c88f75cb5f5aab4c53fe0297 /drivers/dma/dmaengine.c
parentaa1e6f1a385eb2b04171ec841f3b760091e4a8ee (diff)
dmaengine: remove 'bigref' infrastructure
Reference counting is done at the module level so clients need not worry that a channel will leave while they are actively using dmaengine. Reviewed-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--drivers/dma/dmaengine.c87
1 file changed, 9 insertions, 78 deletions
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 9fc91f973a9a..b245c38dbec3 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -34,26 +34,15 @@
34 * The subsystem keeps a global list of dma_device structs it is protected by a 34 * The subsystem keeps a global list of dma_device structs it is protected by a
35 * mutex, dma_list_mutex. 35 * mutex, dma_list_mutex.
36 * 36 *
37 * A subsystem can get access to a channel by calling dmaengine_get() followed
38 * by dma_find_channel(), or if it has need for an exclusive channel it can call
39 * dma_request_channel(). Once a channel is allocated a reference is taken
40 * against its corresponding driver to disable removal.
41 *
37 * Each device has a channels list, which runs unlocked but is never modified 42 * Each device has a channels list, which runs unlocked but is never modified
38 * once the device is registered, it's just setup by the driver. 43 * once the device is registered, it's just setup by the driver.
39 * 44 *
40 * Each device has a kref, which is initialized to 1 when the device is 45 * See Documentation/dmaengine.txt for more details
41 * registered. A kref_get is done for each device registered. When the
42 * device is released, the corresponding kref_put is done in the release
43 * method. Every time one of the device's channels is allocated to a client,
44 * a kref_get occurs. When the channel is freed, the corresponding kref_put
45 * happens. The device's release function does a completion, so
46 * unregister_device does a remove event, device_unregister, a kref_put
47 * for the first reference, then waits on the completion for all other
48 * references to finish.
49 *
50 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
51 * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
52 * signals that it wants to use a channel, and dma_chan_put is called when
53 * a channel is removed or a client using it is unregistered. A client can
54 * take extra references per outstanding transaction, as is the case with
55 * the NET DMA client. The release function does a kref_put on the device.
56 * -ChrisL, DanW
57 */ 46 */
58 47
59#include <linux/init.h> 48#include <linux/init.h>
@@ -114,18 +103,9 @@ static struct device_attribute dma_attrs[] = {
114 __ATTR_NULL 103 __ATTR_NULL
115}; 104};
116 105
117static void dma_async_device_cleanup(struct kref *kref);
118
119static void dma_dev_release(struct device *dev)
120{
121 struct dma_chan *chan = to_dma_chan(dev);
122 kref_put(&chan->device->refcount, dma_async_device_cleanup);
123}
124
125static struct class dma_devclass = { 106static struct class dma_devclass = {
126 .name = "dma", 107 .name = "dma",
127 .dev_attrs = dma_attrs, 108 .dev_attrs = dma_attrs,
128 .dev_release = dma_dev_release,
129}; 109};
130 110
131/* --- client and device registration --- */ 111/* --- client and device registration --- */
@@ -233,29 +213,6 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
233EXPORT_SYMBOL(dma_sync_wait); 213EXPORT_SYMBOL(dma_sync_wait);
234 214
235/** 215/**
236 * dma_chan_cleanup - release a DMA channel's resources
237 * @kref: kernel reference structure that contains the DMA channel device
238 */
239void dma_chan_cleanup(struct kref *kref)
240{
241 struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
242 kref_put(&chan->device->refcount, dma_async_device_cleanup);
243}
244EXPORT_SYMBOL(dma_chan_cleanup);
245
246static void dma_chan_free_rcu(struct rcu_head *rcu)
247{
248 struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
249
250 kref_put(&chan->refcount, dma_chan_cleanup);
251}
252
253static void dma_chan_release(struct dma_chan *chan)
254{
255 call_rcu(&chan->rcu, dma_chan_free_rcu);
256}
257
258/**
259 * dma_cap_mask_all - enable iteration over all operation types 216 * dma_cap_mask_all - enable iteration over all operation types
260 */ 217 */
261static dma_cap_mask_t dma_cap_mask_all; 218static dma_cap_mask_t dma_cap_mask_all;
@@ -641,9 +598,6 @@ int dma_async_device_register(struct dma_device *device)
641 BUG_ON(!device->device_issue_pending); 598 BUG_ON(!device->device_issue_pending);
642 BUG_ON(!device->dev); 599 BUG_ON(!device->dev);
643 600
644 init_completion(&device->done);
645 kref_init(&device->refcount);
646
647 mutex_lock(&dma_list_mutex); 601 mutex_lock(&dma_list_mutex);
648 device->dev_id = id++; 602 device->dev_id = id++;
649 mutex_unlock(&dma_list_mutex); 603 mutex_unlock(&dma_list_mutex);
@@ -662,19 +616,11 @@ int dma_async_device_register(struct dma_device *device)
662 616
663 rc = device_register(&chan->dev); 617 rc = device_register(&chan->dev);
664 if (rc) { 618 if (rc) {
665 chancnt--;
666 free_percpu(chan->local); 619 free_percpu(chan->local);
667 chan->local = NULL; 620 chan->local = NULL;
668 goto err_out; 621 goto err_out;
669 } 622 }
670
671 /* One for the channel, one of the class device */
672 kref_get(&device->refcount);
673 kref_get(&device->refcount);
674 kref_init(&chan->refcount);
675 chan->client_count = 0; 623 chan->client_count = 0;
676 chan->slow_ref = 0;
677 INIT_RCU_HEAD(&chan->rcu);
678 } 624 }
679 device->chancnt = chancnt; 625 device->chancnt = chancnt;
680 626
@@ -705,9 +651,7 @@ err_out:
705 list_for_each_entry(chan, &device->channels, device_node) { 651 list_for_each_entry(chan, &device->channels, device_node) {
706 if (chan->local == NULL) 652 if (chan->local == NULL)
707 continue; 653 continue;
708 kref_put(&device->refcount, dma_async_device_cleanup);
709 device_unregister(&chan->dev); 654 device_unregister(&chan->dev);
710 chancnt--;
711 free_percpu(chan->local); 655 free_percpu(chan->local);
712 } 656 }
713 return rc; 657 return rc;
@@ -715,20 +659,11 @@ err_out:
715EXPORT_SYMBOL(dma_async_device_register); 659EXPORT_SYMBOL(dma_async_device_register);
716 660
717/** 661/**
718 * dma_async_device_cleanup - function called when all references are released
719 * @kref: kernel reference object
720 */
721static void dma_async_device_cleanup(struct kref *kref)
722{
723 struct dma_device *device;
724
725 device = container_of(kref, struct dma_device, refcount);
726 complete(&device->done);
727}
728
729/**
730 * dma_async_device_unregister - unregister a DMA device 662 * dma_async_device_unregister - unregister a DMA device
731 * @device: &dma_device 663 * @device: &dma_device
664 *
665 * This routine is called by dma driver exit routines, dmaengine holds module
666 * references to prevent it being called while channels are in use.
732 */ 667 */
733void dma_async_device_unregister(struct dma_device *device) 668void dma_async_device_unregister(struct dma_device *device)
734{ 669{
@@ -744,11 +679,7 @@ void dma_async_device_unregister(struct dma_device *device)
744 "%s called while %d clients hold a reference\n", 679 "%s called while %d clients hold a reference\n",
745 __func__, chan->client_count); 680 __func__, chan->client_count);
746 device_unregister(&chan->dev); 681 device_unregister(&chan->dev);
747 dma_chan_release(chan);
748 } 682 }
749
750 kref_put(&device->refcount, dma_async_device_cleanup);
751 wait_for_completion(&device->done);
752} 683}
753EXPORT_SYMBOL(dma_async_device_unregister); 684EXPORT_SYMBOL(dma_async_device_unregister);
754 685