author		Dan Williams <dan.j.williams@intel.com>	2009-01-06 13:38:18 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2009-01-06 13:38:18 -0500
commit		f27c580c3628d79b17f38976d842a6d7f3616e2e (patch)
tree		f1a1a96c1130e7e1c88f75cb5f5aab4c53fe0297
parent		aa1e6f1a385eb2b04171ec841f3b760091e4a8ee (diff)
dmaengine: remove 'bigref' infrastructure
Reference counting is done at the module level so clients need not worry
that a channel will leave while they are actively using dmaengine.
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--	drivers/dma/dmaengine.c		87
-rw-r--r--	drivers/dma/iop-adma.c		1
-rw-r--r--	drivers/dma/mv_xor.c		1
-rw-r--r--	include/linux/dmaengine.h	7
4 files changed, 9 insertions(+), 87 deletions(-)
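For orientation, the client-side pattern implied by the message above and by the updated dmaengine.c comment (dmaengine_get(), dma_find_channel(), dmaengine_put()) looks roughly like the following minimal sketch; the example_* names are hypothetical and not part of this patch:

#include <linux/errno.h>
#include <linux/dmaengine.h>

static struct dma_chan *example_chan;	/* hypothetical client state */

static int example_client_init(void)
{
	/* pin all registered DMA driver modules */
	dmaengine_get();
	example_chan = dma_find_channel(DMA_MEMCPY);
	if (!example_chan) {
		dmaengine_put();
		return -ENODEV;
	}
	return 0;
}

static void example_client_exit(void)
{
	example_chan = NULL;
	/* drop the module-level reference; DMA drivers may unload again */
	dmaengine_put();
}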
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 9fc91f973a9a..b245c38dbec3 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -34,26 +34,15 @@
  * The subsystem keeps a global list of dma_device structs it is protected by a
  * mutex, dma_list_mutex.
  *
+ * A subsystem can get access to a channel by calling dmaengine_get() followed
+ * by dma_find_channel(), or if it has need for an exclusive channel it can call
+ * dma_request_channel(). Once a channel is allocated a reference is taken
+ * against its corresponding driver to disable removal.
+ *
  * Each device has a channels list, which runs unlocked but is never modified
  * once the device is registered, it's just setup by the driver.
  *
- * Each device has a kref, which is initialized to 1 when the device is
- * registered. A kref_get is done for each device registered. When the
- * device is released, the corresponding kref_put is done in the release
- * method. Every time one of the device's channels is allocated to a client,
- * a kref_get occurs. When the channel is freed, the corresponding kref_put
- * happens. The device's release function does a completion, so
- * unregister_device does a remove event, device_unregister, a kref_put
- * for the first reference, then waits on the completion for all other
- * references to finish.
- *
- * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
- * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
- * signals that it wants to use a channel, and dma_chan_put is called when
- * a channel is removed or a client using it is unregistered. A client can
- * take extra references per outstanding transaction, as is the case with
- * the NET DMA client. The release function does a kref_put on the device.
- *	-ChrisL, DanW
+ * See Documentation/dmaengine.txt for more details
  */
 
 #include <linux/init.h>
@@ -114,18 +103,9 @@ static struct device_attribute dma_attrs[] = {
 	__ATTR_NULL
 };
 
-static void dma_async_device_cleanup(struct kref *kref);
-
-static void dma_dev_release(struct device *dev)
-{
-	struct dma_chan *chan = to_dma_chan(dev);
-	kref_put(&chan->device->refcount, dma_async_device_cleanup);
-}
-
 static struct class dma_devclass = {
 	.name = "dma",
 	.dev_attrs = dma_attrs,
-	.dev_release = dma_dev_release,
 };
 
 /* --- client and device registration --- */
@@ -233,29 +213,6 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 EXPORT_SYMBOL(dma_sync_wait);
 
 /**
- * dma_chan_cleanup - release a DMA channel's resources
- * @kref: kernel reference structure that contains the DMA channel device
- */
-void dma_chan_cleanup(struct kref *kref)
-{
-	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
-	kref_put(&chan->device->refcount, dma_async_device_cleanup);
-}
-EXPORT_SYMBOL(dma_chan_cleanup);
-
-static void dma_chan_free_rcu(struct rcu_head *rcu)
-{
-	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
-
-	kref_put(&chan->refcount, dma_chan_cleanup);
-}
-
-static void dma_chan_release(struct dma_chan *chan)
-{
-	call_rcu(&chan->rcu, dma_chan_free_rcu);
-}
-
-/**
  * dma_cap_mask_all - enable iteration over all operation types
  */
 static dma_cap_mask_t dma_cap_mask_all;
@@ -641,9 +598,6 @@ int dma_async_device_register(struct dma_device *device)
 	BUG_ON(!device->device_issue_pending);
 	BUG_ON(!device->dev);
 
-	init_completion(&device->done);
-	kref_init(&device->refcount);
-
 	mutex_lock(&dma_list_mutex);
 	device->dev_id = id++;
 	mutex_unlock(&dma_list_mutex);
@@ -662,19 +616,11 @@ int dma_async_device_register(struct dma_device *device)
 
 		rc = device_register(&chan->dev);
 		if (rc) {
-			chancnt--;
 			free_percpu(chan->local);
 			chan->local = NULL;
 			goto err_out;
 		}
-
-		/* One for the channel, one of the class device */
-		kref_get(&device->refcount);
-		kref_get(&device->refcount);
-		kref_init(&chan->refcount);
 		chan->client_count = 0;
-		chan->slow_ref = 0;
-		INIT_RCU_HEAD(&chan->rcu);
 	}
 	device->chancnt = chancnt;
 
@@ -705,9 +651,7 @@ err_out:
 	list_for_each_entry(chan, &device->channels, device_node) {
 		if (chan->local == NULL)
 			continue;
-		kref_put(&device->refcount, dma_async_device_cleanup);
 		device_unregister(&chan->dev);
-		chancnt--;
 		free_percpu(chan->local);
 	}
 	return rc;
@@ -715,20 +659,11 @@ err_out:
 EXPORT_SYMBOL(dma_async_device_register);
 
 /**
- * dma_async_device_cleanup - function called when all references are released
- * @kref: kernel reference object
- */
-static void dma_async_device_cleanup(struct kref *kref)
-{
-	struct dma_device *device;
-
-	device = container_of(kref, struct dma_device, refcount);
-	complete(&device->done);
-}
-
-/**
  * dma_async_device_unregister - unregister a DMA device
  * @device: &dma_device
+ *
+ * This routine is called by dma driver exit routines, dmaengine holds module
+ * references to prevent it being called while channels are in use.
  */
 void dma_async_device_unregister(struct dma_device *device)
 {
@@ -744,11 +679,7 @@ void dma_async_device_unregister(struct dma_device *device)
 			"%s called while %d clients hold a reference\n",
 			__func__, chan->client_count);
 		device_unregister(&chan->dev);
-		dma_chan_release(chan);
 	}
-
-	kref_put(&device->refcount, dma_async_device_cleanup);
-	wait_for_completion(&device->done);
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
 
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c74ac9eb009a..df0e37d70b31 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1253,7 +1253,6 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
 	spin_lock_init(&iop_chan->lock);
 	INIT_LIST_HEAD(&iop_chan->chain);
 	INIT_LIST_HEAD(&iop_chan->all_slots);
-	INIT_RCU_HEAD(&iop_chan->common.rcu);
 	iop_chan->common.device = dma_dev;
 	list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
 
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fbaa2f6225e2..d35cbd1ff0b3 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1219,7 +1219,6 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&mv_chan->chain);
 	INIT_LIST_HEAD(&mv_chan->completed_slots);
 	INIT_LIST_HEAD(&mv_chan->all_slots);
-	INIT_RCU_HEAD(&mv_chan->common.rcu);
 	mv_chan->common.device = dma_dev;
 
 	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index db050e97d2b4..bca2fc758894 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -142,10 +142,6 @@ struct dma_chan {
 	int chan_id;
 	struct device dev;
 
-	struct kref refcount;
-	int slow_ref;
-	struct rcu_head rcu;
-
 	struct list_head device_node;
 	struct dma_chan_percpu *local;
 	int client_count;
@@ -233,9 +229,6 @@ struct dma_device {
 	dma_cap_mask_t cap_mask;
 	int max_xor;
 
-	struct kref refcount;
-	struct completion done;
-
 	int dev_id;
 	struct device *dev;
 
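With the refcount, slow_ref and rcu members gone from struct dma_chan, per-channel setup in a driver probe routine reduces to the list/device wiring kept in the iop-adma and mv_xor hunks above. A minimal sketch, with a hypothetical example_* helper name:

#include <linux/list.h>
#include <linux/dmaengine.h>

static void example_adma_init_channel(struct dma_device *dma_dev,
				      struct dma_chan *chan)
{
	/* no INIT_RCU_HEAD(), kref_init() or slow_ref bookkeeping anymore */
	chan->device = dma_dev;
	list_add_tail(&chan->device_node, &dma_dev->channels);
}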