author     Dan Williams <dan.j.williams@intel.com>   2009-01-06 13:38:17 -0500
committer  Dan Williams <dan.j.williams@intel.com>   2009-01-06 13:38:17 -0500
commit     209b84a88fe81341b4d8d465acc4a67cb7c3feb3 (patch)
tree       134632ed8c914f0ee497e7a22bc616d84e068119
parent     74465b4ff9ac1da503025c0a0042e023bfa6505c (diff)
dmaengine: replace dma_async_client_register with dmaengine_get
Now that clients no longer need to be notified of channel arrival,
dma_async_client_register can simply increment the dmaengine_ref_count.
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--   crypto/async_tx/async_tx.c | 115
-rw-r--r--   drivers/dma/dmaengine.c    |  22
-rw-r--r--   include/linux/dmaengine.h  |   4
-rw-r--r--   net/core/dev.c             |   3
4 files changed, 11 insertions(+), 133 deletions(-)
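For context, a minimal before/after sketch of what this change means for a dmaengine client, distilled from the async_tx and net/core hunks below (the example_* names are hypothetical and not part of the patch):

	/* Before: each client registered a struct dma_client and was
	 * called back as channels came and went. */
	static struct dma_client example_dma_client = {
		.event_callback = example_event,	/* hypothetical callback */
	};

	static int __init example_init(void)
	{
		dma_async_client_register(&example_dma_client);
		dma_async_client_chan_request(&example_dma_client);
		return 0;
	}

	/* After: a bare reference count pins all channels while in use;
	 * no per-client structure or callback is needed. */
	static int __init example_init(void)
	{
		dmaengine_get();	/* take a reference on dmaengine channels */
		return 0;
	}

	static void __exit example_exit(void)
	{
		dmaengine_put();	/* drop it; drivers may unload at count 0 */
	}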
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 2cdf7a0867b7..f21147f3626a 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -28,120 +28,9 @@
 #include <linux/async_tx.h>
 
 #ifdef CONFIG_DMA_ENGINE
-static enum dma_state_client
-dma_channel_add_remove(struct dma_client *client,
-	struct dma_chan *chan, enum dma_state state);
-
-static struct dma_client async_tx_dma = {
-	.event_callback = dma_channel_add_remove,
-	/* .cap_mask == 0 defaults to all channels */
-};
-
-/**
- * async_tx_lock - protect modification of async_tx_master_list and serialize
- *	rebalance operations
- */
-static DEFINE_SPINLOCK(async_tx_lock);
-
-static LIST_HEAD(async_tx_master_list);
-
-static void
-free_dma_chan_ref(struct rcu_head *rcu)
-{
-	struct dma_chan_ref *ref;
-	ref = container_of(rcu, struct dma_chan_ref, rcu);
-	kfree(ref);
-}
-
-static void
-init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan)
-{
-	INIT_LIST_HEAD(&ref->node);
-	INIT_RCU_HEAD(&ref->rcu);
-	ref->chan = chan;
-	atomic_set(&ref->count, 0);
-}
-
-static enum dma_state_client
-dma_channel_add_remove(struct dma_client *client,
-	struct dma_chan *chan, enum dma_state state)
-{
-	unsigned long found, flags;
-	struct dma_chan_ref *master_ref, *ref;
-	enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-	switch (state) {
-	case DMA_RESOURCE_AVAILABLE:
-		found = 0;
-		rcu_read_lock();
-		list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-			if (ref->chan == chan) {
-				found = 1;
-				break;
-			}
-		rcu_read_unlock();
-
-		pr_debug("async_tx: dma resource available [%s]\n",
-			found ? "old" : "new");
-
-		if (!found)
-			ack = DMA_ACK;
-		else
-			break;
-
-		/* add the channel to the generic management list */
-		master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL);
-		if (master_ref) {
-			init_dma_chan_ref(master_ref, chan);
-			spin_lock_irqsave(&async_tx_lock, flags);
-			list_add_tail_rcu(&master_ref->node,
-				&async_tx_master_list);
-			spin_unlock_irqrestore(&async_tx_lock,
-				flags);
-		} else {
-			printk(KERN_WARNING "async_tx: unable to create"
-				" new master entry in response to"
-				" a DMA_RESOURCE_ADDED event"
-				" (-ENOMEM)\n");
-			return 0;
-		}
-		break;
-	case DMA_RESOURCE_REMOVED:
-		found = 0;
-		spin_lock_irqsave(&async_tx_lock, flags);
-		list_for_each_entry(ref, &async_tx_master_list, node)
-			if (ref->chan == chan) {
-				list_del_rcu(&ref->node);
-				call_rcu(&ref->rcu, free_dma_chan_ref);
-				found = 1;
-				break;
-			}
-		spin_unlock_irqrestore(&async_tx_lock, flags);
-
-		pr_debug("async_tx: dma resource removed [%s]\n",
-			found ? "ours" : "not ours");
-
-		if (found)
-			ack = DMA_ACK;
-		else
-			break;
-		break;
-	case DMA_RESOURCE_SUSPEND:
-	case DMA_RESOURCE_RESUME:
-		printk(KERN_WARNING "async_tx: does not support dma channel"
-			" suspend/resume\n");
-		break;
-	default:
-		BUG();
-	}
-
-	return ack;
-}
-
 static int __init async_tx_init(void)
 {
-	dma_async_client_register(&async_tx_dma);
-	dma_async_client_chan_request(&async_tx_dma);
+	dmaengine_get();
 
 	printk(KERN_INFO "async_tx: api initialized (async)\n");
 
@@ -150,7 +39,7 @@ static int __init async_tx_init(void)
 
 static void __exit async_tx_exit(void)
 {
-	dma_async_client_unregister(&async_tx_dma);
+	dmaengine_put();
 }
 
 /**
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 90aca505a1df..3f1849b7f5ef 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -600,10 +600,9 @@ static void dma_clients_notify_available(void)
 }
 
 /**
- * dma_async_client_register - register a &dma_client
- * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
+ * dmaengine_get - register interest in dma_channels
  */
-void dma_async_client_register(struct dma_client *client)
+void dmaengine_get(void)
 {
 	struct dma_device *device, *_d;
 	struct dma_chan *chan;
@@ -634,25 +633,18 @@ void dma_async_client_register(struct dma_client *client)
 	 */
 	if (dmaengine_ref_count == 1)
 		dma_channel_rebalance();
-	list_add_tail(&client->global_node, &dma_client_list);
 	mutex_unlock(&dma_list_mutex);
 }
-EXPORT_SYMBOL(dma_async_client_register);
+EXPORT_SYMBOL(dmaengine_get);
 
 /**
- * dma_async_client_unregister - unregister a client and free the &dma_client
- * @client: &dma_client to free
- *
- * Force frees any allocated DMA channels, frees the &dma_client memory
+ * dmaengine_put - let dma drivers be removed when ref_count == 0
  */
-void dma_async_client_unregister(struct dma_client *client)
+void dmaengine_put(void)
 {
 	struct dma_device *device;
 	struct dma_chan *chan;
 
-	if (!client)
-		return;
-
 	mutex_lock(&dma_list_mutex);
 	dmaengine_ref_count--;
 	BUG_ON(dmaengine_ref_count < 0);
@@ -663,11 +655,9 @@ void dma_async_client_unregister(struct dma_client *client)
 		list_for_each_entry(chan, &device->channels, device_node)
 			dma_chan_put(chan);
 	}
-
-	list_del(&client->global_node);
 	mutex_unlock(&dma_list_mutex);
 }
-EXPORT_SYMBOL(dma_async_client_unregister);
+EXPORT_SYMBOL(dmaengine_put);
 
 /**
  * dma_async_client_chan_request - send all available channels to the
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index d63544cf8a1a..37d95db156d3 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -318,8 +318,8 @@ struct dma_device {
 
 /* --- public DMA engine API --- */
 
-void dma_async_client_register(struct dma_client *client);
-void dma_async_client_unregister(struct dma_client *client);
+void dmaengine_get(void);
+void dmaengine_put(void);
 void dma_async_client_chan_request(struct dma_client *client);
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len);
diff --git a/net/core/dev.c b/net/core/dev.c
index bbb07dbe1740..7596fc9403c8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4894,8 +4894,7 @@ static int __init netdev_dma_register(void)
 	}
 	spin_lock_init(&net_dma.lock);
 	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
-	dma_async_client_register(&net_dma.client);
-	dma_async_client_chan_request(&net_dma.client);
+	dmaengine_get();
 	return 0;
 }
 
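One consequence of the new interface, visible in the dmaengine.c hunk above: gets and puts must balance exactly, since dmaengine_put() decrements the global count under dma_list_mutex and hits BUG_ON(dmaengine_ref_count < 0) if a put arrives without a matching get. A sketch of the resulting error-path discipline (example_setup() is a hypothetical step that may fail, not part of the patch):

	static int __init example_register(void)
	{
		int err;

		dmaengine_get();		/* pin channels before first use */
		err = example_setup();		/* hypothetical setup that may fail */
		if (err) {
			dmaengine_put();	/* balance the get on failure */
			return err;
		}
		return 0;
	}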