author     Dan Williams <dan.j.williams@intel.com>   2009-01-06 13:38:17 -0500
committer  Dan Williams <dan.j.williams@intel.com>   2009-01-06 13:38:17 -0500
commit     209b84a88fe81341b4d8d465acc4a67cb7c3feb3 (patch)
tree       134632ed8c914f0ee497e7a22bc616d84e068119 /crypto
parent     74465b4ff9ac1da503025c0a0042e023bfa6505c (diff)
dmaengine: replace dma_async_client_register with dmaengine_get
Now that clients no longer need to be notified of channel arrival,
dma_async_client_register can simply increment the dmaengine_ref_count.
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
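
For background, a minimal sketch of how a dmaengine client takes and drops the
module-level reference under the new interface, instead of registering a
struct dma_client with an event callback. This is illustrative only and not
part of this patch: the module name, init/exit functions, and the
dma_find_channel() lookup are hypothetical, though dmaengine_get(),
dmaengine_put(), and dma_find_channel() are the real kernel APIs.

/*
 * Illustrative sketch (not part of this patch): a dmaengine client
 * pins the channel tables with dmaengine_get() and releases them
 * with dmaengine_put().
 */
#include <linux/module.h>
#include <linux/dmaengine.h>

static int __init example_client_init(void)
{
        struct dma_chan *chan;

        /* take a reference so dmaengine keeps channels resident */
        dmaengine_get();

        /* look up a memcpy-capable channel; may be NULL if none exist */
        chan = dma_find_channel(DMA_MEMCPY);
        if (!chan)
                pr_info("example: no DMA_MEMCPY channel, using CPU copies\n");

        return 0;
}

static void __exit example_client_exit(void)
{
        /* drop the reference taken in example_client_init() */
        dmaengine_put();
}

module_init(example_client_init);
module_exit(example_client_exit);
MODULE_LICENSE("GPL");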
Diffstat (limited to 'crypto')
-rw-r--r--   crypto/async_tx/async_tx.c   115
1 file changed, 2 insertions(+), 113 deletions(-)
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 2cdf7a0867b7..f21147f3626a 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -28,120 +28,9 @@
 #include <linux/async_tx.h>
 
 #ifdef CONFIG_DMA_ENGINE
-static enum dma_state_client
-dma_channel_add_remove(struct dma_client *client,
-        struct dma_chan *chan, enum dma_state state);
-
-static struct dma_client async_tx_dma = {
-        .event_callback = dma_channel_add_remove,
-        /* .cap_mask == 0 defaults to all channels */
-};
-
-/**
- * async_tx_lock - protect modification of async_tx_master_list and serialize
- *      rebalance operations
- */
-static DEFINE_SPINLOCK(async_tx_lock);
-
-static LIST_HEAD(async_tx_master_list);
-
-static void
-free_dma_chan_ref(struct rcu_head *rcu)
-{
-        struct dma_chan_ref *ref;
-        ref = container_of(rcu, struct dma_chan_ref, rcu);
-        kfree(ref);
-}
-
-static void
-init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan)
-{
-        INIT_LIST_HEAD(&ref->node);
-        INIT_RCU_HEAD(&ref->rcu);
-        ref->chan = chan;
-        atomic_set(&ref->count, 0);
-}
-
-static enum dma_state_client
-dma_channel_add_remove(struct dma_client *client,
-        struct dma_chan *chan, enum dma_state state)
-{
-        unsigned long found, flags;
-        struct dma_chan_ref *master_ref, *ref;
-        enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-        switch (state) {
-        case DMA_RESOURCE_AVAILABLE:
-                found = 0;
-                rcu_read_lock();
-                list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-                        if (ref->chan == chan) {
-                                found = 1;
-                                break;
-                        }
-                rcu_read_unlock();
-
-                pr_debug("async_tx: dma resource available [%s]\n",
-                        found ? "old" : "new");
-
-                if (!found)
-                        ack = DMA_ACK;
-                else
-                        break;
-
-                /* add the channel to the generic management list */
-                master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL);
-                if (master_ref) {
-                        init_dma_chan_ref(master_ref, chan);
-                        spin_lock_irqsave(&async_tx_lock, flags);
-                        list_add_tail_rcu(&master_ref->node,
-                                &async_tx_master_list);
-                        spin_unlock_irqrestore(&async_tx_lock,
-                                flags);
-                } else {
-                        printk(KERN_WARNING "async_tx: unable to create"
-                                " new master entry in response to"
-                                " a DMA_RESOURCE_ADDED event"
-                                " (-ENOMEM)\n");
-                        return 0;
-                }
-                break;
-        case DMA_RESOURCE_REMOVED:
-                found = 0;
-                spin_lock_irqsave(&async_tx_lock, flags);
-                list_for_each_entry(ref, &async_tx_master_list, node)
-                        if (ref->chan == chan) {
-                                list_del_rcu(&ref->node);
-                                call_rcu(&ref->rcu, free_dma_chan_ref);
-                                found = 1;
-                                break;
-                        }
-                spin_unlock_irqrestore(&async_tx_lock, flags);
-
-                pr_debug("async_tx: dma resource removed [%s]\n",
-                        found ? "ours" : "not ours");
-
-                if (found)
-                        ack = DMA_ACK;
-                else
-                        break;
-                break;
-        case DMA_RESOURCE_SUSPEND:
-        case DMA_RESOURCE_RESUME:
-                printk(KERN_WARNING "async_tx: does not support dma channel"
-                        " suspend/resume\n");
-                break;
-        default:
-                BUG();
-        }
-
-        return ack;
-}
-
 static int __init async_tx_init(void)
 {
-        dma_async_client_register(&async_tx_dma);
-        dma_async_client_chan_request(&async_tx_dma);
+        dmaengine_get();
 
         printk(KERN_INFO "async_tx: api initialized (async)\n");
 
@@ -150,7 +39,7 @@ static int __init async_tx_init(void)
 
 static void __exit async_tx_exit(void)
 {
-        dma_async_client_unregister(&async_tx_dma);
+        dmaengine_put();
 }
 
 /**