author     Ingo Molnar <mingo@elte.hu>    2009-01-10 22:45:50 -0500
committer  Ingo Molnar <mingo@elte.hu>    2009-01-10 22:45:50 -0500
commit     e8b722f487589a1f60ca27adc695494f188d404e (patch)
tree       be3897dceb9b7c0949a8917ab11eea2752375e3b /drivers/dma/dmaengine.c
parent     01d07820a0df6b6134c1bb75b1e84c9d0cdab3be (diff)
parent     c59765042f53a79a7a65585042ff463b69cb248c (diff)
Merge commit 'v2.6.29-rc1' into irq/urgent
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--  drivers/dma/dmaengine.c  778
1 file changed, 569 insertions, 209 deletions
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 657996517374..403dbe781122 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -31,32 +31,18 @@
31 | * | 31 | * |
32 | * LOCKING: | 32 | * LOCKING: |
33 | * | 33 | * |
34 | * The subsystem keeps two global lists, dma_device_list and dma_client_list. | 34 | * The subsystem keeps a global list of dma_device structs it is protected by a |
35 | * Both of these are protected by a mutex, dma_list_mutex. | 35 | * mutex, dma_list_mutex. |
36 | * | ||
37 | * A subsystem can get access to a channel by calling dmaengine_get() followed | ||
38 | * by dma_find_channel(), or if it has need for an exclusive channel it can call | ||
39 | * dma_request_channel(). Once a channel is allocated a reference is taken | ||
40 | * against its corresponding driver to disable removal. | ||
36 | * | 41 | * |
37 | * Each device has a channels list, which runs unlocked but is never modified | 42 | * Each device has a channels list, which runs unlocked but is never modified |
38 | * once the device is registered, it's just setup by the driver. | 43 | * once the device is registered, it's just setup by the driver. |
39 | * | 44 | * |
40 | * Each client is responsible for keeping track of the channels it uses. See | 45 | * See Documentation/dmaengine.txt for more details |
41 | * the definition of dma_event_callback in dmaengine.h. | ||
42 | * | ||
43 | * Each device has a kref, which is initialized to 1 when the device is | ||
44 | * registered. A kref_get is done for each device registered. When the | ||
45 | * device is released, the corresponding kref_put is done in the release | ||
46 | * method. Every time one of the device's channels is allocated to a client, | ||
47 | * a kref_get occurs. When the channel is freed, the corresponding kref_put | ||
48 | * happens. The device's release function does a completion, so | ||
49 | * unregister_device does a remove event, device_unregister, a kref_put | ||
50 | * for the first reference, then waits on the completion for all other | ||
51 | * references to finish. | ||
52 | * | ||
53 | * Each channel has an open-coded implementation of Rusty Russell's "bigref," | ||
54 | * with a kref and a per_cpu local_t. A dma_chan_get is called when a client | ||
55 | * signals that it wants to use a channel, and dma_chan_put is called when | ||
56 | * a channel is removed or a client using it is unregistered. A client can | ||
57 | * take extra references per outstanding transaction, as is the case with | ||
58 | * the NET DMA client. The release function does a kref_put on the device. | ||
59 | * -ChrisL, DanW | ||
60 | */ | 46 | */ |
61 | 47 | ||
62 | #include <linux/init.h> | 48 | #include <linux/init.h> |
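
The rewritten locking comment above describes the new client model: take a subsystem-wide reference, look up a shared channel per operation, and drop the reference when done. A minimal sketch of that flow, assuming a hypothetical `my_subsystem_*` client (error paths trimmed):

```c
#include <linux/dmaengine.h>
#include <linux/string.h>

/* Pin DMA drivers and populate the per-cpu channel table. */
static int my_subsystem_init(void)
{
	dmaengine_get();
	return 0;
}

static void my_subsystem_copy(void *dest, void *src, size_t len)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	dma_cookie_t cookie;

	if (!chan) {
		memcpy(dest, src, len);		/* no offload channel available */
		return;
	}

	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
	if (cookie < 0) {
		memcpy(dest, src, len);		/* submission failed, copy by hand */
		return;
	}

	dma_async_issue_pending(chan);
	dma_sync_wait(chan, cookie);		/* spin until the copy is done */
}

/* Drop channel references so DMA drivers can be unloaded again. */
static void my_subsystem_exit(void)
{
	dmaengine_put();
}
```
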
@@ -70,54 +56,85 @@
70 | #include <linux/rcupdate.h> | 56 | #include <linux/rcupdate.h> |
71 | #include <linux/mutex.h> | 57 | #include <linux/mutex.h> |
72 | #include <linux/jiffies.h> | 58 | #include <linux/jiffies.h> |
59 | #include <linux/rculist.h> | ||
60 | #include <linux/idr.h> | ||
73 | 61 | ||
74 | static DEFINE_MUTEX(dma_list_mutex); | 62 | static DEFINE_MUTEX(dma_list_mutex); |
75 | static LIST_HEAD(dma_device_list); | 63 | static LIST_HEAD(dma_device_list); |
76 | static LIST_HEAD(dma_client_list); | 64 | static long dmaengine_ref_count; |
65 | static struct idr dma_idr; | ||
77 | 66 | ||
78 | /* --- sysfs implementation --- */ | 67 | /* --- sysfs implementation --- */ |
79 | 68 | ||
69 | /** | ||
70 | * dev_to_dma_chan - convert a device pointer to the its sysfs container object | ||
71 | * @dev - device node | ||
72 | * | ||
73 | * Must be called under dma_list_mutex | ||
74 | */ | ||
75 | static struct dma_chan *dev_to_dma_chan(struct device *dev) | ||
76 | { | ||
77 | struct dma_chan_dev *chan_dev; | ||
78 | |||
79 | chan_dev = container_of(dev, typeof(*chan_dev), device); | ||
80 | return chan_dev->chan; | ||
81 | } | ||
82 | |||
80 | static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf) | 83 | static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf) |
81 | { | 84 | { |
82 | struct dma_chan *chan = to_dma_chan(dev); | 85 | struct dma_chan *chan; |
83 | unsigned long count = 0; | 86 | unsigned long count = 0; |
84 | int i; | 87 | int i; |
88 | int err; | ||
85 | 89 | ||
86 | for_each_possible_cpu(i) | 90 | mutex_lock(&dma_list_mutex); |
87 | count += per_cpu_ptr(chan->local, i)->memcpy_count; | 91 | chan = dev_to_dma_chan(dev); |
92 | if (chan) { | ||
93 | for_each_possible_cpu(i) | ||
94 | count += per_cpu_ptr(chan->local, i)->memcpy_count; | ||
95 | err = sprintf(buf, "%lu\n", count); | ||
96 | } else | ||
97 | err = -ENODEV; | ||
98 | mutex_unlock(&dma_list_mutex); | ||
88 | 99 | ||
89 | return sprintf(buf, "%lu\n", count); | 100 | return err; |
90 | } | 101 | } |
91 | 102 | ||
92 | static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr, | 103 | static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr, |
93 | char *buf) | 104 | char *buf) |
94 | { | 105 | { |
95 | struct dma_chan *chan = to_dma_chan(dev); | 106 | struct dma_chan *chan; |
96 | unsigned long count = 0; | 107 | unsigned long count = 0; |
97 | int i; | 108 | int i; |
109 | int err; | ||
98 | 110 | ||
99 | for_each_possible_cpu(i) | 111 | mutex_lock(&dma_list_mutex); |
100 | count += per_cpu_ptr(chan->local, i)->bytes_transferred; | 112 | chan = dev_to_dma_chan(dev); |
113 | if (chan) { | ||
114 | for_each_possible_cpu(i) | ||
115 | count += per_cpu_ptr(chan->local, i)->bytes_transferred; | ||
116 | err = sprintf(buf, "%lu\n", count); | ||
117 | } else | ||
118 | err = -ENODEV; | ||
119 | mutex_unlock(&dma_list_mutex); | ||
101 | 120 | ||
102 | return sprintf(buf, "%lu\n", count); | 121 | return err; |
103 | } | 122 | } |
104 | 123 | ||
105 | static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf) | 124 | static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf) |
106 | { | 125 | { |
107 | struct dma_chan *chan = to_dma_chan(dev); | 126 | struct dma_chan *chan; |
108 | int in_use = 0; | 127 | int err; |
109 | |||
110 | if (unlikely(chan->slow_ref) && | ||
111 | atomic_read(&chan->refcount.refcount) > 1) | ||
112 | in_use = 1; | ||
113 | else { | ||
114 | if (local_read(&(per_cpu_ptr(chan->local, | ||
115 | get_cpu())->refcount)) > 0) | ||
116 | in_use = 1; | ||
117 | put_cpu(); | ||
118 | } | ||
119 | 128 | ||
120 | return sprintf(buf, "%d\n", in_use); | 129 | mutex_lock(&dma_list_mutex); |
130 | chan = dev_to_dma_chan(dev); | ||
131 | if (chan) | ||
132 | err = sprintf(buf, "%d\n", chan->client_count); | ||
133 | else | ||
134 | err = -ENODEV; | ||
135 | mutex_unlock(&dma_list_mutex); | ||
136 | |||
137 | return err; | ||
121 | } | 138 | } |
122 | 139 | ||
123 | static struct device_attribute dma_attrs[] = { | 140 | static struct device_attribute dma_attrs[] = { |
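
The attribute handlers above no longer assume the sysfs device maps to a live channel; they look the channel up under dma_list_mutex and return -ENODEV once it is gone. The companion container introduced by this series in dmaengine.h looks roughly like this (reproduced here for orientation, not verbatim):

```c
/* Sysfs-facing wrapper: the embedded struct device may outlive the
 * channel, so ->chan is cleared at unregister time and readers must
 * re-check it under dma_list_mutex.
 */
struct dma_chan_dev {
	struct dma_chan *chan;		/* NULL once the channel is torn down */
	struct device device;		/* what the dma class actually holds */
	int dev_id;			/* idr-allocated DMA device id */
	atomic_t *idr_ref;		/* shared count of live chan_devs */
};
```
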
@@ -127,76 +144,110 @@ static struct device_attribute dma_attrs[] = {
127 | __ATTR_NULL | 144 | __ATTR_NULL |
128 | }; | 145 | }; |
129 | 146 | ||
130 | static void dma_async_device_cleanup(struct kref *kref); | 147 | static void chan_dev_release(struct device *dev) |
131 | |||
132 | static void dma_dev_release(struct device *dev) | ||
133 | { | 148 | { |
134 | struct dma_chan *chan = to_dma_chan(dev); | 149 | struct dma_chan_dev *chan_dev; |
135 | kref_put(&chan->device->refcount, dma_async_device_cleanup); | 150 | |
151 | chan_dev = container_of(dev, typeof(*chan_dev), device); | ||
152 | if (atomic_dec_and_test(chan_dev->idr_ref)) { | ||
153 | mutex_lock(&dma_list_mutex); | ||
154 | idr_remove(&dma_idr, chan_dev->dev_id); | ||
155 | mutex_unlock(&dma_list_mutex); | ||
156 | kfree(chan_dev->idr_ref); | ||
157 | } | ||
158 | kfree(chan_dev); | ||
136 | } | 159 | } |
137 | 160 | ||
138 | static struct class dma_devclass = { | 161 | static struct class dma_devclass = { |
139 | .name = "dma", | 162 | .name = "dma", |
140 | .dev_attrs = dma_attrs, | 163 | .dev_attrs = dma_attrs, |
141 | .dev_release = dma_dev_release, | 164 | .dev_release = chan_dev_release, |
142 | }; | 165 | }; |
143 | 166 | ||
144 | /* --- client and device registration --- */ | 167 | /* --- client and device registration --- */ |
145 | 168 | ||
146 | #define dma_chan_satisfies_mask(chan, mask) \ | 169 | #define dma_device_satisfies_mask(device, mask) \ |
147 | __dma_chan_satisfies_mask((chan), &(mask)) | 170 | __dma_device_satisfies_mask((device), &(mask)) |
148 | static int | 171 | static int |
149 | __dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want) | 172 | __dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want) |
150 | { | 173 | { |
151 | dma_cap_mask_t has; | 174 | dma_cap_mask_t has; |
152 | 175 | ||
153 | bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits, | 176 | bitmap_and(has.bits, want->bits, device->cap_mask.bits, |
154 | DMA_TX_TYPE_END); | 177 | DMA_TX_TYPE_END); |
155 | return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); | 178 | return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); |
156 | } | 179 | } |
157 | 180 | ||
181 | static struct module *dma_chan_to_owner(struct dma_chan *chan) | ||
182 | { | ||
183 | return chan->device->dev->driver->owner; | ||
184 | } | ||
185 | |||
158 | /** | 186 | /** |
159 | * dma_client_chan_alloc - try to allocate channels to a client | 187 | * balance_ref_count - catch up the channel reference count |
160 | * @client: &dma_client | 188 | * @chan - channel to balance ->client_count versus dmaengine_ref_count |
161 | * | 189 | * |
162 | * Called with dma_list_mutex held. | 190 | * balance_ref_count must be called under dma_list_mutex |
163 | */ | 191 | */ |
164 | static void dma_client_chan_alloc(struct dma_client *client) | 192 | static void balance_ref_count(struct dma_chan *chan) |
165 | { | 193 | { |
166 | struct dma_device *device; | 194 | struct module *owner = dma_chan_to_owner(chan); |
167 | struct dma_chan *chan; | ||
168 | int desc; /* allocated descriptor count */ | ||
169 | enum dma_state_client ack; | ||
170 | 195 | ||
171 | /* Find a channel */ | 196 | while (chan->client_count < dmaengine_ref_count) { |
172 | list_for_each_entry(device, &dma_device_list, global_node) { | 197 | __module_get(owner); |
173 | /* Does the client require a specific DMA controller? */ | 198 | chan->client_count++; |
174 | if (client->slave && client->slave->dma_dev | 199 | } |
175 | && client->slave->dma_dev != device->dev) | 200 | } |
176 | continue; | ||
177 | 201 | ||
178 | list_for_each_entry(chan, &device->channels, device_node) { | 202 | /** |
179 | if (!dma_chan_satisfies_mask(chan, client->cap_mask)) | 203 | * dma_chan_get - try to grab a dma channel's parent driver module |
180 | continue; | 204 | * @chan - channel to grab |
205 | * | ||
206 | * Must be called under dma_list_mutex | ||
207 | */ | ||
208 | static int dma_chan_get(struct dma_chan *chan) | ||
209 | { | ||
210 | int err = -ENODEV; | ||
211 | struct module *owner = dma_chan_to_owner(chan); | ||
212 | |||
213 | if (chan->client_count) { | ||
214 | __module_get(owner); | ||
215 | err = 0; | ||
216 | } else if (try_module_get(owner)) | ||
217 | err = 0; | ||
218 | |||
219 | if (err == 0) | ||
220 | chan->client_count++; | ||
221 | |||
222 | /* allocate upon first client reference */ | ||
223 | if (chan->client_count == 1 && err == 0) { | ||
224 | int desc_cnt = chan->device->device_alloc_chan_resources(chan); | ||
225 | |||
226 | if (desc_cnt < 0) { | ||
227 | err = desc_cnt; | ||
228 | chan->client_count = 0; | ||
229 | module_put(owner); | ||
230 | } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) | ||
231 | balance_ref_count(chan); | ||
232 | } | ||
181 | 233 | ||
182 | desc = chan->device->device_alloc_chan_resources( | 234 | return err; |
183 | chan, client); | 235 | } |
184 | if (desc >= 0) { | ||
185 | ack = client->event_callback(client, | ||
186 | chan, | ||
187 | DMA_RESOURCE_AVAILABLE); | ||
188 | 236 | ||
189 | /* we are done once this client rejects | 237 | /** |
190 | * an available resource | 238 | * dma_chan_put - drop a reference to a dma channel's parent driver module |
191 | */ | 239 | * @chan - channel to release |
192 | if (ack == DMA_ACK) { | 240 | * |
193 | dma_chan_get(chan); | 241 | * Must be called under dma_list_mutex |
194 | chan->client_count++; | 242 | */ |
195 | } else if (ack == DMA_NAK) | 243 | static void dma_chan_put(struct dma_chan *chan) |
196 | return; | 244 | { |
197 | } | 245 | if (!chan->client_count) |
198 | } | 246 | return; /* this channel failed alloc_chan_resources */ |
199 | } | 247 | chan->client_count--; |
248 | module_put(dma_chan_to_owner(chan)); | ||
249 | if (chan->client_count == 0) | ||
250 | chan->device->device_free_chan_resources(chan); | ||
200 | } | 251 | } |
201 | 252 | ||
202 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) | 253 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) |
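
dma_chan_get()/dma_chan_put() pin the channel's parent driver module once per client reference, and balance_ref_count() catches a channel up after its first successful resource allocation. The invariant this maintains for public channels can be stated as a small, purely illustrative check that could sit next to these helpers:

```c
/* Illustrative only: for a non-DMA_PRIVATE channel that has allocated
 * resources, client_count tracks the global dmaengine_ref_count exactly;
 * otherwise it is zero.
 */
static void check_public_channel_balanced(struct dma_chan *chan)
{
	if (dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		return;		/* private channels are counted by their owner */
	if (chan->client_count)
		WARN_ON(chan->client_count != dmaengine_ref_count);
}
```
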
@@ -218,138 +269,342 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
218 | EXPORT_SYMBOL(dma_sync_wait); | 269 | EXPORT_SYMBOL(dma_sync_wait); |
219 | 270 | ||
220 | /** | 271 | /** |
221 | * dma_chan_cleanup - release a DMA channel's resources | 272 | * dma_cap_mask_all - enable iteration over all operation types |
222 | * @kref: kernel reference structure that contains the DMA channel device | 273 | */ |
274 | static dma_cap_mask_t dma_cap_mask_all; | ||
275 | |||
276 | /** | ||
277 | * dma_chan_tbl_ent - tracks channel allocations per core/operation | ||
278 | * @chan - associated channel for this entry | ||
279 | */ | ||
280 | struct dma_chan_tbl_ent { | ||
281 | struct dma_chan *chan; | ||
282 | }; | ||
283 | |||
284 | /** | ||
285 | * channel_table - percpu lookup table for memory-to-memory offload providers | ||
223 | */ | 286 | */ |
224 | void dma_chan_cleanup(struct kref *kref) | 287 | static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END]; |
288 | |||
289 | static int __init dma_channel_table_init(void) | ||
225 | { | 290 | { |
226 | struct dma_chan *chan = container_of(kref, struct dma_chan, refcount); | 291 | enum dma_transaction_type cap; |
227 | chan->device->device_free_chan_resources(chan); | 292 | int err = 0; |
228 | kref_put(&chan->device->refcount, dma_async_device_cleanup); | 293 | |
294 | bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); | ||
295 | |||
296 | /* 'interrupt', 'private', and 'slave' are channel capabilities, | ||
297 | * but are not associated with an operation so they do not need | ||
298 | * an entry in the channel_table | ||
299 | */ | ||
300 | clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); | ||
301 | clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); | ||
302 | clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); | ||
303 | |||
304 | for_each_dma_cap_mask(cap, dma_cap_mask_all) { | ||
305 | channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); | ||
306 | if (!channel_table[cap]) { | ||
307 | err = -ENOMEM; | ||
308 | break; | ||
309 | } | ||
310 | } | ||
311 | |||
312 | if (err) { | ||
313 | pr_err("dmaengine: initialization failure\n"); | ||
314 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | ||
315 | if (channel_table[cap]) | ||
316 | free_percpu(channel_table[cap]); | ||
317 | } | ||
318 | |||
319 | return err; | ||
229 | } | 320 | } |
230 | EXPORT_SYMBOL(dma_chan_cleanup); | 321 | arch_initcall(dma_channel_table_init); |
231 | 322 | ||
232 | static void dma_chan_free_rcu(struct rcu_head *rcu) | 323 | /** |
324 | * dma_find_channel - find a channel to carry out the operation | ||
325 | * @tx_type: transaction type | ||
326 | */ | ||
327 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) | ||
233 | { | 328 | { |
234 | struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu); | 329 | struct dma_chan *chan; |
235 | int bias = 0x7FFFFFFF; | 330 | int cpu; |
236 | int i; | 331 | |
237 | for_each_possible_cpu(i) | 332 | WARN_ONCE(dmaengine_ref_count == 0, |
238 | bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount); | 333 | "client called %s without a reference", __func__); |
239 | atomic_sub(bias, &chan->refcount.refcount); | 334 | |
240 | kref_put(&chan->refcount, dma_chan_cleanup); | 335 | cpu = get_cpu(); |
336 | chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan; | ||
337 | put_cpu(); | ||
338 | |||
339 | return chan; | ||
241 | } | 340 | } |
341 | EXPORT_SYMBOL(dma_find_channel); | ||
242 | 342 | ||
243 | static void dma_chan_release(struct dma_chan *chan) | 343 | /** |
344 | * dma_issue_pending_all - flush all pending operations across all channels | ||
345 | */ | ||
346 | void dma_issue_pending_all(void) | ||
244 | { | 347 | { |
245 | atomic_add(0x7FFFFFFF, &chan->refcount.refcount); | 348 | struct dma_device *device; |
246 | chan->slow_ref = 1; | 349 | struct dma_chan *chan; |
247 | call_rcu(&chan->rcu, dma_chan_free_rcu); | 350 | |
351 | WARN_ONCE(dmaengine_ref_count == 0, | ||
352 | "client called %s without a reference", __func__); | ||
353 | |||
354 | rcu_read_lock(); | ||
355 | list_for_each_entry_rcu(device, &dma_device_list, global_node) { | ||
356 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
357 | continue; | ||
358 | list_for_each_entry(chan, &device->channels, device_node) | ||
359 | if (chan->client_count) | ||
360 | device->device_issue_pending(chan); | ||
361 | } | ||
362 | rcu_read_unlock(); | ||
248 | } | 363 | } |
364 | EXPORT_SYMBOL(dma_issue_pending_all); | ||
249 | 365 | ||
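
dma_issue_pending_all() exists so a client can queue a burst of copies and ring every public channel's doorbell once, the pattern the old NET_DMA receive path relied on. A hedged sketch (my_copy_batch() is hypothetical and error handling is minimal):

```c
#include <linux/dmaengine.h>
#include <linux/string.h>

/* Queue several async copies, then flush all public channels in one go. */
static void my_copy_batch(void **dest, void **src, size_t *len, int n)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	int i;

	for (i = 0; i < n; i++) {
		if (!chan ||
		    dma_async_memcpy_buf_to_buf(chan, dest[i], src[i], len[i]) < 0)
			memcpy(dest[i], src[i], len[i]);	/* synchronous fallback */
	}

	dma_issue_pending_all();	/* single flush for the whole batch */
}
```
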
250 | /** | 366 | /** |
251 | * dma_chans_notify_available - broadcast available channels to the clients | 367 | * nth_chan - returns the nth channel of the given capability |
368 | * @cap: capability to match | ||
369 | * @n: nth channel desired | ||
370 | * | ||
371 | * Defaults to returning the channel with the desired capability and the | ||
372 | * lowest reference count when 'n' cannot be satisfied. Must be called | ||
373 | * under dma_list_mutex. | ||
252 | */ | 374 | */ |
253 | static void dma_clients_notify_available(void) | 375 | static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n) |
254 | { | 376 | { |
255 | struct dma_client *client; | 377 | struct dma_device *device; |
378 | struct dma_chan *chan; | ||
379 | struct dma_chan *ret = NULL; | ||
380 | struct dma_chan *min = NULL; | ||
256 | 381 | ||
257 | mutex_lock(&dma_list_mutex); | 382 | list_for_each_entry(device, &dma_device_list, global_node) { |
383 | if (!dma_has_cap(cap, device->cap_mask) || | ||
384 | dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
385 | continue; | ||
386 | list_for_each_entry(chan, &device->channels, device_node) { | ||
387 | if (!chan->client_count) | ||
388 | continue; | ||
389 | if (!min) | ||
390 | min = chan; | ||
391 | else if (chan->table_count < min->table_count) | ||
392 | min = chan; | ||
393 | |||
394 | if (n-- == 0) { | ||
395 | ret = chan; | ||
396 | break; /* done */ | ||
397 | } | ||
398 | } | ||
399 | if (ret) | ||
400 | break; /* done */ | ||
401 | } | ||
258 | 402 | ||
259 | list_for_each_entry(client, &dma_client_list, global_node) | 403 | if (!ret) |
260 | dma_client_chan_alloc(client); | 404 | ret = min; |
261 | 405 | ||
262 | mutex_unlock(&dma_list_mutex); | 406 | if (ret) |
407 | ret->table_count++; | ||
408 | |||
409 | return ret; | ||
263 | } | 410 | } |
264 | 411 | ||
265 | /** | 412 | /** |
266 | * dma_chans_notify_available - tell the clients that a channel is going away | 413 | * dma_channel_rebalance - redistribute the available channels |
267 | * @chan: channel on its way out | 414 | * |
415 | * Optimize for cpu isolation (each cpu gets a dedicated channel for an | ||
416 | * operation type) in the SMP case, and operation isolation (avoid | ||
417 | * multi-tasking channels) in the non-SMP case. Must be called under | ||
418 | * dma_list_mutex. | ||
268 | */ | 419 | */ |
269 | static void dma_clients_notify_removed(struct dma_chan *chan) | 420 | static void dma_channel_rebalance(void) |
270 | { | 421 | { |
271 | struct dma_client *client; | 422 | struct dma_chan *chan; |
272 | enum dma_state_client ack; | 423 | struct dma_device *device; |
424 | int cpu; | ||
425 | int cap; | ||
426 | int n; | ||
273 | 427 | ||
274 | mutex_lock(&dma_list_mutex); | 428 | /* undo the last distribution */ |
429 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | ||
430 | for_each_possible_cpu(cpu) | ||
431 | per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; | ||
432 | |||
433 | list_for_each_entry(device, &dma_device_list, global_node) { | ||
434 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
435 | continue; | ||
436 | list_for_each_entry(chan, &device->channels, device_node) | ||
437 | chan->table_count = 0; | ||
438 | } | ||
275 | 439 | ||
276 | list_for_each_entry(client, &dma_client_list, global_node) { | 440 | /* don't populate the channel_table if no clients are available */ |
277 | ack = client->event_callback(client, chan, | 441 | if (!dmaengine_ref_count) |
278 | DMA_RESOURCE_REMOVED); | 442 | return; |
279 | 443 | ||
280 | /* client was holding resources for this channel so | 444 | /* redistribute available channels */ |
281 | * free it | 445 | n = 0; |
282 | */ | 446 | for_each_dma_cap_mask(cap, dma_cap_mask_all) |
283 | if (ack == DMA_ACK) { | 447 | for_each_online_cpu(cpu) { |
284 | dma_chan_put(chan); | 448 | if (num_possible_cpus() > 1) |
285 | chan->client_count--; | 449 | chan = nth_chan(cap, n++); |
450 | else | ||
451 | chan = nth_chan(cap, -1); | ||
452 | |||
453 | per_cpu_ptr(channel_table[cap], cpu)->chan = chan; | ||
454 | } | ||
455 | } | ||
456 | |||
457 | static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev, | ||
458 | dma_filter_fn fn, void *fn_param) | ||
459 | { | ||
460 | struct dma_chan *chan; | ||
461 | |||
462 | if (!__dma_device_satisfies_mask(dev, mask)) { | ||
463 | pr_debug("%s: wrong capabilities\n", __func__); | ||
464 | return NULL; | ||
465 | } | ||
466 | /* devices with multiple channels need special handling as we need to | ||
467 | * ensure that all channels are either private or public. | ||
468 | */ | ||
469 | if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask)) | ||
470 | list_for_each_entry(chan, &dev->channels, device_node) { | ||
471 | /* some channels are already publicly allocated */ | ||
472 | if (chan->client_count) | ||
473 | return NULL; | ||
286 | } | 474 | } |
475 | |||
476 | list_for_each_entry(chan, &dev->channels, device_node) { | ||
477 | if (chan->client_count) { | ||
478 | pr_debug("%s: %s busy\n", | ||
479 | __func__, dma_chan_name(chan)); | ||
480 | continue; | ||
481 | } | ||
482 | if (fn && !fn(chan, fn_param)) { | ||
483 | pr_debug("%s: %s filter said false\n", | ||
484 | __func__, dma_chan_name(chan)); | ||
485 | continue; | ||
486 | } | ||
487 | return chan; | ||
287 | } | 488 | } |
288 | 489 | ||
289 | mutex_unlock(&dma_list_mutex); | 490 | return NULL; |
290 | } | 491 | } |
291 | 492 | ||
292 | /** | 493 | /** |
293 | * dma_async_client_register - register a &dma_client | 494 | * dma_request_channel - try to allocate an exclusive channel |
294 | * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask' | 495 | * @mask: capabilities that the channel must satisfy |
496 | * @fn: optional callback to disposition available channels | ||
497 | * @fn_param: opaque parameter to pass to dma_filter_fn | ||
295 | */ | 498 | */ |
296 | void dma_async_client_register(struct dma_client *client) | 499 | struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param) |
297 | { | 500 | { |
298 | /* validate client data */ | 501 | struct dma_device *device, *_d; |
299 | BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) && | 502 | struct dma_chan *chan = NULL; |
300 | !client->slave); | 503 | int err; |
301 | 504 | ||
505 | /* Find a channel */ | ||
506 | mutex_lock(&dma_list_mutex); | ||
507 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { | ||
508 | chan = private_candidate(mask, device, fn, fn_param); | ||
509 | if (chan) { | ||
510 | /* Found a suitable channel, try to grab, prep, and | ||
511 | * return it. We first set DMA_PRIVATE to disable | ||
512 | * balance_ref_count as this channel will not be | ||
513 | * published in the general-purpose allocator | ||
514 | */ | ||
515 | dma_cap_set(DMA_PRIVATE, device->cap_mask); | ||
516 | err = dma_chan_get(chan); | ||
517 | |||
518 | if (err == -ENODEV) { | ||
519 | pr_debug("%s: %s module removed\n", __func__, | ||
520 | dma_chan_name(chan)); | ||
521 | list_del_rcu(&device->global_node); | ||
522 | } else if (err) | ||
523 | pr_err("dmaengine: failed to get %s: (%d)\n", | ||
524 | dma_chan_name(chan), err); | ||
525 | else | ||
526 | break; | ||
527 | chan = NULL; | ||
528 | } | ||
529 | } | ||
530 | mutex_unlock(&dma_list_mutex); | ||
531 | |||
532 | pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail", | ||
533 | chan ? dma_chan_name(chan) : NULL); | ||
534 | |||
535 | return chan; | ||
536 | } | ||
537 | EXPORT_SYMBOL_GPL(__dma_request_channel); | ||
538 | |||
539 | void dma_release_channel(struct dma_chan *chan) | ||
540 | { | ||
302 | mutex_lock(&dma_list_mutex); | 541 | mutex_lock(&dma_list_mutex); |
303 | list_add_tail(&client->global_node, &dma_client_list); | 542 | WARN_ONCE(chan->client_count != 1, |
543 | "chan reference count %d != 1\n", chan->client_count); | ||
544 | dma_chan_put(chan); | ||
304 | mutex_unlock(&dma_list_mutex); | 545 | mutex_unlock(&dma_list_mutex); |
305 | } | 546 | } |
306 | EXPORT_SYMBOL(dma_async_client_register); | 547 | EXPORT_SYMBOL_GPL(dma_release_channel); |
307 | 548 | ||
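
For exclusive use, a consumer builds a capability mask, optionally supplies a filter, and hands the channel back with dma_release_channel() when finished. A sketch using the dma_request_channel() wrapper that dmaengine.h provides around __dma_request_channel() (the filter criterion here is made up):

```c
#include <linux/dmaengine.h>

/* Hypothetical filter: only accept channels owned by a specific controller. */
static bool my_filter(struct dma_chan *chan, void *param)
{
	struct device *wanted = param;

	return chan->device->dev == wanted;
}

static struct dma_chan *my_grab_channel(struct device *dma_dev)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* marks the device DMA_PRIVATE and pins its module on success */
	return dma_request_channel(mask, my_filter, dma_dev);
}

static void my_drop_channel(struct dma_chan *chan)
{
	if (chan)
		dma_release_channel(chan);
}
```
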
308 | /** | 549 | /** |
309 | * dma_async_client_unregister - unregister a client and free the &dma_client | 550 | * dmaengine_get - register interest in dma_channels |
310 | * @client: &dma_client to free | ||
311 | * | ||
312 | * Force frees any allocated DMA channels, frees the &dma_client memory | ||
313 | */ | 551 | */ |
314 | void dma_async_client_unregister(struct dma_client *client) | 552 | void dmaengine_get(void) |
315 | { | 553 | { |
316 | struct dma_device *device; | 554 | struct dma_device *device, *_d; |
317 | struct dma_chan *chan; | 555 | struct dma_chan *chan; |
318 | enum dma_state_client ack; | 556 | int err; |
319 | |||
320 | if (!client) | ||
321 | return; | ||
322 | 557 | ||
323 | mutex_lock(&dma_list_mutex); | 558 | mutex_lock(&dma_list_mutex); |
324 | /* free all channels the client is holding */ | 559 | dmaengine_ref_count++; |
325 | list_for_each_entry(device, &dma_device_list, global_node) | ||
326 | list_for_each_entry(chan, &device->channels, device_node) { | ||
327 | ack = client->event_callback(client, chan, | ||
328 | DMA_RESOURCE_REMOVED); | ||
329 | 560 | ||
330 | if (ack == DMA_ACK) { | 561 | /* try to grab channels */ |
331 | dma_chan_put(chan); | 562 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { |
332 | chan->client_count--; | 563 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) |
333 | } | 564 | continue; |
565 | list_for_each_entry(chan, &device->channels, device_node) { | ||
566 | err = dma_chan_get(chan); | ||
567 | if (err == -ENODEV) { | ||
568 | /* module removed before we could use it */ | ||
569 | list_del_rcu(&device->global_node); | ||
570 | break; | ||
571 | } else if (err) | ||
572 | pr_err("dmaengine: failed to get %s: (%d)\n", | ||
573 | dma_chan_name(chan), err); | ||
334 | } | 574 | } |
575 | } | ||
335 | 576 | ||
336 | list_del(&client->global_node); | 577 | /* if this is the first reference and there were channels |
578 | * waiting we need to rebalance to get those channels | ||
579 | * incorporated into the channel table | ||
580 | */ | ||
581 | if (dmaengine_ref_count == 1) | ||
582 | dma_channel_rebalance(); | ||
337 | mutex_unlock(&dma_list_mutex); | 583 | mutex_unlock(&dma_list_mutex); |
338 | } | 584 | } |
339 | EXPORT_SYMBOL(dma_async_client_unregister); | 585 | EXPORT_SYMBOL(dmaengine_get); |
340 | 586 | ||
341 | /** | 587 | /** |
342 | * dma_async_client_chan_request - send all available channels to the | 588 | * dmaengine_put - let dma drivers be removed when ref_count == 0 |
343 | * client that satisfy the capability mask | ||
344 | * @client - requester | ||
345 | */ | 589 | */ |
346 | void dma_async_client_chan_request(struct dma_client *client) | 590 | void dmaengine_put(void) |
347 | { | 591 | { |
592 | struct dma_device *device; | ||
593 | struct dma_chan *chan; | ||
594 | |||
348 | mutex_lock(&dma_list_mutex); | 595 | mutex_lock(&dma_list_mutex); |
349 | dma_client_chan_alloc(client); | 596 | dmaengine_ref_count--; |
597 | BUG_ON(dmaengine_ref_count < 0); | ||
598 | /* drop channel references */ | ||
599 | list_for_each_entry(device, &dma_device_list, global_node) { | ||
600 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
601 | continue; | ||
602 | list_for_each_entry(chan, &device->channels, device_node) | ||
603 | dma_chan_put(chan); | ||
604 | } | ||
350 | mutex_unlock(&dma_list_mutex); | 605 | mutex_unlock(&dma_list_mutex); |
351 | } | 606 | } |
352 | EXPORT_SYMBOL(dma_async_client_chan_request); | 607 | EXPORT_SYMBOL(dmaengine_put); |
353 | 608 | ||
354 | /** | 609 | /** |
355 | * dma_async_device_register - registers DMA devices found | 610 | * dma_async_device_register - registers DMA devices found |
@@ -357,9 +612,9 @@ EXPORT_SYMBOL(dma_async_client_chan_request);
357 | */ | 612 | */ |
358 | int dma_async_device_register(struct dma_device *device) | 613 | int dma_async_device_register(struct dma_device *device) |
359 | { | 614 | { |
360 | static int id; | ||
361 | int chancnt = 0, rc; | 615 | int chancnt = 0, rc; |
362 | struct dma_chan* chan; | 616 | struct dma_chan* chan; |
617 | atomic_t *idr_ref; | ||
363 | 618 | ||
364 | if (!device) | 619 | if (!device) |
365 | return -ENODEV; | 620 | return -ENODEV; |
@@ -386,57 +641,83 @@ int dma_async_device_register(struct dma_device *device)
386 | BUG_ON(!device->device_issue_pending); | 641 | BUG_ON(!device->device_issue_pending); |
387 | BUG_ON(!device->dev); | 642 | BUG_ON(!device->dev); |
388 | 643 | ||
389 | init_completion(&device->done); | 644 | idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); |
390 | kref_init(&device->refcount); | 645 | if (!idr_ref) |
391 | 646 | return -ENOMEM; | |
647 | atomic_set(idr_ref, 0); | ||
648 | idr_retry: | ||
649 | if (!idr_pre_get(&dma_idr, GFP_KERNEL)) | ||
650 | return -ENOMEM; | ||
392 | mutex_lock(&dma_list_mutex); | 651 | mutex_lock(&dma_list_mutex); |
393 | device->dev_id = id++; | 652 | rc = idr_get_new(&dma_idr, NULL, &device->dev_id); |
394 | mutex_unlock(&dma_list_mutex); | 653 | mutex_unlock(&dma_list_mutex); |
654 | if (rc == -EAGAIN) | ||
655 | goto idr_retry; | ||
656 | else if (rc != 0) | ||
657 | return rc; | ||
395 | 658 | ||
396 | /* represent channels in sysfs. Probably want devs too */ | 659 | /* represent channels in sysfs. Probably want devs too */ |
397 | list_for_each_entry(chan, &device->channels, device_node) { | 660 | list_for_each_entry(chan, &device->channels, device_node) { |
398 | chan->local = alloc_percpu(typeof(*chan->local)); | 661 | chan->local = alloc_percpu(typeof(*chan->local)); |
399 | if (chan->local == NULL) | 662 | if (chan->local == NULL) |
400 | continue; | 663 | continue; |
664 | chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); | ||
665 | if (chan->dev == NULL) { | ||
666 | free_percpu(chan->local); | ||
667 | continue; | ||
668 | } | ||
401 | 669 | ||
402 | chan->chan_id = chancnt++; | 670 | chan->chan_id = chancnt++; |
403 | chan->dev.class = &dma_devclass; | 671 | chan->dev->device.class = &dma_devclass; |
404 | chan->dev.parent = device->dev; | 672 | chan->dev->device.parent = device->dev; |
405 | dev_set_name(&chan->dev, "dma%dchan%d", | 673 | chan->dev->chan = chan; |
674 | chan->dev->idr_ref = idr_ref; | ||
675 | chan->dev->dev_id = device->dev_id; | ||
676 | atomic_inc(idr_ref); | ||
677 | dev_set_name(&chan->dev->device, "dma%dchan%d", | ||
406 | device->dev_id, chan->chan_id); | 678 | device->dev_id, chan->chan_id); |
407 | 679 | ||
408 | rc = device_register(&chan->dev); | 680 | rc = device_register(&chan->dev->device); |
409 | if (rc) { | 681 | if (rc) { |
410 | chancnt--; | ||
411 | free_percpu(chan->local); | 682 | free_percpu(chan->local); |
412 | chan->local = NULL; | 683 | chan->local = NULL; |
413 | goto err_out; | 684 | goto err_out; |
414 | } | 685 | } |
415 | |||
416 | /* One for the channel, one of the class device */ | ||
417 | kref_get(&device->refcount); | ||
418 | kref_get(&device->refcount); | ||
419 | kref_init(&chan->refcount); | ||
420 | chan->client_count = 0; | 686 | chan->client_count = 0; |
421 | chan->slow_ref = 0; | ||
422 | INIT_RCU_HEAD(&chan->rcu); | ||
423 | } | 687 | } |
688 | device->chancnt = chancnt; | ||
424 | 689 | ||
425 | mutex_lock(&dma_list_mutex); | 690 | mutex_lock(&dma_list_mutex); |
426 | list_add_tail(&device->global_node, &dma_device_list); | 691 | /* take references on public channels */ |
692 | if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) | ||
693 | list_for_each_entry(chan, &device->channels, device_node) { | ||
694 | /* if clients are already waiting for channels we need | ||
695 | * to take references on their behalf | ||
696 | */ | ||
697 | if (dma_chan_get(chan) == -ENODEV) { | ||
698 | /* note we can only get here for the first | ||
699 | * channel as the remaining channels are | ||
700 | * guaranteed to get a reference | ||
701 | */ | ||
702 | rc = -ENODEV; | ||
703 | mutex_unlock(&dma_list_mutex); | ||
704 | goto err_out; | ||
705 | } | ||
706 | } | ||
707 | list_add_tail_rcu(&device->global_node, &dma_device_list); | ||
708 | dma_channel_rebalance(); | ||
427 | mutex_unlock(&dma_list_mutex); | 709 | mutex_unlock(&dma_list_mutex); |
428 | 710 | ||
429 | dma_clients_notify_available(); | ||
430 | |||
431 | return 0; | 711 | return 0; |
432 | 712 | ||
433 | err_out: | 713 | err_out: |
434 | list_for_each_entry(chan, &device->channels, device_node) { | 714 | list_for_each_entry(chan, &device->channels, device_node) { |
435 | if (chan->local == NULL) | 715 | if (chan->local == NULL) |
436 | continue; | 716 | continue; |
437 | kref_put(&device->refcount, dma_async_device_cleanup); | 717 | mutex_lock(&dma_list_mutex); |
438 | device_unregister(&chan->dev); | 718 | chan->dev->chan = NULL; |
439 | chancnt--; | 719 | mutex_unlock(&dma_list_mutex); |
720 | device_unregister(&chan->dev->device); | ||
440 | free_percpu(chan->local); | 721 | free_percpu(chan->local); |
441 | } | 722 | } |
442 | return rc; | 723 | return rc; |
@@ -444,37 +725,30 @@ err_out:
444 | EXPORT_SYMBOL(dma_async_device_register); | 725 | EXPORT_SYMBOL(dma_async_device_register); |
445 | 726 | ||
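
On the provider side a driver fills in a struct dma_device, threads its channels onto ->channels, and registers; at remove time it just unregisters, since the module references taken above keep it from disappearing while clients hold channels. A rough sketch with hypothetical my_* hook implementations (prototypes written to match the 2.6.29 dma_device ops):

```c
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define MY_NR_CHANNELS	4

struct my_chan {
	struct dma_chan common;
	/* ... driver-private channel state ... */
};
static struct my_chan my_channels[MY_NR_CHANNELS];

static int my_alloc_chan_resources(struct dma_chan *chan);
static void my_free_chan_resources(struct dma_chan *chan);
static struct dma_async_tx_descriptor *my_prep_memcpy(struct dma_chan *chan,
		dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags);
static enum dma_status my_is_tx_complete(struct dma_chan *chan,
		dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used);
static void my_issue_pending(struct dma_chan *chan);

static int my_dma_probe(struct platform_device *pdev)
{
	struct dma_device *dd;
	int i;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dd->dev = &pdev->dev;
	dd->device_alloc_chan_resources = my_alloc_chan_resources;
	dd->device_free_chan_resources = my_free_chan_resources;
	dd->device_prep_dma_memcpy = my_prep_memcpy;
	dd->device_is_tx_complete = my_is_tx_complete;
	dd->device_issue_pending = my_issue_pending;

	INIT_LIST_HEAD(&dd->channels);
	for (i = 0; i < MY_NR_CHANNELS; i++) {
		my_channels[i].common.device = dd;
		list_add_tail(&my_channels[i].common.device_node, &dd->channels);
	}

	platform_set_drvdata(pdev, dd);
	return dma_async_device_register(dd);	/* may take client references */
}

static int my_dma_remove(struct platform_device *pdev)
{
	struct dma_device *dd = platform_get_drvdata(pdev);

	dma_async_device_unregister(dd);	/* channels must be idle by now */
	kfree(dd);
	return 0;
}
```
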
446 | /** | 727 | /** |
447 | * dma_async_device_cleanup - function called when all references are released | 728 | * dma_async_device_unregister - unregister a DMA device |
448 | * @kref: kernel reference object | ||
449 | */ | ||
450 | static void dma_async_device_cleanup(struct kref *kref) | ||
451 | { | ||
452 | struct dma_device *device; | ||
453 | |||
454 | device = container_of(kref, struct dma_device, refcount); | ||
455 | complete(&device->done); | ||
456 | } | ||
457 | |||
458 | /** | ||
459 | * dma_async_device_unregister - unregisters DMA devices | ||
460 | * @device: &dma_device | 729 | * @device: &dma_device |
730 | * | ||
731 | * This routine is called by dma driver exit routines, dmaengine holds module | ||
732 | * references to prevent it being called while channels are in use. | ||
461 | */ | 733 | */ |
462 | void dma_async_device_unregister(struct dma_device *device) | 734 | void dma_async_device_unregister(struct dma_device *device) |
463 | { | 735 | { |
464 | struct dma_chan *chan; | 736 | struct dma_chan *chan; |
465 | 737 | ||
466 | mutex_lock(&dma_list_mutex); | 738 | mutex_lock(&dma_list_mutex); |
467 | list_del(&device->global_node); | 739 | list_del_rcu(&device->global_node); |
740 | dma_channel_rebalance(); | ||
468 | mutex_unlock(&dma_list_mutex); | 741 | mutex_unlock(&dma_list_mutex); |
469 | 742 | ||
470 | list_for_each_entry(chan, &device->channels, device_node) { | 743 | list_for_each_entry(chan, &device->channels, device_node) { |
471 | dma_clients_notify_removed(chan); | 744 | WARN_ONCE(chan->client_count, |
472 | device_unregister(&chan->dev); | 745 | "%s called while %d clients hold a reference\n", |
473 | dma_chan_release(chan); | 746 | __func__, chan->client_count); |
747 | mutex_lock(&dma_list_mutex); | ||
748 | chan->dev->chan = NULL; | ||
749 | mutex_unlock(&dma_list_mutex); | ||
750 | device_unregister(&chan->dev->device); | ||
474 | } | 751 | } |
475 | |||
476 | kref_put(&device->refcount, dma_async_device_cleanup); | ||
477 | wait_for_completion(&device->done); | ||
478 | } | 752 | } |
479 | EXPORT_SYMBOL(dma_async_device_unregister); | 753 | EXPORT_SYMBOL(dma_async_device_unregister); |
480 | 754 | ||
@@ -626,10 +900,96 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
626 | } | 900 | } |
627 | EXPORT_SYMBOL(dma_async_tx_descriptor_init); | 901 | EXPORT_SYMBOL(dma_async_tx_descriptor_init); |
628 | 902 | ||
903 | /* dma_wait_for_async_tx - spin wait for a transaction to complete | ||
904 | * @tx: in-flight transaction to wait on | ||
905 | * | ||
906 | * This routine assumes that tx was obtained from a call to async_memcpy, | ||
907 | * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped | ||
908 | * and submitted). Walking the parent chain is only meant to cover for DMA | ||
909 | * drivers that do not implement the DMA_INTERRUPT capability and may race with | ||
910 | * the driver's descriptor cleanup routine. | ||
911 | */ | ||
912 | enum dma_status | ||
913 | dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | ||
914 | { | ||
915 | enum dma_status status; | ||
916 | struct dma_async_tx_descriptor *iter; | ||
917 | struct dma_async_tx_descriptor *parent; | ||
918 | |||
919 | if (!tx) | ||
920 | return DMA_SUCCESS; | ||
921 | |||
922 | WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for" | ||
923 | " %s\n", __func__, dma_chan_name(tx->chan)); | ||
924 | |||
925 | /* poll through the dependency chain, return when tx is complete */ | ||
926 | do { | ||
927 | iter = tx; | ||
928 | |||
929 | /* find the root of the unsubmitted dependency chain */ | ||
930 | do { | ||
931 | parent = iter->parent; | ||
932 | if (!parent) | ||
933 | break; | ||
934 | else | ||
935 | iter = parent; | ||
936 | } while (parent); | ||
937 | |||
938 | /* there is a small window for ->parent == NULL and | ||
939 | * ->cookie == -EBUSY | ||
940 | */ | ||
941 | while (iter->cookie == -EBUSY) | ||
942 | cpu_relax(); | ||
943 | |||
944 | status = dma_sync_wait(iter->chan, iter->cookie); | ||
945 | } while (status == DMA_IN_PROGRESS || (iter != tx)); | ||
946 | |||
947 | return status; | ||
948 | } | ||
949 | EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); | ||
950 | |||
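
dma_wait_for_async_tx() targets async_tx consumers that need to poll a descriptor to completion. A sketch of the intended call pattern, assuming the 2.6.29-era async_memcpy() prototype from crypto/async_tx (page allocation omitted):

```c
#include <linux/async_tx.h>

/* Copy one page via the async_tx API and spin until it completes. */
static void my_copy_page_sync(struct page *dest, struct page *src)
{
	struct dma_async_tx_descriptor *tx;

	/* falls back to a synchronous memcpy (tx == NULL) when no channel exists */
	tx = async_memcpy(dest, src, 0, 0, PAGE_SIZE,
			  ASYNC_TX_ACK, NULL, NULL, NULL);
	if (dma_wait_for_async_tx(tx) == DMA_ERROR)
		pr_err("my_copy_page_sync: offload copy failed\n");
}
```
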
951 | /* dma_run_dependencies - helper routine for dma drivers to process | ||
952 | * (start) dependent operations on their target channel | ||
953 | * @tx: transaction with dependencies | ||
954 | */ | ||
955 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx) | ||
956 | { | ||
957 | struct dma_async_tx_descriptor *dep = tx->next; | ||
958 | struct dma_async_tx_descriptor *dep_next; | ||
959 | struct dma_chan *chan; | ||
960 | |||
961 | if (!dep) | ||
962 | return; | ||
963 | |||
964 | chan = dep->chan; | ||
965 | |||
966 | /* keep submitting up until a channel switch is detected | ||
967 | * in that case we will be called again as a result of | ||
968 | * processing the interrupt from async_tx_channel_switch | ||
969 | */ | ||
970 | for (; dep; dep = dep_next) { | ||
971 | spin_lock_bh(&dep->lock); | ||
972 | dep->parent = NULL; | ||
973 | dep_next = dep->next; | ||
974 | if (dep_next && dep_next->chan == chan) | ||
975 | dep->next = NULL; /* ->next will be submitted */ | ||
976 | else | ||
977 | dep_next = NULL; /* submit current dep and terminate */ | ||
978 | spin_unlock_bh(&dep->lock); | ||
979 | |||
980 | dep->tx_submit(dep); | ||
981 | } | ||
982 | |||
983 | chan->device->device_issue_pending(chan); | ||
984 | } | ||
985 | EXPORT_SYMBOL_GPL(dma_run_dependencies); | ||
986 | |||
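
dma_run_dependencies() is meant to be called from a DMA driver's descriptor clean-up path once hardware reports a transaction complete, so dependent operations queued against other channels get submitted. A sketch of where it slots in (struct my_desc and the surrounding driver are hypothetical):

```c
/* Hypothetical per-descriptor bookkeeping in an offload driver. */
struct my_desc {
	struct dma_async_tx_descriptor txd;
	struct list_head node;
};

/* Called from the driver's completion tasklet for each finished descriptor. */
static void my_cleanup_descriptor(struct my_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;

	if (txd->callback)
		txd->callback(txd->callback_param);	/* notify the client */

	dma_run_dependencies(txd);	/* start dependent ops, kick their channel */
}
```
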
629 | static int __init dma_bus_init(void) | 987 | static int __init dma_bus_init(void) |
630 | { | 988 | { |
989 | idr_init(&dma_idr); | ||
631 | mutex_init(&dma_list_mutex); | 990 | mutex_init(&dma_list_mutex); |
632 | return class_register(&dma_devclass); | 991 | return class_register(&dma_devclass); |
633 | } | 992 | } |
634 | subsys_initcall(dma_bus_init); | 993 | arch_initcall(dma_bus_init); |
994 | |||
635 | 995 | ||