Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--	drivers/dma/dmaengine.c | 419
1 file changed, 312 insertions(+), 107 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 322ee2984e3d..82489923af09 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -37,11 +37,11 @@
  * Each device has a channels list, which runs unlocked but is never modified
  * once the device is registered, it's just setup by the driver.
  *
- * Each client has a channels list, it's only modified under the client->lock
- * and in an RCU callback, so it's safe to read under rcu_read_lock().
+ * Each client is responsible for keeping track of the channels it uses. See
+ * the definition of dma_event_callback in dmaengine.h.
  *
  * Each device has a kref, which is initialized to 1 when the device is
- * registered. A kref_put is done for each class_device registered. When the
+ * registered. A kref_get is done for each class_device registered. When the
  * class_device is released, the coresponding kref_put is done in the release
  * method. Every time one of the device's channels is allocated to a client,
  * a kref_get occurs. When the channel is freed, the coresponding kref_put
@@ -51,14 +51,17 @@
  * references to finish.
  *
  * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
- * with a kref and a per_cpu local_t. A single reference is set when on an
- * ADDED event, and removed with a REMOVE event. Net DMA client takes an
- * extra reference per outstanding transaction. The relase function does a
- * kref_put on the device. -ChrisL
+ * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
+ * signals that it wants to use a channel, and dma_chan_put is called when
+ * a channel is removed or a client using it is unregesitered. A client can
+ * take extra references per outstanding transaction, as is the case with
+ * the NET DMA client. The release function does a kref_put on the device.
+ * -ChrisL, DanW
  */

 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/mm.h>
 #include <linux/device.h>
 #include <linux/dmaengine.h>
 #include <linux/hardirq.h>
@@ -66,6 +69,7 @@
 #include <linux/percpu.h>
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
+#include <linux/jiffies.h>

 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
@@ -100,8 +104,19 @@ static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
 static ssize_t show_in_use(struct class_device *cd, char *buf)
 {
 	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+	int in_use = 0;
+
+	if (unlikely(chan->slow_ref) &&
+		atomic_read(&chan->refcount.refcount) > 1)
+		in_use = 1;
+	else {
+		if (local_read(&(per_cpu_ptr(chan->local,
+			get_cpu())->refcount)) > 0)
+			in_use = 1;
+		put_cpu();
+	}

-	return sprintf(buf, "%d\n", (chan->client ? 1 : 0));
+	return sprintf(buf, "%d\n", in_use);
 }

 static struct class_device_attribute dma_class_attrs[] = {
@@ -127,43 +142,72 @@ static struct class dma_devclass = {

 /* --- client and device registration --- */

+#define dma_chan_satisfies_mask(chan, mask) \
+	__dma_chan_satisfies_mask((chan), &(mask))
+static int
+__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
+{
+	dma_cap_mask_t has;
+
+	bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
+		DMA_TX_TYPE_END);
+	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
+}
+
 /**
- * dma_client_chan_alloc - try to allocate a channel to a client
+ * dma_client_chan_alloc - try to allocate channels to a client
  * @client: &dma_client
  *
  * Called with dma_list_mutex held.
  */
-static struct dma_chan *dma_client_chan_alloc(struct dma_client *client)
+static void dma_client_chan_alloc(struct dma_client *client)
 {
 	struct dma_device *device;
 	struct dma_chan *chan;
-	unsigned long flags;
 	int desc; /* allocated descriptor count */
+	enum dma_state_client ack;

-	/* Find a channel, any DMA engine will do */
-	list_for_each_entry(device, &dma_device_list, global_node) {
+	/* Find a channel */
+	list_for_each_entry(device, &dma_device_list, global_node)
 		list_for_each_entry(chan, &device->channels, device_node) {
-			if (chan->client)
+			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
 				continue;

 			desc = chan->device->device_alloc_chan_resources(chan);
 			if (desc >= 0) {
-				kref_get(&device->refcount);
-				kref_init(&chan->refcount);
-				chan->slow_ref = 0;
-				INIT_RCU_HEAD(&chan->rcu);
-				chan->client = client;
-				spin_lock_irqsave(&client->lock, flags);
-				list_add_tail_rcu(&chan->client_node,
-						&client->channels);
-				spin_unlock_irqrestore(&client->lock, flags);
-				return chan;
+				ack = client->event_callback(client,
+						chan,
+						DMA_RESOURCE_AVAILABLE);
+
+				/* we are done once this client rejects
+				 * an available resource
+				 */
+				if (ack == DMA_ACK) {
+					dma_chan_get(chan);
+					kref_get(&device->refcount);
+				} else if (ack == DMA_NAK)
+					return;
 			}
 		}
-	}
+}
+
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
+{
+	enum dma_status status;
+	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
+
+	dma_async_issue_pending(chan);
+	do {
+		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
+			printk(KERN_ERR "dma_sync_wait_timeout!\n");
+			return DMA_ERROR;
+		}
+	} while (status == DMA_IN_PROGRESS);

-	return NULL;
+	return status;
 }
+EXPORT_SYMBOL(dma_sync_wait);

 /**
  * dma_chan_cleanup - release a DMA channel's resources
@@ -173,7 +217,6 @@ void dma_chan_cleanup(struct kref *kref)
 {
 	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
 	chan->device->device_free_chan_resources(chan);
-	chan->client = NULL;
 	kref_put(&chan->device->refcount, dma_async_device_cleanup);
 }
 EXPORT_SYMBOL(dma_chan_cleanup);
@@ -189,7 +232,7 @@ static void dma_chan_free_rcu(struct rcu_head *rcu)
 	kref_put(&chan->refcount, dma_chan_cleanup);
 }

-static void dma_client_chan_free(struct dma_chan *chan)
+static void dma_chan_release(struct dma_chan *chan)
 {
 	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
 	chan->slow_ref = 1;
@@ -197,70 +240,57 @@ static void dma_client_chan_free(struct dma_chan *chan)
 }

 /**
- * dma_chans_rebalance - reallocate channels to clients
- *
- * When the number of DMA channel in the system changes,
- * channels need to be rebalanced among clients.
+ * dma_chans_notify_available - broadcast available channels to the clients
  */
-static void dma_chans_rebalance(void)
+static void dma_clients_notify_available(void)
 {
 	struct dma_client *client;
-	struct dma_chan *chan;
-	unsigned long flags;

 	mutex_lock(&dma_list_mutex);

-	list_for_each_entry(client, &dma_client_list, global_node) {
-		while (client->chans_desired > client->chan_count) {
-			chan = dma_client_chan_alloc(client);
-			if (!chan)
-				break;
-			client->chan_count++;
-			client->event_callback(client,
-					chan,
-					DMA_RESOURCE_ADDED);
-		}
-		while (client->chans_desired < client->chan_count) {
-			spin_lock_irqsave(&client->lock, flags);
-			chan = list_entry(client->channels.next,
-					struct dma_chan,
-					client_node);
-			list_del_rcu(&chan->client_node);
-			spin_unlock_irqrestore(&client->lock, flags);
-			client->chan_count--;
-			client->event_callback(client,
-					chan,
-					DMA_RESOURCE_REMOVED);
-			dma_client_chan_free(chan);
-		}
-	}
+	list_for_each_entry(client, &dma_client_list, global_node)
+		dma_client_chan_alloc(client);

 	mutex_unlock(&dma_list_mutex);
 }

 /**
- * dma_async_client_register - allocate and register a &dma_client
- * @event_callback: callback for notification of channel addition/removal
+ * dma_chans_notify_available - tell the clients that a channel is going away
+ * @chan: channel on its way out
  */
-struct dma_client *dma_async_client_register(dma_event_callback event_callback)
+static void dma_clients_notify_removed(struct dma_chan *chan)
 {
 	struct dma_client *client;
+	enum dma_state_client ack;

-	client = kzalloc(sizeof(*client), GFP_KERNEL);
-	if (!client)
-		return NULL;
+	mutex_lock(&dma_list_mutex);

-	INIT_LIST_HEAD(&client->channels);
-	spin_lock_init(&client->lock);
-	client->chans_desired = 0;
-	client->chan_count = 0;
-	client->event_callback = event_callback;
+	list_for_each_entry(client, &dma_client_list, global_node) {
+		ack = client->event_callback(client, chan,
+				DMA_RESOURCE_REMOVED);
+
+		/* client was holding resources for this channel so
+		 * free it
+		 */
+		if (ack == DMA_ACK) {
+			dma_chan_put(chan);
+			kref_put(&chan->device->refcount,
+				dma_async_device_cleanup);
+		}
+	}

+	mutex_unlock(&dma_list_mutex);
+}
+
+/**
+ * dma_async_client_register - register a &dma_client
+ * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
+ */
+void dma_async_client_register(struct dma_client *client)
+{
 	mutex_lock(&dma_list_mutex);
 	list_add_tail(&client->global_node, &dma_client_list);
 	mutex_unlock(&dma_list_mutex);
-
-	return client;
 }
 EXPORT_SYMBOL(dma_async_client_register);

@@ -272,40 +302,42 @@ EXPORT_SYMBOL(dma_async_client_register);
  */
 void dma_async_client_unregister(struct dma_client *client)
 {
+	struct dma_device *device;
 	struct dma_chan *chan;
+	enum dma_state_client ack;

 	if (!client)
 		return;

-	rcu_read_lock();
-	list_for_each_entry_rcu(chan, &client->channels, client_node)
-		dma_client_chan_free(chan);
-	rcu_read_unlock();
-
 	mutex_lock(&dma_list_mutex);
+	/* free all channels the client is holding */
+	list_for_each_entry(device, &dma_device_list, global_node)
+		list_for_each_entry(chan, &device->channels, device_node) {
+			ack = client->event_callback(client, chan,
+				DMA_RESOURCE_REMOVED);
+
+			if (ack == DMA_ACK) {
+				dma_chan_put(chan);
+				kref_put(&chan->device->refcount,
+					dma_async_device_cleanup);
+			}
+		}
+
 	list_del(&client->global_node);
 	mutex_unlock(&dma_list_mutex);
-
-	kfree(client);
-	dma_chans_rebalance();
 }
 EXPORT_SYMBOL(dma_async_client_unregister);

 /**
- * dma_async_client_chan_request - request DMA channels
- * @client: &dma_client
- * @number: count of DMA channels requested
- *
- * Clients call dma_async_client_chan_request() to specify how many
- * DMA channels they need, 0 to free all currently allocated.
- * The resulting allocations/frees are indicated to the client via the
- * event callback.
+ * dma_async_client_chan_request - send all available channels to the
+ * client that satisfy the capability mask
+ * @client - requester
  */
-void dma_async_client_chan_request(struct dma_client *client,
-			unsigned int number)
+void dma_async_client_chan_request(struct dma_client *client)
 {
-	client->chans_desired = number;
-	dma_chans_rebalance();
+	mutex_lock(&dma_list_mutex);
+	dma_client_chan_alloc(client);
+	mutex_unlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL(dma_async_client_chan_request);

@@ -316,12 +348,31 @@ EXPORT_SYMBOL(dma_async_client_chan_request);
 int dma_async_device_register(struct dma_device *device)
 {
 	static int id;
-	int chancnt = 0;
+	int chancnt = 0, rc;
 	struct dma_chan* chan;

 	if (!device)
 		return -ENODEV;

+	/* validate device routines */
+	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
+		!device->device_prep_dma_memcpy);
+	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
+		!device->device_prep_dma_xor);
+	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
+		!device->device_prep_dma_zero_sum);
+	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
+		!device->device_prep_dma_memset);
+	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
+		!device->device_prep_dma_interrupt);
+
+	BUG_ON(!device->device_alloc_chan_resources);
+	BUG_ON(!device->device_free_chan_resources);
+	BUG_ON(!device->device_dependency_added);
+	BUG_ON(!device->device_is_tx_complete);
+	BUG_ON(!device->device_issue_pending);
+	BUG_ON(!device->dev);
+
 	init_completion(&device->done);
 	kref_init(&device->refcount);
 	device->dev_id = id++;
@@ -338,17 +389,38 @@ int dma_async_device_register(struct dma_device *device)
 		snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",
 			 device->dev_id, chan->chan_id);

+		rc = class_device_register(&chan->class_dev);
+		if (rc) {
+			chancnt--;
+			free_percpu(chan->local);
+			chan->local = NULL;
+			goto err_out;
+		}
+
 		kref_get(&device->refcount);
-		class_device_register(&chan->class_dev);
+		kref_init(&chan->refcount);
+		chan->slow_ref = 0;
+		INIT_RCU_HEAD(&chan->rcu);
 	}

 	mutex_lock(&dma_list_mutex);
 	list_add_tail(&device->global_node, &dma_device_list);
 	mutex_unlock(&dma_list_mutex);

-	dma_chans_rebalance();
+	dma_clients_notify_available();

 	return 0;
+
+err_out:
+	list_for_each_entry(chan, &device->channels, device_node) {
+		if (chan->local == NULL)
+			continue;
+		kref_put(&device->refcount, dma_async_device_cleanup);
+		class_device_unregister(&chan->class_dev);
+		chancnt--;
+		free_percpu(chan->local);
+	}
+	return rc;
 }
 EXPORT_SYMBOL(dma_async_device_register);

@@ -371,32 +443,165 @@ static void dma_async_device_cleanup(struct kref *kref)
 void dma_async_device_unregister(struct dma_device *device)
 {
 	struct dma_chan *chan;
-	unsigned long flags;

 	mutex_lock(&dma_list_mutex);
 	list_del(&device->global_node);
 	mutex_unlock(&dma_list_mutex);

 	list_for_each_entry(chan, &device->channels, device_node) {
-		if (chan->client) {
-			spin_lock_irqsave(&chan->client->lock, flags);
-			list_del(&chan->client_node);
-			chan->client->chan_count--;
-			spin_unlock_irqrestore(&chan->client->lock, flags);
-			chan->client->event_callback(chan->client,
-					chan,
-					DMA_RESOURCE_REMOVED);
-			dma_client_chan_free(chan);
-		}
+		dma_clients_notify_removed(chan);
 		class_device_unregister(&chan->class_dev);
+		dma_chan_release(chan);
 	}
-	dma_chans_rebalance();

 	kref_put(&device->refcount, dma_async_device_cleanup);
 	wait_for_completion(&device->done);
 }
 EXPORT_SYMBOL(dma_async_device_unregister);

+/**
+ * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
+ * @chan: DMA channel to offload copy to
+ * @dest: destination address (virtual)
+ * @src: source address (virtual)
+ * @len: length
+ *
+ * Both @dest and @src must be mappable to a bus address according to the
+ * DMA mapping API rules for streaming mappings.
+ * Both @dest and @src must stay memory resident (kernel memory or locked
+ * user space pages).
+ */
+dma_cookie_t
+dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
+			void *src, size_t len)
+{
+	struct dma_device *dev = chan->device;
+	struct dma_async_tx_descriptor *tx;
+	dma_addr_t addr;
+	dma_cookie_t cookie;
+	int cpu;
+
+	tx = dev->device_prep_dma_memcpy(chan, len, 0);
+	if (!tx)
+		return -ENOMEM;
+
+	tx->ack = 1;
+	tx->callback = NULL;
+	addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
+	tx->tx_set_src(addr, tx, 0);
+	addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
+	tx->tx_set_dest(addr, tx, 0);
+	cookie = tx->tx_submit(tx);
+
+	cpu = get_cpu();
+	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+	put_cpu();
+
+	return cookie;
+}
+EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
+
+/**
+ * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
+ * @chan: DMA channel to offload copy to
+ * @page: destination page
+ * @offset: offset in page to copy to
+ * @kdata: source address (virtual)
+ * @len: length
+ *
+ * Both @page/@offset and @kdata must be mappable to a bus address according
+ * to the DMA mapping API rules for streaming mappings.
+ * Both @page/@offset and @kdata must stay memory resident (kernel memory or
+ * locked user space pages)
+ */
+dma_cookie_t
+dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
+			unsigned int offset, void *kdata, size_t len)
+{
+	struct dma_device *dev = chan->device;
+	struct dma_async_tx_descriptor *tx;
+	dma_addr_t addr;
+	dma_cookie_t cookie;
+	int cpu;
+
+	tx = dev->device_prep_dma_memcpy(chan, len, 0);
+	if (!tx)
+		return -ENOMEM;
+
+	tx->ack = 1;
+	tx->callback = NULL;
+	addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
+	tx->tx_set_src(addr, tx, 0);
+	addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
+	tx->tx_set_dest(addr, tx, 0);
+	cookie = tx->tx_submit(tx);
+
+	cpu = get_cpu();
+	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+	put_cpu();
+
+	return cookie;
+}
+EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
+
+/**
+ * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
+ * @chan: DMA channel to offload copy to
+ * @dest_pg: destination page
+ * @dest_off: offset in page to copy to
+ * @src_pg: source page
+ * @src_off: offset in page to copy from
+ * @len: length
+ *
+ * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
+ * address according to the DMA mapping API rules for streaming mappings.
+ * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
+ * (kernel memory or locked user space pages).
+ */
+dma_cookie_t
+dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
+	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
+	size_t len)
+{
+	struct dma_device *dev = chan->device;
+	struct dma_async_tx_descriptor *tx;
+	dma_addr_t addr;
+	dma_cookie_t cookie;
+	int cpu;
+
+	tx = dev->device_prep_dma_memcpy(chan, len, 0);
+	if (!tx)
+		return -ENOMEM;
+
+	tx->ack = 1;
+	tx->callback = NULL;
+	addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+	tx->tx_set_src(addr, tx, 0);
+	addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
+	tx->tx_set_dest(addr, tx, 0);
+	cookie = tx->tx_submit(tx);
+
+	cpu = get_cpu();
+	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+	put_cpu();
+
+	return cookie;
+}
+EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
+
+void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
+	struct dma_chan *chan)
+{
+	tx->chan = chan;
+	spin_lock_init(&tx->lock);
+	INIT_LIST_HEAD(&tx->depend_node);
+	INIT_LIST_HEAD(&tx->depend_list);
+}
+EXPORT_SYMBOL(dma_async_tx_descriptor_init);
+
 static int __init dma_bus_init(void)
 {
 	mutex_init(&dma_list_mutex);
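
A minimal usage sketch follows for readers who want to see the reworked client API from the consumer side. It is an illustration, not part of the patch: the dma_event_callback signature, the dma_cap_zero()/dma_cap_set() helpers and the DMA_SUCCESS status value are assumed from the dmaengine.h that accompanies this series, and every my_* identifier is hypothetical.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmaengine.h>

static struct dma_chan *my_chan;	/* channel granted to us, if any */

/* decide whether to keep a channel the core offers or withdraws */
static enum dma_state_client
my_event(struct dma_client *client, struct dma_chan *chan,
	 enum dma_state state)
{
	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		if (my_chan)
			return DMA_NAK;	/* one channel is enough, stop offering */
		my_chan = chan;
		return DMA_ACK;		/* core takes a reference for us */
	case DMA_RESOURCE_REMOVED:
		if (chan != my_chan)
			return DMA_NAK;
		my_chan = NULL;
		return DMA_ACK;		/* let the core drop our reference */
	default:
		return DMA_NAK;
	}
}

static struct dma_client my_client = {
	.event_callback	= my_event,
};

static int __init my_init(void)
{
	/* advertise which operations we care about, then ask for channels */
	dma_cap_zero(my_client.cap_mask);
	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
	dma_async_client_register(&my_client);
	dma_async_client_chan_request(&my_client);
	return 0;
}

/* offloaded copy using the new helper, waited on synchronously */
static int my_copy(void *dst, void *src, size_t len)
{
	dma_cookie_t cookie;

	if (!my_chan)
		return -ENODEV;
	cookie = dma_async_memcpy_buf_to_buf(my_chan, dst, src, len);
	if (cookie < 0)
		return cookie;
	return dma_sync_wait(my_chan, cookie) == DMA_SUCCESS ? 0 : -EIO;
}

static void __exit my_exit(void)
{
	dma_async_client_unregister(&my_client);
}

The behavioural shift is visible here: rather than requesting a count of channels and reacting to DMA_RESOURCE_ADDED events, a client now advertises a capability mask and answers DMA_ACK or DMA_NAK for each channel the core offers or withdraws.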