Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dca/dca-core.c          2
-rw-r--r--  drivers/dma/Kconfig             2
-rw-r--r--  drivers/dma/dmaengine.c       778
-rw-r--r--  drivers/dma/dmatest.c         129
-rw-r--r--  drivers/dma/dw_dmac.c         119
-rw-r--r--  drivers/dma/fsldma.c            5
-rw-r--r--  drivers/dma/ioat.c             92
-rw-r--r--  drivers/dma/ioat_dma.c         18
-rw-r--r--  drivers/dma/iop-adma.c         30
-rw-r--r--  drivers/dma/mv_xor.c           11
-rw-r--r--  drivers/mmc/host/atmel-mci.c  103
11 files changed, 757 insertions, 532 deletions
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index d883e1b8bb8c..55433849bfa6 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -270,6 +270,6 @@ static void __exit dca_exit(void)
 	dca_sysfs_exit();
 }
 
-subsys_initcall(dca_init);
+arch_initcall(dca_init);
 module_exit(dca_exit);
 
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 904e57558bb5..e34b06420816 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -33,7 +33,6 @@ config INTEL_IOATDMA
 config INTEL_IOP_ADMA
 	tristate "Intel IOP ADMA support"
 	depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
-	select ASYNC_CORE
 	select DMA_ENGINE
 	help
 	  Enable support for the Intel(R) IOP Series RAID engines.
@@ -59,7 +58,6 @@ config FSL_DMA
 config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
-	select ASYNC_CORE
 	select DMA_ENGINE
 	---help---
 	  Enable support for the Marvell XOR engine.
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 657996517374..403dbe781122 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -31,32 +31,18 @@
  *
  * LOCKING:
  *
- * The subsystem keeps two global lists, dma_device_list and dma_client_list.
- * Both of these are protected by a mutex, dma_list_mutex.
+ * The subsystem keeps a global list of dma_device structs it is protected by a
+ * mutex, dma_list_mutex.
+ *
+ * A subsystem can get access to a channel by calling dmaengine_get() followed
+ * by dma_find_channel(), or if it has need for an exclusive channel it can call
+ * dma_request_channel().  Once a channel is allocated a reference is taken
+ * against its corresponding driver to disable removal.
  *
  * Each device has a channels list, which runs unlocked but is never modified
  * once the device is registered, it's just setup by the driver.
  *
- * Each client is responsible for keeping track of the channels it uses.  See
- * the definition of dma_event_callback in dmaengine.h.
- *
- * Each device has a kref, which is initialized to 1 when the device is
- * registered. A kref_get is done for each device registered.  When the
- * device is released, the corresponding kref_put is done in the release
- * method. Every time one of the device's channels is allocated to a client,
- * a kref_get occurs.  When the channel is freed, the corresponding kref_put
- * happens. The device's release function does a completion, so
- * unregister_device does a remove event, device_unregister, a kref_put
- * for the first reference, then waits on the completion for all other
- * references to finish.
- *
- * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
- * with a kref and a per_cpu local_t.  A dma_chan_get is called when a client
- * signals that it wants to use a channel, and dma_chan_put is called when
- * a channel is removed or a client using it is unregistered.  A client can
- * take extra references per outstanding transaction, as is the case with
- * the NET DMA client.  The release function does a kref_put on the device.
- *   -ChrisL, DanW
+ * See Documentation/dmaengine.txt for more details
  */
 
 #include <linux/init.h>
@@ -70,54 +56,85 @@
70#include <linux/rcupdate.h> 56#include <linux/rcupdate.h>
71#include <linux/mutex.h> 57#include <linux/mutex.h>
72#include <linux/jiffies.h> 58#include <linux/jiffies.h>
59#include <linux/rculist.h>
60#include <linux/idr.h>
73 61
74static DEFINE_MUTEX(dma_list_mutex); 62static DEFINE_MUTEX(dma_list_mutex);
75static LIST_HEAD(dma_device_list); 63static LIST_HEAD(dma_device_list);
76static LIST_HEAD(dma_client_list); 64static long dmaengine_ref_count;
65static struct idr dma_idr;
77 66
78/* --- sysfs implementation --- */ 67/* --- sysfs implementation --- */
79 68
69/**
70 * dev_to_dma_chan - convert a device pointer to the its sysfs container object
71 * @dev - device node
72 *
73 * Must be called under dma_list_mutex
74 */
75static struct dma_chan *dev_to_dma_chan(struct device *dev)
76{
77 struct dma_chan_dev *chan_dev;
78
79 chan_dev = container_of(dev, typeof(*chan_dev), device);
80 return chan_dev->chan;
81}
82
80static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf) 83static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
81{ 84{
82 struct dma_chan *chan = to_dma_chan(dev); 85 struct dma_chan *chan;
83 unsigned long count = 0; 86 unsigned long count = 0;
84 int i; 87 int i;
88 int err;
85 89
86 for_each_possible_cpu(i) 90 mutex_lock(&dma_list_mutex);
87 count += per_cpu_ptr(chan->local, i)->memcpy_count; 91 chan = dev_to_dma_chan(dev);
92 if (chan) {
93 for_each_possible_cpu(i)
94 count += per_cpu_ptr(chan->local, i)->memcpy_count;
95 err = sprintf(buf, "%lu\n", count);
96 } else
97 err = -ENODEV;
98 mutex_unlock(&dma_list_mutex);
88 99
89 return sprintf(buf, "%lu\n", count); 100 return err;
90} 101}
91 102
92static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr, 103static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
93 char *buf) 104 char *buf)
94{ 105{
95 struct dma_chan *chan = to_dma_chan(dev); 106 struct dma_chan *chan;
96 unsigned long count = 0; 107 unsigned long count = 0;
97 int i; 108 int i;
109 int err;
98 110
99 for_each_possible_cpu(i) 111 mutex_lock(&dma_list_mutex);
100 count += per_cpu_ptr(chan->local, i)->bytes_transferred; 112 chan = dev_to_dma_chan(dev);
113 if (chan) {
114 for_each_possible_cpu(i)
115 count += per_cpu_ptr(chan->local, i)->bytes_transferred;
116 err = sprintf(buf, "%lu\n", count);
117 } else
118 err = -ENODEV;
119 mutex_unlock(&dma_list_mutex);
101 120
102 return sprintf(buf, "%lu\n", count); 121 return err;
103} 122}
104 123
105static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf) 124static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
106{ 125{
107 struct dma_chan *chan = to_dma_chan(dev); 126 struct dma_chan *chan;
108 int in_use = 0; 127 int err;
109
110 if (unlikely(chan->slow_ref) &&
111 atomic_read(&chan->refcount.refcount) > 1)
112 in_use = 1;
113 else {
114 if (local_read(&(per_cpu_ptr(chan->local,
115 get_cpu())->refcount)) > 0)
116 in_use = 1;
117 put_cpu();
118 }
119 128
120 return sprintf(buf, "%d\n", in_use); 129 mutex_lock(&dma_list_mutex);
130 chan = dev_to_dma_chan(dev);
131 if (chan)
132 err = sprintf(buf, "%d\n", chan->client_count);
133 else
134 err = -ENODEV;
135 mutex_unlock(&dma_list_mutex);
136
137 return err;
121} 138}
122 139
123static struct device_attribute dma_attrs[] = { 140static struct device_attribute dma_attrs[] = {
@@ -127,76 +144,110 @@ static struct device_attribute dma_attrs[] = {
127 __ATTR_NULL 144 __ATTR_NULL
128}; 145};
129 146
130static void dma_async_device_cleanup(struct kref *kref); 147static void chan_dev_release(struct device *dev)
131
132static void dma_dev_release(struct device *dev)
133{ 148{
134 struct dma_chan *chan = to_dma_chan(dev); 149 struct dma_chan_dev *chan_dev;
135 kref_put(&chan->device->refcount, dma_async_device_cleanup); 150
151 chan_dev = container_of(dev, typeof(*chan_dev), device);
152 if (atomic_dec_and_test(chan_dev->idr_ref)) {
153 mutex_lock(&dma_list_mutex);
154 idr_remove(&dma_idr, chan_dev->dev_id);
155 mutex_unlock(&dma_list_mutex);
156 kfree(chan_dev->idr_ref);
157 }
158 kfree(chan_dev);
136} 159}
137 160
138static struct class dma_devclass = { 161static struct class dma_devclass = {
139 .name = "dma", 162 .name = "dma",
140 .dev_attrs = dma_attrs, 163 .dev_attrs = dma_attrs,
141 .dev_release = dma_dev_release, 164 .dev_release = chan_dev_release,
142}; 165};
143 166
144/* --- client and device registration --- */ 167/* --- client and device registration --- */
145 168
146#define dma_chan_satisfies_mask(chan, mask) \ 169#define dma_device_satisfies_mask(device, mask) \
147 __dma_chan_satisfies_mask((chan), &(mask)) 170 __dma_device_satisfies_mask((device), &(mask))
148static int 171static int
149__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want) 172__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
150{ 173{
151 dma_cap_mask_t has; 174 dma_cap_mask_t has;
152 175
153 bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits, 176 bitmap_and(has.bits, want->bits, device->cap_mask.bits,
154 DMA_TX_TYPE_END); 177 DMA_TX_TYPE_END);
155 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); 178 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
156} 179}
157 180
181static struct module *dma_chan_to_owner(struct dma_chan *chan)
182{
183 return chan->device->dev->driver->owner;
184}
185
158/** 186/**
159 * dma_client_chan_alloc - try to allocate channels to a client 187 * balance_ref_count - catch up the channel reference count
160 * @client: &dma_client 188 * @chan - channel to balance ->client_count versus dmaengine_ref_count
161 * 189 *
162 * Called with dma_list_mutex held. 190 * balance_ref_count must be called under dma_list_mutex
163 */ 191 */
164static void dma_client_chan_alloc(struct dma_client *client) 192static void balance_ref_count(struct dma_chan *chan)
165{ 193{
166 struct dma_device *device; 194 struct module *owner = dma_chan_to_owner(chan);
167 struct dma_chan *chan;
168 int desc; /* allocated descriptor count */
169 enum dma_state_client ack;
170 195
171 /* Find a channel */ 196 while (chan->client_count < dmaengine_ref_count) {
172 list_for_each_entry(device, &dma_device_list, global_node) { 197 __module_get(owner);
173 /* Does the client require a specific DMA controller? */ 198 chan->client_count++;
174 if (client->slave && client->slave->dma_dev 199 }
175 && client->slave->dma_dev != device->dev) 200}
176 continue;
177 201
178 list_for_each_entry(chan, &device->channels, device_node) { 202/**
179 if (!dma_chan_satisfies_mask(chan, client->cap_mask)) 203 * dma_chan_get - try to grab a dma channel's parent driver module
180 continue; 204 * @chan - channel to grab
205 *
206 * Must be called under dma_list_mutex
207 */
208static int dma_chan_get(struct dma_chan *chan)
209{
210 int err = -ENODEV;
211 struct module *owner = dma_chan_to_owner(chan);
212
213 if (chan->client_count) {
214 __module_get(owner);
215 err = 0;
216 } else if (try_module_get(owner))
217 err = 0;
218
219 if (err == 0)
220 chan->client_count++;
221
222 /* allocate upon first client reference */
223 if (chan->client_count == 1 && err == 0) {
224 int desc_cnt = chan->device->device_alloc_chan_resources(chan);
225
226 if (desc_cnt < 0) {
227 err = desc_cnt;
228 chan->client_count = 0;
229 module_put(owner);
230 } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
231 balance_ref_count(chan);
232 }
181 233
182 desc = chan->device->device_alloc_chan_resources( 234 return err;
183 chan, client); 235}
184 if (desc >= 0) {
185 ack = client->event_callback(client,
186 chan,
187 DMA_RESOURCE_AVAILABLE);
188 236
189 /* we are done once this client rejects 237/**
190 * an available resource 238 * dma_chan_put - drop a reference to a dma channel's parent driver module
191 */ 239 * @chan - channel to release
192 if (ack == DMA_ACK) { 240 *
193 dma_chan_get(chan); 241 * Must be called under dma_list_mutex
194 chan->client_count++; 242 */
195 } else if (ack == DMA_NAK) 243static void dma_chan_put(struct dma_chan *chan)
196 return; 244{
197 } 245 if (!chan->client_count)
198 } 246 return; /* this channel failed alloc_chan_resources */
199 } 247 chan->client_count--;
248 module_put(dma_chan_to_owner(chan));
249 if (chan->client_count == 0)
250 chan->device->device_free_chan_resources(chan);
200} 251}
201 252
202enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) 253enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -218,138 +269,342 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
218EXPORT_SYMBOL(dma_sync_wait); 269EXPORT_SYMBOL(dma_sync_wait);
219 270
220/** 271/**
221 * dma_chan_cleanup - release a DMA channel's resources 272 * dma_cap_mask_all - enable iteration over all operation types
222 * @kref: kernel reference structure that contains the DMA channel device 273 */
274static dma_cap_mask_t dma_cap_mask_all;
275
276/**
277 * dma_chan_tbl_ent - tracks channel allocations per core/operation
278 * @chan - associated channel for this entry
279 */
280struct dma_chan_tbl_ent {
281 struct dma_chan *chan;
282};
283
284/**
285 * channel_table - percpu lookup table for memory-to-memory offload providers
223 */ 286 */
224void dma_chan_cleanup(struct kref *kref) 287static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];
288
289static int __init dma_channel_table_init(void)
225{ 290{
226 struct dma_chan *chan = container_of(kref, struct dma_chan, refcount); 291 enum dma_transaction_type cap;
227 chan->device->device_free_chan_resources(chan); 292 int err = 0;
228 kref_put(&chan->device->refcount, dma_async_device_cleanup); 293
294 bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
295
296 /* 'interrupt', 'private', and 'slave' are channel capabilities,
297 * but are not associated with an operation so they do not need
298 * an entry in the channel_table
299 */
300 clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
301 clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
302 clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
303
304 for_each_dma_cap_mask(cap, dma_cap_mask_all) {
305 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
306 if (!channel_table[cap]) {
307 err = -ENOMEM;
308 break;
309 }
310 }
311
312 if (err) {
313 pr_err("dmaengine: initialization failure\n");
314 for_each_dma_cap_mask(cap, dma_cap_mask_all)
315 if (channel_table[cap])
316 free_percpu(channel_table[cap]);
317 }
318
319 return err;
229} 320}
230EXPORT_SYMBOL(dma_chan_cleanup); 321arch_initcall(dma_channel_table_init);
231 322
232static void dma_chan_free_rcu(struct rcu_head *rcu) 323/**
324 * dma_find_channel - find a channel to carry out the operation
325 * @tx_type: transaction type
326 */
327struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
233{ 328{
234 struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu); 329 struct dma_chan *chan;
235 int bias = 0x7FFFFFFF; 330 int cpu;
236 int i; 331
237 for_each_possible_cpu(i) 332 WARN_ONCE(dmaengine_ref_count == 0,
238 bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount); 333 "client called %s without a reference", __func__);
239 atomic_sub(bias, &chan->refcount.refcount); 334
240 kref_put(&chan->refcount, dma_chan_cleanup); 335 cpu = get_cpu();
336 chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
337 put_cpu();
338
339 return chan;
241} 340}
341EXPORT_SYMBOL(dma_find_channel);
242 342
243static void dma_chan_release(struct dma_chan *chan) 343/**
344 * dma_issue_pending_all - flush all pending operations across all channels
345 */
346void dma_issue_pending_all(void)
244{ 347{
245 atomic_add(0x7FFFFFFF, &chan->refcount.refcount); 348 struct dma_device *device;
246 chan->slow_ref = 1; 349 struct dma_chan *chan;
247 call_rcu(&chan->rcu, dma_chan_free_rcu); 350
351 WARN_ONCE(dmaengine_ref_count == 0,
352 "client called %s without a reference", __func__);
353
354 rcu_read_lock();
355 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
356 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
357 continue;
358 list_for_each_entry(chan, &device->channels, device_node)
359 if (chan->client_count)
360 device->device_issue_pending(chan);
361 }
362 rcu_read_unlock();
248} 363}
364EXPORT_SYMBOL(dma_issue_pending_all);
249 365
250/** 366/**
251 * dma_chans_notify_available - broadcast available channels to the clients 367 * nth_chan - returns the nth channel of the given capability
368 * @cap: capability to match
369 * @n: nth channel desired
370 *
371 * Defaults to returning the channel with the desired capability and the
372 * lowest reference count when 'n' cannot be satisfied. Must be called
373 * under dma_list_mutex.
252 */ 374 */
253static void dma_clients_notify_available(void) 375static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
254{ 376{
255 struct dma_client *client; 377 struct dma_device *device;
378 struct dma_chan *chan;
379 struct dma_chan *ret = NULL;
380 struct dma_chan *min = NULL;
256 381
257 mutex_lock(&dma_list_mutex); 382 list_for_each_entry(device, &dma_device_list, global_node) {
383 if (!dma_has_cap(cap, device->cap_mask) ||
384 dma_has_cap(DMA_PRIVATE, device->cap_mask))
385 continue;
386 list_for_each_entry(chan, &device->channels, device_node) {
387 if (!chan->client_count)
388 continue;
389 if (!min)
390 min = chan;
391 else if (chan->table_count < min->table_count)
392 min = chan;
393
394 if (n-- == 0) {
395 ret = chan;
396 break; /* done */
397 }
398 }
399 if (ret)
400 break; /* done */
401 }
258 402
259 list_for_each_entry(client, &dma_client_list, global_node) 403 if (!ret)
260 dma_client_chan_alloc(client); 404 ret = min;
261 405
262 mutex_unlock(&dma_list_mutex); 406 if (ret)
407 ret->table_count++;
408
409 return ret;
263} 410}
264 411
265/** 412/**
266 * dma_chans_notify_available - tell the clients that a channel is going away 413 * dma_channel_rebalance - redistribute the available channels
267 * @chan: channel on its way out 414 *
415 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
416 * operation type) in the SMP case, and operation isolation (avoid
417 * multi-tasking channels) in the non-SMP case. Must be called under
418 * dma_list_mutex.
268 */ 419 */
269static void dma_clients_notify_removed(struct dma_chan *chan) 420static void dma_channel_rebalance(void)
270{ 421{
271 struct dma_client *client; 422 struct dma_chan *chan;
272 enum dma_state_client ack; 423 struct dma_device *device;
424 int cpu;
425 int cap;
426 int n;
273 427
274 mutex_lock(&dma_list_mutex); 428 /* undo the last distribution */
429 for_each_dma_cap_mask(cap, dma_cap_mask_all)
430 for_each_possible_cpu(cpu)
431 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
432
433 list_for_each_entry(device, &dma_device_list, global_node) {
434 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
435 continue;
436 list_for_each_entry(chan, &device->channels, device_node)
437 chan->table_count = 0;
438 }
275 439
276 list_for_each_entry(client, &dma_client_list, global_node) { 440 /* don't populate the channel_table if no clients are available */
277 ack = client->event_callback(client, chan, 441 if (!dmaengine_ref_count)
278 DMA_RESOURCE_REMOVED); 442 return;
279 443
280 /* client was holding resources for this channel so 444 /* redistribute available channels */
281 * free it 445 n = 0;
282 */ 446 for_each_dma_cap_mask(cap, dma_cap_mask_all)
283 if (ack == DMA_ACK) { 447 for_each_online_cpu(cpu) {
284 dma_chan_put(chan); 448 if (num_possible_cpus() > 1)
285 chan->client_count--; 449 chan = nth_chan(cap, n++);
450 else
451 chan = nth_chan(cap, -1);
452
453 per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
454 }
455}
456
457static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
458 dma_filter_fn fn, void *fn_param)
459{
460 struct dma_chan *chan;
461
462 if (!__dma_device_satisfies_mask(dev, mask)) {
463 pr_debug("%s: wrong capabilities\n", __func__);
464 return NULL;
465 }
466 /* devices with multiple channels need special handling as we need to
467 * ensure that all channels are either private or public.
468 */
469 if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
470 list_for_each_entry(chan, &dev->channels, device_node) {
471 /* some channels are already publicly allocated */
472 if (chan->client_count)
473 return NULL;
286 } 474 }
475
476 list_for_each_entry(chan, &dev->channels, device_node) {
477 if (chan->client_count) {
478 pr_debug("%s: %s busy\n",
479 __func__, dma_chan_name(chan));
480 continue;
481 }
482 if (fn && !fn(chan, fn_param)) {
483 pr_debug("%s: %s filter said false\n",
484 __func__, dma_chan_name(chan));
485 continue;
486 }
487 return chan;
287 } 488 }
288 489
289 mutex_unlock(&dma_list_mutex); 490 return NULL;
290} 491}
291 492
292/** 493/**
293 * dma_async_client_register - register a &dma_client 494 * dma_request_channel - try to allocate an exclusive channel
294 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask' 495 * @mask: capabilities that the channel must satisfy
496 * @fn: optional callback to disposition available channels
497 * @fn_param: opaque parameter to pass to dma_filter_fn
295 */ 498 */
296void dma_async_client_register(struct dma_client *client) 499struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
297{ 500{
298 /* validate client data */ 501 struct dma_device *device, *_d;
299 BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) && 502 struct dma_chan *chan = NULL;
300 !client->slave); 503 int err;
301 504
505 /* Find a channel */
506 mutex_lock(&dma_list_mutex);
507 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
508 chan = private_candidate(mask, device, fn, fn_param);
509 if (chan) {
510 /* Found a suitable channel, try to grab, prep, and
511 * return it. We first set DMA_PRIVATE to disable
512 * balance_ref_count as this channel will not be
513 * published in the general-purpose allocator
514 */
515 dma_cap_set(DMA_PRIVATE, device->cap_mask);
516 err = dma_chan_get(chan);
517
518 if (err == -ENODEV) {
519 pr_debug("%s: %s module removed\n", __func__,
520 dma_chan_name(chan));
521 list_del_rcu(&device->global_node);
522 } else if (err)
523 pr_err("dmaengine: failed to get %s: (%d)\n",
524 dma_chan_name(chan), err);
525 else
526 break;
527 chan = NULL;
528 }
529 }
530 mutex_unlock(&dma_list_mutex);
531
532 pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
533 chan ? dma_chan_name(chan) : NULL);
534
535 return chan;
536}
537EXPORT_SYMBOL_GPL(__dma_request_channel);
538
539void dma_release_channel(struct dma_chan *chan)
540{
302 mutex_lock(&dma_list_mutex); 541 mutex_lock(&dma_list_mutex);
303 list_add_tail(&client->global_node, &dma_client_list); 542 WARN_ONCE(chan->client_count != 1,
543 "chan reference count %d != 1\n", chan->client_count);
544 dma_chan_put(chan);
304 mutex_unlock(&dma_list_mutex); 545 mutex_unlock(&dma_list_mutex);
305} 546}
306EXPORT_SYMBOL(dma_async_client_register); 547EXPORT_SYMBOL_GPL(dma_release_channel);
307 548
308/** 549/**
309 * dma_async_client_unregister - unregister a client and free the &dma_client 550 * dmaengine_get - register interest in dma_channels
310 * @client: &dma_client to free
311 *
312 * Force frees any allocated DMA channels, frees the &dma_client memory
313 */ 551 */
314void dma_async_client_unregister(struct dma_client *client) 552void dmaengine_get(void)
315{ 553{
316 struct dma_device *device; 554 struct dma_device *device, *_d;
317 struct dma_chan *chan; 555 struct dma_chan *chan;
318 enum dma_state_client ack; 556 int err;
319
320 if (!client)
321 return;
322 557
323 mutex_lock(&dma_list_mutex); 558 mutex_lock(&dma_list_mutex);
324 /* free all channels the client is holding */ 559 dmaengine_ref_count++;
325 list_for_each_entry(device, &dma_device_list, global_node)
326 list_for_each_entry(chan, &device->channels, device_node) {
327 ack = client->event_callback(client, chan,
328 DMA_RESOURCE_REMOVED);
329 560
330 if (ack == DMA_ACK) { 561 /* try to grab channels */
331 dma_chan_put(chan); 562 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
332 chan->client_count--; 563 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
333 } 564 continue;
565 list_for_each_entry(chan, &device->channels, device_node) {
566 err = dma_chan_get(chan);
567 if (err == -ENODEV) {
568 /* module removed before we could use it */
569 list_del_rcu(&device->global_node);
570 break;
571 } else if (err)
572 pr_err("dmaengine: failed to get %s: (%d)\n",
573 dma_chan_name(chan), err);
334 } 574 }
575 }
335 576
336 list_del(&client->global_node); 577 /* if this is the first reference and there were channels
578 * waiting we need to rebalance to get those channels
579 * incorporated into the channel table
580 */
581 if (dmaengine_ref_count == 1)
582 dma_channel_rebalance();
337 mutex_unlock(&dma_list_mutex); 583 mutex_unlock(&dma_list_mutex);
338} 584}
339EXPORT_SYMBOL(dma_async_client_unregister); 585EXPORT_SYMBOL(dmaengine_get);
340 586
341/** 587/**
342 * dma_async_client_chan_request - send all available channels to the 588 * dmaengine_put - let dma drivers be removed when ref_count == 0
343 * client that satisfy the capability mask
344 * @client - requester
345 */ 589 */
346void dma_async_client_chan_request(struct dma_client *client) 590void dmaengine_put(void)
347{ 591{
592 struct dma_device *device;
593 struct dma_chan *chan;
594
348 mutex_lock(&dma_list_mutex); 595 mutex_lock(&dma_list_mutex);
349 dma_client_chan_alloc(client); 596 dmaengine_ref_count--;
597 BUG_ON(dmaengine_ref_count < 0);
598 /* drop channel references */
599 list_for_each_entry(device, &dma_device_list, global_node) {
600 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
601 continue;
602 list_for_each_entry(chan, &device->channels, device_node)
603 dma_chan_put(chan);
604 }
350 mutex_unlock(&dma_list_mutex); 605 mutex_unlock(&dma_list_mutex);
351} 606}
352EXPORT_SYMBOL(dma_async_client_chan_request); 607EXPORT_SYMBOL(dmaengine_put);
353 608
354/** 609/**
355 * dma_async_device_register - registers DMA devices found 610 * dma_async_device_register - registers DMA devices found
@@ -357,9 +612,9 @@ EXPORT_SYMBOL(dma_async_client_chan_request);
357 */ 612 */
358int dma_async_device_register(struct dma_device *device) 613int dma_async_device_register(struct dma_device *device)
359{ 614{
360 static int id;
361 int chancnt = 0, rc; 615 int chancnt = 0, rc;
362 struct dma_chan* chan; 616 struct dma_chan* chan;
617 atomic_t *idr_ref;
363 618
364 if (!device) 619 if (!device)
365 return -ENODEV; 620 return -ENODEV;
@@ -386,57 +641,83 @@ int dma_async_device_register(struct dma_device *device)
386 BUG_ON(!device->device_issue_pending); 641 BUG_ON(!device->device_issue_pending);
387 BUG_ON(!device->dev); 642 BUG_ON(!device->dev);
388 643
389 init_completion(&device->done); 644 idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
390 kref_init(&device->refcount); 645 if (!idr_ref)
391 646 return -ENOMEM;
647 atomic_set(idr_ref, 0);
648 idr_retry:
649 if (!idr_pre_get(&dma_idr, GFP_KERNEL))
650 return -ENOMEM;
392 mutex_lock(&dma_list_mutex); 651 mutex_lock(&dma_list_mutex);
393 device->dev_id = id++; 652 rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
394 mutex_unlock(&dma_list_mutex); 653 mutex_unlock(&dma_list_mutex);
654 if (rc == -EAGAIN)
655 goto idr_retry;
656 else if (rc != 0)
657 return rc;
395 658
396 /* represent channels in sysfs. Probably want devs too */ 659 /* represent channels in sysfs. Probably want devs too */
397 list_for_each_entry(chan, &device->channels, device_node) { 660 list_for_each_entry(chan, &device->channels, device_node) {
398 chan->local = alloc_percpu(typeof(*chan->local)); 661 chan->local = alloc_percpu(typeof(*chan->local));
399 if (chan->local == NULL) 662 if (chan->local == NULL)
400 continue; 663 continue;
664 chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
665 if (chan->dev == NULL) {
666 free_percpu(chan->local);
667 continue;
668 }
401 669
402 chan->chan_id = chancnt++; 670 chan->chan_id = chancnt++;
403 chan->dev.class = &dma_devclass; 671 chan->dev->device.class = &dma_devclass;
404 chan->dev.parent = device->dev; 672 chan->dev->device.parent = device->dev;
405 dev_set_name(&chan->dev, "dma%dchan%d", 673 chan->dev->chan = chan;
674 chan->dev->idr_ref = idr_ref;
675 chan->dev->dev_id = device->dev_id;
676 atomic_inc(idr_ref);
677 dev_set_name(&chan->dev->device, "dma%dchan%d",
406 device->dev_id, chan->chan_id); 678 device->dev_id, chan->chan_id);
407 679
408 rc = device_register(&chan->dev); 680 rc = device_register(&chan->dev->device);
409 if (rc) { 681 if (rc) {
410 chancnt--;
411 free_percpu(chan->local); 682 free_percpu(chan->local);
412 chan->local = NULL; 683 chan->local = NULL;
413 goto err_out; 684 goto err_out;
414 } 685 }
415
416 /* One for the channel, one of the class device */
417 kref_get(&device->refcount);
418 kref_get(&device->refcount);
419 kref_init(&chan->refcount);
420 chan->client_count = 0; 686 chan->client_count = 0;
421 chan->slow_ref = 0;
422 INIT_RCU_HEAD(&chan->rcu);
423 } 687 }
688 device->chancnt = chancnt;
424 689
425 mutex_lock(&dma_list_mutex); 690 mutex_lock(&dma_list_mutex);
426 list_add_tail(&device->global_node, &dma_device_list); 691 /* take references on public channels */
692 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
693 list_for_each_entry(chan, &device->channels, device_node) {
694 /* if clients are already waiting for channels we need
695 * to take references on their behalf
696 */
697 if (dma_chan_get(chan) == -ENODEV) {
698 /* note we can only get here for the first
699 * channel as the remaining channels are
700 * guaranteed to get a reference
701 */
702 rc = -ENODEV;
703 mutex_unlock(&dma_list_mutex);
704 goto err_out;
705 }
706 }
707 list_add_tail_rcu(&device->global_node, &dma_device_list);
708 dma_channel_rebalance();
427 mutex_unlock(&dma_list_mutex); 709 mutex_unlock(&dma_list_mutex);
428 710
429 dma_clients_notify_available();
430
431 return 0; 711 return 0;
432 712
433err_out: 713err_out:
434 list_for_each_entry(chan, &device->channels, device_node) { 714 list_for_each_entry(chan, &device->channels, device_node) {
435 if (chan->local == NULL) 715 if (chan->local == NULL)
436 continue; 716 continue;
437 kref_put(&device->refcount, dma_async_device_cleanup); 717 mutex_lock(&dma_list_mutex);
438 device_unregister(&chan->dev); 718 chan->dev->chan = NULL;
439 chancnt--; 719 mutex_unlock(&dma_list_mutex);
720 device_unregister(&chan->dev->device);
440 free_percpu(chan->local); 721 free_percpu(chan->local);
441 } 722 }
442 return rc; 723 return rc;
@@ -444,37 +725,30 @@ err_out:
444EXPORT_SYMBOL(dma_async_device_register); 725EXPORT_SYMBOL(dma_async_device_register);
445 726
446/** 727/**
447 * dma_async_device_cleanup - function called when all references are released 728 * dma_async_device_unregister - unregister a DMA device
448 * @kref: kernel reference object
449 */
450static void dma_async_device_cleanup(struct kref *kref)
451{
452 struct dma_device *device;
453
454 device = container_of(kref, struct dma_device, refcount);
455 complete(&device->done);
456}
457
458/**
459 * dma_async_device_unregister - unregisters DMA devices
460 * @device: &dma_device 729 * @device: &dma_device
730 *
731 * This routine is called by dma driver exit routines, dmaengine holds module
732 * references to prevent it being called while channels are in use.
461 */ 733 */
462void dma_async_device_unregister(struct dma_device *device) 734void dma_async_device_unregister(struct dma_device *device)
463{ 735{
464 struct dma_chan *chan; 736 struct dma_chan *chan;
465 737
466 mutex_lock(&dma_list_mutex); 738 mutex_lock(&dma_list_mutex);
467 list_del(&device->global_node); 739 list_del_rcu(&device->global_node);
740 dma_channel_rebalance();
468 mutex_unlock(&dma_list_mutex); 741 mutex_unlock(&dma_list_mutex);
469 742
470 list_for_each_entry(chan, &device->channels, device_node) { 743 list_for_each_entry(chan, &device->channels, device_node) {
471 dma_clients_notify_removed(chan); 744 WARN_ONCE(chan->client_count,
472 device_unregister(&chan->dev); 745 "%s called while %d clients hold a reference\n",
473 dma_chan_release(chan); 746 __func__, chan->client_count);
747 mutex_lock(&dma_list_mutex);
748 chan->dev->chan = NULL;
749 mutex_unlock(&dma_list_mutex);
750 device_unregister(&chan->dev->device);
474 } 751 }
475
476 kref_put(&device->refcount, dma_async_device_cleanup);
477 wait_for_completion(&device->done);
478} 752}
479EXPORT_SYMBOL(dma_async_device_unregister); 753EXPORT_SYMBOL(dma_async_device_unregister);
480 754
@@ -626,10 +900,96 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
626} 900}
627EXPORT_SYMBOL(dma_async_tx_descriptor_init); 901EXPORT_SYMBOL(dma_async_tx_descriptor_init);
628 902
903/* dma_wait_for_async_tx - spin wait for a transaction to complete
904 * @tx: in-flight transaction to wait on
905 *
906 * This routine assumes that tx was obtained from a call to async_memcpy,
907 * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
908 * and submitted). Walking the parent chain is only meant to cover for DMA
909 * drivers that do not implement the DMA_INTERRUPT capability and may race with
910 * the driver's descriptor cleanup routine.
911 */
912enum dma_status
913dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
914{
915 enum dma_status status;
916 struct dma_async_tx_descriptor *iter;
917 struct dma_async_tx_descriptor *parent;
918
919 if (!tx)
920 return DMA_SUCCESS;
921
922 WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
923 " %s\n", __func__, dma_chan_name(tx->chan));
924
925 /* poll through the dependency chain, return when tx is complete */
926 do {
927 iter = tx;
928
929 /* find the root of the unsubmitted dependency chain */
930 do {
931 parent = iter->parent;
932 if (!parent)
933 break;
934 else
935 iter = parent;
936 } while (parent);
937
938 /* there is a small window for ->parent == NULL and
939 * ->cookie == -EBUSY
940 */
941 while (iter->cookie == -EBUSY)
942 cpu_relax();
943
944 status = dma_sync_wait(iter->chan, iter->cookie);
945 } while (status == DMA_IN_PROGRESS || (iter != tx));
946
947 return status;
948}
949EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
950
951/* dma_run_dependencies - helper routine for dma drivers to process
952 * (start) dependent operations on their target channel
953 * @tx: transaction with dependencies
954 */
955void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
956{
957 struct dma_async_tx_descriptor *dep = tx->next;
958 struct dma_async_tx_descriptor *dep_next;
959 struct dma_chan *chan;
960
961 if (!dep)
962 return;
963
964 chan = dep->chan;
965
966 /* keep submitting up until a channel switch is detected
967 * in that case we will be called again as a result of
968 * processing the interrupt from async_tx_channel_switch
969 */
970 for (; dep; dep = dep_next) {
971 spin_lock_bh(&dep->lock);
972 dep->parent = NULL;
973 dep_next = dep->next;
974 if (dep_next && dep_next->chan == chan)
975 dep->next = NULL; /* ->next will be submitted */
976 else
977 dep_next = NULL; /* submit current dep and terminate */
978 spin_unlock_bh(&dep->lock);
979
980 dep->tx_submit(dep);
981 }
982
983 chan->device->device_issue_pending(chan);
984}
985EXPORT_SYMBOL_GPL(dma_run_dependencies);
986
629static int __init dma_bus_init(void) 987static int __init dma_bus_init(void)
630{ 988{
989 idr_init(&dma_idr);
631 mutex_init(&dma_list_mutex); 990 mutex_init(&dma_list_mutex);
632 return class_register(&dma_devclass); 991 return class_register(&dma_devclass);
633} 992}
634subsys_initcall(dma_bus_init); 993arch_initcall(dma_bus_init);
994
635 995
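For reference, a minimal sketch (not part of this patch) of how a memory-to-memory offload client consumes the interface introduced above in dmaengine.c: dmaengine_get()/dmaengine_put() bracket the client's lifetime, dma_find_channel() performs the per-cpu channel lookup, and dma_issue_pending_all() flushes submitted work. The my_client_*/my_async_memcpy() names are hypothetical, and dst/src are assumed to be already DMA-mapped bus addresses.

/* Hypothetical client sketch -- illustrates the API added by this patch,
 * not code taken from it.
 */
#include <linux/dmaengine.h>
#include <linux/module.h>

static int __init my_client_init(void)
{
	/* register interest: takes a reference on every public channel
	 * and populates the per-cpu channel table
	 */
	dmaengine_get();
	return 0;
}

static void __exit my_client_exit(void)
{
	/* drop the reference so dma drivers may be unloaded again */
	dmaengine_put();
}

/* dst and src are assumed to be DMA-mapped already */
static int my_async_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* per-cpu lookup; only valid while a dmaengine reference is held */
	chan = dma_find_channel(DMA_MEMCPY);
	if (!chan)
		return -ENODEV;	/* fall back to a cpu copy */

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = tx->tx_submit(tx);
	dma_issue_pending_all();

	/* busy-wait for completion; real clients would use a callback */
	return dma_sync_wait(chan, cookie) == DMA_SUCCESS ? 0 : -EIO;
}

module_init(my_client_init);
module_exit(my_client_exit);
MODULE_LICENSE("GPL");

Taking the dmaengine reference once at module init matches how the patch only rebalances the channel table when the first reference arrives.
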
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index ed9636bfb54a..3603f1ea5b28 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -35,7 +35,7 @@ MODULE_PARM_DESC(threads_per_chan,
35 35
36static unsigned int max_channels; 36static unsigned int max_channels;
37module_param(max_channels, uint, S_IRUGO); 37module_param(max_channels, uint, S_IRUGO);
38MODULE_PARM_DESC(nr_channels, 38MODULE_PARM_DESC(max_channels,
39 "Maximum number of channels to use (default: all)"); 39 "Maximum number of channels to use (default: all)");
40 40
41/* 41/*
@@ -71,7 +71,7 @@ struct dmatest_chan {
71 71
72/* 72/*
73 * These are protected by dma_list_mutex since they're only used by 73 * These are protected by dma_list_mutex since they're only used by
74 * the DMA client event callback 74 * the DMA filter function callback
75 */ 75 */
76static LIST_HEAD(dmatest_channels); 76static LIST_HEAD(dmatest_channels);
77static unsigned int nr_channels; 77static unsigned int nr_channels;
@@ -80,7 +80,7 @@ static bool dmatest_match_channel(struct dma_chan *chan)
80{ 80{
81 if (test_channel[0] == '\0') 81 if (test_channel[0] == '\0')
82 return true; 82 return true;
83 return strcmp(dev_name(&chan->dev), test_channel) == 0; 83 return strcmp(dma_chan_name(chan), test_channel) == 0;
84} 84}
85 85
86static bool dmatest_match_device(struct dma_device *device) 86static bool dmatest_match_device(struct dma_device *device)
@@ -215,7 +215,6 @@ static int dmatest_func(void *data)
215 215
216 smp_rmb(); 216 smp_rmb();
217 chan = thread->chan; 217 chan = thread->chan;
218 dma_chan_get(chan);
219 218
220 while (!kthread_should_stop()) { 219 while (!kthread_should_stop()) {
221 total_tests++; 220 total_tests++;
@@ -293,7 +292,6 @@ static int dmatest_func(void *data)
293 } 292 }
294 293
295 ret = 0; 294 ret = 0;
296 dma_chan_put(chan);
297 kfree(thread->dstbuf); 295 kfree(thread->dstbuf);
298err_dstbuf: 296err_dstbuf:
299 kfree(thread->srcbuf); 297 kfree(thread->srcbuf);
@@ -319,21 +317,16 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
319 kfree(dtc); 317 kfree(dtc);
320} 318}
321 319
322static enum dma_state_client dmatest_add_channel(struct dma_chan *chan) 320static int dmatest_add_channel(struct dma_chan *chan)
323{ 321{
324 struct dmatest_chan *dtc; 322 struct dmatest_chan *dtc;
325 struct dmatest_thread *thread; 323 struct dmatest_thread *thread;
326 unsigned int i; 324 unsigned int i;
327 325
328 /* Have we already been told about this channel? */
329 list_for_each_entry(dtc, &dmatest_channels, node)
330 if (dtc->chan == chan)
331 return DMA_DUP;
332
333 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); 326 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
334 if (!dtc) { 327 if (!dtc) {
335 pr_warning("dmatest: No memory for %s\n", dev_name(&chan->dev)); 328 pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
336 return DMA_NAK; 329 return -ENOMEM;
337 } 330 }
338 331
339 dtc->chan = chan; 332 dtc->chan = chan;
@@ -343,16 +336,16 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
343 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); 336 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
344 if (!thread) { 337 if (!thread) {
345 pr_warning("dmatest: No memory for %s-test%u\n", 338 pr_warning("dmatest: No memory for %s-test%u\n",
346 dev_name(&chan->dev), i); 339 dma_chan_name(chan), i);
347 break; 340 break;
348 } 341 }
349 thread->chan = dtc->chan; 342 thread->chan = dtc->chan;
350 smp_wmb(); 343 smp_wmb();
351 thread->task = kthread_run(dmatest_func, thread, "%s-test%u", 344 thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
352 dev_name(&chan->dev), i); 345 dma_chan_name(chan), i);
353 if (IS_ERR(thread->task)) { 346 if (IS_ERR(thread->task)) {
354 pr_warning("dmatest: Failed to run thread %s-test%u\n", 347 pr_warning("dmatest: Failed to run thread %s-test%u\n",
355 dev_name(&chan->dev), i); 348 dma_chan_name(chan), i);
356 kfree(thread); 349 kfree(thread);
357 break; 350 break;
358 } 351 }
@@ -362,86 +355,62 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
362 list_add_tail(&thread->node, &dtc->threads); 355 list_add_tail(&thread->node, &dtc->threads);
363 } 356 }
364 357
365 pr_info("dmatest: Started %u threads using %s\n", i, dev_name(&chan->dev)); 358 pr_info("dmatest: Started %u threads using %s\n", i, dma_chan_name(chan));
366 359
367 list_add_tail(&dtc->node, &dmatest_channels); 360 list_add_tail(&dtc->node, &dmatest_channels);
368 nr_channels++; 361 nr_channels++;
369 362
370 return DMA_ACK; 363 return 0;
371}
372
373static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
374{
375 struct dmatest_chan *dtc, *_dtc;
376
377 list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
378 if (dtc->chan == chan) {
379 list_del(&dtc->node);
380 dmatest_cleanup_channel(dtc);
381 pr_debug("dmatest: lost channel %s\n",
382 dev_name(&chan->dev));
383 return DMA_ACK;
384 }
385 }
386
387 return DMA_DUP;
388} 364}
389 365
390/* 366static bool filter(struct dma_chan *chan, void *param)
391 * Start testing threads as new channels are assigned to us, and kill
392 * them when the channels go away.
393 *
394 * When we unregister the client, all channels are removed so this
395 * will also take care of cleaning things up when the module is
396 * unloaded.
397 */
398static enum dma_state_client
399dmatest_event(struct dma_client *client, struct dma_chan *chan,
400 enum dma_state state)
401{ 367{
402 enum dma_state_client ack = DMA_NAK; 368 if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
403 369 return false;
404 switch (state) { 370 else
405 case DMA_RESOURCE_AVAILABLE: 371 return true;
406 if (!dmatest_match_channel(chan)
407 || !dmatest_match_device(chan->device))
408 ack = DMA_DUP;
409 else if (max_channels && nr_channels >= max_channels)
410 ack = DMA_NAK;
411 else
412 ack = dmatest_add_channel(chan);
413 break;
414
415 case DMA_RESOURCE_REMOVED:
416 ack = dmatest_remove_channel(chan);
417 break;
418
419 default:
420 pr_info("dmatest: Unhandled event %u (%s)\n",
421 state, dev_name(&chan->dev));
422 break;
423 }
424
425 return ack;
426} 372}
427 373
428static struct dma_client dmatest_client = {
429 .event_callback = dmatest_event,
430};
431
432static int __init dmatest_init(void) 374static int __init dmatest_init(void)
433{ 375{
434 dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask); 376 dma_cap_mask_t mask;
435 dma_async_client_register(&dmatest_client); 377 struct dma_chan *chan;
436 dma_async_client_chan_request(&dmatest_client); 378 int err = 0;
379
380 dma_cap_zero(mask);
381 dma_cap_set(DMA_MEMCPY, mask);
382 for (;;) {
383 chan = dma_request_channel(mask, filter, NULL);
384 if (chan) {
385 err = dmatest_add_channel(chan);
386 if (err == 0)
387 continue;
388 else {
389 dma_release_channel(chan);
390 break; /* add_channel failed, punt */
391 }
392 } else
393 break; /* no more channels available */
394 if (max_channels && nr_channels >= max_channels)
395 break; /* we have all we need */
396 }
437 397
438 return 0; 398 return err;
439} 399}
440module_init(dmatest_init); 400/* when compiled-in wait for drivers to load first */
401late_initcall(dmatest_init);
441 402
442static void __exit dmatest_exit(void) 403static void __exit dmatest_exit(void)
443{ 404{
444 dma_async_client_unregister(&dmatest_client); 405 struct dmatest_chan *dtc, *_dtc;
406
407 list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
408 list_del(&dtc->node);
409 dmatest_cleanup_channel(dtc);
410 pr_debug("dmatest: dropped channel %s\n",
411 dma_chan_name(dtc->chan));
412 dma_release_channel(dtc->chan);
413 }
445} 414}
446module_exit(dmatest_exit); 415module_exit(dmatest_exit);
447 416
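dmatest above is itself the first user of the new exclusive-channel path; condensed into a hypothetical peripheral (slave) driver, the same pattern looks roughly like the sketch below. my_filter() and my_grab_channel() are illustrative names only.

/* Hypothetical exclusive-channel consumer -- same pattern as dmatest_init()
 * above, condensed for illustration.
 */
#include <linux/dmaengine.h>

static bool my_filter(struct dma_chan *chan, void *param)
{
	/* e.g. only accept channels provided by a specific controller */
	struct device *wanted = param;

	return chan->device->dev == wanted;
}

static struct dma_chan *my_grab_channel(struct device *dma_dev)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* marks the providing device DMA_PRIVATE and pins its module;
	 * returns NULL if no channel passes the mask and filter
	 */
	return dma_request_channel(mask, my_filter, dma_dev);
}

/* ...and when finished: dma_release_channel(chan); */

Because dma_request_channel() sets DMA_PRIVATE and takes a module reference on the providing driver, dma_release_channel() must be called before that driver can be removed.
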
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 0778d99aea7c..6b702cc46b3d 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -70,6 +70,15 @@
70 * the controller, though. 70 * the controller, though.
71 */ 71 */
72 72
73static struct device *chan2dev(struct dma_chan *chan)
74{
75 return &chan->dev->device;
76}
77static struct device *chan2parent(struct dma_chan *chan)
78{
79 return chan->dev->device.parent;
80}
81
73static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) 82static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
74{ 83{
75 return list_entry(dwc->active_list.next, struct dw_desc, desc_node); 84 return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
@@ -93,12 +102,12 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
93 ret = desc; 102 ret = desc;
94 break; 103 break;
95 } 104 }
96 dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc); 105 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
97 i++; 106 i++;
98 } 107 }
99 spin_unlock_bh(&dwc->lock); 108 spin_unlock_bh(&dwc->lock);
100 109
101 dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i); 110 dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
102 111
103 return ret; 112 return ret;
104} 113}
@@ -108,10 +117,10 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
108 struct dw_desc *child; 117 struct dw_desc *child;
109 118
110 list_for_each_entry(child, &desc->txd.tx_list, desc_node) 119 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
111 dma_sync_single_for_cpu(dwc->chan.dev.parent, 120 dma_sync_single_for_cpu(chan2parent(&dwc->chan),
112 child->txd.phys, sizeof(child->lli), 121 child->txd.phys, sizeof(child->lli),
113 DMA_TO_DEVICE); 122 DMA_TO_DEVICE);
114 dma_sync_single_for_cpu(dwc->chan.dev.parent, 123 dma_sync_single_for_cpu(chan2parent(&dwc->chan),
115 desc->txd.phys, sizeof(desc->lli), 124 desc->txd.phys, sizeof(desc->lli),
116 DMA_TO_DEVICE); 125 DMA_TO_DEVICE);
117} 126}
@@ -129,11 +138,11 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
129 138
130 spin_lock_bh(&dwc->lock); 139 spin_lock_bh(&dwc->lock);
131 list_for_each_entry(child, &desc->txd.tx_list, desc_node) 140 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
132 dev_vdbg(&dwc->chan.dev, 141 dev_vdbg(chan2dev(&dwc->chan),
133 "moving child desc %p to freelist\n", 142 "moving child desc %p to freelist\n",
134 child); 143 child);
135 list_splice_init(&desc->txd.tx_list, &dwc->free_list); 144 list_splice_init(&desc->txd.tx_list, &dwc->free_list);
136 dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc); 145 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
137 list_add(&desc->desc_node, &dwc->free_list); 146 list_add(&desc->desc_node, &dwc->free_list);
138 spin_unlock_bh(&dwc->lock); 147 spin_unlock_bh(&dwc->lock);
139 } 148 }
@@ -163,9 +172,9 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
163 172
164 /* ASSERT: channel is idle */ 173 /* ASSERT: channel is idle */
165 if (dma_readl(dw, CH_EN) & dwc->mask) { 174 if (dma_readl(dw, CH_EN) & dwc->mask) {
166 dev_err(&dwc->chan.dev, 175 dev_err(chan2dev(&dwc->chan),
167 "BUG: Attempted to start non-idle channel\n"); 176 "BUG: Attempted to start non-idle channel\n");
168 dev_err(&dwc->chan.dev, 177 dev_err(chan2dev(&dwc->chan),
169 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 178 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
170 channel_readl(dwc, SAR), 179 channel_readl(dwc, SAR),
171 channel_readl(dwc, DAR), 180 channel_readl(dwc, DAR),
@@ -193,7 +202,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
193 void *param; 202 void *param;
194 struct dma_async_tx_descriptor *txd = &desc->txd; 203 struct dma_async_tx_descriptor *txd = &desc->txd;
195 204
196 dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie); 205 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
197 206
198 dwc->completed = txd->cookie; 207 dwc->completed = txd->cookie;
199 callback = txd->callback; 208 callback = txd->callback;
@@ -208,11 +217,11 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
208 * mapped before they were submitted... 217 * mapped before they were submitted...
209 */ 218 */
210 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) 219 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
211 dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len, 220 dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar,
212 DMA_FROM_DEVICE); 221 desc->len, DMA_FROM_DEVICE);
213 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) 222 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
214 dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len, 223 dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar,
215 DMA_TO_DEVICE); 224 desc->len, DMA_TO_DEVICE);
216 225
217 /* 226 /*
218 * The API requires that no submissions are done from a 227 * The API requires that no submissions are done from a
@@ -228,7 +237,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
228 LIST_HEAD(list); 237 LIST_HEAD(list);
229 238
230 if (dma_readl(dw, CH_EN) & dwc->mask) { 239 if (dma_readl(dw, CH_EN) & dwc->mask) {
231 dev_err(&dwc->chan.dev, 240 dev_err(chan2dev(&dwc->chan),
232 "BUG: XFER bit set, but channel not idle!\n"); 241 "BUG: XFER bit set, but channel not idle!\n");
233 242
234 /* Try to continue after resetting the channel... */ 243 /* Try to continue after resetting the channel... */
@@ -273,7 +282,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
273 return; 282 return;
274 } 283 }
275 284
276 dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp); 285 dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
277 286
278 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { 287 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
279 if (desc->lli.llp == llp) 288 if (desc->lli.llp == llp)
@@ -292,7 +301,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
292 dwc_descriptor_complete(dwc, desc); 301 dwc_descriptor_complete(dwc, desc);
293 } 302 }
294 303
295 dev_err(&dwc->chan.dev, 304 dev_err(chan2dev(&dwc->chan),
296 "BUG: All descriptors done, but channel not idle!\n"); 305 "BUG: All descriptors done, but channel not idle!\n");
297 306
298 /* Try to continue after resetting the channel... */ 307 /* Try to continue after resetting the channel... */
@@ -308,7 +317,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
308 317
309static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) 318static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
310{ 319{
311 dev_printk(KERN_CRIT, &dwc->chan.dev, 320 dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
312 " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", 321 " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
313 lli->sar, lli->dar, lli->llp, 322 lli->sar, lli->dar, lli->llp,
314 lli->ctlhi, lli->ctllo); 323 lli->ctlhi, lli->ctllo);
@@ -342,9 +351,9 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
342 * controller flagged an error instead of scribbling over 351 * controller flagged an error instead of scribbling over
343 * random memory locations. 352 * random memory locations.
344 */ 353 */
345 dev_printk(KERN_CRIT, &dwc->chan.dev, 354 dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
346 "Bad descriptor submitted for DMA!\n"); 355 "Bad descriptor submitted for DMA!\n");
347 dev_printk(KERN_CRIT, &dwc->chan.dev, 356 dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
348 " cookie: %d\n", bad_desc->txd.cookie); 357 " cookie: %d\n", bad_desc->txd.cookie);
349 dwc_dump_lli(dwc, &bad_desc->lli); 358 dwc_dump_lli(dwc, &bad_desc->lli);
350 list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) 359 list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
@@ -442,12 +451,12 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
442 * for DMA. But this is hard to do in a race-free manner. 451 * for DMA. But this is hard to do in a race-free manner.
443 */ 452 */
444 if (list_empty(&dwc->active_list)) { 453 if (list_empty(&dwc->active_list)) {
445 dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n", 454 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
446 desc->txd.cookie); 455 desc->txd.cookie);
447 dwc_dostart(dwc, desc); 456 dwc_dostart(dwc, desc);
448 list_add_tail(&desc->desc_node, &dwc->active_list); 457 list_add_tail(&desc->desc_node, &dwc->active_list);
449 } else { 458 } else {
450 dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n", 459 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
451 desc->txd.cookie); 460 desc->txd.cookie);
452 461
453 list_add_tail(&desc->desc_node, &dwc->queue); 462 list_add_tail(&desc->desc_node, &dwc->queue);
@@ -472,11 +481,11 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
472 unsigned int dst_width; 481 unsigned int dst_width;
473 u32 ctllo; 482 u32 ctllo;
474 483
475 dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", 484 dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
476 dest, src, len, flags); 485 dest, src, len, flags);
477 486
478 if (unlikely(!len)) { 487 if (unlikely(!len)) {
479 dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n"); 488 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
480 return NULL; 489 return NULL;
481 } 490 }
482 491
@@ -516,7 +525,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
516 first = desc; 525 first = desc;
517 } else { 526 } else {
518 prev->lli.llp = desc->txd.phys; 527 prev->lli.llp = desc->txd.phys;
519 dma_sync_single_for_device(chan->dev.parent, 528 dma_sync_single_for_device(chan2parent(chan),
520 prev->txd.phys, sizeof(prev->lli), 529 prev->txd.phys, sizeof(prev->lli),
521 DMA_TO_DEVICE); 530 DMA_TO_DEVICE);
522 list_add_tail(&desc->desc_node, 531 list_add_tail(&desc->desc_node,
@@ -531,7 +540,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
531 prev->lli.ctllo |= DWC_CTLL_INT_EN; 540 prev->lli.ctllo |= DWC_CTLL_INT_EN;
532 541
533 prev->lli.llp = 0; 542 prev->lli.llp = 0;
534 dma_sync_single_for_device(chan->dev.parent, 543 dma_sync_single_for_device(chan2parent(chan),
535 prev->txd.phys, sizeof(prev->lli), 544 prev->txd.phys, sizeof(prev->lli),
536 DMA_TO_DEVICE); 545 DMA_TO_DEVICE);
537 546
@@ -562,15 +571,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
562 struct scatterlist *sg; 571 struct scatterlist *sg;
563 size_t total_len = 0; 572 size_t total_len = 0;
564 573
565 dev_vdbg(&chan->dev, "prep_dma_slave\n"); 574 dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
566 575
567 if (unlikely(!dws || !sg_len)) 576 if (unlikely(!dws || !sg_len))
568 return NULL; 577 return NULL;
569 578
570 reg_width = dws->slave.reg_width; 579 reg_width = dws->reg_width;
571 prev = first = NULL; 580 prev = first = NULL;
572 581
573 sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction); 582 sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
574 583
575 switch (direction) { 584 switch (direction) {
576 case DMA_TO_DEVICE: 585 case DMA_TO_DEVICE:
@@ -579,7 +588,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
579 | DWC_CTLL_DST_FIX 588 | DWC_CTLL_DST_FIX
580 | DWC_CTLL_SRC_INC 589 | DWC_CTLL_SRC_INC
581 | DWC_CTLL_FC_M2P); 590 | DWC_CTLL_FC_M2P);
582 reg = dws->slave.tx_reg; 591 reg = dws->tx_reg;
583 for_each_sg(sgl, sg, sg_len, i) { 592 for_each_sg(sgl, sg, sg_len, i) {
584 struct dw_desc *desc; 593 struct dw_desc *desc;
585 u32 len; 594 u32 len;
@@ -587,7 +596,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
587 596
588 desc = dwc_desc_get(dwc); 597 desc = dwc_desc_get(dwc);
589 if (!desc) { 598 if (!desc) {
590 dev_err(&chan->dev, 599 dev_err(chan2dev(chan),
591 "not enough descriptors available\n"); 600 "not enough descriptors available\n");
592 goto err_desc_get; 601 goto err_desc_get;
593 } 602 }
@@ -607,7 +616,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
607 first = desc; 616 first = desc;
608 } else { 617 } else {
609 prev->lli.llp = desc->txd.phys; 618 prev->lli.llp = desc->txd.phys;
610 dma_sync_single_for_device(chan->dev.parent, 619 dma_sync_single_for_device(chan2parent(chan),
611 prev->txd.phys, 620 prev->txd.phys,
612 sizeof(prev->lli), 621 sizeof(prev->lli),
613 DMA_TO_DEVICE); 622 DMA_TO_DEVICE);
@@ -625,7 +634,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
625 | DWC_CTLL_SRC_FIX 634 | DWC_CTLL_SRC_FIX
626 | DWC_CTLL_FC_P2M); 635 | DWC_CTLL_FC_P2M);
627 636
628 reg = dws->slave.rx_reg; 637 reg = dws->rx_reg;
629 for_each_sg(sgl, sg, sg_len, i) { 638 for_each_sg(sgl, sg, sg_len, i) {
630 struct dw_desc *desc; 639 struct dw_desc *desc;
631 u32 len; 640 u32 len;
@@ -633,7 +642,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
633 642
634 desc = dwc_desc_get(dwc); 643 desc = dwc_desc_get(dwc);
635 if (!desc) { 644 if (!desc) {
636 dev_err(&chan->dev, 645 dev_err(chan2dev(chan),
637 "not enough descriptors available\n"); 646 "not enough descriptors available\n");
638 goto err_desc_get; 647 goto err_desc_get;
639 } 648 }
@@ -653,7 +662,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
653 first = desc; 662 first = desc;
654 } else { 663 } else {
655 prev->lli.llp = desc->txd.phys; 664 prev->lli.llp = desc->txd.phys;
656 dma_sync_single_for_device(chan->dev.parent, 665 dma_sync_single_for_device(chan2parent(chan),
657 prev->txd.phys, 666 prev->txd.phys,
658 sizeof(prev->lli), 667 sizeof(prev->lli),
659 DMA_TO_DEVICE); 668 DMA_TO_DEVICE);
@@ -673,7 +682,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
673 prev->lli.ctllo |= DWC_CTLL_INT_EN; 682 prev->lli.ctllo |= DWC_CTLL_INT_EN;
674 683
675 prev->lli.llp = 0; 684 prev->lli.llp = 0;
676 dma_sync_single_for_device(chan->dev.parent, 685 dma_sync_single_for_device(chan2parent(chan),
677 prev->txd.phys, sizeof(prev->lli), 686 prev->txd.phys, sizeof(prev->lli),
678 DMA_TO_DEVICE); 687 DMA_TO_DEVICE);
679 688
@@ -758,29 +767,21 @@ static void dwc_issue_pending(struct dma_chan *chan)
758 spin_unlock_bh(&dwc->lock); 767 spin_unlock_bh(&dwc->lock);
759} 768}
760 769
761static int dwc_alloc_chan_resources(struct dma_chan *chan, 770static int dwc_alloc_chan_resources(struct dma_chan *chan)
762 struct dma_client *client)
763{ 771{
764 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 772 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
765 struct dw_dma *dw = to_dw_dma(chan->device); 773 struct dw_dma *dw = to_dw_dma(chan->device);
766 struct dw_desc *desc; 774 struct dw_desc *desc;
767 struct dma_slave *slave;
768 struct dw_dma_slave *dws; 775 struct dw_dma_slave *dws;
769 int i; 776 int i;
770 u32 cfghi; 777 u32 cfghi;
771 u32 cfglo; 778 u32 cfglo;
772 779
773 dev_vdbg(&chan->dev, "alloc_chan_resources\n"); 780 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
774
775 /* Channels doing slave DMA can only handle one client. */
776 if (dwc->dws || client->slave) {
777 if (chan->client_count)
778 return -EBUSY;
779 }
780 781
781 /* ASSERT: channel is idle */ 782 /* ASSERT: channel is idle */
782 if (dma_readl(dw, CH_EN) & dwc->mask) { 783 if (dma_readl(dw, CH_EN) & dwc->mask) {
783 dev_dbg(&chan->dev, "DMA channel not idle?\n"); 784 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
784 return -EIO; 785 return -EIO;
785 } 786 }
786 787
@@ -789,23 +790,17 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
789 cfghi = DWC_CFGH_FIFO_MODE; 790 cfghi = DWC_CFGH_FIFO_MODE;
790 cfglo = 0; 791 cfglo = 0;
791 792
792 slave = client->slave; 793 dws = dwc->dws;
793 if (slave) { 794 if (dws) {
794 /* 795 /*
795 * We need controller-specific data to set up slave 796 * We need controller-specific data to set up slave
796 * transfers. 797 * transfers.
797 */ 798 */
798 BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev); 799 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
799
800 dws = container_of(slave, struct dw_dma_slave, slave);
801 800
802 dwc->dws = dws;
803 cfghi = dws->cfg_hi; 801 cfghi = dws->cfg_hi;
804 cfglo = dws->cfg_lo; 802 cfglo = dws->cfg_lo;
805 } else {
806 dwc->dws = NULL;
807 } 803 }
808
809 channel_writel(dwc, CFG_LO, cfglo); 804 channel_writel(dwc, CFG_LO, cfglo);
810 channel_writel(dwc, CFG_HI, cfghi); 805 channel_writel(dwc, CFG_HI, cfghi);
811 806
@@ -822,7 +817,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
822 817
823 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); 818 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
824 if (!desc) { 819 if (!desc) {
825 dev_info(&chan->dev, 820 dev_info(chan2dev(chan),
826 "only allocated %d descriptors\n", i); 821 "only allocated %d descriptors\n", i);
827 spin_lock_bh(&dwc->lock); 822 spin_lock_bh(&dwc->lock);
828 break; 823 break;
@@ -832,7 +827,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
832 desc->txd.tx_submit = dwc_tx_submit; 827 desc->txd.tx_submit = dwc_tx_submit;
833 desc->txd.flags = DMA_CTRL_ACK; 828 desc->txd.flags = DMA_CTRL_ACK;
834 INIT_LIST_HEAD(&desc->txd.tx_list); 829 INIT_LIST_HEAD(&desc->txd.tx_list);
835 desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli, 830 desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
836 sizeof(desc->lli), DMA_TO_DEVICE); 831 sizeof(desc->lli), DMA_TO_DEVICE);
837 dwc_desc_put(dwc, desc); 832 dwc_desc_put(dwc, desc);
838 833
@@ -847,7 +842,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
847 842
848 spin_unlock_bh(&dwc->lock); 843 spin_unlock_bh(&dwc->lock);
849 844
850 dev_dbg(&chan->dev, 845 dev_dbg(chan2dev(chan),
851 "alloc_chan_resources allocated %d descriptors\n", i); 846 "alloc_chan_resources allocated %d descriptors\n", i);
852 847
853 return i; 848 return i;
@@ -860,7 +855,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
860 struct dw_desc *desc, *_desc; 855 struct dw_desc *desc, *_desc;
861 LIST_HEAD(list); 856 LIST_HEAD(list);
862 857
863 dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n", 858 dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
864 dwc->descs_allocated); 859 dwc->descs_allocated);
865 860
866 /* ASSERT: channel is idle */ 861 /* ASSERT: channel is idle */
@@ -881,13 +876,13 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
881 spin_unlock_bh(&dwc->lock); 876 spin_unlock_bh(&dwc->lock);
882 877
883 list_for_each_entry_safe(desc, _desc, &list, desc_node) { 878 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
884 dev_vdbg(&chan->dev, " freeing descriptor %p\n", desc); 879 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
885 dma_unmap_single(chan->dev.parent, desc->txd.phys, 880 dma_unmap_single(chan2parent(chan), desc->txd.phys,
886 sizeof(desc->lli), DMA_TO_DEVICE); 881 sizeof(desc->lli), DMA_TO_DEVICE);
887 kfree(desc); 882 kfree(desc);
888 } 883 }
889 884
890 dev_vdbg(&chan->dev, "free_chan_resources done\n"); 885 dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
891} 886}
892 887
893/*----------------------------------------------------------------------*/ 888/*----------------------------------------------------------------------*/
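
The dw_dmac hunks above capture the core interface change of this series: device_alloc_chan_resources() no longer takes a struct dma_client, and slave configuration is read from state already attached to the channel (dwc->dws) rather than from the client. A minimal sketch of the new callback shape, purely illustrative: my_chan, my_desc, MY_NR_DESCS and the helper names below are made up for this note and are not part of any driver in this patch.

        static int my_alloc_chan_resources(struct dma_chan *chan)
        {
                struct my_chan *mc = to_my_chan(chan);  /* hypothetical container_of() wrapper */
                int i;

                /* populate the channel's free-descriptor pool */
                for (i = 0; i < MY_NR_DESCS; i++) {
                        struct my_desc *desc;

                        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
                        if (!desc)
                                break;
                        my_desc_init(mc, desc);         /* hypothetical per-driver setup */
                        list_add_tail(&desc->node, &mc->free_list);
                }

                /* the core still expects the number of descriptors made available */
                return i;
        }

The return value convention is unchanged; the diff's dwc_alloc_chan_resources() likewise reports how many descriptors it allocated.
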
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0b95dcce447e..ca70a21afc68 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -366,8 +366,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
366 * 366 *
367 * Return - The number of descriptors allocated. 367 * Return - The number of descriptors allocated.
368 */ 368 */
369static int fsl_dma_alloc_chan_resources(struct dma_chan *chan, 369static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
370 struct dma_client *client)
371{ 370{
372 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); 371 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
373 372
@@ -823,7 +822,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
823 */ 822 */
824 WARN_ON(fdev->feature != new_fsl_chan->feature); 823 WARN_ON(fdev->feature != new_fsl_chan->feature);
825 824
826 new_fsl_chan->dev = &new_fsl_chan->common.dev; 825 new_fsl_chan->dev = &new_fsl_chan->common.dev->device;
827 new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, 826 new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
828 new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); 827 new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
829 828
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index 9b16a3af9a0a..4105d6575b64 100644
--- a/drivers/dma/ioat.c
+++ b/drivers/dma/ioat.c
@@ -75,60 +75,10 @@ static int ioat_dca_enabled = 1;
75module_param(ioat_dca_enabled, int, 0644); 75module_param(ioat_dca_enabled, int, 0644);
76MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); 76MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
77 77
78static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase)
79{
80 struct ioat_device *device = pci_get_drvdata(pdev);
81 u8 version;
82 int err = 0;
83
84 version = readb(iobase + IOAT_VER_OFFSET);
85 switch (version) {
86 case IOAT_VER_1_2:
87 device->dma = ioat_dma_probe(pdev, iobase);
88 if (device->dma && ioat_dca_enabled)
89 device->dca = ioat_dca_init(pdev, iobase);
90 break;
91 case IOAT_VER_2_0:
92 device->dma = ioat_dma_probe(pdev, iobase);
93 if (device->dma && ioat_dca_enabled)
94 device->dca = ioat2_dca_init(pdev, iobase);
95 break;
96 case IOAT_VER_3_0:
97 device->dma = ioat_dma_probe(pdev, iobase);
98 if (device->dma && ioat_dca_enabled)
99 device->dca = ioat3_dca_init(pdev, iobase);
100 break;
101 default:
102 err = -ENODEV;
103 break;
104 }
105 if (!device->dma)
106 err = -ENODEV;
107 return err;
108}
109
110static void ioat_shutdown_functionality(struct pci_dev *pdev)
111{
112 struct ioat_device *device = pci_get_drvdata(pdev);
113
114 dev_err(&pdev->dev, "Removing dma and dca services\n");
115 if (device->dca) {
116 unregister_dca_provider(device->dca);
117 free_dca_provider(device->dca);
118 device->dca = NULL;
119 }
120
121 if (device->dma) {
122 ioat_dma_remove(device->dma);
123 device->dma = NULL;
124 }
125}
126
127static struct pci_driver ioat_pci_driver = { 78static struct pci_driver ioat_pci_driver = {
128 .name = "ioatdma", 79 .name = "ioatdma",
129 .id_table = ioat_pci_tbl, 80 .id_table = ioat_pci_tbl,
130 .probe = ioat_probe, 81 .probe = ioat_probe,
131 .shutdown = ioat_shutdown_functionality,
132 .remove = __devexit_p(ioat_remove), 82 .remove = __devexit_p(ioat_remove),
133}; 83};
134 84
@@ -179,7 +129,29 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
179 129
180 pci_set_master(pdev); 130 pci_set_master(pdev);
181 131
182 err = ioat_setup_functionality(pdev, iobase); 132 switch (readb(iobase + IOAT_VER_OFFSET)) {
133 case IOAT_VER_1_2:
134 device->dma = ioat_dma_probe(pdev, iobase);
135 if (device->dma && ioat_dca_enabled)
136 device->dca = ioat_dca_init(pdev, iobase);
137 break;
138 case IOAT_VER_2_0:
139 device->dma = ioat_dma_probe(pdev, iobase);
140 if (device->dma && ioat_dca_enabled)
141 device->dca = ioat2_dca_init(pdev, iobase);
142 break;
143 case IOAT_VER_3_0:
144 device->dma = ioat_dma_probe(pdev, iobase);
145 if (device->dma && ioat_dca_enabled)
146 device->dca = ioat3_dca_init(pdev, iobase);
147 break;
148 default:
149 err = -ENODEV;
150 break;
151 }
152 if (!device->dma)
153 err = -ENODEV;
154
183 if (err) 155 if (err)
184 goto err_version; 156 goto err_version;
185 157
@@ -198,17 +170,21 @@ err_enable_device:
198 return err; 170 return err;
199} 171}
200 172
201/*
202 * It is unsafe to remove this module: if removed while a requested
203 * dma is outstanding, esp. from tcp, it is possible to hang while
204 * waiting for something that will never finish. However, if you're
205 * feeling lucky, this usually works just fine.
206 */
207static void __devexit ioat_remove(struct pci_dev *pdev) 173static void __devexit ioat_remove(struct pci_dev *pdev)
208{ 174{
209 struct ioat_device *device = pci_get_drvdata(pdev); 175 struct ioat_device *device = pci_get_drvdata(pdev);
210 176
211 ioat_shutdown_functionality(pdev); 177 dev_err(&pdev->dev, "Removing dma and dca services\n");
178 if (device->dca) {
179 unregister_dca_provider(device->dca);
180 free_dca_provider(device->dca);
181 device->dca = NULL;
182 }
183
184 if (device->dma) {
185 ioat_dma_remove(device->dma);
186 device->dma = NULL;
187 }
212 188
213 kfree(device); 189 kfree(device);
214} 190}
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 6607fdd00b1c..b3759c4b6536 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -734,8 +734,7 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
734 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors 734 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
735 * @chan: the channel to be filled out 735 * @chan: the channel to be filled out
736 */ 736 */
737static int ioat_dma_alloc_chan_resources(struct dma_chan *chan, 737static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
738 struct dma_client *client)
739{ 738{
740 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 739 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
741 struct ioat_desc_sw *desc; 740 struct ioat_desc_sw *desc;
@@ -1341,12 +1340,11 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
1341 */ 1340 */
1342#define IOAT_TEST_SIZE 2000 1341#define IOAT_TEST_SIZE 2000
1343 1342
1344DECLARE_COMPLETION(test_completion);
1345static void ioat_dma_test_callback(void *dma_async_param) 1343static void ioat_dma_test_callback(void *dma_async_param)
1346{ 1344{
1347 printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n", 1345 struct completion *cmp = dma_async_param;
1348 dma_async_param); 1346
1349 complete(&test_completion); 1347 complete(cmp);
1350} 1348}
1351 1349
1352/** 1350/**
@@ -1363,6 +1361,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
1363 dma_addr_t dma_dest, dma_src; 1361 dma_addr_t dma_dest, dma_src;
1364 dma_cookie_t cookie; 1362 dma_cookie_t cookie;
1365 int err = 0; 1363 int err = 0;
1364 struct completion cmp;
1366 1365
1367 src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); 1366 src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1368 if (!src) 1367 if (!src)
@@ -1381,7 +1380,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
1381 dma_chan = container_of(device->common.channels.next, 1380 dma_chan = container_of(device->common.channels.next,
1382 struct dma_chan, 1381 struct dma_chan,
1383 device_node); 1382 device_node);
1384 if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) { 1383 if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
1385 dev_err(&device->pdev->dev, 1384 dev_err(&device->pdev->dev,
1386 "selftest cannot allocate chan resource\n"); 1385 "selftest cannot allocate chan resource\n");
1387 err = -ENODEV; 1386 err = -ENODEV;
@@ -1402,8 +1401,9 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
1402 } 1401 }
1403 1402
1404 async_tx_ack(tx); 1403 async_tx_ack(tx);
1404 init_completion(&cmp);
1405 tx->callback = ioat_dma_test_callback; 1405 tx->callback = ioat_dma_test_callback;
1406 tx->callback_param = (void *)0x8086; 1406 tx->callback_param = &cmp;
1407 cookie = tx->tx_submit(tx); 1407 cookie = tx->tx_submit(tx);
1408 if (cookie < 0) { 1408 if (cookie < 0) {
1409 dev_err(&device->pdev->dev, 1409 dev_err(&device->pdev->dev,
@@ -1413,7 +1413,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
1413 } 1413 }
1414 device->common.device_issue_pending(dma_chan); 1414 device->common.device_issue_pending(dma_chan);
1415 1415
1416 wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000)); 1416 wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1417 1417
1418 if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) 1418 if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
1419 != DMA_SUCCESS) { 1419 != DMA_SUCCESS) {
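
The ioat_dma.c hunk above drops the file-scope DECLARE_COMPLETION() and instead hands an on-stack completion to the self-test callback through callback_param. A condensed sketch of that pattern, assuming <linux/completion.h>, <linux/dmaengine.h> and <linux/jiffies.h>; my_test_callback and my_wait_for_copy are hypothetical names, and the prep step that produced tx is elided.

        static void my_test_callback(void *dma_async_param)
        {
                struct completion *cmp = dma_async_param;

                complete(cmp);
        }

        static int my_wait_for_copy(struct dma_chan *chan,
                                    struct dma_async_tx_descriptor *tx)
        {
                struct completion cmp;
                dma_cookie_t cookie;

                init_completion(&cmp);
                tx->callback = my_test_callback;
                tx->callback_param = &cmp;

                cookie = tx->tx_submit(tx);
                if (cookie < 0)
                        return -ENODEV;

                chan->device->device_issue_pending(chan);

                /* time out rather than hang if the engine never completes */
                if (!wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)))
                        return -ETIMEDOUT;

                return 0;
        }

Because the completion lives on the caller's stack, concurrent self-tests on different channels no longer race on a shared global, which appears to be the point of the change.
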
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 6be317262200..ea5440dd10dc 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -24,7 +24,6 @@
24 24
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/async_tx.h>
28#include <linux/delay.h> 27#include <linux/delay.h>
29#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
30#include <linux/spinlock.h> 29#include <linux/spinlock.h>
@@ -116,7 +115,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
116 } 115 }
117 116
118 /* run dependent operations */ 117 /* run dependent operations */
119 async_tx_run_dependencies(&desc->async_tx); 118 dma_run_dependencies(&desc->async_tx);
120 119
121 return cookie; 120 return cookie;
122} 121}
@@ -270,8 +269,6 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
270 break; 269 break;
271 } 270 }
272 271
273 BUG_ON(!seen_current);
274
275 if (cookie > 0) { 272 if (cookie > 0) {
276 iop_chan->completed_cookie = cookie; 273 iop_chan->completed_cookie = cookie;
277 pr_debug("\tcompleted cookie %d\n", cookie); 274 pr_debug("\tcompleted cookie %d\n", cookie);
@@ -471,8 +468,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
471 * greater than 2x the number slots needed to satisfy a device->max_xor 468 * greater than 2x the number slots needed to satisfy a device->max_xor
472 * request. 469 * request.
473 * */ 470 * */
474static int iop_adma_alloc_chan_resources(struct dma_chan *chan, 471static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
475 struct dma_client *client)
476{ 472{
477 char *hw_desc; 473 char *hw_desc;
478 int idx; 474 int idx;
@@ -866,7 +862,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
866 dma_chan = container_of(device->common.channels.next, 862 dma_chan = container_of(device->common.channels.next,
867 struct dma_chan, 863 struct dma_chan,
868 device_node); 864 device_node);
869 if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) { 865 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
870 err = -ENODEV; 866 err = -ENODEV;
871 goto out; 867 goto out;
872 } 868 }
@@ -964,7 +960,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
964 dma_chan = container_of(device->common.channels.next, 960 dma_chan = container_of(device->common.channels.next,
965 struct dma_chan, 961 struct dma_chan,
966 device_node); 962 device_node);
967 if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) { 963 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
968 err = -ENODEV; 964 err = -ENODEV;
969 goto out; 965 goto out;
970 } 966 }
@@ -1115,26 +1111,13 @@ static int __devexit iop_adma_remove(struct platform_device *dev)
1115 struct iop_adma_device *device = platform_get_drvdata(dev); 1111 struct iop_adma_device *device = platform_get_drvdata(dev);
1116 struct dma_chan *chan, *_chan; 1112 struct dma_chan *chan, *_chan;
1117 struct iop_adma_chan *iop_chan; 1113 struct iop_adma_chan *iop_chan;
1118 int i;
1119 struct iop_adma_platform_data *plat_data = dev->dev.platform_data; 1114 struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
1120 1115
1121 dma_async_device_unregister(&device->common); 1116 dma_async_device_unregister(&device->common);
1122 1117
1123 for (i = 0; i < 3; i++) {
1124 unsigned int irq;
1125 irq = platform_get_irq(dev, i);
1126 free_irq(irq, device);
1127 }
1128
1129 dma_free_coherent(&dev->dev, plat_data->pool_size, 1118 dma_free_coherent(&dev->dev, plat_data->pool_size,
1130 device->dma_desc_pool_virt, device->dma_desc_pool); 1119 device->dma_desc_pool_virt, device->dma_desc_pool);
1131 1120
1132 do {
1133 struct resource *res;
1134 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
1135 release_mem_region(res->start, res->end - res->start);
1136 } while (0);
1137
1138 list_for_each_entry_safe(chan, _chan, &device->common.channels, 1121 list_for_each_entry_safe(chan, _chan, &device->common.channels,
1139 device_node) { 1122 device_node) {
1140 iop_chan = to_iop_adma_chan(chan); 1123 iop_chan = to_iop_adma_chan(chan);
@@ -1255,7 +1238,6 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
1255 spin_lock_init(&iop_chan->lock); 1238 spin_lock_init(&iop_chan->lock);
1256 INIT_LIST_HEAD(&iop_chan->chain); 1239 INIT_LIST_HEAD(&iop_chan->chain);
1257 INIT_LIST_HEAD(&iop_chan->all_slots); 1240 INIT_LIST_HEAD(&iop_chan->all_slots);
1258 INIT_RCU_HEAD(&iop_chan->common.rcu);
1259 iop_chan->common.device = dma_dev; 1241 iop_chan->common.device = dma_dev;
1260 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); 1242 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1261 1243
@@ -1431,16 +1413,12 @@ static int __init iop_adma_init (void)
1431 return platform_driver_register(&iop_adma_driver); 1413 return platform_driver_register(&iop_adma_driver);
1432} 1414}
1433 1415
1434/* it's currently unsafe to unload this module */
1435#if 0
1436static void __exit iop_adma_exit (void) 1416static void __exit iop_adma_exit (void)
1437{ 1417{
1438 platform_driver_unregister(&iop_adma_driver); 1418 platform_driver_unregister(&iop_adma_driver);
1439 return; 1419 return;
1440} 1420}
1441module_exit(iop_adma_exit); 1421module_exit(iop_adma_exit);
1442#endif
1443
1444module_init(iop_adma_init); 1422module_init(iop_adma_init);
1445 1423
1446MODULE_AUTHOR("Intel Corporation"); 1424MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index bcda17426411..d35cbd1ff0b3 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -18,7 +18,6 @@
18 18
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/async_tx.h>
22#include <linux/delay.h> 21#include <linux/delay.h>
23#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
24#include <linux/spinlock.h> 23#include <linux/spinlock.h>
@@ -340,7 +339,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
340 } 339 }
341 340
342 /* run dependent operations */ 341 /* run dependent operations */
343 async_tx_run_dependencies(&desc->async_tx); 342 dma_run_dependencies(&desc->async_tx);
344 343
345 return cookie; 344 return cookie;
346} 345}
@@ -607,8 +606,7 @@ submit_done:
607} 606}
608 607
609/* returns the number of allocated descriptors */ 608/* returns the number of allocated descriptors */
610static int mv_xor_alloc_chan_resources(struct dma_chan *chan, 609static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
611 struct dma_client *client)
612{ 610{
613 char *hw_desc; 611 char *hw_desc;
614 int idx; 612 int idx;
@@ -958,7 +956,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
958 dma_chan = container_of(device->common.channels.next, 956 dma_chan = container_of(device->common.channels.next,
959 struct dma_chan, 957 struct dma_chan,
960 device_node); 958 device_node);
961 if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) { 959 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
962 err = -ENODEV; 960 err = -ENODEV;
963 goto out; 961 goto out;
964 } 962 }
@@ -1053,7 +1051,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
1053 dma_chan = container_of(device->common.channels.next, 1051 dma_chan = container_of(device->common.channels.next,
1054 struct dma_chan, 1052 struct dma_chan,
1055 device_node); 1053 device_node);
1056 if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) { 1054 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
1057 err = -ENODEV; 1055 err = -ENODEV;
1058 goto out; 1056 goto out;
1059 } 1057 }
@@ -1221,7 +1219,6 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
1221 INIT_LIST_HEAD(&mv_chan->chain); 1219 INIT_LIST_HEAD(&mv_chan->chain);
1222 INIT_LIST_HEAD(&mv_chan->completed_slots); 1220 INIT_LIST_HEAD(&mv_chan->completed_slots);
1223 INIT_LIST_HEAD(&mv_chan->all_slots); 1221 INIT_LIST_HEAD(&mv_chan->all_slots);
1224 INIT_RCU_HEAD(&mv_chan->common.rcu);
1225 mv_chan->common.device = dma_dev; 1222 mv_chan->common.device = dma_dev;
1226 1223
1227 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); 1224 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
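
Both offload drivers above (iop-adma and mv_xor) stop including <linux/async_tx.h> and call the dmaengine-level dma_run_dependencies() from their completion paths instead. A hedged sketch of where that call typically sits in a cleanup helper; my_desc and the surrounding fields are hypothetical stand-ins for the drivers' slot bookkeeping, not code from this patch.

        static dma_cookie_t my_run_tx_complete_actions(struct my_desc *desc,
                                                       dma_cookie_t cookie)
        {
                struct dma_async_tx_descriptor *tx = &desc->async_tx;

                if (tx->cookie > 0) {
                        cookie = tx->cookie;
                        tx->cookie = 0;

                        /* tell the submitter this operation is done */
                        if (tx->callback)
                                tx->callback(tx->callback_param);
                }

                /* kick transactions that were chained behind this descriptor */
                dma_run_dependencies(tx);

                return cookie;
        }
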
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 1e97916914ad..76bfe16c09b1 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -55,7 +55,6 @@ enum atmel_mci_state {
55 55
56struct atmel_mci_dma { 56struct atmel_mci_dma {
57#ifdef CONFIG_MMC_ATMELMCI_DMA 57#ifdef CONFIG_MMC_ATMELMCI_DMA
58 struct dma_client client;
59 struct dma_chan *chan; 58 struct dma_chan *chan;
60 struct dma_async_tx_descriptor *data_desc; 59 struct dma_async_tx_descriptor *data_desc;
61#endif 60#endif
@@ -593,10 +592,8 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
593 592
594 /* If we don't have a channel, we can't do DMA */ 593 /* If we don't have a channel, we can't do DMA */
595 chan = host->dma.chan; 594 chan = host->dma.chan;
596 if (chan) { 595 if (chan)
597 dma_chan_get(chan);
598 host->data_chan = chan; 596 host->data_chan = chan;
599 }
600 597
601 if (!chan) 598 if (!chan)
602 return -ENODEV; 599 return -ENODEV;
@@ -1443,60 +1440,6 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
1443 return IRQ_HANDLED; 1440 return IRQ_HANDLED;
1444} 1441}
1445 1442
1446#ifdef CONFIG_MMC_ATMELMCI_DMA
1447
1448static inline struct atmel_mci *
1449dma_client_to_atmel_mci(struct dma_client *client)
1450{
1451 return container_of(client, struct atmel_mci, dma.client);
1452}
1453
1454static enum dma_state_client atmci_dma_event(struct dma_client *client,
1455 struct dma_chan *chan, enum dma_state state)
1456{
1457 struct atmel_mci *host;
1458 enum dma_state_client ret = DMA_NAK;
1459
1460 host = dma_client_to_atmel_mci(client);
1461
1462 switch (state) {
1463 case DMA_RESOURCE_AVAILABLE:
1464 spin_lock_bh(&host->lock);
1465 if (!host->dma.chan) {
1466 host->dma.chan = chan;
1467 ret = DMA_ACK;
1468 }
1469 spin_unlock_bh(&host->lock);
1470
1471 if (ret == DMA_ACK)
1472 dev_info(&host->pdev->dev,
1473 "Using %s for DMA transfers\n",
1474 chan->dev.bus_id);
1475 break;
1476
1477 case DMA_RESOURCE_REMOVED:
1478 spin_lock_bh(&host->lock);
1479 if (host->dma.chan == chan) {
1480 host->dma.chan = NULL;
1481 ret = DMA_ACK;
1482 }
1483 spin_unlock_bh(&host->lock);
1484
1485 if (ret == DMA_ACK)
1486 dev_info(&host->pdev->dev,
1487 "Lost %s, falling back to PIO\n",
1488 chan->dev.bus_id);
1489 break;
1490
1491 default:
1492 break;
1493 }
1494
1495
1496 return ret;
1497}
1498#endif /* CONFIG_MMC_ATMELMCI_DMA */
1499
1500static int __init atmci_init_slot(struct atmel_mci *host, 1443static int __init atmci_init_slot(struct atmel_mci *host,
1501 struct mci_slot_pdata *slot_data, unsigned int id, 1444 struct mci_slot_pdata *slot_data, unsigned int id,
1502 u32 sdc_reg) 1445 u32 sdc_reg)
@@ -1600,6 +1543,18 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
1600 mmc_free_host(slot->mmc); 1543 mmc_free_host(slot->mmc);
1601} 1544}
1602 1545
1546#ifdef CONFIG_MMC_ATMELMCI_DMA
1547static bool filter(struct dma_chan *chan, void *slave)
1548{
1549 struct dw_dma_slave *dws = slave;
1550
1551 if (dws->dma_dev == chan->device->dev)
1552 return true;
1553 else
1554 return false;
1555}
1556#endif
1557
1603static int __init atmci_probe(struct platform_device *pdev) 1558static int __init atmci_probe(struct platform_device *pdev)
1604{ 1559{
1605 struct mci_platform_data *pdata; 1560 struct mci_platform_data *pdata;
@@ -1652,22 +1607,20 @@ static int __init atmci_probe(struct platform_device *pdev)
1652 goto err_request_irq; 1607 goto err_request_irq;
1653 1608
1654#ifdef CONFIG_MMC_ATMELMCI_DMA 1609#ifdef CONFIG_MMC_ATMELMCI_DMA
1655 if (pdata->dma_slave) { 1610 if (pdata->dma_slave.dma_dev) {
1656 struct dma_slave *slave = pdata->dma_slave; 1611 struct dw_dma_slave *dws = &pdata->dma_slave;
1612 dma_cap_mask_t mask;
1657 1613
1658 slave->tx_reg = regs->start + MCI_TDR; 1614 dws->tx_reg = regs->start + MCI_TDR;
1659 slave->rx_reg = regs->start + MCI_RDR; 1615 dws->rx_reg = regs->start + MCI_RDR;
1660 1616
1661 /* Try to grab a DMA channel */ 1617 /* Try to grab a DMA channel */
1662 host->dma.client.event_callback = atmci_dma_event; 1618 dma_cap_zero(mask);
1663 dma_cap_set(DMA_SLAVE, host->dma.client.cap_mask); 1619 dma_cap_set(DMA_SLAVE, mask);
1664 host->dma.client.slave = slave; 1620 host->dma.chan = dma_request_channel(mask, filter, dws);
1665
1666 dma_async_client_register(&host->dma.client);
1667 dma_async_client_chan_request(&host->dma.client);
1668 } else {
1669 dev_notice(&pdev->dev, "DMA not available, using PIO\n");
1670 } 1621 }
1622 if (!host->dma.chan)
1623 dev_notice(&pdev->dev, "DMA not available, using PIO\n");
1671#endif /* CONFIG_MMC_ATMELMCI_DMA */ 1624#endif /* CONFIG_MMC_ATMELMCI_DMA */
1672 1625
1673 platform_set_drvdata(pdev, host); 1626 platform_set_drvdata(pdev, host);
@@ -1699,8 +1652,8 @@ static int __init atmci_probe(struct platform_device *pdev)
1699 1652
1700err_init_slot: 1653err_init_slot:
1701#ifdef CONFIG_MMC_ATMELMCI_DMA 1654#ifdef CONFIG_MMC_ATMELMCI_DMA
1702 if (pdata->dma_slave) 1655 if (host->dma.chan)
1703 dma_async_client_unregister(&host->dma.client); 1656 dma_release_channel(host->dma.chan);
1704#endif 1657#endif
1705 free_irq(irq, host); 1658 free_irq(irq, host);
1706err_request_irq: 1659err_request_irq:
@@ -1731,8 +1684,8 @@ static int __exit atmci_remove(struct platform_device *pdev)
1731 clk_disable(host->mck); 1684 clk_disable(host->mck);
1732 1685
1733#ifdef CONFIG_MMC_ATMELMCI_DMA 1686#ifdef CONFIG_MMC_ATMELMCI_DMA
1734 if (host->dma.client.slave) 1687 if (host->dma.chan)
1735 dma_async_client_unregister(&host->dma.client); 1688 dma_release_channel(host->dma.chan);
1736#endif 1689#endif
1737 1690
1738 free_irq(platform_get_irq(pdev, 0), host); 1691 free_irq(platform_get_irq(pdev, 0), host);
@@ -1761,7 +1714,7 @@ static void __exit atmci_exit(void)
1761 platform_driver_unregister(&atmci_driver); 1714 platform_driver_unregister(&atmci_driver);
1762} 1715}
1763 1716
1764module_init(atmci_init); 1717late_initcall(atmci_init); /* try to load after dma driver when built-in */
1765module_exit(atmci_exit); 1718module_exit(atmci_exit);
1766 1719
1767MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver"); 1720MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
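
Taken together, the atmel-mci changes show the new client-side pattern end to end: build a capability mask, hand a filter plus the controller-specific struct dw_dma_slave to dma_request_channel(), and balance it with dma_release_channel() on the error and remove paths. A condensed sketch of the request side, assuming struct dw_dma_slave comes from <linux/dw_dmac.h> as it did in this era; my_filter and my_grab_channel are illustrative names.

        static bool my_filter(struct dma_chan *chan, void *slave)
        {
                struct dw_dma_slave *dws = slave;

                /* only accept channels that belong to the expected controller */
                return dws->dma_dev == chan->device->dev;
        }

        static struct dma_chan *my_grab_channel(struct dw_dma_slave *dws)
        {
                dma_cap_mask_t mask;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                /* may return NULL; the caller should fall back to PIO in that case */
                return dma_request_channel(mask, my_filter, dws);
        }

The matching teardown is a single dma_release_channel() once the channel is no longer needed, as in atmci_remove() above; the NULL return is why the probe path prints "DMA not available, using PIO" rather than failing.
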