author		Dan Williams <dan.j.williams@intel.com>	2009-01-06 13:38:21 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2009-01-06 13:38:21 -0500
commit		41d5e59c1299f27983977bcfe3b360600996051c (patch)
tree		f0e80b6fea3af04f266843af97f433198ad535c7 /drivers/dma
parent		4fac7fa57cf8001be259688468c825f836daf739 (diff)
dmaengine: add a release for dma class devices and dependent infrastructure
Resolves:

    WARNING: at drivers/base/core.c:122 device_release+0x4d/0x52()
    Device 'dma0chan0' does not have a release() function, it is broken
    and must be fixed.

The dma_chan_dev object is introduced to gear-match sysfs kobject and
dmaengine channel lifetimes.  When a channel is removed, accesses to its
sysfs entries return -ENODEV until the kobject can be released.

The bulk of the change is updates to existing code to handle the extra
layer of indirection between a dma_chan and its struct device.

Reported-by: Alexander Beregalov <a.beregalov@gmail.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Cc: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
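The new container type and its helpers land in include/linux/dmaengine.h, which this diffstat (limited to drivers/dma) does not show. Reconstructed from the accessors the diff below relies on (chan->dev->device, chan->dev->chan, dma_chan_name()), the header side presumably looks roughly like the following sketch; field layout and comments are approximations, not the verbatim patch:

	/* Approximate sketch of the dmaengine.h side of this change,
	 * reconstructed from the call sites converted below. */
	struct dma_chan_dev {
		struct dma_chan *chan;	/* NULLed under dma_list_mutex at unregister */
		struct device device;	/* embedded sysfs device, freed by chan_dev_release() */
	};

	/* Replaces dev_name(&chan->dev) at the call sites converted below. */
	static inline const char *dma_chan_name(struct dma_chan *chan)
	{
		return dev_name(&chan->dev->device);
	}

Because the struct device is now embedded in a separately allocated container, the final kobject_put() after device_unregister() can kfree() that container via chan_dev_release(), which is exactly the release() method the warning above complained was missing.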
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/dmaengine.c	| 106
-rw-r--r--	drivers/dma/dmatest.c	|  14
-rw-r--r--	drivers/dma/dw_dmac.c	|  91
-rw-r--r--	drivers/dma/fsldma.c	|   2
4 files changed, 141 insertions(+), 72 deletions(-)
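The sysfs handlers converted in the first dmaengine.c hunk below all follow one pattern: take dma_list_mutex, map the struct device back to its channel via dev_to_dma_chan(), and return -ENODEV once unregistration has severed the link. A condensed sketch of that pattern (show_example is illustrative only, not part of the patch):

	static ssize_t show_example(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		struct dma_chan *chan;
		int err;

		mutex_lock(&dma_list_mutex);	/* serialize against unregister */
		chan = dev_to_dma_chan(dev);	/* NULL once the channel is gone */
		if (chan)
			err = sprintf(buf, "%d\n", chan->client_count);
		else
			err = -ENODEV;		/* sysfs node outlives the channel */
		mutex_unlock(&dma_list_mutex);

		return err;
	}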
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index cdc8ecfc2c2c..93c4c9ac8997 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -64,36 +64,75 @@ static long dmaengine_ref_count;
 
 /* --- sysfs implementation --- */
 
+/**
+ * dev_to_dma_chan - convert a device pointer to its sysfs container object
+ * @dev - device node
+ *
+ * Must be called under dma_list_mutex
+ */
+static struct dma_chan *dev_to_dma_chan(struct device *dev)
+{
+	struct dma_chan_dev *chan_dev;
+
+	chan_dev = container_of(dev, typeof(*chan_dev), device);
+	return chan_dev->chan;
+}
+
 static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct dma_chan *chan = to_dma_chan(dev);
+	struct dma_chan *chan;
 	unsigned long count = 0;
 	int i;
+	int err;
 
-	for_each_possible_cpu(i)
-		count += per_cpu_ptr(chan->local, i)->memcpy_count;
+	mutex_lock(&dma_list_mutex);
+	chan = dev_to_dma_chan(dev);
+	if (chan) {
+		for_each_possible_cpu(i)
+			count += per_cpu_ptr(chan->local, i)->memcpy_count;
+		err = sprintf(buf, "%lu\n", count);
+	} else
+		err = -ENODEV;
+	mutex_unlock(&dma_list_mutex);
 
-	return sprintf(buf, "%lu\n", count);
+	return err;
 }
 
 static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
 				      char *buf)
 {
-	struct dma_chan *chan = to_dma_chan(dev);
+	struct dma_chan *chan;
 	unsigned long count = 0;
 	int i;
+	int err;
 
-	for_each_possible_cpu(i)
-		count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+	mutex_lock(&dma_list_mutex);
+	chan = dev_to_dma_chan(dev);
+	if (chan) {
+		for_each_possible_cpu(i)
+			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+		err = sprintf(buf, "%lu\n", count);
+	} else
+		err = -ENODEV;
+	mutex_unlock(&dma_list_mutex);
 
-	return sprintf(buf, "%lu\n", count);
+	return err;
 }
 
 static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct dma_chan *chan = to_dma_chan(dev);
+	struct dma_chan *chan;
+	int err;
 
-	return sprintf(buf, "%d\n", chan->client_count);
+	mutex_lock(&dma_list_mutex);
+	chan = dev_to_dma_chan(dev);
+	if (chan)
+		err = sprintf(buf, "%d\n", chan->client_count);
+	else
+		err = -ENODEV;
+	mutex_unlock(&dma_list_mutex);
+
+	return err;
 }
 
 static struct device_attribute dma_attrs[] = {
@@ -103,9 +142,18 @@ static struct device_attribute dma_attrs[] = {
 	__ATTR_NULL
 };
 
+static void chan_dev_release(struct device *dev)
+{
+	struct dma_chan_dev *chan_dev;
+
+	chan_dev = container_of(dev, typeof(*chan_dev), device);
+	kfree(chan_dev);
+}
+
 static struct class dma_devclass = {
 	.name = "dma",
 	.dev_attrs = dma_attrs,
+	.dev_release = chan_dev_release,
 };
 
 /* --- client and device registration --- */
@@ -420,7 +468,7 @@ static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_devic
 	list_for_each_entry(chan, &dev->channels, device_node) {
 		if (chan->client_count) {
 			pr_debug("%s: %s busy\n",
-				 __func__, dev_name(&chan->dev));
+				 __func__, dma_chan_name(chan));
 			continue;
 		}
 		ret = chan;
@@ -466,22 +514,22 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 
 			if (err == -ENODEV) {
 				pr_debug("%s: %s module removed\n", __func__,
-					 dev_name(&chan->dev));
+					 dma_chan_name(chan));
 				list_del_rcu(&device->global_node);
 			} else if (err)
 				pr_err("dmaengine: failed to get %s: (%d)\n",
-				       dev_name(&chan->dev), err);
+				       dma_chan_name(chan), err);
 			else
 				break;
 		} else
 			pr_debug("%s: %s filter said false\n",
-				 __func__, dev_name(&chan->dev));
+				 __func__, dma_chan_name(chan));
 		chan = NULL;
 	}
 	mutex_unlock(&dma_list_mutex);
 
 	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
-		 chan ? dev_name(&chan->dev) : NULL);
+		 chan ? dma_chan_name(chan) : NULL);
 
 	return chan;
 }
@@ -521,7 +569,7 @@ void dmaengine_get(void)
 				break;
 			} else if (err)
 				pr_err("dmaengine: failed to get %s: (%d)\n",
-				       dev_name(&chan->dev), err);
+				       dma_chan_name(chan), err);
 		}
 	}
 
@@ -601,14 +649,20 @@ int dma_async_device_register(struct dma_device *device)
 		chan->local = alloc_percpu(typeof(*chan->local));
 		if (chan->local == NULL)
 			continue;
+		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+		if (chan->dev == NULL) {
+			free_percpu(chan->local);
+			continue;
+		}
 
 		chan->chan_id = chancnt++;
-		chan->dev.class = &dma_devclass;
-		chan->dev.parent = device->dev;
-		dev_set_name(&chan->dev, "dma%dchan%d",
+		chan->dev->device.class = &dma_devclass;
+		chan->dev->device.parent = device->dev;
+		chan->dev->chan = chan;
+		dev_set_name(&chan->dev->device, "dma%dchan%d",
 			     device->dev_id, chan->chan_id);
 
-		rc = device_register(&chan->dev);
+		rc = device_register(&chan->dev->device);
 		if (rc) {
 			free_percpu(chan->local);
 			chan->local = NULL;
@@ -645,7 +699,10 @@ err_out:
 	list_for_each_entry(chan, &device->channels, device_node) {
 		if (chan->local == NULL)
 			continue;
-		device_unregister(&chan->dev);
+		mutex_lock(&dma_list_mutex);
+		chan->dev->chan = NULL;
+		mutex_unlock(&dma_list_mutex);
+		device_unregister(&chan->dev->device);
 		free_percpu(chan->local);
 	}
 	return rc;
@@ -672,7 +729,10 @@ void dma_async_device_unregister(struct dma_device *device)
 		WARN_ONCE(chan->client_count,
 			  "%s called while %d clients hold a reference\n",
 			  __func__, chan->client_count);
-		device_unregister(&chan->dev);
+		mutex_lock(&dma_list_mutex);
+		chan->dev->chan = NULL;
+		mutex_unlock(&dma_list_mutex);
+		device_unregister(&chan->dev->device);
 	}
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
@@ -845,7 +905,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 		return DMA_SUCCESS;
 
 	WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
-		  " %s\n", __func__, dev_name(&tx->chan->dev));
+		  " %s\n", __func__, dma_chan_name(tx->chan));
 
 	/* poll through the dependency chain, return when tx is complete */
 	do {
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index c77d47c4ec5b..3603f1ea5b28 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -80,7 +80,7 @@ static bool dmatest_match_channel(struct dma_chan *chan)
 {
 	if (test_channel[0] == '\0')
 		return true;
-	return strcmp(dev_name(&chan->dev), test_channel) == 0;
+	return strcmp(dma_chan_name(chan), test_channel) == 0;
 }
 
 static bool dmatest_match_device(struct dma_device *device)
@@ -325,7 +325,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
 
 	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
 	if (!dtc) {
-		pr_warning("dmatest: No memory for %s\n", dev_name(&chan->dev));
+		pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
 		return -ENOMEM;
 	}
 
@@ -336,16 +336,16 @@ static int dmatest_add_channel(struct dma_chan *chan)
 		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
 		if (!thread) {
 			pr_warning("dmatest: No memory for %s-test%u\n",
-				   dev_name(&chan->dev), i);
+				   dma_chan_name(chan), i);
 			break;
 		}
 		thread->chan = dtc->chan;
 		smp_wmb();
 		thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
-				dev_name(&chan->dev), i);
+				dma_chan_name(chan), i);
 		if (IS_ERR(thread->task)) {
 			pr_warning("dmatest: Failed to run thread %s-test%u\n",
-				   dev_name(&chan->dev), i);
+				   dma_chan_name(chan), i);
 			kfree(thread);
 			break;
 		}
@@ -355,7 +355,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
 		list_add_tail(&thread->node, &dtc->threads);
 	}
 
-	pr_info("dmatest: Started %u threads using %s\n", i, dev_name(&chan->dev));
+	pr_info("dmatest: Started %u threads using %s\n", i, dma_chan_name(chan));
 
 	list_add_tail(&dtc->node, &dmatest_channels);
 	nr_channels++;
@@ -408,7 +408,7 @@ static void __exit dmatest_exit(void)
 		list_del(&dtc->node);
 		dmatest_cleanup_channel(dtc);
 		pr_debug("dmatest: dropped channel %s\n",
-			 dev_name(&dtc->chan->dev));
+			 dma_chan_name(dtc->chan));
 		dma_release_channel(dtc->chan);
 	}
 }
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a29dda8f801b..6b702cc46b3d 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -70,6 +70,15 @@
  * the controller, though.
  */
 
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+static struct device *chan2parent(struct dma_chan *chan)
+{
+	return chan->dev->device.parent;
+}
+
 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
 {
 	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
@@ -93,12 +102,12 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 			ret = desc;
 			break;
 		}
-		dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc);
+		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
 		i++;
 	}
 	spin_unlock_bh(&dwc->lock);
 
-	dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i);
+	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
 
 	return ret;
 }
@@ -108,10 +117,10 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
 	struct dw_desc *child;
 
 	list_for_each_entry(child, &desc->txd.tx_list, desc_node)
-		dma_sync_single_for_cpu(dwc->chan.dev.parent,
+		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
 				child->txd.phys, sizeof(child->lli),
 				DMA_TO_DEVICE);
-	dma_sync_single_for_cpu(dwc->chan.dev.parent,
+	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
 			desc->txd.phys, sizeof(desc->lli),
 			DMA_TO_DEVICE);
 }
@@ -129,11 +138,11 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 
 		spin_lock_bh(&dwc->lock);
 		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
-			dev_vdbg(&dwc->chan.dev,
+			dev_vdbg(chan2dev(&dwc->chan),
 					"moving child desc %p to freelist\n",
 					child);
 		list_splice_init(&desc->txd.tx_list, &dwc->free_list);
-		dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc);
+		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
 		list_add(&desc->desc_node, &dwc->free_list);
 		spin_unlock_bh(&dwc->lock);
 	}
@@ -163,9 +172,9 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 
 	/* ASSERT: channel is idle */
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
-		dev_err(&dwc->chan.dev,
+		dev_err(chan2dev(&dwc->chan),
 			"BUG: Attempted to start non-idle channel\n");
-		dev_err(&dwc->chan.dev,
+		dev_err(chan2dev(&dwc->chan),
 			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
 			channel_readl(dwc, SAR),
 			channel_readl(dwc, DAR),
@@ -193,7 +202,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
 	void *param;
 	struct dma_async_tx_descriptor *txd = &desc->txd;
 
-	dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie);
+	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
 
 	dwc->completed = txd->cookie;
 	callback = txd->callback;
@@ -208,11 +217,11 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
 	 * mapped before they were submitted...
 	 */
 	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
-		dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len,
-				DMA_FROM_DEVICE);
+		dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar,
+			       desc->len, DMA_FROM_DEVICE);
 	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
-		dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len,
-				DMA_TO_DEVICE);
+		dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar,
+			       desc->len, DMA_TO_DEVICE);
 
 	/*
 	 * The API requires that no submissions are done from a
@@ -228,7 +237,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	LIST_HEAD(list);
 
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
-		dev_err(&dwc->chan.dev,
+		dev_err(chan2dev(&dwc->chan),
 			"BUG: XFER bit set, but channel not idle!\n");
 
 		/* Try to continue after resetting the channel... */
@@ -273,7 +282,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		return;
 	}
 
-	dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp);
+	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
 
 	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
 		if (desc->lli.llp == llp)
@@ -292,7 +301,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		dwc_descriptor_complete(dwc, desc);
 	}
 
-	dev_err(&dwc->chan.dev,
+	dev_err(chan2dev(&dwc->chan),
 		"BUG: All descriptors done, but channel not idle!\n");
 
 	/* Try to continue after resetting the channel... */
@@ -308,7 +317,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 
 static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
 {
-	dev_printk(KERN_CRIT, &dwc->chan.dev,
+	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
 			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
 			lli->sar, lli->dar, lli->llp,
 			lli->ctlhi, lli->ctllo);
@@ -342,9 +351,9 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	 * controller flagged an error instead of scribbling over
 	 * random memory locations.
 	 */
-	dev_printk(KERN_CRIT, &dwc->chan.dev,
+	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
 			"Bad descriptor submitted for DMA!\n");
-	dev_printk(KERN_CRIT, &dwc->chan.dev,
+	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
 			"  cookie: %d\n", bad_desc->txd.cookie);
 	dwc_dump_lli(dwc, &bad_desc->lli);
 	list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
@@ -442,12 +451,12 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
 	 * for DMA. But this is hard to do in a race-free manner.
 	 */
 	if (list_empty(&dwc->active_list)) {
-		dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n",
+		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
 				desc->txd.cookie);
 		dwc_dostart(dwc, desc);
 		list_add_tail(&desc->desc_node, &dwc->active_list);
 	} else {
-		dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n",
+		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
 				desc->txd.cookie);
 
 		list_add_tail(&desc->desc_node, &dwc->queue);
@@ -472,11 +481,11 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	unsigned int dst_width;
 	u32 ctllo;
 
-	dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
+	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
 			dest, src, len, flags);
 
 	if (unlikely(!len)) {
-		dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n");
+		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
 		return NULL;
 	}
 
@@ -516,7 +525,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 			first = desc;
 		} else {
 			prev->lli.llp = desc->txd.phys;
-			dma_sync_single_for_device(chan->dev.parent,
+			dma_sync_single_for_device(chan2parent(chan),
 					prev->txd.phys, sizeof(prev->lli),
 					DMA_TO_DEVICE);
 			list_add_tail(&desc->desc_node,
@@ -531,7 +540,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		prev->lli.ctllo |= DWC_CTLL_INT_EN;
 
 	prev->lli.llp = 0;
-	dma_sync_single_for_device(chan->dev.parent,
+	dma_sync_single_for_device(chan2parent(chan),
 			prev->txd.phys, sizeof(prev->lli),
 			DMA_TO_DEVICE);
 
@@ -562,7 +571,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct scatterlist *sg;
 	size_t total_len = 0;
 
-	dev_vdbg(&chan->dev, "prep_dma_slave\n");
+	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
 
 	if (unlikely(!dws || !sg_len))
 		return NULL;
@@ -570,7 +579,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	reg_width = dws->reg_width;
 	prev = first = NULL;
 
-	sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction);
+	sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
 
 	switch (direction) {
 	case DMA_TO_DEVICE:
@@ -587,7 +596,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
 			desc = dwc_desc_get(dwc);
 			if (!desc) {
-				dev_err(&chan->dev,
+				dev_err(chan2dev(chan),
 					"not enough descriptors available\n");
 				goto err_desc_get;
 			}
@@ -607,7 +616,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 				first = desc;
 			} else {
 				prev->lli.llp = desc->txd.phys;
-				dma_sync_single_for_device(chan->dev.parent,
+				dma_sync_single_for_device(chan2parent(chan),
 						prev->txd.phys,
 						sizeof(prev->lli),
 						DMA_TO_DEVICE);
@@ -633,7 +642,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
 			desc = dwc_desc_get(dwc);
 			if (!desc) {
-				dev_err(&chan->dev,
+				dev_err(chan2dev(chan),
 					"not enough descriptors available\n");
 				goto err_desc_get;
 			}
@@ -653,7 +662,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 				first = desc;
 			} else {
 				prev->lli.llp = desc->txd.phys;
-				dma_sync_single_for_device(chan->dev.parent,
+				dma_sync_single_for_device(chan2parent(chan),
 						prev->txd.phys,
 						sizeof(prev->lli),
 						DMA_TO_DEVICE);
@@ -673,7 +682,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		prev->lli.ctllo |= DWC_CTLL_INT_EN;
 
 	prev->lli.llp = 0;
-	dma_sync_single_for_device(chan->dev.parent,
+	dma_sync_single_for_device(chan2parent(chan),
 			prev->txd.phys, sizeof(prev->lli),
 			DMA_TO_DEVICE);
 
@@ -768,11 +777,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	u32 cfghi;
 	u32 cfglo;
 
-	dev_vdbg(&chan->dev, "alloc_chan_resources\n");
+	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
 
 	/* ASSERT: channel is idle */
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
-		dev_dbg(&chan->dev, "DMA channel not idle?\n");
+		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
 		return -EIO;
 	}
 
@@ -808,7 +817,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 
 		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
 		if (!desc) {
-			dev_info(&chan->dev,
+			dev_info(chan2dev(chan),
 				"only allocated %d descriptors\n", i);
 			spin_lock_bh(&dwc->lock);
 			break;
@@ -818,7 +827,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 		desc->txd.tx_submit = dwc_tx_submit;
 		desc->txd.flags = DMA_CTRL_ACK;
 		INIT_LIST_HEAD(&desc->txd.tx_list);
-		desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli,
+		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
 				sizeof(desc->lli), DMA_TO_DEVICE);
 		dwc_desc_put(dwc, desc);
 
@@ -833,7 +842,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 
 	spin_unlock_bh(&dwc->lock);
 
-	dev_dbg(&chan->dev,
+	dev_dbg(chan2dev(chan),
 		"alloc_chan_resources allocated %d descriptors\n", i);
 
 	return i;
@@ -846,7 +855,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	struct dw_desc *desc, *_desc;
 	LIST_HEAD(list);
 
-	dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n",
+	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
 			dwc->descs_allocated);
 
 	/* ASSERT: channel is idle */
@@ -867,13 +876,13 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	spin_unlock_bh(&dwc->lock);
 
 	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
-		dev_vdbg(&chan->dev, "  freeing descriptor %p\n", desc);
-		dma_unmap_single(chan->dev.parent, desc->txd.phys,
+		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
+		dma_unmap_single(chan2parent(chan), desc->txd.phys,
 				sizeof(desc->lli), DMA_TO_DEVICE);
 		kfree(desc);
 	}
 
-	dev_vdbg(&chan->dev, "free_chan_resources done\n");
+	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
 }
 
 /*----------------------------------------------------------------------*/
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 46e0128929a0..ca70a21afc68 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -822,7 +822,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
 	 */
 	WARN_ON(fdev->feature != new_fsl_chan->feature);
 
-	new_fsl_chan->dev = &new_fsl_chan->common.dev;
+	new_fsl_chan->dev = &new_fsl_chan->common.dev->device;
 	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
 			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
 