author     Dan Williams <dan.j.williams@intel.com>    2009-01-06 13:38:21 -0500
committer  Dan Williams <dan.j.williams@intel.com>    2009-01-06 13:38:21 -0500
commit     41d5e59c1299f27983977bcfe3b360600996051c
tree       f0e80b6fea3af04f266843af97f433198ad535c7  /drivers/dma/dw_dmac.c
parent     4fac7fa57cf8001be259688468c825f836daf739
dmaengine: add a release for dma class devices and dependent infrastructure
Resolves:
WARNING: at drivers/base/core.c:122 device_release+0x4d/0x52()
Device 'dma0chan0' does not have a release() function, it is broken and must be fixed.
The dma_chan_dev object is introduced to gear-match sysfs kobject and
dmaengine channel lifetimes. When a channel is removed, accesses to the
sysfs entries return -ENODEV until the kobject can be released.
The bulk of the change is updates to existing code to handle the extra
layer of indirection between a dma_chan and its struct device.
Reported-by: Alexander Beregalov <a.beregalov@gmail.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Cc: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
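For context, the driver changes below rely on the new indirection between a channel and its sysfs device. The sketch that follows is illustrative rather than a verbatim copy of the dmaengine headers (which are not part of this file's diff): only the embedded `struct device` and the `chan` back-pointer are implied by this patch and its commit message, so treat the exact field list as an assumption. The two helpers are the ones this patch adds to dw_dmac.c.

```c
/*
 * Assumed shape of the new wrapper object (the real definition lives in
 * include/linux/dmaengine.h, not in this file): the sysfs class device now
 * sits in a separately managed dma_chan_dev so its kobject can outlive the
 * channel and provide a proper release().
 */
struct dma_chan_dev {
	struct dma_chan *chan;	/* backing channel; detached when the channel is removed */
	struct device device;	/* class device exposed in sysfs */
};

/* Helpers added to dw_dmac.c by this patch (also visible in the first hunk below) */
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;		/* chan->dev is now a dma_chan_dev pointer */
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;	/* the DMA controller's device, used for DMA mapping */
}
```

Call sites then switch from `&chan->dev` (formerly a struct device embedded in the channel) to `chan2dev(chan)`, and from `chan->dev.parent` to `chan2parent(chan)`, which is exactly what every hunk below does.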
Diffstat (limited to 'drivers/dma/dw_dmac.c')
-rw-r--r--   drivers/dma/dw_dmac.c | 91
1 file changed, 50 insertions(+), 41 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a29dda8f801b..6b702cc46b3d 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -70,6 +70,15 @@
  * the controller, though.
  */
 
+static struct device *chan2dev(struct dma_chan *chan)
+{
+        return &chan->dev->device;
+}
+static struct device *chan2parent(struct dma_chan *chan)
+{
+        return chan->dev->device.parent;
+}
+
 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
 {
         return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
@@ -93,12 +102,12 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
                         ret = desc;
                         break;
                 }
-                dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc);
+                dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
                 i++;
         }
         spin_unlock_bh(&dwc->lock);
 
-        dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i);
+        dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
 
         return ret;
 }
@@ -108,10 +117,10 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
         struct dw_desc *child;
 
         list_for_each_entry(child, &desc->txd.tx_list, desc_node)
-                dma_sync_single_for_cpu(dwc->chan.dev.parent,
+                dma_sync_single_for_cpu(chan2parent(&dwc->chan),
                                 child->txd.phys, sizeof(child->lli),
                                 DMA_TO_DEVICE);
-        dma_sync_single_for_cpu(dwc->chan.dev.parent,
+        dma_sync_single_for_cpu(chan2parent(&dwc->chan),
                         desc->txd.phys, sizeof(desc->lli),
                         DMA_TO_DEVICE);
 }
@@ -129,11 +138,11 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 
         spin_lock_bh(&dwc->lock);
         list_for_each_entry(child, &desc->txd.tx_list, desc_node)
-                dev_vdbg(&dwc->chan.dev,
+                dev_vdbg(chan2dev(&dwc->chan),
                                 "moving child desc %p to freelist\n",
                                 child);
         list_splice_init(&desc->txd.tx_list, &dwc->free_list);
-        dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc);
+        dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
         list_add(&desc->desc_node, &dwc->free_list);
         spin_unlock_bh(&dwc->lock);
 }
@@ -163,9 +172,9 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 
         /* ASSERT: channel is idle */
         if (dma_readl(dw, CH_EN) & dwc->mask) {
-                dev_err(&dwc->chan.dev,
+                dev_err(chan2dev(&dwc->chan),
                         "BUG: Attempted to start non-idle channel\n");
-                dev_err(&dwc->chan.dev,
+                dev_err(chan2dev(&dwc->chan),
                         " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
                         channel_readl(dwc, SAR),
                         channel_readl(dwc, DAR),
@@ -193,7 +202,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
         void *param;
         struct dma_async_tx_descriptor *txd = &desc->txd;
 
-        dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie);
+        dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
 
         dwc->completed = txd->cookie;
         callback = txd->callback;
@@ -208,11 +217,11 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
          * mapped before they were submitted...
          */
         if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
-                dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len,
-                                DMA_FROM_DEVICE);
+                dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar,
+                                desc->len, DMA_FROM_DEVICE);
         if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
-                dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len,
-                                DMA_TO_DEVICE);
+                dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar,
+                                desc->len, DMA_TO_DEVICE);
 
         /*
          * The API requires that no submissions are done from a
@@ -228,7 +237,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
         LIST_HEAD(list);
 
         if (dma_readl(dw, CH_EN) & dwc->mask) {
-                dev_err(&dwc->chan.dev,
+                dev_err(chan2dev(&dwc->chan),
                         "BUG: XFER bit set, but channel not idle!\n");
 
                 /* Try to continue after resetting the channel... */
@@ -273,7 +282,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
                 return;
         }
 
-        dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp);
+        dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
 
         list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
                 if (desc->lli.llp == llp)
@@ -292,7 +301,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
                 dwc_descriptor_complete(dwc, desc);
         }
 
-        dev_err(&dwc->chan.dev,
+        dev_err(chan2dev(&dwc->chan),
                 "BUG: All descriptors done, but channel not idle!\n");
 
         /* Try to continue after resetting the channel... */
@@ -308,7 +317,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 
 static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
 {
-        dev_printk(KERN_CRIT, &dwc->chan.dev,
+        dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
                         " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
                         lli->sar, lli->dar, lli->llp,
                         lli->ctlhi, lli->ctllo);
@@ -342,9 +351,9 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
          * controller flagged an error instead of scribbling over
          * random memory locations.
          */
-        dev_printk(KERN_CRIT, &dwc->chan.dev,
+        dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
                         "Bad descriptor submitted for DMA!\n");
-        dev_printk(KERN_CRIT, &dwc->chan.dev,
+        dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
                         " cookie: %d\n", bad_desc->txd.cookie);
         dwc_dump_lli(dwc, &bad_desc->lli);
         list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
@@ -442,12 +451,12 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
          * for DMA. But this is hard to do in a race-free manner.
          */
         if (list_empty(&dwc->active_list)) {
-                dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n",
+                dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
                                 desc->txd.cookie);
                 dwc_dostart(dwc, desc);
                 list_add_tail(&desc->desc_node, &dwc->active_list);
         } else {
-                dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n",
+                dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
                                 desc->txd.cookie);
 
                 list_add_tail(&desc->desc_node, &dwc->queue);
@@ -472,11 +481,11 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
         unsigned int dst_width;
         u32 ctllo;
 
-        dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
+        dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
                         dest, src, len, flags);
 
         if (unlikely(!len)) {
-                dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n");
+                dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
                 return NULL;
         }
 
@@ -516,7 +525,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                         first = desc;
                 } else {
                         prev->lli.llp = desc->txd.phys;
-                        dma_sync_single_for_device(chan->dev.parent,
+                        dma_sync_single_for_device(chan2parent(chan),
                                         prev->txd.phys, sizeof(prev->lli),
                                         DMA_TO_DEVICE);
                         list_add_tail(&desc->desc_node,
@@ -531,7 +540,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                 prev->lli.ctllo |= DWC_CTLL_INT_EN;
 
         prev->lli.llp = 0;
-        dma_sync_single_for_device(chan->dev.parent,
+        dma_sync_single_for_device(chan2parent(chan),
                         prev->txd.phys, sizeof(prev->lli),
                         DMA_TO_DEVICE);
 
@@ -562,7 +571,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
         struct scatterlist *sg;
         size_t total_len = 0;
 
-        dev_vdbg(&chan->dev, "prep_dma_slave\n");
+        dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
 
         if (unlikely(!dws || !sg_len))
                 return NULL;
@@ -570,7 +579,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
         reg_width = dws->reg_width;
         prev = first = NULL;
 
-        sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction);
+        sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
 
         switch (direction) {
         case DMA_TO_DEVICE:
@@ -587,7 +596,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
                         desc = dwc_desc_get(dwc);
                         if (!desc) {
-                                dev_err(&chan->dev,
+                                dev_err(chan2dev(chan),
                                         "not enough descriptors available\n");
                                 goto err_desc_get;
                         }
@@ -607,7 +616,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                                 first = desc;
                         } else {
                                 prev->lli.llp = desc->txd.phys;
-                                dma_sync_single_for_device(chan->dev.parent,
+                                dma_sync_single_for_device(chan2parent(chan),
                                                 prev->txd.phys,
                                                 sizeof(prev->lli),
                                                 DMA_TO_DEVICE);
@@ -633,7 +642,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
                         desc = dwc_desc_get(dwc);
                         if (!desc) {
-                                dev_err(&chan->dev,
+                                dev_err(chan2dev(chan),
                                         "not enough descriptors available\n");
                                 goto err_desc_get;
                         }
@@ -653,7 +662,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                                 first = desc;
                         } else {
                                 prev->lli.llp = desc->txd.phys;
-                                dma_sync_single_for_device(chan->dev.parent,
+                                dma_sync_single_for_device(chan2parent(chan),
                                                 prev->txd.phys,
                                                 sizeof(prev->lli),
                                                 DMA_TO_DEVICE);
@@ -673,7 +682,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                 prev->lli.ctllo |= DWC_CTLL_INT_EN;
 
         prev->lli.llp = 0;
-        dma_sync_single_for_device(chan->dev.parent,
+        dma_sync_single_for_device(chan2parent(chan),
                         prev->txd.phys, sizeof(prev->lli),
                         DMA_TO_DEVICE);
 
@@ -768,11 +777,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
         u32 cfghi;
         u32 cfglo;
 
-        dev_vdbg(&chan->dev, "alloc_chan_resources\n");
+        dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
 
         /* ASSERT: channel is idle */
         if (dma_readl(dw, CH_EN) & dwc->mask) {
-                dev_dbg(&chan->dev, "DMA channel not idle?\n");
+                dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
                 return -EIO;
         }
 
@@ -808,7 +817,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 
                 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
                 if (!desc) {
-                        dev_info(&chan->dev,
+                        dev_info(chan2dev(chan),
                                 "only allocated %d descriptors\n", i);
                         spin_lock_bh(&dwc->lock);
                         break;
@@ -818,7 +827,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
                 desc->txd.tx_submit = dwc_tx_submit;
                 desc->txd.flags = DMA_CTRL_ACK;
                 INIT_LIST_HEAD(&desc->txd.tx_list);
-                desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli,
+                desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
                                 sizeof(desc->lli), DMA_TO_DEVICE);
                 dwc_desc_put(dwc, desc);
 
@@ -833,7 +842,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 
         spin_unlock_bh(&dwc->lock);
 
-        dev_dbg(&chan->dev,
+        dev_dbg(chan2dev(chan),
                 "alloc_chan_resources allocated %d descriptors\n", i);
 
         return i;
@@ -846,7 +855,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
         struct dw_desc *desc, *_desc;
         LIST_HEAD(list);
 
-        dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n",
+        dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
                         dwc->descs_allocated);
 
         /* ASSERT: channel is idle */
@@ -867,13 +876,13 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
         spin_unlock_bh(&dwc->lock);
 
         list_for_each_entry_safe(desc, _desc, &list, desc_node) {
-                dev_vdbg(&chan->dev, " freeing descriptor %p\n", desc);
-                dma_unmap_single(chan->dev.parent, desc->txd.phys,
+                dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
+                dma_unmap_single(chan2parent(chan), desc->txd.phys,
                                 sizeof(desc->lli), DMA_TO_DEVICE);
                 kfree(desc);
         }
 
-        dev_vdbg(&chan->dev, "free_chan_resources done\n");
+        dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
 }
 
 /*----------------------------------------------------------------------*/