author    Linus Torvalds <torvalds@linux-foundation.org>  2009-12-30 16:46:29 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-12-30 16:46:29 -0500
commit    05a625486efc3209ae4d98e253dafa6ce0124385 (patch)
tree      b78b4a854639fecc91598ecb05900c929a6ab85e
parent    1f11abc966b82b9fd0c834707486ef301b2f398d (diff)
parent    f80ca163d65903276bec7045a484a79c0897eb2d (diff)
Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
  drivers/dma: Correct use after free
  drivers/dma: drop unnecesary memset
  ioat2,3: put channel hardware in known state at init
  async_tx: expand async raid6 test to cover ioatdma corner case
  ioat3: fix p-disabled q-continuation
  sh: fix DMA driver's descriptor chaining and cookie assignment
  dma: at_hdmac: correct incompatible type for argument 1 of 'spin_lock_bh'
-rw-r--r--  crypto/async_tx/raid6test.c     7
-rw-r--r--  drivers/dma/at_hdmac.c          4
-rw-r--r--  drivers/dma/coh901318.c         2
-rw-r--r--  drivers/dma/dw_dmac.c           2
-rw-r--r--  drivers/dma/ioat/dma.c          2
-rw-r--r--  drivers/dma/ioat/dma.h         18
-rw-r--r--  drivers/dma/ioat/dma_v2.c      69
-rw-r--r--  drivers/dma/ioat/dma_v2.h       2
-rw-r--r--  drivers/dma/ioat/dma_v3.c      60
-rw-r--r--  drivers/dma/ioat/registers.h    1
-rw-r--r--  drivers/dma/shdma.c           324
-rw-r--r--  drivers/dma/shdma.h             9
12 files changed, 341 insertions(+), 159 deletions(-)
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index 3ec27c7e62e..f84f6b4301d 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -214,6 +214,13 @@ static int raid6_test(void)
 	err += test(4, &tests);
 	if (NDISKS > 5)
 		err += test(5, &tests);
+	/* the 11 and 12 disk cases are special for ioatdma (p-disabled
+	 * q-continuation without extended descriptor)
+	 */
+	if (NDISKS > 12) {
+		err += test(11, &tests);
+		err += test(12, &tests);
+	}
 	err += test(NDISKS, &tests);

 	pr("\n");
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index f15112569c1..efc1a61ca23 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -815,7 +815,7 @@ atc_is_tx_complete(struct dma_chan *chan,
 	dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
 		cookie, done ? *done : 0, used ? *used : 0);

-	spin_lock_bh(atchan->lock);
+	spin_lock_bh(&atchan->lock);

 	last_complete = atchan->completed_cookie;
 	last_used = chan->cookie;
@@ -830,7 +830,7 @@ atc_is_tx_complete(struct dma_chan *chan,
 		ret = dma_async_is_complete(cookie, last_complete, last_used);
 	}

-	spin_unlock_bh(atchan->lock);
+	spin_unlock_bh(&atchan->lock);

 	if (done)
 		*done = last_complete;
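For readers skimming the at_hdmac change above: the bug was passing the spinlock object itself instead of its address. A minimal userspace analogue (hypothetical struct, with a pthread mutex standing in for the kernel spinlock) shows why lock APIs take a pointer to the lock member, hence &atchan->lock:

/* Illustrative userspace sketch only; at_dma_chan and spin_lock_bh()
 * are kernel constructs, modelled here with a made-up struct and a
 * pthread mutex. The point is simply that the lock API needs the
 * *address* of the lock member.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_chan {
	pthread_mutex_t lock;		/* stands in for atchan->lock */
	int completed_cookie;
};

static int read_completed(struct fake_chan *chan)
{
	int val;

	pthread_mutex_lock(&chan->lock);	/* note the & */
	val = chan->completed_cookie;
	pthread_mutex_unlock(&chan->lock);
	return val;
}

int main(void)
{
	struct fake_chan chan = { PTHREAD_MUTEX_INITIALIZER, 42 };

	printf("completed cookie: %d\n", read_completed(&chan));
	return 0;
}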
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 4a99cd94536..b5f2ee0f8e2 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1294,8 +1294,8 @@ static int __exit coh901318_remove(struct platform_device *pdev)
 	dma_async_device_unregister(&base->dma_slave);
 	coh901318_pool_destroy(&base->pool);
 	free_irq(platform_get_irq(pdev, 0), base);
-	kfree(base);
 	iounmap(base->virtbase);
+	kfree(base);
 	release_mem_region(pdev->resource->start,
 			   resource_size(pdev->resource));
 	return 0;
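The coh901318 hunk reorders the teardown so that base->virtbase is unmapped before base itself is freed. A small userspace sketch (hypothetical names, malloc/free standing in for kfree/iounmap) illustrates the ordering rule: release anything reachable through an object before freeing the object, otherwise the release reads freed memory:

/* Userspace sketch of the use-after-free fixed above. `regs` plays
 * the role of base->virtbase; names are hypothetical.
 */
#include <stdlib.h>

struct fake_base {
	void *regs;		/* stands in for base->virtbase */
};

static void remove_device(struct fake_base *base)
{
	free(base->regs);	/* must happen while `base` is still valid */
	free(base);		/* only now release the containing object */
}

int main(void)
{
	struct fake_base *base = malloc(sizeof(*base));

	if (!base)
		return 1;
	base->regs = malloc(4096);
	remove_device(base);
	return 0;
}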
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 285bed0fe17..d28369f7afd 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1270,8 +1270,6 @@ static int __init dw_probe(struct platform_device *pdev)
 		goto err_kfree;
 	}

-	memset(dw, 0, sizeof *dw);
-
 	dw->regs = ioremap(io->start, DW_REGLEN);
 	if (!dw->regs) {
 		err = -ENOMEM;
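The memset removed from dw_probe() is redundant when the structure comes from a zeroing allocator. A userspace analogue with calloc() makes the point (the driver uses a kernel zeroing allocator; this sketch only illustrates the idea):

/* Memory from a zeroing allocator is already cleared, so a follow-up
 * memset(ptr, 0, size) would do no extra work; that is the redundancy
 * dropped above. calloc() stands in for the kernel allocator here.
 */
#include <assert.h>
#include <stdlib.h>

struct fake_dw {
	int regs_mapped;
	char name[16];
};

int main(void)
{
	struct fake_dw *dw = calloc(1, sizeof(*dw));

	if (!dw)
		return 1;
	/* Already all-zero without any explicit memset. */
	assert(dw->regs_mapped == 0 && dw->name[0] == '\0');
	free(dw);
	return 0;
}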
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index c524d36d3c2..dcc4ab78b32 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1032,7 +1032,7 @@ int __devinit ioat_probe(struct ioatdma_device *device)
 	dma->dev = &pdev->dev;

 	if (!dma->chancnt) {
-		dev_err(dev, "zero channels detected\n");
+		dev_err(dev, "channel enumeration error\n");
 		goto err_setup_interrupts;
 	}

diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 45edde99648..bbc3e78ef33 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -60,6 +60,7 @@
  * @dca: direct cache access context
  * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
  * @enumerate_channels: hw version specific channel enumeration
+ * @reset_hw: hw version specific channel (re)initialization
  * @cleanup_tasklet: select between the v2 and v3 cleanup routines
  * @timer_fn: select between the v2 and v3 timer watchdog routines
  * @self_test: hardware version specific self test for each supported op type
@@ -78,6 +79,7 @@ struct ioatdma_device {
 	struct dca_provider *dca;
 	void (*intr_quirk)(struct ioatdma_device *device);
 	int (*enumerate_channels)(struct ioatdma_device *device);
+	int (*reset_hw)(struct ioat_chan_common *chan);
 	void (*cleanup_tasklet)(unsigned long data);
 	void (*timer_fn)(unsigned long data);
 	int (*self_test)(struct ioatdma_device *device);
@@ -264,6 +266,22 @@ static inline void ioat_suspend(struct ioat_chan_common *chan)
 	writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 }

+static inline void ioat_reset(struct ioat_chan_common *chan)
+{
+	u8 ver = chan->device->version;
+
+	writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+}
+
+static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
+{
+	u8 ver = chan->device->version;
+	u8 cmd;
+
+	cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
+}
+
 static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
 {
 	struct ioat_chan_common *chan = &ioat->base;
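ioat_reset_pending() above reports whether the self-clearing RESET command bit is still set. A userspace sketch of the same mask-and-test pattern on an in-memory fake register (the bit value is made up for illustration, not the real register layout):

/* Sketch of a self-clearing command bit: software sets it, hardware
 * clears it when done, and "pending" is just mask-and-compare. The
 * register here is plain memory and the constant is hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_CMD_RESET 0x20		/* hypothetical bit position */

static unsigned char fake_chancmd;	/* stands in for the MMIO register */

static bool fake_reset_pending(void)
{
	return (fake_chancmd & FAKE_CMD_RESET) == FAKE_CMD_RESET;
}

int main(void)
{
	fake_chancmd = FAKE_CMD_RESET;		/* issue reset */
	printf("pending: %d\n", fake_reset_pending());
	fake_chancmd &= ~FAKE_CMD_RESET;	/* hardware self-clears */
	printf("pending: %d\n", fake_reset_pending());
	return 0;
}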
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 8f1f7f05dea..5f7a500e18d 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -239,20 +239,50 @@ void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
 	__ioat2_start_null_desc(ioat);
 }

-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
+int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
 {
-	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	unsigned long end = jiffies + tmo;
+	int err = 0;
 	u32 status;

 	status = ioat_chansts(chan);
 	if (is_ioat_active(status) || is_ioat_idle(status))
 		ioat_suspend(chan);
 	while (is_ioat_active(status) || is_ioat_idle(status)) {
+		if (end && time_after(jiffies, end)) {
+			err = -ETIMEDOUT;
+			break;
+		}
 		status = ioat_chansts(chan);
 		cpu_relax();
 	}

+	return err;
+}
+
+int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
+{
+	unsigned long end = jiffies + tmo;
+	int err = 0;
+
+	ioat_reset(chan);
+	while (ioat_reset_pending(chan)) {
+		if (end && time_after(jiffies, end)) {
+			err = -ETIMEDOUT;
+			break;
+		}
+		cpu_relax();
+	}
+
+	return err;
+}
+
+static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	unsigned long phys_complete;
+
+	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
 		__cleanup(ioat, phys_complete);

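Both helpers introduced above poll hardware state with an optional deadline: tmo == 0 means wait indefinitely, otherwise give up with -ETIMEDOUT. A compact userspace sketch of the same bounded-polling shape, using wall-clock time instead of jiffies and a hypothetical still_busy() predicate:

/* Bounded polling in the style of ioat2_quiesce()/ioat2_reset_sync():
 * spin on a condition but bail out once an optional deadline passes.
 * time(NULL) replaces jiffies and still_busy() is a stand-in for the
 * hardware status check; purely illustrative.
 */
#include <errno.h>
#include <stdio.h>
#include <time.h>

static int busy_polls_left = 3;

static int still_busy(void)
{
	return busy_polls_left-- > 0;	/* pretend hardware settles after 3 polls */
}

static int wait_idle(unsigned int tmo_sec)
{
	time_t end = time(NULL) + tmo_sec;
	int err = 0;

	while (still_busy()) {
		if (tmo_sec && time(NULL) > end) {
			err = -ETIMEDOUT;
			break;
		}
		/* a cpu_relax()-style pause would go here */
	}
	return err;
}

int main(void)
{
	printf("wait_idle returned %d\n", wait_idle(2));
	return 0;
}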
@@ -318,6 +348,19 @@ void ioat2_timer_event(unsigned long data)
 	spin_unlock_bh(&chan->cleanup_lock);
 }

+static int ioat2_reset_hw(struct ioat_chan_common *chan)
+{
+	/* throw away whatever the channel was doing and get it initialized */
+	u32 chanerr;
+
+	ioat2_quiesce(chan, msecs_to_jiffies(100));
+
+	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+
+	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
+}
+
 /**
  * ioat2_enumerate_channels - find and initialize the device's channels
  * @device: the device to be enumerated
@@ -360,6 +403,10 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
 			       (unsigned long) ioat);
 		ioat->xfercap_log = xfercap_log;
 		spin_lock_init(&ioat->ring_lock);
+		if (device->reset_hw(&ioat->base)) {
+			i = 0;
+			break;
+		}
 	}
 	dma->chancnt = i;
 	return i;
@@ -467,7 +514,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
 	struct ioat_chan_common *chan = &ioat->base;
 	struct ioat_ring_ent **ring;
-	u32 chanerr;
 	int order;

 	/* have we already been set up? */
@@ -477,12 +523,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
 	/* Setup register to interrupt and write completion status on error */
 	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	if (chanerr) {
-		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
-		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-	}
-
 	/* allocate a completion writeback area */
 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
 	chan->completion = pci_pool_alloc(chan->device->completion_pool,
@@ -746,13 +786,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 	tasklet_disable(&chan->cleanup_task);
 	del_timer_sync(&chan->timer);
 	device->cleanup_tasklet((unsigned long) ioat);
-
-	/* Delay 100ms after reset to allow internal DMA logic to quiesce
-	 * before removing DMA descriptor resources.
-	 */
-	writeb(IOAT_CHANCMD_RESET,
-	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
-	mdelay(100);
+	device->reset_hw(chan);

 	spin_lock_bh(&ioat->ring_lock);
 	descs = ioat2_ring_space(ioat);
@@ -839,6 +873,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
 	int err;

 	device->enumerate_channels = ioat2_enumerate_channels;
+	device->reset_hw = ioat2_reset_hw;
 	device->cleanup_tasklet = ioat2_cleanup_tasklet;
 	device->timer_fn = ioat2_timer_event;
 	device->self_test = ioat_dma_self_test;
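ioat2_dma_probe() now installs ioat2_reset_hw through the new reset_hw method, so common code can reinitialize a channel without knowing the hardware revision (ioat3 plugs in its own variant further down). A minimal sketch of that per-version callback pattern, with all names hypothetical:

/* Minimal sketch of the per-version operations pattern: common code
 * calls dev->reset_hw() and each hardware generation supplies its own
 * implementation, as the ioat2/ioat3 probe paths do above. Names are
 * made up for illustration.
 */
#include <stdio.h>

struct fake_chan { int id; };

struct fake_device {
	int (*reset_hw)(struct fake_chan *chan);
};

static int v2_reset_hw(struct fake_chan *chan)
{
	printf("v2 reset of channel %d\n", chan->id);
	return 0;
}

static int v3_reset_hw(struct fake_chan *chan)
{
	printf("v3 reset of channel %d (extra workarounds)\n", chan->id);
	return 0;
}

static int common_init(struct fake_device *dev, struct fake_chan *chan)
{
	/* version-agnostic path, like channel enumeration in the driver */
	return dev->reset_hw(chan);
}

int main(void)
{
	struct fake_chan chan = { .id = 0 };
	struct fake_device v2 = { .reset_hw = v2_reset_hw };
	struct fake_device v3 = { .reset_hw = v3_reset_hw };

	common_init(&v2, &chan);
	common_init(&v3, &chan);
	return 0;
}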
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 1d849ef74d5..3afad8da43c 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -185,6 +185,8 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
 void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
 void ioat2_cleanup_tasklet(unsigned long data);
 void ioat2_timer_event(unsigned long data);
+int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
+int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
 extern struct kobj_type ioat2_ktype;
 extern struct kmem_cache *ioat2_cache;
 #endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 42f6f10fb0c..9908c9e94b2 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -650,9 +650,11 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,

 	num_descs = ioat2_xferlen_to_descs(ioat, len);
 	/* we need 2x the number of descriptors to cover greater than 3
-	 * sources
+	 * sources (we need 1 extra source in the q-only continuation
+	 * case and 3 extra sources in the p+q continuation case.
 	 */
-	if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) {
+	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
+	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
 		with_ext = 1;
 		num_descs *= 2;
 	} else
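The reworked condition above decides when the extended descriptor format (and 2x the descriptor count) is needed: a p-disabled q-only continuation adds one implicit source, while an ordinary p+q continuation always forces the extended form. A small sketch of that arithmetic with simplified stand-in flags (not the kernel flag names or helpers):

/* Sketch of the descriptor-count rule in __ioat3_prep_pq_lock() above:
 * a p-disabled continuation contributes one implicit source, a plain
 * p+q continuation always needs the extended (2x) format, and more
 * than 3 effective sources does too. Flags and helpers are simplified
 * stand-ins, not the dmaengine API.
 */
#include <stdbool.h>
#include <stdio.h>

#define FLAG_CONTINUE    0x1	/* hypothetical DMA_PREP_CONTINUE */
#define FLAG_P_DISABLED  0x2	/* hypothetical "P destination disabled" */

static bool p_disabled_continue(unsigned int flags)
{
	return (flags & (FLAG_CONTINUE | FLAG_P_DISABLED)) ==
	       (FLAG_CONTINUE | FLAG_P_DISABLED);
}

static bool plain_continue(unsigned int flags)
{
	return (flags & FLAG_CONTINUE) && !(flags & FLAG_P_DISABLED);
}

static int descs_needed(int src_cnt, unsigned int flags, int base_descs)
{
	bool with_ext = src_cnt + p_disabled_continue(flags) > 3 ||
			plain_continue(flags);

	return with_ext ? base_descs * 2 : base_descs;
}

int main(void)
{
	/* 3 sources plus q-only continuation: 3 + 1 > 3 -> extended format */
	printf("%d\n", descs_needed(3, FLAG_CONTINUE | FLAG_P_DISABLED, 1));
	/* 2 sources, no continuation: basic format is enough */
	printf("%d\n", descs_needed(2, 0, 1));
	return 0;
}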
@@ -1128,6 +1130,45 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
 	return 0;
 }

+static int ioat3_reset_hw(struct ioat_chan_common *chan)
+{
+	/* throw away whatever the channel was doing and get it
+	 * initialized, with ioat3 specific workarounds
+	 */
+	struct ioatdma_device *device = chan->device;
+	struct pci_dev *pdev = device->pdev;
+	u32 chanerr;
+	u16 dev_id;
+	int err;
+
+	ioat2_quiesce(chan, msecs_to_jiffies(100));
+
+	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+
+	/* -= IOAT ver.3 workarounds =- */
+	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
+	 * that can cause stability issues for IOAT ver.3, and clear any
+	 * pending errors
+	 */
+	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+	err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+	if (err) {
+		dev_err(&pdev->dev, "channel error register unreachable\n");
+		return err;
+	}
+	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
+
+	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+	 * (workaround for spurious config parity error after restart)
+	 */
+	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
+		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+
+	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
+}
+
 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
@@ -1137,10 +1178,10 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	struct ioat_chan_common *chan;
 	bool is_raid_device = false;
 	int err;
-	u16 dev_id;
 	u32 cap;

 	device->enumerate_channels = ioat2_enumerate_channels;
+	device->reset_hw = ioat3_reset_hw;
 	device->self_test = ioat3_dma_self_test;
 	dma = &device->common;
 	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
@@ -1216,19 +1257,6 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_prep_dma_xor_val = NULL;
 #endif

-	/* -= IOAT ver.3 workarounds =- */
-	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
-	 * that can cause stability issues for IOAT ver.3
-	 */
-	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
-
-	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
-	 * (workaround for spurious config parity error after restart)
-	 */
-	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
-	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
-		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
-
 	err = ioat_probe(device);
 	if (err)
 		return err;
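Besides moving the ver.3 workarounds out of probe and into ioat3_reset_hw(), the new code also clears latched error state by reading the PCI channel-error register and writing the value back, the usual write-1-to-clear idiom for status registers. A userspace sketch of that idiom on a fake register (bit values made up):

/* Write-1-to-clear sketch: latched error bits clear when software
 * writes back the bits it read as set, which is the pattern used with
 * IOAT_PCI_CHANERR_INT_OFFSET above. The register here is plain
 * memory with made-up contents.
 */
#include <stdio.h>

static unsigned int fake_chanerr = 0x5;	/* two latched error bits */

static unsigned int reg_read(void)
{
	return fake_chanerr;
}

static void reg_write(unsigned int val)
{
	fake_chanerr &= ~val;	/* "hardware" clears the bits written as 1 */
}

int main(void)
{
	unsigned int err = reg_read();

	printf("errors before: 0x%x\n", err);
	reg_write(err);			/* write-1-to-clear */
	printf("errors after:  0x%x\n", reg_read());
	return 0;
}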
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index f015ec19670..e8ae63baf58 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -27,6 +27,7 @@

 #define IOAT_PCI_DEVICE_ID_OFFSET		0x02
 #define IOAT_PCI_DMAUNCERRSTS_OFFSET		0x148
+#define IOAT_PCI_CHANERR_INT_OFFSET		0x180
 #define IOAT_PCI_CHANERRMASK_INT_OFFSET		0x184

 /* MMIO Device Registers */
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 2e4a54c8afe..d10cc899c46 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -23,16 +23,19 @@
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
 #include <linux/platform_device.h>
 #include <cpu/dma.h>
 #include <asm/dma-sh.h>
 #include "shdma.h"

 /* DMA descriptor control */
-#define DESC_LAST	(-1)
-#define DESC_COMP	(1)
-#define DESC_NCOMP	(0)
+enum sh_dmae_desc_status {
+	DESC_IDLE,
+	DESC_PREPARED,
+	DESC_SUBMITTED,
+	DESC_COMPLETED,	/* completed, have to call callback */
+	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
+};

 #define NR_DESCS_PER_CHANNEL 32
 /*
@@ -45,6 +48,8 @@
  */
 #define RS_DEFAULT	(RS_DUAL)

+static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
+
 #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
 {
@@ -106,11 +111,11 @@ static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
 	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
 }

-static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
+static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
 {
-	sh_dmae_writel(sh_chan, hw.sar, SAR);
-	sh_dmae_writel(sh_chan, hw.dar, DAR);
-	sh_dmae_writel(sh_chan, hw.tcr >> calc_xmit_shift(sh_chan), TCR);
+	sh_dmae_writel(sh_chan, hw->sar, SAR);
+	sh_dmae_writel(sh_chan, hw->dar, DAR);
+	sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
 }

 static void dmae_start(struct sh_dmae_chan *sh_chan)
@@ -184,8 +189,9 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)

 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct sh_desc *desc = tx_to_sh_desc(tx);
+	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
 	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
+	dma_async_tx_callback callback = tx->callback;
 	dma_cookie_t cookie;

 	spin_lock_bh(&sh_chan->desc_lock);
@@ -195,45 +201,53 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (cookie < 0)
 		cookie = 1;

-	/* If desc only in the case of 1 */
-	if (desc->async_tx.cookie != -EBUSY)
-		desc->async_tx.cookie = cookie;
-	sh_chan->common.cookie = desc->async_tx.cookie;
+	sh_chan->common.cookie = cookie;
+	tx->cookie = cookie;
+
+	/* Mark all chunks of this descriptor as submitted, move to the queue */
+	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
+		/*
+		 * All chunks are on the global ld_free, so, we have to find
+		 * the end of the chain ourselves
+		 */
+		if (chunk != desc && (chunk->mark == DESC_IDLE ||
+				      chunk->async_tx.cookie > 0 ||
+				      chunk->async_tx.cookie == -EBUSY ||
+				      &chunk->node == &sh_chan->ld_free))
+			break;
+		chunk->mark = DESC_SUBMITTED;
+		/* Callback goes to the last chunk */
+		chunk->async_tx.callback = NULL;
+		chunk->cookie = cookie;
+		list_move_tail(&chunk->node, &sh_chan->ld_queue);
+		last = chunk;
+	}
+
+	last->async_tx.callback = callback;
+	last->async_tx.callback_param = tx->callback_param;

-	list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev);
+	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
+		tx->cookie, &last->async_tx, sh_chan->id,
+		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

 	spin_unlock_bh(&sh_chan->desc_lock);

 	return cookie;
 }

+/* Called with desc_lock held */
 static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
 {
-	struct sh_desc *desc, *_desc, *ret = NULL;
+	struct sh_desc *desc;

-	spin_lock_bh(&sh_chan->desc_lock);
-	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) {
-		if (async_tx_test_ack(&desc->async_tx)) {
+	list_for_each_entry(desc, &sh_chan->ld_free, node)
+		if (desc->mark != DESC_PREPARED) {
+			BUG_ON(desc->mark != DESC_IDLE);
 			list_del(&desc->node);
-			ret = desc;
-			break;
+			return desc;
 		}
-	}
-	spin_unlock_bh(&sh_chan->desc_lock);
-
-	return ret;
-}
-
-static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc)
-{
-	if (desc) {
-		spin_lock_bh(&sh_chan->desc_lock);
-
-		list_splice_init(&desc->tx_list, &sh_chan->ld_free);
-		list_add(&desc->node, &sh_chan->ld_free);

-		spin_unlock_bh(&sh_chan->desc_lock);
-	}
+	return NULL;
 }

 static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
@@ -252,11 +266,10 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 		dma_async_tx_descriptor_init(&desc->async_tx,
 					&sh_chan->common);
 		desc->async_tx.tx_submit = sh_dmae_tx_submit;
-		desc->async_tx.flags = DMA_CTRL_ACK;
-		INIT_LIST_HEAD(&desc->tx_list);
-		sh_dmae_put_desc(sh_chan, desc);
+		desc->mark = DESC_IDLE;

 		spin_lock_bh(&sh_chan->desc_lock);
+		list_add(&desc->node, &sh_chan->ld_free);
 		sh_chan->descs_allocated++;
 	}
 	spin_unlock_bh(&sh_chan->desc_lock);
@@ -273,7 +286,10 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	struct sh_desc *desc, *_desc;
 	LIST_HEAD(list);

-	BUG_ON(!list_empty(&sh_chan->ld_queue));
+	/* Prepared and not submitted descriptors can still be on the queue */
+	if (!list_empty(&sh_chan->ld_queue))
+		sh_dmae_chan_ld_cleanup(sh_chan, true);
+
 	spin_lock_bh(&sh_chan->desc_lock);

 	list_splice_init(&sh_chan->ld_free, &list);
@@ -292,6 +308,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	struct sh_dmae_chan *sh_chan;
 	struct sh_desc *first = NULL, *prev = NULL, *new;
 	size_t copy_size;
+	LIST_HEAD(tx_list);
+	int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);

 	if (!chan)
 		return NULL;
@@ -301,108 +319,189 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(

 	sh_chan = to_sh_chan(chan);

+	/* Have to lock the whole loop to protect against concurrent release */
+	spin_lock_bh(&sh_chan->desc_lock);
+
+	/*
+	 * Chaining:
+	 * first descriptor is what user is dealing with in all API calls, its
+	 * cookie is at first set to -EBUSY, at tx-submit to a positive
+	 * number
+	 * if more than one chunk is needed further chunks have cookie = -EINVAL
+	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
+	 * all chunks are linked onto the tx_list head with their .node heads
+	 * only during this function, then they are immediately spliced
+	 * back onto the free list in form of a chain
+	 */
 	do {
-		/* Allocate the link descriptor from DMA pool */
+		/* Allocate the link descriptor from the free list */
 		new = sh_dmae_get_desc(sh_chan);
 		if (!new) {
 			dev_err(sh_chan->dev,
 				"No free memory for link descriptor\n");
-			goto err_get_desc;
+			list_for_each_entry(new, &tx_list, node)
+				new->mark = DESC_IDLE;
+			list_splice(&tx_list, &sh_chan->ld_free);
+			spin_unlock_bh(&sh_chan->desc_lock);
+			return NULL;
 		}

-		copy_size = min(len, (size_t)SH_DMA_TCR_MAX);
+		copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);

 		new->hw.sar = dma_src;
 		new->hw.dar = dma_dest;
 		new->hw.tcr = copy_size;
-		if (!first)
+		if (!first) {
+			/* First desc */
+			new->async_tx.cookie = -EBUSY;
 			first = new;
+		} else {
+			/* Other desc - invisible to the user */
+			new->async_tx.cookie = -EINVAL;
+		}

-		new->mark = DESC_NCOMP;
-		async_tx_ack(&new->async_tx);
+		dev_dbg(sh_chan->dev,
+			"chaining %u of %u with %p, dst %x, cookie %d\n",
+			copy_size, len, &new->async_tx, dma_dest,
+			new->async_tx.cookie);
+
+		new->mark = DESC_PREPARED;
+		new->async_tx.flags = flags;
+		new->chunks = chunks--;

 		prev = new;
 		len -= copy_size;
 		dma_src += copy_size;
 		dma_dest += copy_size;
 		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &first->tx_list);
+		list_add_tail(&new->node, &tx_list);
 	} while (len);

-	new->async_tx.flags = flags; /* client is in control of this ack */
-	new->async_tx.cookie = -EBUSY; /* Last desc */
+	if (new != first)
+		new->async_tx.cookie = -ENOSPC;

-	return &first->async_tx;
+	/* Put them back on the free list, so, they don't get lost */
+	list_splice_tail(&tx_list, &sh_chan->ld_free);

-err_get_desc:
-	sh_dmae_put_desc(sh_chan, first);
-	return NULL;
+	spin_unlock_bh(&sh_chan->desc_lock);

+	return &first->async_tx;
 }

-/*
- * sh_chan_ld_cleanup - Clean up link descriptors
- *
- * This function clean up the ld_queue of DMA channel.
- */
-static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan)
+static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
 {
 	struct sh_desc *desc, *_desc;
+	/* Is the "exposed" head of a chain acked? */
+	bool head_acked = false;
+	dma_cookie_t cookie = 0;
+	dma_async_tx_callback callback = NULL;
+	void *param = NULL;

 	spin_lock_bh(&sh_chan->desc_lock);
 	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
-		dma_async_tx_callback callback;
-		void *callback_param;
-
-		/* non send data */
-		if (desc->mark == DESC_NCOMP)
+		struct dma_async_tx_descriptor *tx = &desc->async_tx;
+
+		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
+		BUG_ON(desc->mark != DESC_SUBMITTED &&
+		       desc->mark != DESC_COMPLETED &&
+		       desc->mark != DESC_WAITING);
+
+		/*
+		 * queue is ordered, and we use this loop to (1) clean up all
+		 * completed descriptors, and to (2) update descriptor flags of
+		 * any chunks in a (partially) completed chain
+		 */
+		if (!all && desc->mark == DESC_SUBMITTED &&
+		    desc->cookie != cookie)
 			break;

-		/* send data sesc */
-		callback = desc->async_tx.callback;
-		callback_param = desc->async_tx.callback_param;
+		if (tx->cookie > 0)
+			cookie = tx->cookie;

-		/* Remove from ld_queue list */
-		list_splice_init(&desc->tx_list, &sh_chan->ld_free);
+		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
+			BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
+			sh_chan->completed_cookie = desc->cookie;
+		}

-		dev_dbg(sh_chan->dev, "link descriptor %p will be recycle.\n",
-			desc);
+		/* Call callback on the last chunk */
+		if (desc->mark == DESC_COMPLETED && tx->callback) {
+			desc->mark = DESC_WAITING;
+			callback = tx->callback;
+			param = tx->callback_param;
+			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
+				tx->cookie, tx, sh_chan->id);
+			BUG_ON(desc->chunks != 1);
+			break;
+		}

-		list_move(&desc->node, &sh_chan->ld_free);
-		/* Run the link descriptor callback function */
-		if (callback) {
-			spin_unlock_bh(&sh_chan->desc_lock);
-			dev_dbg(sh_chan->dev, "link descriptor %p callback\n",
-				desc);
-			callback(callback_param);
-			spin_lock_bh(&sh_chan->desc_lock);
+		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
+			if (desc->mark == DESC_COMPLETED) {
+				BUG_ON(tx->cookie < 0);
+				desc->mark = DESC_WAITING;
+			}
+			head_acked = async_tx_test_ack(tx);
+		} else {
+			switch (desc->mark) {
+			case DESC_COMPLETED:
+				desc->mark = DESC_WAITING;
+				/* Fall through */
+			case DESC_WAITING:
+				if (head_acked)
+					async_tx_ack(&desc->async_tx);
+			}
+		}
+
+		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
+			tx, tx->cookie);
+
+		if (((desc->mark == DESC_COMPLETED ||
+		      desc->mark == DESC_WAITING) &&
+		     async_tx_test_ack(&desc->async_tx)) || all) {
+			/* Remove from ld_queue list */
+			desc->mark = DESC_IDLE;
+			list_move(&desc->node, &sh_chan->ld_free);
 		}
 	}
 	spin_unlock_bh(&sh_chan->desc_lock);
+
+	if (callback)
+		callback(param);
+
+	return callback;
+}
+
+/*
+ * sh_chan_ld_cleanup - Clean up link descriptors
+ *
+ * This function cleans up the ld_queue of DMA channel.
+ */
+static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
+{
+	while (__ld_cleanup(sh_chan, all))
+		;
 }

 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 {
-	struct list_head *ld_node;
-	struct sh_dmae_regs hw;
+	struct sh_desc *sd;

+	spin_lock_bh(&sh_chan->desc_lock);
 	/* DMA work check */
-	if (dmae_is_busy(sh_chan))
+	if (dmae_is_busy(sh_chan)) {
+		spin_unlock_bh(&sh_chan->desc_lock);
 		return;
+	}

 	/* Find the first un-transfer desciptor */
-	for (ld_node = sh_chan->ld_queue.next;
-	     (ld_node != &sh_chan->ld_queue)
-	     && (to_sh_desc(ld_node)->mark == DESC_COMP);
-	     ld_node = ld_node->next)
-		cpu_relax();
-
-	if (ld_node != &sh_chan->ld_queue) {
-		/* Get the ld start address from ld_queue */
-		hw = to_sh_desc(ld_node)->hw;
-		dmae_set_reg(sh_chan, hw);
-		dmae_start(sh_chan);
-	}
+	list_for_each_entry(sd, &sh_chan->ld_queue, node)
+		if (sd->mark == DESC_SUBMITTED) {
+			/* Get the ld start address from ld_queue */
+			dmae_set_reg(sh_chan, &sd->hw);
+			dmae_start(sh_chan);
+			break;
+		}
+
+	spin_unlock_bh(&sh_chan->desc_lock);
 }

 static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
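The long comment added in sh_dmae_prep_memcpy() above fixes the cookie convention for chunked transfers: the user-visible first chunk starts at -EBUSY, interior chunks get -EINVAL, a distinct last chunk gets -ENOSPC, and submit replaces them all with the real cookie. A standalone sketch of just that assignment over a chain (illustrative types, errno constants reused as sentinels):

/* Sketch of the chunk-cookie convention documented in the
 * sh_dmae_prep_memcpy() comment above. Types and helpers are
 * illustrative only, not driver code.
 */
#include <errno.h>
#include <stdio.h>

#define NCHUNKS 4

struct fake_chunk {
	int cookie;
};

static void prep_chain(struct fake_chunk *c, int n)
{
	int i;

	for (i = 0; i < n; i++)
		c[i].cookie = i == 0 ? -EBUSY : -EINVAL;
	if (n > 1)
		c[n - 1].cookie = -ENOSPC;	/* last chunk, if not also first */
}

static void submit_chain(struct fake_chunk *c, int n, int cookie)
{
	int i;

	for (i = 0; i < n; i++)
		c[i].cookie = cookie;		/* all chunks share the real cookie */
}

int main(void)
{
	struct fake_chunk chain[NCHUNKS];
	int i;

	prep_chain(chain, NCHUNKS);
	for (i = 0; i < NCHUNKS; i++)
		printf("chunk %d prep cookie %d\n", i, chain[i].cookie);

	submit_chain(chain, NCHUNKS, 7);
	printf("after submit, chunk 0 cookie %d\n", chain[0].cookie);
	return 0;
}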
@@ -420,12 +519,11 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
 	dma_cookie_t last_used;
 	dma_cookie_t last_complete;

-	sh_dmae_chan_ld_cleanup(sh_chan);
+	sh_dmae_chan_ld_cleanup(sh_chan, false);

 	last_used = chan->cookie;
 	last_complete = sh_chan->completed_cookie;
-	if (last_complete == -EBUSY)
-		last_complete = last_used;
+	BUG_ON(last_complete < 0);

 	if (done)
 		*done = last_complete;
@@ -480,11 +578,13 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 		err = sh_dmae_rst(0);
 		if (err)
 			return err;
+#ifdef SH_DMAC_BASE1
 		if (shdev->pdata.mode & SHDMA_DMAOR1) {
 			err = sh_dmae_rst(1);
 			if (err)
 				return err;
 		}
+#endif
 		disable_irq(irq);
 		return IRQ_HANDLED;
 	}
@@ -494,35 +594,25 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 static void dmae_do_tasklet(unsigned long data)
 {
 	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
-	struct sh_desc *desc, *_desc, *cur_desc = NULL;
+	struct sh_desc *desc;
 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);

-	list_for_each_entry_safe(desc, _desc,
-				 &sh_chan->ld_queue, node) {
-		if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
-			cur_desc = desc;
+	spin_lock(&sh_chan->desc_lock);
+	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
+		if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
+		    desc->mark == DESC_SUBMITTED) {
+			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
+				desc->async_tx.cookie, &desc->async_tx,
+				desc->hw.dar);
+			desc->mark = DESC_COMPLETED;
 			break;
 		}
 	}
+	spin_unlock(&sh_chan->desc_lock);

-	if (cur_desc) {
-		switch (cur_desc->async_tx.cookie) {
-		case 0: /* other desc data */
-			break;
-		case -EBUSY: /* last desc */
-			sh_chan->completed_cookie =
-				cur_desc->async_tx.cookie;
-			break;
-		default: /* first desc ( 0 < )*/
-			sh_chan->completed_cookie =
-				cur_desc->async_tx.cookie - 1;
-			break;
-		}
-		cur_desc->mark = DESC_COMP;
-	}
 	/* Next desc */
 	sh_chan_xfer_ld_queue(sh_chan);
-	sh_dmae_chan_ld_cleanup(sh_chan);
+	sh_dmae_chan_ld_cleanup(sh_chan, false);
 }

 static unsigned int get_dmae_irq(unsigned int id)
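The new enum sh_dmae_desc_status drives the rest of the shdma.c changes: a descriptor moves IDLE → PREPARED at prep time, → SUBMITTED at tx_submit, → COMPLETED in the interrupt tasklet, → WAITING once its callback has run, and back to IDLE when acked and recycled. A toy walk through that lifecycle, mirroring the enum names only (the transition logic is a simplification, not driver code):

/* Toy version of the descriptor lifecycle introduced by the shdma
 * rework above. Only the state names mirror the driver; everything
 * else is illustrative.
 */
#include <stdio.h>

enum desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,
	DESC_WAITING,
};

static const char *name(enum desc_status s)
{
	static const char *names[] = {
		"IDLE", "PREPARED", "SUBMITTED", "COMPLETED", "WAITING",
	};
	return names[s];
}

int main(void)
{
	/* prep -> submit -> tasklet -> callback -> ack/cleanup */
	enum desc_status path[] = {
		DESC_IDLE, DESC_PREPARED, DESC_SUBMITTED,
		DESC_COMPLETED, DESC_WAITING, DESC_IDLE,
	};
	unsigned int i;

	for (i = 0; i + 1 < sizeof(path) / sizeof(path[0]); i++)
		printf("%s -> %s\n", name(path[i]), name(path[i + 1]));
	return 0;
}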
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 60b81e529b4..108f1cffb6f 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -13,9 +13,9 @@
 #ifndef __DMA_SHDMA_H
 #define __DMA_SHDMA_H

-#include <linux/device.h>
-#include <linux/dmapool.h>
 #include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>

 #define SH_DMA_TCR_MAX 0x00FFFFFF	/* 16MB */

@@ -26,13 +26,16 @@ struct sh_dmae_regs {
 };

 struct sh_desc {
-	struct list_head tx_list;
 	struct sh_dmae_regs hw;
 	struct list_head node;
 	struct dma_async_tx_descriptor async_tx;
+	dma_cookie_t cookie;
+	int chunks;
 	int mark;
 };

+struct device;
+
 struct sh_dmae_chan {
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */