author	Dan Williams <dan.j.williams@intel.com>	2009-09-08 15:01:49 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:30:24 -0400
commit	09c8a5b85e5f1e74a19bdd7c85547429d51df1cd (patch)
tree	9bb255d9f596ab062996de49032875e8b9253971	/drivers/dma/ioat/dma.c
parent	ad643f54c8514998333bc6c7b201fda2267496be (diff)
ioat: switch watchdog and reset handler from workqueue to timer
In order to support dynamic resizing of the descriptor ring, or polling for a
descriptor in the presence of a hung channel, the reset handler needs to make
progress while in a non-preemptible context.  The current workqueue
implementation precludes polling channel reset completion under spin_lock().

This conversion also allows us to return to opportunistic cleanup in the ioat2
case, as the timer implementation guarantees at least one cleanup after every
descriptor is submitted.  This means the worst case completion latency becomes
the timer frequency (for exceptional circumstances), but with the benefit of
avoiding busy waiting when the lock is contended.

Signed-off-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
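For readers less familiar with the two deferral mechanisms, the following is a
minimal, self-contained sketch of the workqueue-to-timer conversion pattern the
patch applies.  The struct, function names, and the 100 ms period below are
invented for illustration; they are not the driver's code.

/* Illustration only: delayed_work vs. timer_list deferral.
 * All identifiers here are hypothetical; see the hunks below for the
 * actual ioat changes.
 */
#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_chan {
	struct delayed_work work;	/* old scheme */
	struct timer_list timer;	/* new scheme */
};

/* Old scheme: the handler runs in process context and may sleep, but
 * the work item cannot be polled or completed from under spin_lock().
 */
static void example_work_fn(struct work_struct *work)
{
	/* second half of a reset, completion scan, ... */
}

static void example_old_init(struct example_chan *chan)
{
	INIT_DELAYED_WORK(&chan->work, example_work_fn);
	schedule_delayed_work(&chan->work, msecs_to_jiffies(100));
}

/* New scheme: the handler fires in softirq context, so it can be
 * (re)armed with mod_timer() from atomic context.
 */
static void example_timer_fn(unsigned long data)
{
	struct example_chan *chan = (struct example_chan *)data;

	/* cleanup / watchdog logic, then optionally re-arm */
	mod_timer(&chan->timer, jiffies + msecs_to_jiffies(100));
}

static void example_new_init(struct example_chan *chan)
{
	init_timer(&chan->timer);
	chan->timer.function = example_timer_fn;
	chan->timer.data = (unsigned long)chan;
	mod_timer(&chan->timer, jiffies + msecs_to_jiffies(100));
}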
Diffstat (limited to 'drivers/dma/ioat/dma.c')
-rw-r--r--	drivers/dma/ioat/dma.c	351
1 file changed, 136 insertions(+), 215 deletions(-)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index f59b6f42f866..17a518d0386f 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -99,23 +99,26 @@ static void ioat1_cleanup_tasklet(unsigned long data);
 /* common channel initialization */
 void ioat_init_channel(struct ioatdma_device *device,
 		       struct ioat_chan_common *chan, int idx,
-		       work_func_t work_fn, void (*tasklet)(unsigned long),
-		       unsigned long tasklet_data)
+		       void (*timer_fn)(unsigned long),
+		       void (*tasklet)(unsigned long),
+		       unsigned long ioat)
 {
 	struct dma_device *dma = &device->common;
 
 	chan->device = device;
 	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
-	INIT_DELAYED_WORK(&chan->work, work_fn);
 	spin_lock_init(&chan->cleanup_lock);
 	chan->common.device = dma;
 	list_add_tail(&chan->common.device_node, &dma->channels);
 	device->idx[idx] = chan;
-	tasklet_init(&chan->cleanup_task, tasklet, tasklet_data);
+	init_timer(&chan->timer);
+	chan->timer.function = timer_fn;
+	chan->timer.data = ioat;
+	tasklet_init(&chan->cleanup_task, tasklet, ioat);
 	tasklet_disable(&chan->cleanup_task);
 }
 
-static void ioat1_reset_part2(struct work_struct *work);
+static void ioat1_timer_event(unsigned long data);
 
 /**
  * ioat1_dma_enumerate_channels - find and initialize the device's channels
@@ -153,7 +156,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device)
 			break;
 
 		ioat_init_channel(device, &ioat->base, i,
-				  ioat1_reset_part2,
+				  ioat1_timer_event,
 				  ioat1_cleanup_tasklet,
 				  (unsigned long) ioat);
 		ioat->xfercap = xfercap;
@@ -193,61 +196,6 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
 }
 
 /**
- * ioat1_reset_part2 - reinit the channel after a reset
- */
-static void ioat1_reset_part2(struct work_struct *work)
-{
-	struct ioat_chan_common *chan;
-	struct ioat_dma_chan *ioat;
-	struct ioat_desc_sw *desc;
-	int dmacount;
-	bool start_null = false;
-
-	chan = container_of(work, struct ioat_chan_common, work.work);
-	ioat = container_of(chan, struct ioat_dma_chan, base);
-	spin_lock_bh(&chan->cleanup_lock);
-	spin_lock_bh(&ioat->desc_lock);
-
-	*chan->completion = 0;
-	ioat->pending = 0;
-
-	/* count the descriptors waiting */
-	dmacount = 0;
-	if (ioat->used_desc.prev) {
-		desc = to_ioat_desc(ioat->used_desc.prev);
-		do {
-			dmacount++;
-			desc = to_ioat_desc(desc->node.next);
-		} while (&desc->node != ioat->used_desc.next);
-	}
-
-	if (dmacount) {
-		/*
-		 * write the new starting descriptor address
-		 * this puts channel engine into ARMED state
-		 */
-		desc = to_ioat_desc(ioat->used_desc.prev);
-		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
-		       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
-		writel(((u64) desc->txd.phys) >> 32,
-		       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
-
-		writeb(IOAT_CHANCMD_START, chan->reg_base
-			+ IOAT_CHANCMD_OFFSET(chan->device->version));
-	} else
-		start_null = true;
-	spin_unlock_bh(&ioat->desc_lock);
-	spin_unlock_bh(&chan->cleanup_lock);
-
-	dev_err(to_dev(chan),
-		"chan%d reset - %d descs waiting, %d total desc\n",
-		chan_num(chan), dmacount, ioat->desccount);
-
-	if (start_null)
-		ioat1_dma_start_null_desc(ioat);
-}
-
-/**
  * ioat1_reset_channel - restart a channel
  * @ioat: IOAT DMA channel handle
  */
@@ -257,12 +205,9 @@ static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
 	void __iomem *reg_base = chan->reg_base;
 	u32 chansts, chanerr;
 
-	if (!ioat->used_desc.prev)
-		return;
-
-	dev_dbg(to_dev(chan), "%s\n", __func__);
+	dev_warn(to_dev(chan), "reset\n");
 	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
-	chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
 	if (chanerr) {
 		dev_err(to_dev(chan),
 			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
@@ -278,93 +223,11 @@ static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
 	 * while we're waiting.
 	 */
 
-	spin_lock_bh(&ioat->desc_lock);
 	ioat->pending = INT_MIN;
 	writeb(IOAT_CHANCMD_RESET,
 	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
-	spin_unlock_bh(&ioat->desc_lock);
-
-	/* schedule the 2nd half instead of sleeping a long time */
-	schedule_delayed_work(&chan->work, RESET_DELAY);
-}
-
-/**
- * ioat1_chan_watchdog - watch for stuck channels
- */
-static void ioat1_chan_watchdog(struct work_struct *work)
-{
-	struct ioatdma_device *device =
-		container_of(work, struct ioatdma_device, work.work);
-	struct ioat_dma_chan *ioat;
-	struct ioat_chan_common *chan;
-	int i;
-	u64 completion;
-	u32 completion_low;
-	unsigned long compl_desc_addr_hw;
-
-	for (i = 0; i < device->common.chancnt; i++) {
-		chan = ioat_chan_by_index(device, i);
-		ioat = container_of(chan, struct ioat_dma_chan, base);
-
-		if (/* have we started processing anything yet */
-		    chan->last_completion
-		    /* have we completed any since last watchdog cycle? */
-		    && (chan->last_completion == chan->watchdog_completion)
-		    /* has TCP stuck on one cookie since last watchdog? */
-		    && (chan->watchdog_tcp_cookie == chan->watchdog_last_tcp_cookie)
-		    && (chan->watchdog_tcp_cookie != chan->completed_cookie)
-		    /* is there something in the chain to be processed? */
-		    /* CB1 chain always has at least the last one processed */
-		    && (ioat->used_desc.prev != ioat->used_desc.next)
-		    && ioat->pending == 0) {
-
-			/*
-			 * check CHANSTS register for completed
-			 * descriptor address.
-			 * if it is different than completion writeback,
-			 * it is not zero
-			 * and it has changed since the last watchdog
-			 * we can assume that channel
-			 * is still working correctly
-			 * and the problem is in completion writeback.
-			 * update completion writeback
-			 * with actual CHANSTS value
-			 * else
-			 * try resetting the channel
-			 */
-
-			/* we need to read the low address first as this
-			 * causes the chipset to latch the upper bits
-			 * for the subsequent read
-			 */
-			completion_low = readl(chan->reg_base +
-				IOAT_CHANSTS_OFFSET_LOW(chan->device->version));
-			completion = readl(chan->reg_base +
-				IOAT_CHANSTS_OFFSET_HIGH(chan->device->version));
-			completion <<= 32;
-			completion |= completion_low;
-			compl_desc_addr_hw = completion &
-				IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
-
-			if ((compl_desc_addr_hw != 0)
-			    && (compl_desc_addr_hw != chan->watchdog_completion)
-			    && (compl_desc_addr_hw != chan->last_compl_desc_addr_hw)) {
-				chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
-				*chan->completion = completion;
-			} else {
-				ioat1_reset_channel(ioat);
-				chan->watchdog_completion = 0;
-				chan->last_compl_desc_addr_hw = 0;
-			}
-		} else {
-			chan->last_compl_desc_addr_hw = 0;
-			chan->watchdog_completion = chan->last_completion;
-		}
-
-		chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie;
-	}
-
-	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
+	set_bit(IOAT_RESET_PENDING, &chan->state);
+	mod_timer(&chan->timer, jiffies + RESET_DELAY);
 }
 
 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -372,6 +235,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct dma_chan *c = tx->chan;
 	struct ioat_dma_chan *ioat = to_ioat_chan(c);
 	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
+	struct ioat_chan_common *chan = &ioat->base;
 	struct ioat_desc_sw *first;
 	struct ioat_desc_sw *chain_tail;
 	dma_cookie_t cookie;
@@ -396,6 +260,9 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	dump_desc_dbg(ioat, chain_tail);
 	dump_desc_dbg(ioat, first);
 
+	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
 	ioat->pending += desc->hw->tx_cnt;
 	if (ioat->pending >= ioat_pending_level)
 		__ioat1_dma_memcpy_issue_pending(ioat);
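The submit-side change above arms the completion timer only on the
idle-to-busy transition.  A reduced sketch of that idiom follows; the
example_* names and the 100 ms timeout are invented for illustration and do
not reflect the driver's actual COMPLETION_TIMEOUT value.

#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>

#define EXAMPLE_COMPLETION_PENDING	0	/* bit number in ->state */
#define EXAMPLE_COMPLETION_TIMEOUT	msecs_to_jiffies(100)	/* arbitrary */

struct example_chan {
	unsigned long state;
	struct timer_list timer;
};

static void example_submit(struct example_chan *chan)
{
	/* the first submitter in a busy period sets the bit and starts
	 * the watchdog; later submitters see it set and skip mod_timer()
	 */
	if (!test_and_set_bit(EXAMPLE_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + EXAMPLE_COMPLETION_TIMEOUT);

	/* ... hand the descriptor chain to the hardware ... */
}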
@@ -520,6 +387,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
 		return;
 
 	tasklet_disable(&chan->cleanup_task);
+	del_timer_sync(&chan->timer);
 	ioat1_cleanup(ioat);
 
 	/* Delay 100ms after reset to allow internal DMA logic to quiesce
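The new del_timer_sync() call above pins down the teardown ordering: stop the
cleanup tasklet, then synchronously kill the timer before descriptors are
freed.  A sketch of that ordering with a hypothetical channel type (not the
driver's struct):

#include <linux/interrupt.h>
#include <linux/timer.h>

struct example_chan {
	struct tasklet_struct cleanup_task;
	struct timer_list timer;
};

static void example_free_chan_resources(struct example_chan *chan)
{
	tasklet_disable(&chan->cleanup_task);
	/* waits for a handler running on another CPU and prevents re-arming */
	del_timer_sync(&chan->timer);

	/* ... now it is safe to drain and free the descriptor ring ... */
}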
@@ -560,9 +428,6 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
 
 	chan->last_completion = 0;
 	chan->completion_dma = 0;
-	chan->watchdog_completion = 0;
-	chan->last_compl_desc_addr_hw = 0;
-	chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0;
 	ioat->pending = 0;
 	ioat->desccount = 0;
 }
@@ -705,15 +570,15 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
 	u64 completion;
 
 	completion = *chan->completion;
-	phys_complete = completion & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+	phys_complete = ioat_chansts_to_addr(completion);
 
 	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
 		(unsigned long long) phys_complete);
 
-	if ((completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
-			IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
+	if (is_ioat_halted(completion)) {
+		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
 		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
-			readl(chan->reg_base + IOAT_CHANERR_OFFSET));
+			chanerr);
 
 		/* TODO do something to salvage the situation */
 	}
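ioat_chansts_to_addr() and is_ioat_halted() are not defined in this file.
Judging from the open-coded expressions they replace here and in the reset
path above, they are presumably small header helpers along the following
lines; this is an inference, not the actual header contents.

/* Presumed shape of the helpers used above, inferred from the
 * expressions they replace; the real definitions live in the ioat
 * headers and may use renamed register macros.
 */
static inline unsigned long ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline bool is_ioat_halted(u64 status)
{
	return (status & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
		IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED;
}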
@@ -721,48 +586,31 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
 	return phys_complete;
 }
 
-/**
- * ioat1_cleanup - cleanup up finished descriptors
- * @chan: ioat channel to be cleaned up
- */
-static void ioat1_cleanup(struct ioat_dma_chan *ioat)
+bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
+			   unsigned long *phys_complete)
 {
-	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
-	struct ioat_desc_sw *desc, *_desc;
-	dma_cookie_t cookie = 0;
-	struct dma_async_tx_descriptor *tx;
-
-	prefetch(chan->completion);
-
-	if (!spin_trylock_bh(&chan->cleanup_lock))
-		return;
+	*phys_complete = ioat_get_current_completion(chan);
+	if (*phys_complete == chan->last_completion)
+		return false;
+	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
+	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 
-	phys_complete = ioat_get_current_completion(chan);
-	if (phys_complete == chan->last_completion) {
-		spin_unlock_bh(&chan->cleanup_lock);
-		/*
-		 * perhaps we're stuck so hard that the watchdog can't go off?
-		 * try to catch it after 2 seconds
-		 */
-		if (time_after(jiffies,
-			       chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
-			ioat1_chan_watchdog(&(chan->device->work.work));
-			chan->last_completion_time = jiffies;
-		}
-		return;
-	}
-	chan->last_completion_time = jiffies;
+	return true;
+}
 
-	cookie = 0;
-	if (!spin_trylock_bh(&ioat->desc_lock)) {
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	}
+static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	struct list_head *_desc, *n;
+	struct dma_async_tx_descriptor *tx;
 
 	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
 		__func__, phys_complete);
-	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
+	list_for_each_safe(_desc, n, &ioat->used_desc) {
+		struct ioat_desc_sw *desc;
+
+		prefetch(n);
+		desc = list_entry(_desc, typeof(*desc), node);
 		tx = &desc->txd;
 		/*
 		 * Incoming DMA requests may use multiple descriptors,
@@ -771,7 +619,8 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
 		 */
 		dump_desc_dbg(ioat, desc);
 		if (tx->cookie) {
-			cookie = tx->cookie;
+			chan->completed_cookie = tx->cookie;
+			tx->cookie = 0;
 			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
 			if (tx->callback) {
 				tx->callback(tx->callback_param);
@@ -786,27 +635,110 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
 			 */
 			if (async_tx_test_ack(tx))
 				list_move_tail(&desc->node, &ioat->free_desc);
-			else
-				tx->cookie = 0;
 		} else {
 			/*
 			 * last used desc. Do not remove, so we can
-			 * append from it, but don't look at it next
-			 * time, either
+			 * append from it.
 			 */
-			tx->cookie = 0;
+
+			/* if nothing else is pending, cancel the
+			 * completion timeout
+			 */
+			if (n == &ioat->used_desc) {
+				dev_dbg(to_dev(chan),
+					"%s cancel completion timeout\n",
+					__func__);
+				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
+			}
 
 			/* TODO check status bits? */
 			break;
 		}
 	}
 
+	chan->last_completion = phys_complete;
+}
+
+/**
+ * ioat1_cleanup - cleanup up finished descriptors
+ * @chan: ioat channel to be cleaned up
+ *
+ * To prevent lock contention we defer cleanup when the locks are
+ * contended with a terminal timeout that forces cleanup and catches
+ * completion notification errors.
+ */
+static void ioat1_cleanup(struct ioat_dma_chan *ioat)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	unsigned long phys_complete;
+
+	prefetch(chan->completion);
+
+	if (!spin_trylock_bh(&chan->cleanup_lock))
+		return;
+
+	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	}
+
+	if (!spin_trylock_bh(&ioat->desc_lock)) {
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	}
+
+	__cleanup(ioat, phys_complete);
+
 	spin_unlock_bh(&ioat->desc_lock);
+	spin_unlock_bh(&chan->cleanup_lock);
+}
 
-	chan->last_completion = phys_complete;
-	if (cookie != 0)
-		chan->completed_cookie = cookie;
+static void ioat1_timer_event(unsigned long data)
+{
+	struct ioat_dma_chan *ioat = (void *) data;
+	struct ioat_chan_common *chan = &ioat->base;
 
+	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
+
+	spin_lock_bh(&chan->cleanup_lock);
+	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
+		struct ioat_desc_sw *desc;
+
+		spin_lock_bh(&ioat->desc_lock);
+
+		/* restart active descriptors */
+		desc = to_ioat_desc(ioat->used_desc.prev);
+		ioat_set_chainaddr(ioat, desc->txd.phys);
+		ioat_start(chan);
+
+		ioat->pending = 0;
+		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+		spin_unlock_bh(&ioat->desc_lock);
+	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
+		unsigned long phys_complete;
+
+		spin_lock_bh(&ioat->desc_lock);
+		/* if we haven't made progress and we have already
+		 * acknowledged a pending completion once, then be more
+		 * forceful with a restart
+		 */
+		if (ioat_cleanup_preamble(chan, &phys_complete))
+			__cleanup(ioat, phys_complete);
+		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+			ioat1_reset_channel(ioat);
+		else {
+			u64 status = ioat_chansts(chan);
+
+			/* manually update the last completion address */
+			if (ioat_chansts_to_addr(status) != 0)
+				*chan->completion = status;
+
+			set_bit(IOAT_COMPLETION_ACK, &chan->state);
+			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+		}
+		spin_unlock_bh(&ioat->desc_lock);
+	}
 	spin_unlock_bh(&chan->cleanup_lock);
 }
 
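The new ioat1_timer_event() above implements a small escalation policy: make
cleanup progress if the completion address moved, otherwise acknowledge the
stall once and wait another period, and only reset the channel on a second
consecutive stall.  The sketch below restates just that decision tree over a
hypothetical channel type; the locking, the IOAT_RESET_PENDING branch, and the
manual completion-writeback update are only in the real function above.

#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>

#define EXAMPLE_COMPLETION_ACK		1	/* bit number in ->state */
#define EXAMPLE_COMPLETION_TIMEOUT	msecs_to_jiffies(100)	/* arbitrary */

struct example_chan {
	unsigned long state;
	struct timer_list timer;
};

/* stand-ins for ioat_cleanup_preamble()/__cleanup()/ioat1_reset_channel();
 * example_made_progress() is expected to clear the ACK bit and re-arm the
 * timer when progress is seen, as ioat_cleanup_preamble() does above */
static bool example_made_progress(struct example_chan *chan,
				  unsigned long *phys_complete);
static void example_cleanup(struct example_chan *chan,
			    unsigned long phys_complete);
static void example_reset_channel(struct example_chan *chan);

static void example_timer_event(unsigned long data)
{
	struct example_chan *chan = (struct example_chan *)data;
	unsigned long phys_complete;

	if (example_made_progress(chan, &phys_complete)) {
		/* progress was made: reap finished descriptors */
		example_cleanup(chan, phys_complete);
	} else if (test_bit(EXAMPLE_COMPLETION_ACK, &chan->state)) {
		/* stalled twice in a row: assume the channel is hung */
		example_reset_channel(chan);
	} else {
		/* first stall: remember it and allow one more period */
		set_bit(EXAMPLE_COMPLETION_ACK, &chan->state);
		mod_timer(&chan->timer, jiffies + EXAMPLE_COMPLETION_TIMEOUT);
	}
}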
@@ -855,13 +787,8 @@ static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
 	list_add_tail(&desc->node, &ioat->used_desc);
 	dump_desc_dbg(ioat, desc);
 
-	writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
-	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
-	writel(((u64) desc->txd.phys) >> 32,
-	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
-
-	writeb(IOAT_CHANCMD_START, chan->reg_base
-		+ IOAT_CHANCMD_OFFSET(chan->device->version));
+	ioat_set_chainaddr(ioat, desc->txd.phys);
+	ioat_start(chan);
 	spin_unlock_bh(&ioat->desc_lock);
 }
 
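ioat_set_chainaddr() and ioat_start() replace the open-coded register writes
removed above; they are defined in the ioat headers, not in this file.  Based
on the removed writel()/writeb() sequences they presumably look roughly like
the sketch below, which may differ in detail from the real helpers.

/* Presumed shape, reconstructed from the removed register writes. */
static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
{
	struct ioat_chan_common *chan = &ioat->base;

	writel(addr & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
}

static inline void ioat_start(struct ioat_chan_common *chan)
{
	writeb(IOAT_CHANCMD_START,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
}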
@@ -1194,9 +1121,6 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
 	if (dca)
 		device->dca = ioat_dca_init(pdev, device->reg_base);
 
-	INIT_DELAYED_WORK(&device->work, ioat1_chan_watchdog);
-	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
-
 	return err;
 }
 
@@ -1204,9 +1128,6 @@ void __devexit ioat_dma_remove(struct ioatdma_device *device)
 {
 	struct dma_device *dma = &device->common;
 
-	if (device->version != IOAT_VER_3_0)
-		cancel_delayed_work(&device->work);
-
 	ioat_disable_interrupts(device);
 
 	dma_async_device_unregister(dma);