-rw-r--r--   drivers/dma/ioat/dma.c       | 351
-rw-r--r--   drivers/dma/ioat/dma.h       | 112
-rw-r--r--   drivers/dma/ioat/dma_v2.c    | 321
-rw-r--r--   drivers/dma/ioat/dma_v2.h    |  10
-rw-r--r--   drivers/dma/ioat/registers.h |  22
-rw-r--r--   drivers/idle/i7300_idle.c    |  16
6 files changed, 396 insertions(+), 436 deletions(-)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index f59b6f42f866..17a518d0386f 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -99,23 +99,26 @@ static void ioat1_cleanup_tasklet(unsigned long data);
99 | /* common channel initialization */ | 99 | /* common channel initialization */ |
100 | void ioat_init_channel(struct ioatdma_device *device, | 100 | void ioat_init_channel(struct ioatdma_device *device, |
101 | struct ioat_chan_common *chan, int idx, | 101 | struct ioat_chan_common *chan, int idx, |
102 | work_func_t work_fn, void (*tasklet)(unsigned long), | 102 | void (*timer_fn)(unsigned long), |
103 | unsigned long tasklet_data) | 103 | void (*tasklet)(unsigned long), |
104 | unsigned long ioat) | ||
104 | { | 105 | { |
105 | struct dma_device *dma = &device->common; | 106 | struct dma_device *dma = &device->common; |
106 | 107 | ||
107 | chan->device = device; | 108 | chan->device = device; |
108 | chan->reg_base = device->reg_base + (0x80 * (idx + 1)); | 109 | chan->reg_base = device->reg_base + (0x80 * (idx + 1)); |
109 | INIT_DELAYED_WORK(&chan->work, work_fn); | ||
110 | spin_lock_init(&chan->cleanup_lock); | 110 | spin_lock_init(&chan->cleanup_lock); |
111 | chan->common.device = dma; | 111 | chan->common.device = dma; |
112 | list_add_tail(&chan->common.device_node, &dma->channels); | 112 | list_add_tail(&chan->common.device_node, &dma->channels); |
113 | device->idx[idx] = chan; | 113 | device->idx[idx] = chan; |
114 | tasklet_init(&chan->cleanup_task, tasklet, tasklet_data); | 114 | init_timer(&chan->timer); |
115 | chan->timer.function = timer_fn; | ||
116 | chan->timer.data = ioat; | ||
117 | tasklet_init(&chan->cleanup_task, tasklet, ioat); | ||
115 | tasklet_disable(&chan->cleanup_task); | 118 | tasklet_disable(&chan->cleanup_task); |
116 | } | 119 | } |
117 | 120 | ||
118 | static void ioat1_reset_part2(struct work_struct *work); | 121 | static void ioat1_timer_event(unsigned long data); |
119 | 122 | ||
120 | /** | 123 | /** |
121 | * ioat1_dma_enumerate_channels - find and initialize the device's channels | 124 | * ioat1_dma_enumerate_channels - find and initialize the device's channels |
@@ -153,7 +156,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device)
153 | break; | 156 | break; |
154 | 157 | ||
155 | ioat_init_channel(device, &ioat->base, i, | 158 | ioat_init_channel(device, &ioat->base, i, |
156 | ioat1_reset_part2, | 159 | ioat1_timer_event, |
157 | ioat1_cleanup_tasklet, | 160 | ioat1_cleanup_tasklet, |
158 | (unsigned long) ioat); | 161 | (unsigned long) ioat); |
159 | ioat->xfercap = xfercap; | 162 | ioat->xfercap = xfercap; |
@@ -193,61 +196,6 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
193 | } | 196 | } |
194 | 197 | ||
195 | /** | 198 | /** |
196 | * ioat1_reset_part2 - reinit the channel after a reset | ||
197 | */ | ||
198 | static void ioat1_reset_part2(struct work_struct *work) | ||
199 | { | ||
200 | struct ioat_chan_common *chan; | ||
201 | struct ioat_dma_chan *ioat; | ||
202 | struct ioat_desc_sw *desc; | ||
203 | int dmacount; | ||
204 | bool start_null = false; | ||
205 | |||
206 | chan = container_of(work, struct ioat_chan_common, work.work); | ||
207 | ioat = container_of(chan, struct ioat_dma_chan, base); | ||
208 | spin_lock_bh(&chan->cleanup_lock); | ||
209 | spin_lock_bh(&ioat->desc_lock); | ||
210 | |||
211 | *chan->completion = 0; | ||
212 | ioat->pending = 0; | ||
213 | |||
214 | /* count the descriptors waiting */ | ||
215 | dmacount = 0; | ||
216 | if (ioat->used_desc.prev) { | ||
217 | desc = to_ioat_desc(ioat->used_desc.prev); | ||
218 | do { | ||
219 | dmacount++; | ||
220 | desc = to_ioat_desc(desc->node.next); | ||
221 | } while (&desc->node != ioat->used_desc.next); | ||
222 | } | ||
223 | |||
224 | if (dmacount) { | ||
225 | /* | ||
226 | * write the new starting descriptor address | ||
227 | * this puts channel engine into ARMED state | ||
228 | */ | ||
229 | desc = to_ioat_desc(ioat->used_desc.prev); | ||
230 | writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, | ||
231 | chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); | ||
232 | writel(((u64) desc->txd.phys) >> 32, | ||
233 | chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); | ||
234 | |||
235 | writeb(IOAT_CHANCMD_START, chan->reg_base | ||
236 | + IOAT_CHANCMD_OFFSET(chan->device->version)); | ||
237 | } else | ||
238 | start_null = true; | ||
239 | spin_unlock_bh(&ioat->desc_lock); | ||
240 | spin_unlock_bh(&chan->cleanup_lock); | ||
241 | |||
242 | dev_err(to_dev(chan), | ||
243 | "chan%d reset - %d descs waiting, %d total desc\n", | ||
244 | chan_num(chan), dmacount, ioat->desccount); | ||
245 | |||
246 | if (start_null) | ||
247 | ioat1_dma_start_null_desc(ioat); | ||
248 | } | ||
249 | |||
250 | /** | ||
251 | * ioat1_reset_channel - restart a channel | 199 | * ioat1_reset_channel - restart a channel |
252 | * @ioat: IOAT DMA channel handle | 200 | * @ioat: IOAT DMA channel handle |
253 | */ | 201 | */ |
@@ -257,12 +205,9 @@ static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
257 | void __iomem *reg_base = chan->reg_base; | 205 | void __iomem *reg_base = chan->reg_base; |
258 | u32 chansts, chanerr; | 206 | u32 chansts, chanerr; |
259 | 207 | ||
260 | if (!ioat->used_desc.prev) | 208 | dev_warn(to_dev(chan), "reset\n"); |
261 | return; | ||
262 | |||
263 | dev_dbg(to_dev(chan), "%s\n", __func__); | ||
264 | chanerr = readl(reg_base + IOAT_CHANERR_OFFSET); | 209 | chanerr = readl(reg_base + IOAT_CHANERR_OFFSET); |
265 | chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS; | 210 | chansts = *chan->completion & IOAT_CHANSTS_STATUS; |
266 | if (chanerr) { | 211 | if (chanerr) { |
267 | dev_err(to_dev(chan), | 212 | dev_err(to_dev(chan), |
268 | "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", | 213 | "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", |
@@ -278,93 +223,11 @@ static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
278 | * while we're waiting. | 223 | * while we're waiting. |
279 | */ | 224 | */ |
280 | 225 | ||
281 | spin_lock_bh(&ioat->desc_lock); | ||
282 | ioat->pending = INT_MIN; | 226 | ioat->pending = INT_MIN; |
283 | writeb(IOAT_CHANCMD_RESET, | 227 | writeb(IOAT_CHANCMD_RESET, |
284 | reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); | 228 | reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); |
285 | spin_unlock_bh(&ioat->desc_lock); | 229 | set_bit(IOAT_RESET_PENDING, &chan->state); |
286 | 230 | mod_timer(&chan->timer, jiffies + RESET_DELAY); | |
287 | /* schedule the 2nd half instead of sleeping a long time */ | ||
288 | schedule_delayed_work(&chan->work, RESET_DELAY); | ||
289 | } | ||
290 | |||
291 | /** | ||
292 | * ioat1_chan_watchdog - watch for stuck channels | ||
293 | */ | ||
294 | static void ioat1_chan_watchdog(struct work_struct *work) | ||
295 | { | ||
296 | struct ioatdma_device *device = | ||
297 | container_of(work, struct ioatdma_device, work.work); | ||
298 | struct ioat_dma_chan *ioat; | ||
299 | struct ioat_chan_common *chan; | ||
300 | int i; | ||
301 | u64 completion; | ||
302 | u32 completion_low; | ||
303 | unsigned long compl_desc_addr_hw; | ||
304 | |||
305 | for (i = 0; i < device->common.chancnt; i++) { | ||
306 | chan = ioat_chan_by_index(device, i); | ||
307 | ioat = container_of(chan, struct ioat_dma_chan, base); | ||
308 | |||
309 | if (/* have we started processing anything yet */ | ||
310 | chan->last_completion | ||
311 | /* have we completed any since last watchdog cycle? */ | ||
312 | && (chan->last_completion == chan->watchdog_completion) | ||
313 | /* has TCP stuck on one cookie since last watchdog? */ | ||
314 | && (chan->watchdog_tcp_cookie == chan->watchdog_last_tcp_cookie) | ||
315 | && (chan->watchdog_tcp_cookie != chan->completed_cookie) | ||
316 | /* is there something in the chain to be processed? */ | ||
317 | /* CB1 chain always has at least the last one processed */ | ||
318 | && (ioat->used_desc.prev != ioat->used_desc.next) | ||
319 | && ioat->pending == 0) { | ||
320 | |||
321 | /* | ||
322 | * check CHANSTS register for completed | ||
323 | * descriptor address. | ||
324 | * if it is different than completion writeback, | ||
325 | * it is not zero | ||
326 | * and it has changed since the last watchdog | ||
327 | * we can assume that channel | ||
328 | * is still working correctly | ||
329 | * and the problem is in completion writeback. | ||
330 | * update completion writeback | ||
331 | * with actual CHANSTS value | ||
332 | * else | ||
333 | * try resetting the channel | ||
334 | */ | ||
335 | |||
336 | /* we need to read the low address first as this | ||
337 | * causes the chipset to latch the upper bits | ||
338 | * for the subsequent read | ||
339 | */ | ||
340 | completion_low = readl(chan->reg_base + | ||
341 | IOAT_CHANSTS_OFFSET_LOW(chan->device->version)); | ||
342 | completion = readl(chan->reg_base + | ||
343 | IOAT_CHANSTS_OFFSET_HIGH(chan->device->version)); | ||
344 | completion <<= 32; | ||
345 | completion |= completion_low; | ||
346 | compl_desc_addr_hw = completion & | ||
347 | IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | ||
348 | |||
349 | if ((compl_desc_addr_hw != 0) | ||
350 | && (compl_desc_addr_hw != chan->watchdog_completion) | ||
351 | && (compl_desc_addr_hw != chan->last_compl_desc_addr_hw)) { | ||
352 | chan->last_compl_desc_addr_hw = compl_desc_addr_hw; | ||
353 | *chan->completion = completion; | ||
354 | } else { | ||
355 | ioat1_reset_channel(ioat); | ||
356 | chan->watchdog_completion = 0; | ||
357 | chan->last_compl_desc_addr_hw = 0; | ||
358 | } | ||
359 | } else { | ||
360 | chan->last_compl_desc_addr_hw = 0; | ||
361 | chan->watchdog_completion = chan->last_completion; | ||
362 | } | ||
363 | |||
364 | chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie; | ||
365 | } | ||
366 | |||
367 | schedule_delayed_work(&device->work, WATCHDOG_DELAY); | ||
368 | } | 231 | } |
369 | 232 | ||
370 | static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) | 233 | static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) |
@@ -372,6 +235,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
372 | struct dma_chan *c = tx->chan; | 235 | struct dma_chan *c = tx->chan; |
373 | struct ioat_dma_chan *ioat = to_ioat_chan(c); | 236 | struct ioat_dma_chan *ioat = to_ioat_chan(c); |
374 | struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); | 237 | struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); |
238 | struct ioat_chan_common *chan = &ioat->base; | ||
375 | struct ioat_desc_sw *first; | 239 | struct ioat_desc_sw *first; |
376 | struct ioat_desc_sw *chain_tail; | 240 | struct ioat_desc_sw *chain_tail; |
377 | dma_cookie_t cookie; | 241 | dma_cookie_t cookie; |
@@ -396,6 +260,9 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
396 | dump_desc_dbg(ioat, chain_tail); | 260 | dump_desc_dbg(ioat, chain_tail); |
397 | dump_desc_dbg(ioat, first); | 261 | dump_desc_dbg(ioat, first); |
398 | 262 | ||
263 | if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) | ||
264 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
265 | |||
399 | ioat->pending += desc->hw->tx_cnt; | 266 | ioat->pending += desc->hw->tx_cnt; |
400 | if (ioat->pending >= ioat_pending_level) | 267 | if (ioat->pending >= ioat_pending_level) |
401 | __ioat1_dma_memcpy_issue_pending(ioat); | 268 | __ioat1_dma_memcpy_issue_pending(ioat); |
@@ -520,6 +387,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
520 | return; | 387 | return; |
521 | 388 | ||
522 | tasklet_disable(&chan->cleanup_task); | 389 | tasklet_disable(&chan->cleanup_task); |
390 | del_timer_sync(&chan->timer); | ||
523 | ioat1_cleanup(ioat); | 391 | ioat1_cleanup(ioat); |
524 | 392 | ||
525 | /* Delay 100ms after reset to allow internal DMA logic to quiesce | 393 | /* Delay 100ms after reset to allow internal DMA logic to quiesce |
@@ -560,9 +428,6 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
560 | 428 | ||
561 | chan->last_completion = 0; | 429 | chan->last_completion = 0; |
562 | chan->completion_dma = 0; | 430 | chan->completion_dma = 0; |
563 | chan->watchdog_completion = 0; | ||
564 | chan->last_compl_desc_addr_hw = 0; | ||
565 | chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0; | ||
566 | ioat->pending = 0; | 431 | ioat->pending = 0; |
567 | ioat->desccount = 0; | 432 | ioat->desccount = 0; |
568 | } | 433 | } |
@@ -705,15 +570,15 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
705 | u64 completion; | 570 | u64 completion; |
706 | 571 | ||
707 | completion = *chan->completion; | 572 | completion = *chan->completion; |
708 | phys_complete = completion & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | 573 | phys_complete = ioat_chansts_to_addr(completion); |
709 | 574 | ||
710 | dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, | 575 | dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, |
711 | (unsigned long long) phys_complete); | 576 | (unsigned long long) phys_complete); |
712 | 577 | ||
713 | if ((completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == | 578 | if (is_ioat_halted(completion)) { |
714 | IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { | 579 | u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); |
715 | dev_err(to_dev(chan), "Channel halted, chanerr = %x\n", | 580 | dev_err(to_dev(chan), "Channel halted, chanerr = %x\n", |
716 | readl(chan->reg_base + IOAT_CHANERR_OFFSET)); | 581 | chanerr); |
717 | 582 | ||
718 | /* TODO do something to salvage the situation */ | 583 | /* TODO do something to salvage the situation */ |
719 | } | 584 | } |
@@ -721,48 +586,31 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
721 | return phys_complete; | 586 | return phys_complete; |
722 | } | 587 | } |
723 | 588 | ||
724 | /** | 589 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, |
725 | * ioat1_cleanup - cleanup up finished descriptors | 590 | unsigned long *phys_complete) |
726 | * @chan: ioat channel to be cleaned up | ||
727 | */ | ||
728 | static void ioat1_cleanup(struct ioat_dma_chan *ioat) | ||
729 | { | 591 | { |
730 | struct ioat_chan_common *chan = &ioat->base; | 592 | *phys_complete = ioat_get_current_completion(chan); |
731 | unsigned long phys_complete; | 593 | if (*phys_complete == chan->last_completion) |
732 | struct ioat_desc_sw *desc, *_desc; | 594 | return false; |
733 | dma_cookie_t cookie = 0; | 595 | clear_bit(IOAT_COMPLETION_ACK, &chan->state); |
734 | struct dma_async_tx_descriptor *tx; | 596 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); |
735 | |||
736 | prefetch(chan->completion); | ||
737 | |||
738 | if (!spin_trylock_bh(&chan->cleanup_lock)) | ||
739 | return; | ||
740 | 597 | ||
741 | phys_complete = ioat_get_current_completion(chan); | 598 | return true; |
742 | if (phys_complete == chan->last_completion) { | 599 | } |
743 | spin_unlock_bh(&chan->cleanup_lock); | ||
744 | /* | ||
745 | * perhaps we're stuck so hard that the watchdog can't go off? | ||
746 | * try to catch it after 2 seconds | ||
747 | */ | ||
748 | if (time_after(jiffies, | ||
749 | chan->last_completion_time + HZ*WATCHDOG_DELAY)) { | ||
750 | ioat1_chan_watchdog(&(chan->device->work.work)); | ||
751 | chan->last_completion_time = jiffies; | ||
752 | } | ||
753 | return; | ||
754 | } | ||
755 | chan->last_completion_time = jiffies; | ||
756 | 600 | ||
757 | cookie = 0; | 601 | static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete) |
758 | if (!spin_trylock_bh(&ioat->desc_lock)) { | 602 | { |
759 | spin_unlock_bh(&chan->cleanup_lock); | 603 | struct ioat_chan_common *chan = &ioat->base; |
760 | return; | 604 | struct list_head *_desc, *n; |
761 | } | 605 | struct dma_async_tx_descriptor *tx; |
762 | 606 | ||
763 | dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n", | 607 | dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n", |
764 | __func__, phys_complete); | 608 | __func__, phys_complete); |
765 | list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { | 609 | list_for_each_safe(_desc, n, &ioat->used_desc) { |
610 | struct ioat_desc_sw *desc; | ||
611 | |||
612 | prefetch(n); | ||
613 | desc = list_entry(_desc, typeof(*desc), node); | ||
766 | tx = &desc->txd; | 614 | tx = &desc->txd; |
767 | /* | 615 | /* |
768 | * Incoming DMA requests may use multiple descriptors, | 616 | * Incoming DMA requests may use multiple descriptors, |
@@ -771,7 +619,8 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
771 | */ | 619 | */ |
772 | dump_desc_dbg(ioat, desc); | 620 | dump_desc_dbg(ioat, desc); |
773 | if (tx->cookie) { | 621 | if (tx->cookie) { |
774 | cookie = tx->cookie; | 622 | chan->completed_cookie = tx->cookie; |
623 | tx->cookie = 0; | ||
775 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); | 624 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); |
776 | if (tx->callback) { | 625 | if (tx->callback) { |
777 | tx->callback(tx->callback_param); | 626 | tx->callback(tx->callback_param); |
@@ -786,27 +635,110 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
786 | */ | 635 | */ |
787 | if (async_tx_test_ack(tx)) | 636 | if (async_tx_test_ack(tx)) |
788 | list_move_tail(&desc->node, &ioat->free_desc); | 637 | list_move_tail(&desc->node, &ioat->free_desc); |
789 | else | ||
790 | tx->cookie = 0; | ||
791 | } else { | 638 | } else { |
792 | /* | 639 | /* |
793 | * last used desc. Do not remove, so we can | 640 | * last used desc. Do not remove, so we can |
794 | * append from it, but don't look at it next | 641 | * append from it. |
795 | * time, either | ||
796 | */ | 642 | */ |
797 | tx->cookie = 0; | 643 | |
644 | /* if nothing else is pending, cancel the | ||
645 | * completion timeout | ||
646 | */ | ||
647 | if (n == &ioat->used_desc) { | ||
648 | dev_dbg(to_dev(chan), | ||
649 | "%s cancel completion timeout\n", | ||
650 | __func__); | ||
651 | clear_bit(IOAT_COMPLETION_PENDING, &chan->state); | ||
652 | } | ||
798 | 653 | ||
799 | /* TODO check status bits? */ | 654 | /* TODO check status bits? */ |
800 | break; | 655 | break; |
801 | } | 656 | } |
802 | } | 657 | } |
803 | 658 | ||
659 | chan->last_completion = phys_complete; | ||
660 | } | ||
661 | |||
662 | /** | ||
663 | * ioat1_cleanup - cleanup up finished descriptors | ||
664 | * @chan: ioat channel to be cleaned up | ||
665 | * | ||
666 | * To prevent lock contention we defer cleanup when the locks are | ||
667 | * contended with a terminal timeout that forces cleanup and catches | ||
668 | * completion notification errors. | ||
669 | */ | ||
670 | static void ioat1_cleanup(struct ioat_dma_chan *ioat) | ||
671 | { | ||
672 | struct ioat_chan_common *chan = &ioat->base; | ||
673 | unsigned long phys_complete; | ||
674 | |||
675 | prefetch(chan->completion); | ||
676 | |||
677 | if (!spin_trylock_bh(&chan->cleanup_lock)) | ||
678 | return; | ||
679 | |||
680 | if (!ioat_cleanup_preamble(chan, &phys_complete)) { | ||
681 | spin_unlock_bh(&chan->cleanup_lock); | ||
682 | return; | ||
683 | } | ||
684 | |||
685 | if (!spin_trylock_bh(&ioat->desc_lock)) { | ||
686 | spin_unlock_bh(&chan->cleanup_lock); | ||
687 | return; | ||
688 | } | ||
689 | |||
690 | __cleanup(ioat, phys_complete); | ||
691 | |||
804 | spin_unlock_bh(&ioat->desc_lock); | 692 | spin_unlock_bh(&ioat->desc_lock); |
693 | spin_unlock_bh(&chan->cleanup_lock); | ||
694 | } | ||
805 | 695 | ||
806 | chan->last_completion = phys_complete; | 696 | static void ioat1_timer_event(unsigned long data) |
807 | if (cookie != 0) | 697 | { |
808 | chan->completed_cookie = cookie; | 698 | struct ioat_dma_chan *ioat = (void *) data; |
699 | struct ioat_chan_common *chan = &ioat->base; | ||
809 | 700 | ||
701 | dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); | ||
702 | |||
703 | spin_lock_bh(&chan->cleanup_lock); | ||
704 | if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) { | ||
705 | struct ioat_desc_sw *desc; | ||
706 | |||
707 | spin_lock_bh(&ioat->desc_lock); | ||
708 | |||
709 | /* restart active descriptors */ | ||
710 | desc = to_ioat_desc(ioat->used_desc.prev); | ||
711 | ioat_set_chainaddr(ioat, desc->txd.phys); | ||
712 | ioat_start(chan); | ||
713 | |||
714 | ioat->pending = 0; | ||
715 | set_bit(IOAT_COMPLETION_PENDING, &chan->state); | ||
716 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
717 | spin_unlock_bh(&ioat->desc_lock); | ||
718 | } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { | ||
719 | unsigned long phys_complete; | ||
720 | |||
721 | spin_lock_bh(&ioat->desc_lock); | ||
722 | /* if we haven't made progress and we have already | ||
723 | * acknowledged a pending completion once, then be more | ||
724 | * forceful with a restart | ||
725 | */ | ||
726 | if (ioat_cleanup_preamble(chan, &phys_complete)) | ||
727 | __cleanup(ioat, phys_complete); | ||
728 | else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) | ||
729 | ioat1_reset_channel(ioat); | ||
730 | else { | ||
731 | u64 status = ioat_chansts(chan); | ||
732 | |||
733 | /* manually update the last completion address */ | ||
734 | if (ioat_chansts_to_addr(status) != 0) | ||
735 | *chan->completion = status; | ||
736 | |||
737 | set_bit(IOAT_COMPLETION_ACK, &chan->state); | ||
738 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
739 | } | ||
740 | spin_unlock_bh(&ioat->desc_lock); | ||
741 | } | ||
810 | spin_unlock_bh(&chan->cleanup_lock); | 742 | spin_unlock_bh(&chan->cleanup_lock); |
811 | } | 743 | } |
812 | 744 | ||
@@ -855,13 +787,8 @@ static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
855 | list_add_tail(&desc->node, &ioat->used_desc); | 787 | list_add_tail(&desc->node, &ioat->used_desc); |
856 | dump_desc_dbg(ioat, desc); | 788 | dump_desc_dbg(ioat, desc); |
857 | 789 | ||
858 | writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, | 790 | ioat_set_chainaddr(ioat, desc->txd.phys); |
859 | chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); | 791 | ioat_start(chan); |
860 | writel(((u64) desc->txd.phys) >> 32, | ||
861 | chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); | ||
862 | |||
863 | writeb(IOAT_CHANCMD_START, chan->reg_base | ||
864 | + IOAT_CHANCMD_OFFSET(chan->device->version)); | ||
865 | spin_unlock_bh(&ioat->desc_lock); | 792 | spin_unlock_bh(&ioat->desc_lock); |
866 | } | 793 | } |
867 | 794 | ||
@@ -1194,9 +1121,6 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
1194 | if (dca) | 1121 | if (dca) |
1195 | device->dca = ioat_dca_init(pdev, device->reg_base); | 1122 | device->dca = ioat_dca_init(pdev, device->reg_base); |
1196 | 1123 | ||
1197 | INIT_DELAYED_WORK(&device->work, ioat1_chan_watchdog); | ||
1198 | schedule_delayed_work(&device->work, WATCHDOG_DELAY); | ||
1199 | |||
1200 | return err; | 1124 | return err; |
1201 | } | 1125 | } |
1202 | 1126 | ||
@@ -1204,9 +1128,6 @@ void __devexit ioat_dma_remove(struct ioatdma_device *device)
1204 | { | 1128 | { |
1205 | struct dma_device *dma = &device->common; | 1129 | struct dma_device *dma = &device->common; |
1206 | 1130 | ||
1207 | if (device->version != IOAT_VER_3_0) | ||
1208 | cancel_delayed_work(&device->work); | ||
1209 | |||
1210 | ioat_disable_interrupts(device); | 1131 | ioat_disable_interrupts(device); |
1211 | 1132 | ||
1212 | dma_async_device_unregister(dma); | 1133 | dma_async_device_unregister(dma); |
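The dma.c hunks above drop the global watchdog workqueue in favor of a per-channel timer: init_timer()/mod_timer() arm a completion timeout when the first descriptor is submitted, ioat1_timer_event() services stalls and resets, and del_timer_sync() tears the timer down when channel resources are freed. The following is a minimal, self-contained sketch of that lifecycle, not code from this patch; everything except the kernel timer and bitops calls is an illustrative stand-in.

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>

#define EXAMPLE_COMPLETION_TIMEOUT msecs_to_jiffies(100)

struct example_chan {
	struct timer_list timer;
	unsigned long state;
#define EXAMPLE_COMPLETION_PENDING 0
};

/* timer handler: the timer API of this kernel generation passes an unsigned long cookie */
static void example_timer_event(unsigned long data)
{
	struct example_chan *chan = (struct example_chan *)data;

	/* re-arm while descriptors are still outstanding */
	if (test_bit(EXAMPLE_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + EXAMPLE_COMPLETION_TIMEOUT);
}

static void example_chan_init(struct example_chan *chan)
{
	chan->state = 0;
	init_timer(&chan->timer);
	chan->timer.function = example_timer_event;
	chan->timer.data = (unsigned long)chan;
}

/* only the first submission after an idle period arms the timeout */
static void example_submit(struct example_chan *chan)
{
	if (!test_and_set_bit(EXAMPLE_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + EXAMPLE_COMPLETION_TIMEOUT);
}

static void example_chan_teardown(struct example_chan *chan)
{
	clear_bit(EXAMPLE_COMPLETION_PENDING, &chan->state);
	del_timer_sync(&chan->timer);
}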
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index ec851cf5345c..dbfccac3e80c 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -23,6 +23,7 @@
23 | 23 | ||
24 | #include <linux/dmaengine.h> | 24 | #include <linux/dmaengine.h> |
25 | #include "hw.h" | 25 | #include "hw.h" |
26 | #include "registers.h" | ||
26 | #include <linux/init.h> | 27 | #include <linux/init.h> |
27 | #include <linux/dmapool.h> | 28 | #include <linux/dmapool.h> |
28 | #include <linux/cache.h> | 29 | #include <linux/cache.h> |
@@ -33,7 +34,6 @@
33 | 34 | ||
34 | #define IOAT_LOW_COMPLETION_MASK 0xffffffc0 | 35 | #define IOAT_LOW_COMPLETION_MASK 0xffffffc0 |
35 | #define IOAT_DMA_DCA_ANY_CPU ~0 | 36 | #define IOAT_DMA_DCA_ANY_CPU ~0 |
36 | #define IOAT_WATCHDOG_PERIOD (2 * HZ) | ||
37 | 37 | ||
38 | #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) | 38 | #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) |
39 | #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) | 39 | #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) |
@@ -42,9 +42,6 @@
42 | 42 | ||
43 | #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) | 43 | #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) |
44 | 44 | ||
45 | #define RESET_DELAY msecs_to_jiffies(100) | ||
46 | #define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000)) | ||
47 | |||
48 | /* | 45 | /* |
49 | * workaround for IOAT ver.3.0 null descriptor issue | 46 | * workaround for IOAT ver.3.0 null descriptor issue |
50 | * (channel returns error when size is 0) | 47 | * (channel returns error when size is 0) |
@@ -72,7 +69,6 @@ struct ioatdma_device {
72 | struct pci_pool *completion_pool; | 69 | struct pci_pool *completion_pool; |
73 | struct dma_device common; | 70 | struct dma_device common; |
74 | u8 version; | 71 | u8 version; |
75 | struct delayed_work work; | ||
76 | struct msix_entry msix_entries[4]; | 72 | struct msix_entry msix_entries[4]; |
77 | struct ioat_chan_common *idx[4]; | 73 | struct ioat_chan_common *idx[4]; |
78 | struct dca_provider *dca; | 74 | struct dca_provider *dca; |
@@ -81,24 +77,21 @@ struct ioatdma_device {
81 | }; | 77 | }; |
82 | 78 | ||
83 | struct ioat_chan_common { | 79 | struct ioat_chan_common { |
80 | struct dma_chan common; | ||
84 | void __iomem *reg_base; | 81 | void __iomem *reg_base; |
85 | |||
86 | unsigned long last_completion; | 82 | unsigned long last_completion; |
87 | unsigned long last_completion_time; | ||
88 | |||
89 | spinlock_t cleanup_lock; | 83 | spinlock_t cleanup_lock; |
90 | dma_cookie_t completed_cookie; | 84 | dma_cookie_t completed_cookie; |
91 | unsigned long watchdog_completion; | 85 | unsigned long state; |
92 | int watchdog_tcp_cookie; | 86 | #define IOAT_COMPLETION_PENDING 0 |
93 | u32 watchdog_last_tcp_cookie; | 87 | #define IOAT_COMPLETION_ACK 1 |
94 | struct delayed_work work; | 88 | #define IOAT_RESET_PENDING 2 |
95 | 89 | struct timer_list timer; | |
90 | #define COMPLETION_TIMEOUT msecs_to_jiffies(100) | ||
91 | #define RESET_DELAY msecs_to_jiffies(100) | ||
96 | struct ioatdma_device *device; | 92 | struct ioatdma_device *device; |
97 | struct dma_chan common; | ||
98 | |||
99 | dma_addr_t completion_dma; | 93 | dma_addr_t completion_dma; |
100 | u64 *completion; | 94 | u64 *completion; |
101 | unsigned long last_compl_desc_addr_hw; | ||
102 | struct tasklet_struct cleanup_task; | 95 | struct tasklet_struct cleanup_task; |
103 | }; | 96 | }; |
104 | 97 | ||
@@ -148,7 +141,6 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
148 | 141 | ||
149 | last_used = c->cookie; | 142 | last_used = c->cookie; |
150 | last_complete = chan->completed_cookie; | 143 | last_complete = chan->completed_cookie; |
151 | chan->watchdog_tcp_cookie = cookie; | ||
152 | 144 | ||
153 | if (done) | 145 | if (done) |
154 | *done = last_complete; | 146 | *done = last_complete; |
@@ -215,6 +207,85 @@ ioat_chan_by_index(struct ioatdma_device *device, int index)
215 | return device->idx[index]; | 207 | return device->idx[index]; |
216 | } | 208 | } |
217 | 209 | ||
210 | static inline u64 ioat_chansts(struct ioat_chan_common *chan) | ||
211 | { | ||
212 | u8 ver = chan->device->version; | ||
213 | u64 status; | ||
214 | u32 status_lo; | ||
215 | |||
216 | /* We need to read the low address first as this causes the | ||
217 | * chipset to latch the upper bits for the subsequent read | ||
218 | */ | ||
219 | status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver)); | ||
220 | status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver)); | ||
221 | status <<= 32; | ||
222 | status |= status_lo; | ||
223 | |||
224 | return status; | ||
225 | } | ||
226 | |||
227 | static inline void ioat_start(struct ioat_chan_common *chan) | ||
228 | { | ||
229 | u8 ver = chan->device->version; | ||
230 | |||
231 | writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | ||
232 | } | ||
233 | |||
234 | static inline u64 ioat_chansts_to_addr(u64 status) | ||
235 | { | ||
236 | return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | ||
237 | } | ||
238 | |||
239 | static inline u32 ioat_chanerr(struct ioat_chan_common *chan) | ||
240 | { | ||
241 | return readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
242 | } | ||
243 | |||
244 | static inline void ioat_suspend(struct ioat_chan_common *chan) | ||
245 | { | ||
246 | u8 ver = chan->device->version; | ||
247 | |||
248 | writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | ||
249 | } | ||
250 | |||
251 | static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr) | ||
252 | { | ||
253 | struct ioat_chan_common *chan = &ioat->base; | ||
254 | |||
255 | writel(addr & 0x00000000FFFFFFFF, | ||
256 | chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); | ||
257 | writel(addr >> 32, | ||
258 | chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); | ||
259 | } | ||
260 | |||
261 | static inline bool is_ioat_active(unsigned long status) | ||
262 | { | ||
263 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE); | ||
264 | } | ||
265 | |||
266 | static inline bool is_ioat_idle(unsigned long status) | ||
267 | { | ||
268 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE); | ||
269 | } | ||
270 | |||
271 | static inline bool is_ioat_halted(unsigned long status) | ||
272 | { | ||
273 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED); | ||
274 | } | ||
275 | |||
276 | static inline bool is_ioat_suspended(unsigned long status) | ||
277 | { | ||
278 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED); | ||
279 | } | ||
280 | |||
281 | /* channel was fatally programmed */ | ||
282 | static inline bool is_ioat_bug(unsigned long err) | ||
283 | { | ||
284 | return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR| | ||
285 | IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR| | ||
286 | IOAT_CHANERR_LENGTH_ERR)); | ||
287 | } | ||
288 | |||
218 | int __devinit ioat_probe(struct ioatdma_device *device); | 289 | int __devinit ioat_probe(struct ioatdma_device *device); |
219 | int __devinit ioat_register(struct ioatdma_device *device); | 290 | int __devinit ioat_register(struct ioatdma_device *device); |
220 | int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca); | 291 | int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca); |
@@ -224,8 +295,11 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
224 | unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); | 295 | unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); |
225 | void ioat_init_channel(struct ioatdma_device *device, | 296 | void ioat_init_channel(struct ioatdma_device *device, |
226 | struct ioat_chan_common *chan, int idx, | 297 | struct ioat_chan_common *chan, int idx, |
227 | work_func_t work_fn, void (*tasklet)(unsigned long), | 298 | void (*timer_fn)(unsigned long), |
228 | unsigned long tasklet_data); | 299 | void (*tasklet)(unsigned long), |
300 | unsigned long ioat); | ||
229 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, | 301 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, |
230 | size_t len, struct ioat_dma_descriptor *hw); | 302 | size_t len, struct ioat_dma_descriptor *hw); |
303 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, | ||
304 | unsigned long *phys_complete); | ||
231 | #endif /* IOATDMA_H */ | 305 | #endif /* IOATDMA_H */ |
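The inline helpers added to dma.h above wrap the CHANSTS/CHANERR register access that dma.c and dma_v2.c previously open-coded. A short usage sketch follows, assuming it is compiled in a file that includes this dma.h; the function name and the polling context are assumptions, only the helpers themselves come from the header.

static bool example_channel_made_progress(struct ioat_chan_common *chan)
{
	u64 status = ioat_chansts(chan);	/* reads the low dword first so the high dword is latched */

	if (is_ioat_halted(status)) {
		u32 chanerr = ioat_chanerr(chan);

		/* is_ioat_bug() flags fatal descriptor-programming errors */
		dev_err(to_dev(chan), "halted, chanerr = %#x%s\n",
			chanerr, is_ioat_bug(chanerr) ? " (fatal)" : "");
		return false;
	}

	/* bits 6..63 of CHANSTS hold the last completed descriptor address */
	return ioat_chansts_to_addr(status) != chan->last_completion;
}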
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 1aa2974e7a93..72e59a0d0f2e 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -49,7 +49,7 @@ static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
49 | void * __iomem reg_base = ioat->base.reg_base; | 49 | void * __iomem reg_base = ioat->base.reg_base; |
50 | 50 | ||
51 | ioat->pending = 0; | 51 | ioat->pending = 0; |
52 | ioat->dmacount += ioat2_ring_pending(ioat); | 52 | ioat->dmacount += ioat2_ring_pending(ioat);; |
53 | ioat->issued = ioat->head; | 53 | ioat->issued = ioat->head; |
54 | /* make descriptor updates globally visible before notifying channel */ | 54 | /* make descriptor updates globally visible before notifying channel */ |
55 | wmb(); | 55 | wmb(); |
@@ -92,7 +92,6 @@ static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
92 | 92 | ||
93 | static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) | 93 | static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) |
94 | { | 94 | { |
95 | void __iomem *reg_base = ioat->base.reg_base; | ||
96 | struct ioat_ring_ent *desc; | 95 | struct ioat_ring_ent *desc; |
97 | struct ioat_dma_descriptor *hw; | 96 | struct ioat_dma_descriptor *hw; |
98 | int idx; | 97 | int idx; |
@@ -118,10 +117,7 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
118 | hw->src_addr = 0; | 117 | hw->src_addr = 0; |
119 | hw->dst_addr = 0; | 118 | hw->dst_addr = 0; |
120 | async_tx_ack(&desc->txd); | 119 | async_tx_ack(&desc->txd); |
121 | writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, | 120 | ioat2_set_chainaddr(ioat, desc->txd.phys); |
122 | reg_base + IOAT2_CHAINADDR_OFFSET_LOW); | ||
123 | writel(((u64) desc->txd.phys) >> 32, | ||
124 | reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | ||
125 | dump_desc_dbg(ioat, desc); | 121 | dump_desc_dbg(ioat, desc); |
126 | __ioat2_issue_pending(ioat); | 122 | __ioat2_issue_pending(ioat); |
127 | } | 123 | } |
@@ -133,177 +129,14 @@ static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
133 | spin_unlock_bh(&ioat->ring_lock); | 129 | spin_unlock_bh(&ioat->ring_lock); |
134 | } | 130 | } |
135 | 131 | ||
136 | static void ioat2_cleanup(struct ioat2_dma_chan *ioat); | 132 | static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) |
137 | |||
138 | /** | ||
139 | * ioat2_reset_part2 - reinit the channel after a reset | ||
140 | */ | ||
141 | static void ioat2_reset_part2(struct work_struct *work) | ||
142 | { | ||
143 | struct ioat_chan_common *chan; | ||
144 | struct ioat2_dma_chan *ioat; | ||
145 | |||
146 | chan = container_of(work, struct ioat_chan_common, work.work); | ||
147 | ioat = container_of(chan, struct ioat2_dma_chan, base); | ||
148 | |||
149 | /* ensure that ->tail points to the stalled descriptor | ||
150 | * (ioat->pending is set to 2 at this point so no new | ||
151 | * descriptors will be issued while we perform this cleanup) | ||
152 | */ | ||
153 | ioat2_cleanup(ioat); | ||
154 | |||
155 | spin_lock_bh(&chan->cleanup_lock); | ||
156 | spin_lock_bh(&ioat->ring_lock); | ||
157 | |||
158 | /* set the tail to be re-issued */ | ||
159 | ioat->issued = ioat->tail; | ||
160 | ioat->dmacount = 0; | ||
161 | |||
162 | dev_dbg(to_dev(&ioat->base), | ||
163 | "%s: head: %#x tail: %#x issued: %#x count: %#x\n", | ||
164 | __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); | ||
165 | |||
166 | if (ioat2_ring_pending(ioat)) { | ||
167 | struct ioat_ring_ent *desc; | ||
168 | |||
169 | desc = ioat2_get_ring_ent(ioat, ioat->tail); | ||
170 | writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, | ||
171 | chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); | ||
172 | writel(((u64) desc->txd.phys) >> 32, | ||
173 | chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | ||
174 | __ioat2_issue_pending(ioat); | ||
175 | } else | ||
176 | __ioat2_start_null_desc(ioat); | ||
177 | |||
178 | spin_unlock_bh(&ioat->ring_lock); | ||
179 | spin_unlock_bh(&chan->cleanup_lock); | ||
180 | |||
181 | dev_info(to_dev(chan), | ||
182 | "chan%d reset - %d descs waiting, %d total desc\n", | ||
183 | chan_num(chan), ioat->dmacount, 1 << ioat->alloc_order); | ||
184 | } | ||
185 | |||
186 | /** | ||
187 | * ioat2_reset_channel - restart a channel | ||
188 | * @ioat: IOAT DMA channel handle | ||
189 | */ | ||
190 | static void ioat2_reset_channel(struct ioat2_dma_chan *ioat) | ||
191 | { | 133 | { |
192 | u32 chansts, chanerr; | ||
193 | struct ioat_chan_common *chan = &ioat->base; | 134 | struct ioat_chan_common *chan = &ioat->base; |
194 | u16 active; | 135 | struct dma_async_tx_descriptor *tx; |
195 | |||
196 | spin_lock_bh(&ioat->ring_lock); | ||
197 | active = ioat2_ring_active(ioat); | ||
198 | spin_unlock_bh(&ioat->ring_lock); | ||
199 | if (!active) | ||
200 | return; | ||
201 | |||
202 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
203 | chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS; | ||
204 | if (chanerr) { | ||
205 | dev_err(to_dev(chan), | ||
206 | "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", | ||
207 | chan_num(chan), chansts, chanerr); | ||
208 | writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | ||
209 | } | ||
210 | |||
211 | spin_lock_bh(&ioat->ring_lock); | ||
212 | ioat->pending = 2; | ||
213 | writeb(IOAT_CHANCMD_RESET, | ||
214 | chan->reg_base | ||
215 | + IOAT_CHANCMD_OFFSET(chan->device->version)); | ||
216 | spin_unlock_bh(&ioat->ring_lock); | ||
217 | schedule_delayed_work(&chan->work, RESET_DELAY); | ||
218 | } | ||
219 | |||
220 | /** | ||
221 | * ioat2_chan_watchdog - watch for stuck channels | ||
222 | */ | ||
223 | static void ioat2_chan_watchdog(struct work_struct *work) | ||
224 | { | ||
225 | struct ioatdma_device *device = | ||
226 | container_of(work, struct ioatdma_device, work.work); | ||
227 | struct ioat2_dma_chan *ioat; | ||
228 | struct ioat_chan_common *chan; | ||
229 | u16 active; | ||
230 | int i; | ||
231 | |||
232 | dev_dbg(&device->pdev->dev, "%s\n", __func__); | ||
233 | |||
234 | for (i = 0; i < device->common.chancnt; i++) { | ||
235 | chan = ioat_chan_by_index(device, i); | ||
236 | ioat = container_of(chan, struct ioat2_dma_chan, base); | ||
237 | |||
238 | /* | ||
239 | * for version 2.0 if there are descriptors yet to be processed | ||
240 | * and the last completed hasn't changed since the last watchdog | ||
241 | * if they haven't hit the pending level | ||
242 | * issue the pending to push them through | ||
243 | * else | ||
244 | * try resetting the channel | ||
245 | */ | ||
246 | spin_lock_bh(&ioat->ring_lock); | ||
247 | active = ioat2_ring_active(ioat); | ||
248 | spin_unlock_bh(&ioat->ring_lock); | ||
249 | |||
250 | if (active && | ||
251 | chan->last_completion && | ||
252 | chan->last_completion == chan->watchdog_completion) { | ||
253 | |||
254 | if (ioat->pending == 1) | ||
255 | ioat2_issue_pending(&chan->common); | ||
256 | else { | ||
257 | ioat2_reset_channel(ioat); | ||
258 | chan->watchdog_completion = 0; | ||
259 | } | ||
260 | } else { | ||
261 | chan->last_compl_desc_addr_hw = 0; | ||
262 | chan->watchdog_completion = chan->last_completion; | ||
263 | } | ||
264 | chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie; | ||
265 | } | ||
266 | schedule_delayed_work(&device->work, WATCHDOG_DELAY); | ||
267 | } | ||
268 | |||
269 | /** | ||
270 | * ioat2_cleanup - clean finished descriptors (advance tail pointer) | ||
271 | * @chan: ioat channel to be cleaned up | ||
272 | */ | ||
273 | static void ioat2_cleanup(struct ioat2_dma_chan *ioat) | ||
274 | { | ||
275 | struct ioat_chan_common *chan = &ioat->base; | ||
276 | unsigned long phys_complete; | ||
277 | struct ioat_ring_ent *desc; | 136 | struct ioat_ring_ent *desc; |
278 | bool seen_current = false; | 137 | bool seen_current = false; |
279 | u16 active; | 138 | u16 active; |
280 | int i; | 139 | int i; |
281 | struct dma_async_tx_descriptor *tx; | ||
282 | |||
283 | prefetch(chan->completion); | ||
284 | |||
285 | spin_lock_bh(&chan->cleanup_lock); | ||
286 | phys_complete = ioat_get_current_completion(chan); | ||
287 | if (phys_complete == chan->last_completion) { | ||
288 | spin_unlock_bh(&chan->cleanup_lock); | ||
289 | /* | ||
290 | * perhaps we're stuck so hard that the watchdog can't go off? | ||
291 | * try to catch it after WATCHDOG_DELAY seconds | ||
292 | */ | ||
293 | if (chan->device->version < IOAT_VER_3_0) { | ||
294 | unsigned long tmo; | ||
295 | |||
296 | tmo = chan->last_completion_time + HZ*WATCHDOG_DELAY; | ||
297 | if (time_after(jiffies, tmo)) { | ||
298 | ioat2_chan_watchdog(&(chan->device->work.work)); | ||
299 | chan->last_completion_time = jiffies; | ||
300 | } | ||
301 | } | ||
302 | return; | ||
303 | } | ||
304 | chan->last_completion_time = jiffies; | ||
305 | |||
306 | spin_lock_bh(&ioat->ring_lock); | ||
307 | 140 | ||
308 | dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", | 141 | dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", |
309 | __func__, ioat->head, ioat->tail, ioat->issued); | 142 | __func__, ioat->head, ioat->tail, ioat->issued); |
@@ -329,10 +162,42 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
329 | } | 162 | } |
330 | ioat->tail += i; | 163 | ioat->tail += i; |
331 | BUG_ON(!seen_current); /* no active descs have written a completion? */ | 164 | BUG_ON(!seen_current); /* no active descs have written a completion? */ |
332 | spin_unlock_bh(&ioat->ring_lock); | ||
333 | 165 | ||
334 | chan->last_completion = phys_complete; | 166 | chan->last_completion = phys_complete; |
167 | if (ioat->head == ioat->tail) { | ||
168 | dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", | ||
169 | __func__); | ||
170 | clear_bit(IOAT_COMPLETION_PENDING, &chan->state); | ||
171 | } | ||
172 | } | ||
173 | |||
174 | /** | ||
175 | * ioat2_cleanup - clean finished descriptors (advance tail pointer) | ||
176 | * @chan: ioat channel to be cleaned up | ||
177 | */ | ||
178 | static void ioat2_cleanup(struct ioat2_dma_chan *ioat) | ||
179 | { | ||
180 | struct ioat_chan_common *chan = &ioat->base; | ||
181 | unsigned long phys_complete; | ||
335 | 182 | ||
183 | prefetch(chan->completion); | ||
184 | |||
185 | if (!spin_trylock_bh(&chan->cleanup_lock)) | ||
186 | return; | ||
187 | |||
188 | if (!ioat_cleanup_preamble(chan, &phys_complete)) { | ||
189 | spin_unlock_bh(&chan->cleanup_lock); | ||
190 | return; | ||
191 | } | ||
192 | |||
193 | if (!spin_trylock_bh(&ioat->ring_lock)) { | ||
194 | spin_unlock_bh(&chan->cleanup_lock); | ||
195 | return; | ||
196 | } | ||
197 | |||
198 | __cleanup(ioat, phys_complete); | ||
199 | |||
200 | spin_unlock_bh(&ioat->ring_lock); | ||
336 | spin_unlock_bh(&chan->cleanup_lock); | 201 | spin_unlock_bh(&chan->cleanup_lock); |
337 | } | 202 | } |
338 | 203 | ||
@@ -344,6 +209,90 @@ static void ioat2_cleanup_tasklet(unsigned long data)
344 | writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); | 209 | writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); |
345 | } | 210 | } |
346 | 211 | ||
212 | static void __restart_chan(struct ioat2_dma_chan *ioat) | ||
213 | { | ||
214 | struct ioat_chan_common *chan = &ioat->base; | ||
215 | |||
216 | /* set the tail to be re-issued */ | ||
217 | ioat->issued = ioat->tail; | ||
218 | ioat->dmacount = 0; | ||
219 | set_bit(IOAT_COMPLETION_PENDING, &chan->state); | ||
220 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
221 | |||
222 | dev_dbg(to_dev(chan), | ||
223 | "%s: head: %#x tail: %#x issued: %#x count: %#x\n", | ||
224 | __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); | ||
225 | |||
226 | if (ioat2_ring_pending(ioat)) { | ||
227 | struct ioat_ring_ent *desc; | ||
228 | |||
229 | desc = ioat2_get_ring_ent(ioat, ioat->tail); | ||
230 | ioat2_set_chainaddr(ioat, desc->txd.phys); | ||
231 | __ioat2_issue_pending(ioat); | ||
232 | } else | ||
233 | __ioat2_start_null_desc(ioat); | ||
234 | } | ||
235 | |||
236 | static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) | ||
237 | { | ||
238 | struct ioat_chan_common *chan = &ioat->base; | ||
239 | unsigned long phys_complete; | ||
240 | u32 status; | ||
241 | |||
242 | status = ioat_chansts(chan); | ||
243 | if (is_ioat_active(status) || is_ioat_idle(status)) | ||
244 | ioat_suspend(chan); | ||
245 | while (is_ioat_active(status) || is_ioat_idle(status)) { | ||
246 | status = ioat_chansts(chan); | ||
247 | cpu_relax(); | ||
248 | } | ||
249 | |||
250 | if (ioat_cleanup_preamble(chan, &phys_complete)) | ||
251 | __cleanup(ioat, phys_complete); | ||
252 | |||
253 | __restart_chan(ioat); | ||
254 | } | ||
255 | |||
256 | static void ioat2_timer_event(unsigned long data) | ||
257 | { | ||
258 | struct ioat2_dma_chan *ioat = (void *) data; | ||
259 | struct ioat_chan_common *chan = &ioat->base; | ||
260 | |||
261 | spin_lock_bh(&chan->cleanup_lock); | ||
262 | if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { | ||
263 | unsigned long phys_complete; | ||
264 | u64 status; | ||
265 | |||
266 | spin_lock_bh(&ioat->ring_lock); | ||
267 | status = ioat_chansts(chan); | ||
268 | |||
269 | /* when halted due to errors check for channel | ||
270 | * programming errors before advancing the completion state | ||
271 | */ | ||
272 | if (is_ioat_halted(status)) { | ||
273 | u32 chanerr; | ||
274 | |||
275 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
276 | BUG_ON(is_ioat_bug(chanerr)); | ||
277 | } | ||
278 | |||
279 | /* if we haven't made progress and we have already | ||
280 | * acknowledged a pending completion once, then be more | ||
281 | * forceful with a restart | ||
282 | */ | ||
283 | if (ioat_cleanup_preamble(chan, &phys_complete)) | ||
284 | __cleanup(ioat, phys_complete); | ||
285 | else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) | ||
286 | ioat2_restart_channel(ioat); | ||
287 | else { | ||
288 | set_bit(IOAT_COMPLETION_ACK, &chan->state); | ||
289 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
290 | } | ||
291 | spin_unlock_bh(&ioat->ring_lock); | ||
292 | } | ||
293 | spin_unlock_bh(&chan->cleanup_lock); | ||
294 | } | ||
295 | |||
347 | /** | 296 | /** |
348 | * ioat2_enumerate_channels - find and initialize the device's channels | 297 | * ioat2_enumerate_channels - find and initialize the device's channels |
349 | * @device: the device to be enumerated | 298 | * @device: the device to be enumerated |
@@ -381,7 +330,7 @@ static int ioat2_enumerate_channels(struct ioatdma_device *device)
381 | break; | 330 | break; |
382 | 331 | ||
383 | ioat_init_channel(device, &ioat->base, i, | 332 | ioat_init_channel(device, &ioat->base, i, |
384 | ioat2_reset_part2, | 333 | ioat2_timer_event, |
385 | ioat2_cleanup_tasklet, | 334 | ioat2_cleanup_tasklet, |
386 | (unsigned long) ioat); | 335 | (unsigned long) ioat); |
387 | ioat->xfercap_log = xfercap_log; | 336 | ioat->xfercap_log = xfercap_log; |
@@ -395,6 +344,7 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
395 | { | 344 | { |
396 | struct dma_chan *c = tx->chan; | 345 | struct dma_chan *c = tx->chan; |
397 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | 346 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); |
347 | struct ioat_chan_common *chan = &ioat->base; | ||
398 | dma_cookie_t cookie = c->cookie; | 348 | dma_cookie_t cookie = c->cookie; |
399 | 349 | ||
400 | cookie++; | 350 | cookie++; |
@@ -404,6 +354,8 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
404 | c->cookie = cookie; | 354 | c->cookie = cookie; |
405 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); | 355 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); |
406 | 356 | ||
357 | if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) | ||
358 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
407 | ioat2_update_pending(ioat); | 359 | ioat2_update_pending(ioat); |
408 | spin_unlock_bh(&ioat->ring_lock); | 360 | spin_unlock_bh(&ioat->ring_lock); |
409 | 361 | ||
@@ -543,9 +495,18 @@ static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_d
543 | ioat->issued); | 495 | ioat->issued); |
544 | spin_unlock_bh(&ioat->ring_lock); | 496 | spin_unlock_bh(&ioat->ring_lock); |
545 | 497 | ||
546 | /* do direct reclaim in the allocation failure case */ | 498 | /* progress reclaim in the allocation failure case we |
547 | ioat2_cleanup(ioat); | 499 | * may be called under bh_disabled so we need to trigger |
548 | 500 | * the timer event directly | |
501 | */ | ||
502 | spin_lock_bh(&chan->cleanup_lock); | ||
503 | if (jiffies > chan->timer.expires && | ||
504 | timer_pending(&chan->timer)) { | ||
505 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
506 | spin_unlock_bh(&chan->cleanup_lock); | ||
507 | ioat2_timer_event((unsigned long) ioat); | ||
508 | } else | ||
509 | spin_unlock_bh(&chan->cleanup_lock); | ||
549 | return -ENOMEM; | 510 | return -ENOMEM; |
550 | } | 511 | } |
551 | 512 | ||
@@ -624,6 +585,7 @@ static void ioat2_free_chan_resources(struct dma_chan *c)
624 | return; | 585 | return; |
625 | 586 | ||
626 | tasklet_disable(&chan->cleanup_task); | 587 | tasklet_disable(&chan->cleanup_task); |
588 | del_timer_sync(&chan->timer); | ||
627 | ioat2_cleanup(ioat); | 589 | ioat2_cleanup(ioat); |
628 | 590 | ||
629 | /* Delay 100ms after reset to allow internal DMA logic to quiesce | 591 | /* Delay 100ms after reset to allow internal DMA logic to quiesce |
@@ -663,10 +625,6 @@ static void ioat2_free_chan_resources(struct dma_chan *c)
663 | chan->completion_dma = 0; | 625 | chan->completion_dma = 0; |
664 | ioat->pending = 0; | 626 | ioat->pending = 0; |
665 | ioat->dmacount = 0; | 627 | ioat->dmacount = 0; |
666 | chan->watchdog_completion = 0; | ||
667 | chan->last_compl_desc_addr_hw = 0; | ||
668 | chan->watchdog_tcp_cookie = 0; | ||
669 | chan->watchdog_last_tcp_cookie = 0; | ||
670 | } | 628 | } |
671 | 629 | ||
672 | static enum dma_status | 630 | static enum dma_status |
@@ -716,9 +674,6 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
716 | if (dca) | 674 | if (dca) |
717 | device->dca = ioat2_dca_init(pdev, device->reg_base); | 675 | device->dca = ioat2_dca_init(pdev, device->reg_base); |
718 | 676 | ||
719 | INIT_DELAYED_WORK(&device->work, ioat2_chan_watchdog); | ||
720 | schedule_delayed_work(&device->work, WATCHDOG_DELAY); | ||
721 | |||
722 | return err; | 677 | return err; |
723 | } | 678 | } |
724 | 679 | ||
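Both ioat1_timer_event() and ioat2_timer_event() above apply the same two-strike policy: a timeout that finds progress simply reclaims descriptors and re-arms, a first timeout without progress records the stall via IOAT_COMPLETION_ACK and waits one more period, and a second consecutive stall escalates to a channel restart. The condensed sketch below restates that control flow with stand-in helpers; only the escalation order and the timer/bitops calls mirror the patch, all other names are hypothetical.

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>

#define DEMO_TIMEOUT msecs_to_jiffies(100)

enum { DEMO_PENDING, DEMO_ACK };

struct demo_chan {
	struct timer_list timer;
	unsigned long state;
	unsigned long last_completion;
};

/* stand-in: the real code reads the CHANSTS completion address here */
static unsigned long demo_current_completion(struct demo_chan *c)
{
	return c->last_completion;
}

/* stand-ins for __cleanup() and the reset/restart paths */
static void demo_reclaim(struct demo_chan *c, unsigned long phys)
{
	c->last_completion = phys;
}

static void demo_restart(struct demo_chan *c)
{
	/* reset the engine and re-program the chain address from the tail */
}

static void demo_timer_event(unsigned long data)
{
	struct demo_chan *c = (struct demo_chan *)data;
	unsigned long phys;

	if (!test_bit(DEMO_PENDING, &c->state))
		return;				/* nothing outstanding */

	phys = demo_current_completion(c);
	if (phys != c->last_completion) {
		/* progress: reclaim, forgive the earlier stall, re-arm */
		clear_bit(DEMO_ACK, &c->state);
		demo_reclaim(c, phys);
		mod_timer(&c->timer, jiffies + DEMO_TIMEOUT);
	} else if (test_and_set_bit(DEMO_ACK, &c->state)) {
		/* second consecutive stall: escalate to a restart */
		demo_restart(c);
	} else {
		/* first stall: DEMO_ACK is now set, allow one more period */
		mod_timer(&c->timer, jiffies + DEMO_TIMEOUT);
	}
}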
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index bdde5373cf66..73b04a2eb4b0 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -127,6 +127,16 @@ ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
127 | return ioat->ring[idx & ioat2_ring_mask(ioat)]; | 127 | return ioat->ring[idx & ioat2_ring_mask(ioat)]; |
128 | } | 128 | } |
129 | 129 | ||
130 | static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) | ||
131 | { | ||
132 | struct ioat_chan_common *chan = &ioat->base; | ||
133 | |||
134 | writel(addr & 0x00000000FFFFFFFF, | ||
135 | chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); | ||
136 | writel(addr >> 32, | ||
137 | chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | ||
138 | } | ||
139 | |||
130 | int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca); | 140 | int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca); |
131 | int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); | 141 | int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); |
132 | struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); | 142 | struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); |
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 4380f6fbf056..e4334a195380 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -101,11 +101,11 @@
101 | #define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL) | 101 | #define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL) |
102 | #define IOAT_CHANSTS_SOFT_ERR 0x10ULL | 102 | #define IOAT_CHANSTS_SOFT_ERR 0x10ULL |
103 | #define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL | 103 | #define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL |
104 | #define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x7ULL | 104 | #define IOAT_CHANSTS_STATUS 0x7ULL |
105 | #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0 | 105 | #define IOAT_CHANSTS_ACTIVE 0x0 |
106 | #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1 | 106 | #define IOAT_CHANSTS_DONE 0x1 |
107 | #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2 | 107 | #define IOAT_CHANSTS_SUSPENDED 0x2 |
108 | #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3 | 108 | #define IOAT_CHANSTS_HALTED 0x3 |
109 | 109 | ||
110 | 110 | ||
111 | 111 | ||
@@ -208,18 +208,18 @@
208 | #define IOAT_CDAR_OFFSET_HIGH 0x24 | 208 | #define IOAT_CDAR_OFFSET_HIGH 0x24 |
209 | 209 | ||
210 | #define IOAT_CHANERR_OFFSET 0x28 /* 32-bit Channel Error Register */ | 210 | #define IOAT_CHANERR_OFFSET 0x28 /* 32-bit Channel Error Register */ |
211 | #define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR 0x0001 | 211 | #define IOAT_CHANERR_SRC_ADDR_ERR 0x0001 |
212 | #define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR 0x0002 | 212 | #define IOAT_CHANERR_DEST_ADDR_ERR 0x0002 |
213 | #define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR 0x0004 | 213 | #define IOAT_CHANERR_NEXT_ADDR_ERR 0x0004 |
214 | #define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR 0x0008 | 214 | #define IOAT_CHANERR_NEXT_DESC_ALIGN_ERR 0x0008 |
215 | #define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR 0x0010 | 215 | #define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR 0x0010 |
216 | #define IOAT_CHANERR_CHANCMD_ERR 0x0020 | 216 | #define IOAT_CHANERR_CHANCMD_ERR 0x0020 |
217 | #define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0040 | 217 | #define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0040 |
218 | #define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0080 | 218 | #define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0080 |
219 | #define IOAT_CHANERR_READ_DATA_ERR 0x0100 | 219 | #define IOAT_CHANERR_READ_DATA_ERR 0x0100 |
220 | #define IOAT_CHANERR_WRITE_DATA_ERR 0x0200 | 220 | #define IOAT_CHANERR_WRITE_DATA_ERR 0x0200 |
221 | #define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR 0x0400 | 221 | #define IOAT_CHANERR_CONTROL_ERR 0x0400 |
222 | #define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR 0x0800 | 222 | #define IOAT_CHANERR_LENGTH_ERR 0x0800 |
223 | #define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000 | 223 | #define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000 |
224 | #define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000 | 224 | #define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000 |
225 | #define IOAT_CHANERR_SOFT_ERR 0x4000 | 225 | #define IOAT_CHANERR_SOFT_ERR 0x4000 |
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index f2ec7243549e..1f20a042a4f5 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -126,9 +126,9 @@ static void i7300_idle_ioat_stop(void)
126 | udelay(10); | 126 | udelay(10); |
127 | 127 | ||
128 | sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & | 128 | sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & |
129 | IOAT_CHANSTS_DMA_TRANSFER_STATUS; | 129 | IOAT_CHANSTS_STATUS; |
130 | 130 | ||
131 | if (sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) | 131 | if (sts != IOAT_CHANSTS_ACTIVE) |
132 | break; | 132 | break; |
133 | 133 | ||
134 | } | 134 | } |
@@ -160,9 +160,9 @@ static int __init i7300_idle_ioat_selftest(u8 *ctl,
160 | udelay(1000); | 160 | udelay(1000); |
161 | 161 | ||
162 | chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & | 162 | chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & |
163 | IOAT_CHANSTS_DMA_TRANSFER_STATUS; | 163 | IOAT_CHANSTS_STATUS; |
164 | 164 | ||
165 | if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE) { | 165 | if (chan_sts != IOAT_CHANSTS_DONE) { |
166 | /* Not complete, reset the channel */ | 166 | /* Not complete, reset the channel */ |
167 | writeb(IOAT_CHANCMD_RESET, | 167 | writeb(IOAT_CHANCMD_RESET, |
168 | ioat_chanbase + IOAT1_CHANCMD_OFFSET); | 168 | ioat_chanbase + IOAT1_CHANCMD_OFFSET); |
@@ -288,9 +288,9 @@ static void __exit i7300_idle_ioat_exit(void)
288 | ioat_chanbase + IOAT1_CHANCMD_OFFSET); | 288 | ioat_chanbase + IOAT1_CHANCMD_OFFSET); |
289 | 289 | ||
290 | chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & | 290 | chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & |
291 | IOAT_CHANSTS_DMA_TRANSFER_STATUS; | 291 | IOAT_CHANSTS_STATUS; |
292 | 292 | ||
293 | if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) { | 293 | if (chan_sts != IOAT_CHANSTS_ACTIVE) { |
294 | writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET); | 294 | writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET); |
295 | break; | 295 | break; |
296 | } | 296 | } |
@@ -298,14 +298,14 @@ static void __exit i7300_idle_ioat_exit(void)
298 | } | 298 | } |
299 | 299 | ||
300 | chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & | 300 | chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & |
301 | IOAT_CHANSTS_DMA_TRANSFER_STATUS; | 301 | IOAT_CHANSTS_STATUS; |
302 | 302 | ||
303 | /* | 303 | /* |
304 | * We tried to reset multiple times. If IO A/T channel is still active | 304 | * We tried to reset multiple times. If IO A/T channel is still active |
305 | * flag an error and return without cleanup. Memory leak is better | 305 | * flag an error and return without cleanup. Memory leak is better |
306 | * than random corruption in that extreme error situation. | 306 | * than random corruption in that extreme error situation. |
307 | */ | 307 | */ |
308 | if (chan_sts == IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) { | 308 | if (chan_sts == IOAT_CHANSTS_ACTIVE) { |
309 | printk(KERN_ERR I7300_PRINT "Unable to stop IO A/T channels." | 309 | printk(KERN_ERR I7300_PRINT "Unable to stop IO A/T channels." |
310 | " Not freeing resources\n"); | 310 | " Not freeing resources\n"); |
311 | return; | 311 | return; |