author	Maciej Sosnowski <maciej.sosnowski@intel.com>	2008-07-22 13:07:33 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2008-07-22 13:07:33 -0400
commit	09177e85d6a0bffac8b55afd28ed8b82bd873f0b (patch)
tree	ce3cb72509cc6c03bfc5d70a3ca0901a44b87fc0
parent	2a46fa13d788364c093c4296fe01cae837aa8919 (diff)
I/OAT: Add watchdog/reset functionality to ioatdma
Due to occasional DMA channel hangs observed on I/OAT versions 1.2 and 2.0, a watchdog has been introduced that checks every 2 seconds whether all channels are progressing normally. If a stuck channel is detected, the driver resets it. The reset is done in two parts: the first part issues the reset, and the second part, scheduled by the first, reinitializes the channel after the restart.

Signed-off-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
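[Editor's note] For context, a minimal self-contained sketch of the delayed-work pattern this change relies on: a per-device watchdog work item that re-arms itself every 2 seconds, and a per-channel two-part reset where part 1 schedules part 2 to run roughly 100 ms later. This is not part of the patch; the demo_* names, the single static device, and the module wrapper are hypothetical and only illustrate the workqueue API usage.

/*
 * Hypothetical demo module: shows INIT_DELAYED_WORK / schedule_delayed_work
 * usage analogous to the watchdog and two-part reset added by this patch.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_RESET_DELAY	msecs_to_jiffies(100)
#define DEMO_WATCHDOG_DELAY	round_jiffies(msecs_to_jiffies(2000))

struct demo_chan {
	struct delayed_work reset_work;		/* per-channel: runs reset part 2 */
};

struct demo_device {
	struct delayed_work watchdog_work;	/* per-device: periodic check */
	struct demo_chan chan;
};

static struct demo_device demo_dev;

static void demo_reset_part2(struct work_struct *work)
{
	struct demo_chan *chan =
		container_of(work, struct demo_chan, reset_work.work);

	/* reinitialize the channel after the hardware reset has settled */
	pr_info("demo: reset part 2 for chan %p\n", chan);
}

static void demo_reset_part1(struct demo_chan *chan)
{
	/* issue the hardware reset here, then defer reinitialization */
	schedule_delayed_work(&chan->reset_work, DEMO_RESET_DELAY);
}

static void demo_watchdog(struct work_struct *work)
{
	struct demo_device *dev =
		container_of(work, struct demo_device, watchdog_work.work);

	/* check channel progress; a stuck channel would be reset here */
	demo_reset_part1(&dev->chan);

	/* re-arm: the watchdog runs again roughly every 2 seconds */
	schedule_delayed_work(&dev->watchdog_work, DEMO_WATCHDOG_DELAY);
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo_dev.chan.reset_work, demo_reset_part2);
	INIT_DELAYED_WORK(&demo_dev.watchdog_work, demo_watchdog);
	schedule_delayed_work(&demo_dev.watchdog_work, DEMO_WATCHDOG_DELAY);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_dev.watchdog_work);
	cancel_delayed_work_sync(&demo_dev.chan.reset_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

In the real driver the watchdog inspects the CHANSTS register and completion writeback before deciding to reset, and part 2 rewrites the chain-address registers; the sketch above only shows the scheduling skeleton.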
-rw-r--r--	drivers/dma/ioat_dma.c	267
-rw-r--r--	drivers/dma/ioatdma.h	10
2 files changed, 271 insertions(+), 6 deletions(-)
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 171cad69f318..da572968a7db 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -32,6 +32,7 @@
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
 #include "ioatdma.h"
 #include "ioatdma_registers.h"
 #include "ioatdma_hw.h"
@@ -41,11 +42,17 @@
 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
 
+#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
 static int ioat_pending_level = 4;
 module_param(ioat_pending_level, int, 0644);
 MODULE_PARM_DESC(ioat_pending_level,
 		 "high-water mark for pushing ioat descriptors (default: 4)");
 
+#define RESET_DELAY  msecs_to_jiffies(100)
+#define WATCHDOG_DELAY  round_jiffies(msecs_to_jiffies(2000))
+static void ioat_dma_chan_reset_part2(struct work_struct *work);
+static void ioat_dma_chan_watchdog(struct work_struct *work);
+
 /* internal functions */
 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
@@ -137,6 +144,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
 		ioat_chan->xfercap = xfercap;
 		ioat_chan->desccount = 0;
+		INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
 		if (ioat_chan->device->version != IOAT_VER_1_2) {
 			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
 					| IOAT_DMA_DCA_ANY_CPU,
@@ -175,7 +183,7 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 
-	if (ioat_chan->pending != 0) {
+	if (ioat_chan->pending > 0) {
 		spin_lock_bh(&ioat_chan->desc_lock);
 		__ioat1_dma_memcpy_issue_pending(ioat_chan);
 		spin_unlock_bh(&ioat_chan->desc_lock);
@@ -194,13 +202,228 @@ static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 
-	if (ioat_chan->pending != 0) {
+	if (ioat_chan->pending > 0) {
 		spin_lock_bh(&ioat_chan->desc_lock);
 		__ioat2_dma_memcpy_issue_pending(ioat_chan);
 		spin_unlock_bh(&ioat_chan->desc_lock);
 	}
 }
 
+
+/**
+ * ioat_dma_chan_reset_part2 - reinit the channel after a reset
+ */
+static void ioat_dma_chan_reset_part2(struct work_struct *work)
+{
+	struct ioat_dma_chan *ioat_chan =
+		container_of(work, struct ioat_dma_chan, work.work);
+	struct ioat_desc_sw *desc;
+
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	spin_lock_bh(&ioat_chan->desc_lock);
+
+	ioat_chan->completion_virt->low = 0;
+	ioat_chan->completion_virt->high = 0;
+	ioat_chan->pending = 0;
+
+	/*
+	 * count the descriptors waiting, and be sure to do it
+	 * right for both the CB1 line and the CB2 ring
+	 */
+	ioat_chan->dmacount = 0;
+	if (ioat_chan->used_desc.prev) {
+		desc = to_ioat_desc(ioat_chan->used_desc.prev);
+		do {
+			ioat_chan->dmacount++;
+			desc = to_ioat_desc(desc->node.next);
+		} while (&desc->node != ioat_chan->used_desc.next);
+	}
+
+	/*
+	 * write the new starting descriptor address
+	 * this puts channel engine into ARMED state
+	 */
+	desc = to_ioat_desc(ioat_chan->used_desc.prev);
+	switch (ioat_chan->device->version) {
+	case IOAT_VER_1_2:
+		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
+		writel(((u64) desc->async_tx.phys) >> 32,
+		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
+
+		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
+			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+		break;
+	case IOAT_VER_2_0:
+		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+		writel(((u64) desc->async_tx.phys) >> 32,
+		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+
+		/* tell the engine to go with what's left to be done */
+		writew(ioat_chan->dmacount,
+		       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+
+		break;
+	}
+	dev_err(&ioat_chan->device->pdev->dev,
+		"chan%d reset - %d descs waiting, %d total desc\n",
+		chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
+
+	spin_unlock_bh(&ioat_chan->desc_lock);
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+}
+
+/**
+ * ioat_dma_reset_channel - restart a channel
+ * @ioat_chan: IOAT DMA channel handle
+ */
+static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
+{
+	u32 chansts, chanerr;
+
+	if (!ioat_chan->used_desc.prev)
+		return;
+
+	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	chansts = (ioat_chan->completion_virt->low
+					& IOAT_CHANSTS_DMA_TRANSFER_STATUS);
+	if (chanerr) {
+		dev_err(&ioat_chan->device->pdev->dev,
+			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
+			chan_num(ioat_chan), chansts, chanerr);
+		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	}
+
+	/*
+	 * whack it upside the head with a reset
+	 * and wait for things to settle out.
+	 * force the pending count to a really big negative
+	 * to make sure no one forces an issue_pending
+	 * while we're waiting.
+	 */
+
+	spin_lock_bh(&ioat_chan->desc_lock);
+	ioat_chan->pending = INT_MIN;
+	writeb(IOAT_CHANCMD_RESET,
+	       ioat_chan->reg_base
+	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+	spin_unlock_bh(&ioat_chan->desc_lock);
+
+	/* schedule the 2nd half instead of sleeping a long time */
+	schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
+}
+
+/**
+ * ioat_dma_chan_watchdog - watch for stuck channels
+ */
+static void ioat_dma_chan_watchdog(struct work_struct *work)
+{
+	struct ioatdma_device *device =
+		container_of(work, struct ioatdma_device, work.work);
+	struct ioat_dma_chan *ioat_chan;
+	int i;
+
+	union {
+		u64 full;
+		struct {
+			u32 low;
+			u32 high;
+		};
+	} completion_hw;
+	unsigned long compl_desc_addr_hw;
+
+	for (i = 0; i < device->common.chancnt; i++) {
+		ioat_chan = ioat_lookup_chan_by_index(device, i);
+
+		if (ioat_chan->device->version == IOAT_VER_1_2
+			/* have we started processing anything yet */
+		    && ioat_chan->last_completion
+			/* have we completed any since last watchdog cycle? */
+		    && (ioat_chan->last_completion ==
+				ioat_chan->watchdog_completion)
+			/* has TCP stuck on one cookie since last watchdog? */
+		    && (ioat_chan->watchdog_tcp_cookie ==
+				ioat_chan->watchdog_last_tcp_cookie)
+		    && (ioat_chan->watchdog_tcp_cookie !=
+				ioat_chan->completed_cookie)
+			/* is there something in the chain to be processed? */
+			/* CB1 chain always has at least the last one processed */
+		    && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
+		    && ioat_chan->pending == 0) {
+
+			/*
+			 * check CHANSTS register for completed
+			 * descriptor address.
+			 * if it is different than completion writeback,
+			 * it is not zero
+			 * and it has changed since the last watchdog
+			 *	we can assume that channel
+			 *	is still working correctly
+			 *	and the problem is in completion writeback.
+			 *	update completion writeback
+			 *	with actual CHANSTS value
+			 * else
+			 *	try resetting the channel
+			 */
+
+			completion_hw.low = readl(ioat_chan->reg_base +
+				IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
+			completion_hw.high = readl(ioat_chan->reg_base +
+				IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
+#if (BITS_PER_LONG == 64)
+			compl_desc_addr_hw =
+				completion_hw.full
+				& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+#else
+			compl_desc_addr_hw =
+				completion_hw.low & IOAT_LOW_COMPLETION_MASK;
+#endif
+
+			if ((compl_desc_addr_hw != 0)
+			   && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
+			   && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
+				ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
+				ioat_chan->completion_virt->low = completion_hw.low;
+				ioat_chan->completion_virt->high = completion_hw.high;
+			} else {
+				ioat_dma_reset_channel(ioat_chan);
+				ioat_chan->watchdog_completion = 0;
+				ioat_chan->last_compl_desc_addr_hw = 0;
+			}
+
+		/*
+		 * for version 2.0 if there are descriptors yet to be processed
+		 * and the last completed hasn't changed since the last watchdog
+		 *	if they haven't hit the pending level
+		 *	    issue the pending to push them through
+		 *	else
+		 *	    try resetting the channel
+		 */
+		} else if (ioat_chan->device->version == IOAT_VER_2_0
+		    && ioat_chan->used_desc.prev
+		    && ioat_chan->last_completion
+		    && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
+
+			if (ioat_chan->pending < ioat_pending_level)
+				ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
+			else {
+				ioat_dma_reset_channel(ioat_chan);
+				ioat_chan->watchdog_completion = 0;
+			}
+		} else {
+			ioat_chan->last_compl_desc_addr_hw = 0;
+			ioat_chan->watchdog_completion
+					= ioat_chan->last_completion;
+		}
+
+		ioat_chan->watchdog_last_tcp_cookie =
+			ioat_chan->watchdog_tcp_cookie;
+	}
+
+	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
+}
+
 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
@@ -586,6 +809,10 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
 	ioat_chan->pending = 0;
 	ioat_chan->dmacount = 0;
+	ioat_chan->watchdog_completion = 0;
+	ioat_chan->last_compl_desc_addr_hw = 0;
+	ioat_chan->watchdog_tcp_cookie =
+		ioat_chan->watchdog_last_tcp_cookie = 0;
 }
 
 /**
@@ -717,8 +944,12 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 		new->src = dma_src;
 		new->async_tx.flags = flags;
 		return &new->async_tx;
-	} else
+	} else {
+		dev_err(&ioat_chan->device->pdev->dev,
+			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
+			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
 		return NULL;
+	}
 }
 
 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
@@ -745,8 +976,13 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 		new->src = dma_src;
 		new->async_tx.flags = flags;
 		return &new->async_tx;
-	} else
+	} else {
+		spin_unlock_bh(&ioat_chan->desc_lock);
+		dev_err(&ioat_chan->device->pdev->dev,
+			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
+			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
 		return NULL;
+	}
 }
 
 static void ioat_dma_cleanup_tasklet(unsigned long data)
@@ -821,11 +1057,25 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 
 	if (phys_complete == ioat_chan->last_completion) {
 		spin_unlock_bh(&ioat_chan->cleanup_lock);
+		/*
+		 * perhaps we're stuck so hard that the watchdog can't go off?
+		 * try to catch it after 2 seconds
+		 */
+		if (time_after(jiffies,
+			       ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
+			ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
+			ioat_chan->last_completion_time = jiffies;
+		}
 		return;
 	}
+	ioat_chan->last_completion_time = jiffies;
 
 	cookie = 0;
-	spin_lock_bh(&ioat_chan->desc_lock);
+	if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
+		spin_unlock_bh(&ioat_chan->cleanup_lock);
+		return;
+	}
+
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
 		list_for_each_entry_safe(desc, _desc,
@@ -942,6 +1192,7 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
 
 	last_used = chan->cookie;
 	last_complete = ioat_chan->completed_cookie;
+	ioat_chan->watchdog_tcp_cookie = cookie;
 
 	if (done)
 		*done = last_complete;
@@ -1332,6 +1583,10 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 
 	dma_async_device_register(&device->common);
 
+	INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
+	schedule_delayed_work(&device->work,
+			      WATCHDOG_DELAY);
+
 	return device;
 
 err_self_test:
@@ -1364,6 +1619,8 @@ void ioat_dma_remove(struct ioatdma_device *device)
 	pci_release_regions(device->pdev);
 	pci_disable_device(device->pdev);
 
+	cancel_delayed_work(&device->work);
+
 	list_for_each_entry_safe(chan, _chan,
 				 &device->common.channels, device_node) {
 		ioat_chan = to_ioat_chan(chan);
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
index f2c7fedbf009..c6ec933f9895 100644
--- a/drivers/dma/ioatdma.h
+++ b/drivers/dma/ioatdma.h
@@ -28,7 +28,7 @@
 #include <linux/cache.h>
 #include <linux/pci_ids.h>
 
-#define IOAT_DMA_VERSION "2.04"
+#define IOAT_DMA_VERSION "2.18"
 
 enum ioat_interrupt {
 	none = 0,
@@ -40,6 +40,7 @@ enum ioat_interrupt {
 
 #define IOAT_LOW_COMPLETION_MASK	0xffffffc0
 #define IOAT_DMA_DCA_ANY_CPU		~0
+#define IOAT_WATCHDOG_PERIOD		(2 * HZ)
 
 
 /**
@@ -62,6 +63,7 @@ struct ioatdma_device {
 	struct dma_device common;
 	u8 version;
 	enum ioat_interrupt irq_mode;
+	struct delayed_work work;
 	struct msix_entry msix_entries[4];
 	struct ioat_dma_chan *idx[4];
 };
@@ -75,6 +77,7 @@ struct ioat_dma_chan {
 
 	dma_cookie_t completed_cookie;
 	unsigned long last_completion;
+	unsigned long last_completion_time;
 
 	size_t xfercap;	/* XFERCAP register value expanded out */
 
@@ -82,6 +85,10 @@ struct ioat_dma_chan {
 	spinlock_t desc_lock;
 	struct list_head free_desc;
 	struct list_head used_desc;
+	unsigned long watchdog_completion;
+	int watchdog_tcp_cookie;
+	u32 watchdog_last_tcp_cookie;
+	struct delayed_work work;
 
 	int pending;
 	int dmacount;
@@ -98,6 +105,7 @@ struct ioat_dma_chan {
 			u32 high;
 		};
 	} *completion_virt;
+	unsigned long last_compl_desc_addr_hw;
 	struct tasklet_struct cleanup_task;
 };
 