Diffstat (limited to 'drivers/dma/ioat')
 drivers/dma/ioat/dma.c       |  2
 drivers/dma/ioat/dma.h       | 18
 drivers/dma/ioat/dma_v2.c    | 69
 drivers/dma/ioat/dma_v2.h    |  2
 drivers/dma/ioat/dma_v3.c    | 60
 drivers/dma/ioat/registers.h |  1
 6 files changed, 118 insertions(+), 34 deletions(-)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index c524d36d3c2e..dcc4ab78b32b 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1032,7 +1032,7 @@ int __devinit ioat_probe(struct ioatdma_device *device)
 	dma->dev = &pdev->dev;
 
 	if (!dma->chancnt) {
-		dev_err(dev, "zero channels detected\n");
+		dev_err(dev, "channel enumeration error\n");
 		goto err_setup_interrupts;
 	}
 
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 45edde996480..bbc3e78ef333 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -60,6 +60,7 @@
  * @dca: direct cache access context
  * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
  * @enumerate_channels: hw version specific channel enumeration
+ * @reset_hw: hw version specific channel (re)initialization
  * @cleanup_tasklet: select between the v2 and v3 cleanup routines
  * @timer_fn: select between the v2 and v3 timer watchdog routines
  * @self_test: hardware version specific self test for each supported op type
@@ -78,6 +79,7 @@ struct ioatdma_device {
 	struct dca_provider *dca;
 	void (*intr_quirk)(struct ioatdma_device *device);
 	int (*enumerate_channels)(struct ioatdma_device *device);
+	int (*reset_hw)(struct ioat_chan_common *chan);
 	void (*cleanup_tasklet)(unsigned long data);
 	void (*timer_fn)(unsigned long data);
 	int (*self_test)(struct ioatdma_device *device);
@@ -264,6 +266,22 @@ static inline void ioat_suspend(struct ioat_chan_common *chan)
 	writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 }
 
+static inline void ioat_reset(struct ioat_chan_common *chan)
+{
+	u8 ver = chan->device->version;
+
+	writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+}
+
+static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
+{
+	u8 ver = chan->device->version;
+	u8 cmd;
+
+	cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
+}
+
 static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
 {
 	struct ioat_chan_common *chan = &ioat->base;
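The two new inlines above let a caller kick a channel reset (write IOAT_CHANCMD_RESET) and then poll CHANCMD until the hardware clears the bit. As a rough standalone illustration of that issue-then-poll-with-deadline pattern, here is a userspace-style sketch; the fake register model, helper names, and nanosecond deadline are stand-ins for illustration, not the driver's API:

/* chan_reset_sketch.c -- illustrative only, not part of the patch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define CHANCMD_RESET 0x20

/* Fake channel: the real driver uses readb()/writeb() on an MMIO register;
 * here the "hardware" clears the RESET bit after a few polls. */
struct fake_chan {
	uint8_t chancmd;
	int polls_until_done;
};

static void chan_issue_reset(struct fake_chan *c)
{
	c->chancmd |= CHANCMD_RESET;
}

static bool chan_reset_pending(struct fake_chan *c)
{
	if (c->polls_until_done-- <= 0)
		c->chancmd &= ~CHANCMD_RESET;
	return (c->chancmd & CHANCMD_RESET) == CHANCMD_RESET;
}

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Same shape as ioat2_reset_sync(): kick the reset, then poll until the
 * bit clears or the deadline passes. */
static int reset_sync(struct fake_chan *c, int64_t timeout_ns)
{
	int64_t deadline = now_ns() + timeout_ns;

	chan_issue_reset(c);
	while (chan_reset_pending(c)) {
		if (timeout_ns && now_ns() > deadline)
			return -1; /* would be -ETIMEDOUT in the driver */
	}
	return 0;
}

int main(void)
{
	struct fake_chan chan = { .chancmd = 0, .polls_until_done = 3 };

	printf("reset_sync: %d\n", reset_sync(&chan, 200 * 1000000LL));
	return 0;
}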
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 8f1f7f05deaa..5f7a500e18d0 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -239,20 +239,50 @@ void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
 	__ioat2_start_null_desc(ioat);
 }
 
-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
+int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
 {
-	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	unsigned long end = jiffies + tmo;
+	int err = 0;
 	u32 status;
 
 	status = ioat_chansts(chan);
 	if (is_ioat_active(status) || is_ioat_idle(status))
 		ioat_suspend(chan);
 	while (is_ioat_active(status) || is_ioat_idle(status)) {
+		if (end && time_after(jiffies, end)) {
+			err = -ETIMEDOUT;
+			break;
+		}
 		status = ioat_chansts(chan);
 		cpu_relax();
 	}
 
+	return err;
+}
+
+int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
+{
+	unsigned long end = jiffies + tmo;
+	int err = 0;
+
+	ioat_reset(chan);
+	while (ioat_reset_pending(chan)) {
+		if (end && time_after(jiffies, end)) {
+			err = -ETIMEDOUT;
+			break;
+		}
+		cpu_relax();
+	}
+
+	return err;
+}
+
+static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	unsigned long phys_complete;
+
+	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
 		__cleanup(ioat, phys_complete);
 
@@ -318,6 +348,19 @@ void ioat2_timer_event(unsigned long data)
 	spin_unlock_bh(&chan->cleanup_lock);
 }
 
+static int ioat2_reset_hw(struct ioat_chan_common *chan)
+{
+	/* throw away whatever the channel was doing and get it initialized */
+	u32 chanerr;
+
+	ioat2_quiesce(chan, msecs_to_jiffies(100));
+
+	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+
+	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
+}
+
 /**
  * ioat2_enumerate_channels - find and initialize the device's channels
  * @device: the device to be enumerated
@@ -360,6 +403,10 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
 				  (unsigned long) ioat);
 		ioat->xfercap_log = xfercap_log;
 		spin_lock_init(&ioat->ring_lock);
+		if (device->reset_hw(&ioat->base)) {
+			i = 0;
+			break;
+		}
 	}
 	dma->chancnt = i;
 	return i;
@@ -467,7 +514,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
 	struct ioat_chan_common *chan = &ioat->base;
 	struct ioat_ring_ent **ring;
-	u32 chanerr;
 	int order;
 
 	/* have we already been set up? */
@@ -477,12 +523,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
 	/* Setup register to interrupt and write completion status on error */
 	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
 
-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	if (chanerr) {
-		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
-		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-	}
-
 	/* allocate a completion writeback area */
 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
 	chan->completion = pci_pool_alloc(chan->device->completion_pool,
@@ -746,13 +786,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 	tasklet_disable(&chan->cleanup_task);
 	del_timer_sync(&chan->timer);
 	device->cleanup_tasklet((unsigned long) ioat);
-
-	/* Delay 100ms after reset to allow internal DMA logic to quiesce
-	 * before removing DMA descriptor resources.
-	 */
-	writeb(IOAT_CHANCMD_RESET,
-	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
-	mdelay(100);
+	device->reset_hw(chan);
 
 	spin_lock_bh(&ioat->ring_lock);
 	descs = ioat2_ring_space(ioat);
@@ -839,6 +873,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
 	int err;
 
 	device->enumerate_channels = ioat2_enumerate_channels;
+	device->reset_hw = ioat2_reset_hw;
 	device->cleanup_tasklet = ioat2_cleanup_tasklet;
 	device->timer_fn = ioat2_timer_event;
 	device->self_test = ioat_dma_self_test;
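Taken together, the dma_v2.c changes route all channel (re)initialization through the new per-version reset_hw callback: it runs once per channel during enumeration (reporting zero channels if a reset fails) and again in ioat2_free_chan_resources(), replacing the old open-coded CHANCMD write plus mdelay(100). Below is a minimal standalone model of that callback-dispatch shape; the struct layouts, function names, and printf bodies are invented for illustration and are not the driver's types:

/* reset_hw_dispatch_sketch.c -- illustrative only, not part of the patch. */
#include <stdio.h>

struct chan {
	int id;
};

struct dma_device {
	int hw_version;
	int (*reset_hw)(struct chan *c);	/* hw version specific (re)init */
};

static int v2_reset_hw(struct chan *c)
{
	printf("chan %d: quiesce, clear CHANERR, synchronous reset\n", c->id);
	return 0;
}

static int v3_reset_hw(struct chan *c)
{
	printf("chan %d: v2 steps plus ioat3 PCI config workarounds\n", c->id);
	return 0;
}

/* Probe-time selection, mirroring how ioat2_dma_probe()/ioat3_dma_probe()
 * install different reset_hw implementations. */
static void probe(struct dma_device *dev)
{
	dev->reset_hw = dev->hw_version >= 3 ? v3_reset_hw : v2_reset_hw;
}

/* Enumeration reports zero channels if a reset fails, which is what the
 * new check added to ioat2_enumerate_channels() does. */
static int enumerate_channels(struct dma_device *dev, struct chan *chans, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		chans[i].id = i;
		if (dev->reset_hw(&chans[i]))
			return 0;
	}
	return n;
}

int main(void)
{
	struct dma_device dev = { .hw_version = 3 };
	struct chan chans[2];

	probe(&dev);
	printf("channels: %d\n", enumerate_channels(&dev, chans, 2));
	return 0;
}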
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 1d849ef74d5f..3afad8da43cc 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -185,6 +185,8 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
 void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
 void ioat2_cleanup_tasklet(unsigned long data);
 void ioat2_timer_event(unsigned long data);
+int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
+int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
 extern struct kobj_type ioat2_ktype;
 extern struct kmem_cache *ioat2_cache;
 #endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 42f6f10fb0cc..9908c9e94b2d 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -650,9 +650,11 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 
 	num_descs = ioat2_xferlen_to_descs(ioat, len);
 	/* we need 2x the number of descriptors to cover greater than 3
-	 * sources
+	 * sources (we need 1 extra source in the q-only continuation
+	 * case and 3 extra sources in the p+q continuation case.
 	 */
-	if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) {
+	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
+	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
 		with_ext = 1;
 		num_descs *= 2;
 	} else
@@ -1128,6 +1130,45 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
 	return 0;
 }
 
+static int ioat3_reset_hw(struct ioat_chan_common *chan)
+{
+	/* throw away whatever the channel was doing and get it
+	 * initialized, with ioat3 specific workarounds
+	 */
+	struct ioatdma_device *device = chan->device;
+	struct pci_dev *pdev = device->pdev;
+	u32 chanerr;
+	u16 dev_id;
+	int err;
+
+	ioat2_quiesce(chan, msecs_to_jiffies(100));
+
+	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+
+	/* -= IOAT ver.3 workarounds =- */
+	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
+	 * that can cause stability issues for IOAT ver.3, and clear any
+	 * pending errors
+	 */
+	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+	err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+	if (err) {
+		dev_err(&pdev->dev, "channel error register unreachable\n");
+		return err;
+	}
+	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
+
+	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+	 * (workaround for spurious config parity error after restart)
+	 */
+	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
+		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+
+	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
+}
+
 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
@@ -1137,10 +1178,10 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	struct ioat_chan_common *chan;
 	bool is_raid_device = false;
 	int err;
-	u16 dev_id;
 	u32 cap;
 
 	device->enumerate_channels = ioat2_enumerate_channels;
+	device->reset_hw = ioat3_reset_hw;
 	device->self_test = ioat3_dma_self_test;
 	dma = &device->common;
 	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
@@ -1216,19 +1257,6 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_prep_dma_xor_val = NULL;
 	#endif
 
-	/* -= IOAT ver.3 workarounds =- */
-	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
-	 * that can cause stability issues for IOAT ver.3
-	 */
-	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
-
-	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
-	 * (workaround for spurious config parity error after restart)
-	 */
-	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
-	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
-		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
-
 	err = ioat_probe(device);
 	if (err)
 		return err;
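The reworked condition in __ioat3_prep_pq_lock() accounts for the implicit sources that RAID6 continuation adds: per the patch's comment, a q-only continuation brings one extra source and a p+q continuation brings three, so the extended (2x) descriptor decision can no longer look at src_cnt alone. A small standalone sketch of that decision logic follows; the flag bit values and helper names are local stand-ins modeled on, but not identical to, the dmaengine definitions:

/* pq_with_ext_sketch.c -- illustrative only, not part of the patch. */
#include <stdbool.h>
#include <stdio.h>

#define PREP_CONTINUE     (1 << 0)	/* stand-in for DMA_PREP_CONTINUE */
#define PREP_PQ_DISABLE_P (1 << 1)	/* stand-in for DMA_PREP_PQ_DISABLE_P */

static bool continue_op(unsigned long flags)
{
	return (flags & PREP_CONTINUE) == PREP_CONTINUE;
}

static bool p_disabled_continue(unsigned long flags)
{
	unsigned long mask = PREP_CONTINUE | PREP_PQ_DISABLE_P;

	return (flags & mask) == mask;
}

/* The base descriptor covers 3 source addresses; a q-only continuation adds
 * 1 implicit source, while a p+q continuation adds 3 and therefore always
 * needs the extended descriptor (doubling the descriptor count). */
static bool needs_ext(int src_cnt, unsigned long flags)
{
	return src_cnt + (p_disabled_continue(flags) ? 1 : 0) > 3 ||
	       (continue_op(flags) && !p_disabled_continue(flags));
}

int main(void)
{
	printf("3 srcs, no continue:  %d\n", needs_ext(3, 0));
	printf("3 srcs, q-only cont.: %d\n",
	       needs_ext(3, PREP_CONTINUE | PREP_PQ_DISABLE_P));
	printf("2 srcs, p+q continue: %d\n", needs_ext(2, PREP_CONTINUE));
	return 0;
}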
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index f015ec196700..e8ae63baf588 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -27,6 +27,7 @@
 
 #define IOAT_PCI_DEVICE_ID_OFFSET		0x02
 #define IOAT_PCI_DMAUNCERRSTS_OFFSET		0x148
+#define IOAT_PCI_CHANERR_INT_OFFSET		0x180
 #define IOAT_PCI_CHANERRMASK_INT_OFFSET		0x184
 
 /* MMIO Device Registers */