author     Dan Williams <dan.j.williams@intel.com>   2009-12-22 19:21:47 -0500
committer  Dan Williams <dan.j.williams@intel.com>   2009-12-22 19:21:47 -0500
commit     f80ca163d65903276bec7045a484a79c0897eb2d (patch)
tree       97c7d61d43248b9db0757a76af80ff58b98b5599 /drivers/dma
parent     0794ec8ce327ec74416b569b8fb1951274693700 (diff)
parent     a6d52d70677e99bdb89b6921c265d0a58c22e597 (diff)
Merge branch 'ioat' into fixes
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig           |   2
-rw-r--r--  drivers/dma/dmaengine.c       |  10
-rw-r--r--  drivers/dma/ioat/dca.c        |   6
-rw-r--r--  drivers/dma/ioat/dma.c        |   2
-rw-r--r--  drivers/dma/ioat/dma.h        |  22
-rw-r--r--  drivers/dma/ioat/dma_v2.c     |  71
-rw-r--r--  drivers/dma/ioat/dma_v2.h     |   2
-rw-r--r--  drivers/dma/ioat/dma_v3.c     | 100
-rw-r--r--  drivers/dma/ioat/hw.h         |   2
-rw-r--r--  drivers/dma/ioat/registers.h  |   5
-rw-r--r--  drivers/dma/shdma.c           |  12
11 files changed, 179 insertions, 55 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fe93d70f2e37..93b058fec1a5 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -26,6 +26,8 @@ config INTEL_IOATDMA
         select DMA_ENGINE
         select DCA
         select ASYNC_TX_DISABLE_CHANNEL_SWITCH
+        select ASYNC_TX_DISABLE_PQ_VAL_DMA
+        select ASYNC_TX_DISABLE_XOR_VAL_DMA
         help
           Enable support for the Intel(R) I/OAT DMA engine present
           in recent Intel Xeon chipsets.
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index bd0b248de2cf..8f99354082ce 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -632,11 +632,21 @@ static bool device_has_all_tx_types(struct dma_device *device)
 #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
         if (!dma_has_cap(DMA_XOR, device->cap_mask))
                 return false;
+
+        #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+        if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
+                return false;
+        #endif
 #endif
 
 #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
         if (!dma_has_cap(DMA_PQ, device->cap_mask))
                 return false;
+
+        #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+        if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
+                return false;
+        #endif
 #endif
 
         return true;
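
With ASYNC_TX_DISABLE_CHANNEL_SWITCH in effect, async_tx cannot hop between channels mid-chain, so a single device must either supply every operation type or explicitly route the validate operations back to software. The two hunks above add that escape hatch: a driver selects the new Kconfig symbols, and device_has_all_tx_types() then stops demanding DMA_XOR_VAL/DMA_PQ_VAL from it. A driver-side sketch of the matching opt-out (illustrative helper name; ioat3_dma_probe() further down in this merge does exactly this):

static void example_clear_val_caps(struct dma_device *dma)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
        /* pq validate falls back to the synchronous software path */
        dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
        dma->device_prep_dma_pq_val = NULL;
#endif
#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
        /* xor validate likewise */
        dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
        dma->device_prep_dma_xor_val = NULL;
#endif
}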
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 69d02615c4d6..abd9038e06b1 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -98,17 +98,17 @@ static int dca_enabled_in_bios(struct pci_dev *pdev)
         cpuid_level_9 = cpuid_eax(9);
         res = test_bit(0, &cpuid_level_9);
         if (!res)
-                dev_err(&pdev->dev, "DCA is disabled in BIOS\n");
+                dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
 
         return res;
 }
 
-static int system_has_dca_enabled(struct pci_dev *pdev)
+int system_has_dca_enabled(struct pci_dev *pdev)
 {
         if (boot_cpu_has(X86_FEATURE_DCA))
                 return dca_enabled_in_bios(pdev);
 
-        dev_err(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
+        dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
         return 0;
 }
 
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index c524d36d3c2e..dcc4ab78b32b 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1032,7 +1032,7 @@ int __devinit ioat_probe(struct ioatdma_device *device)
         dma->dev = &pdev->dev;
 
         if (!dma->chancnt) {
-                dev_err(dev, "zero channels detected\n");
+                dev_err(dev, "channel enumeration error\n");
                 goto err_setup_interrupts;
         }
 
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index c14fdfeb7f33..bbc3e78ef333 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -60,6 +60,7 @@
  * @dca: direct cache access context
  * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
  * @enumerate_channels: hw version specific channel enumeration
+ * @reset_hw: hw version specific channel (re)initialization
  * @cleanup_tasklet: select between the v2 and v3 cleanup routines
  * @timer_fn: select between the v2 and v3 timer watchdog routines
  * @self_test: hardware version specific self test for each supported op type
@@ -78,6 +79,7 @@ struct ioatdma_device {
         struct dca_provider *dca;
         void (*intr_quirk)(struct ioatdma_device *device);
         int (*enumerate_channels)(struct ioatdma_device *device);
+        int (*reset_hw)(struct ioat_chan_common *chan);
         void (*cleanup_tasklet)(unsigned long data);
         void (*timer_fn)(unsigned long data);
         int (*self_test)(struct ioatdma_device *device);
@@ -264,6 +266,22 @@ static inline void ioat_suspend(struct ioat_chan_common *chan)
         writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 }
 
+static inline void ioat_reset(struct ioat_chan_common *chan)
+{
+        u8 ver = chan->device->version;
+
+        writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+}
+
+static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
+{
+        u8 ver = chan->device->version;
+        u8 cmd;
+
+        cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+        return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
+}
+
 static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
 {
         struct ioat_chan_common *chan = &ioat->base;
@@ -297,9 +315,7 @@ static inline bool is_ioat_suspended(unsigned long status)
 /* channel was fatally programmed */
 static inline bool is_ioat_bug(unsigned long err)
 {
-        return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR|
-                         IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR|
-                         IOAT_CHANERR_LENGTH_ERR));
+        return !!err;
 }
 
 static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
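
The CHANCMD reset bit is self-clearing: hardware holds it at 1 until the channel reset completes, which is what ioat_reset_pending() reads back. is_ioat_bug() is also widened here so that any nonzero CHANERR value is treated as fatal, paired with the new dev_err() before BUG_ON() in the v2/v3 timer handlers below. A minimal synchronous-reset caller, sketched with a retry bound instead of the jiffies deadline that ioat2_reset_sync() in this merge uses:

static int example_sync_reset(struct ioat_chan_common *chan)
{
        int retries = 1000;        /* arbitrary illustrative bound */

        ioat_reset(chan);
        while (ioat_reset_pending(chan)) {        /* bit clears when hw is done */
                if (--retries < 0)
                        return -ETIMEDOUT;
                cpu_relax();
        }
        return 0;
}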
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 96ffab7d37a7..5f7a500e18d0 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -239,20 +239,50 @@ void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
         __ioat2_start_null_desc(ioat);
 }
 
-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
+int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
 {
-        struct ioat_chan_common *chan = &ioat->base;
-        unsigned long phys_complete;
+        unsigned long end = jiffies + tmo;
+        int err = 0;
         u32 status;
 
         status = ioat_chansts(chan);
         if (is_ioat_active(status) || is_ioat_idle(status))
                 ioat_suspend(chan);
         while (is_ioat_active(status) || is_ioat_idle(status)) {
+                if (end && time_after(jiffies, end)) {
+                        err = -ETIMEDOUT;
+                        break;
+                }
                 status = ioat_chansts(chan);
                 cpu_relax();
         }
 
+        return err;
+}
+
+int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
+{
+        unsigned long end = jiffies + tmo;
+        int err = 0;
+
+        ioat_reset(chan);
+        while (ioat_reset_pending(chan)) {
+                if (end && time_after(jiffies, end)) {
+                        err = -ETIMEDOUT;
+                        break;
+                }
+                cpu_relax();
+        }
+
+        return err;
+}
+
+static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
+{
+        struct ioat_chan_common *chan = &ioat->base;
+        unsigned long phys_complete;
+
+        ioat2_quiesce(chan, 0);
         if (ioat_cleanup_preamble(chan, &phys_complete))
                 __cleanup(ioat, phys_complete);
 
@@ -279,6 +309,8 @@ void ioat2_timer_event(unsigned long data)
                 u32 chanerr;
 
                 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+                dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+                        __func__, chanerr);
                 BUG_ON(is_ioat_bug(chanerr));
         }
 
@@ -316,6 +348,19 @@ void ioat2_timer_event(unsigned long data)
         spin_unlock_bh(&chan->cleanup_lock);
 }
 
+static int ioat2_reset_hw(struct ioat_chan_common *chan)
+{
+        /* throw away whatever the channel was doing and get it initialized */
+        u32 chanerr;
+
+        ioat2_quiesce(chan, msecs_to_jiffies(100));
+
+        chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+        writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+
+        return ioat2_reset_sync(chan, msecs_to_jiffies(200));
+}
+
 /**
  * ioat2_enumerate_channels - find and initialize the device's channels
  * @device: the device to be enumerated
@@ -358,6 +403,10 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
                             (unsigned long) ioat);
                 ioat->xfercap_log = xfercap_log;
                 spin_lock_init(&ioat->ring_lock);
+                if (device->reset_hw(&ioat->base)) {
+                        i = 0;
+                        break;
+                }
         }
         dma->chancnt = i;
         return i;
@@ -465,7 +514,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
         struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
         struct ioat_chan_common *chan = &ioat->base;
         struct ioat_ring_ent **ring;
-        u32 chanerr;
         int order;
 
471 /* have we already been set up? */ 519 /* have we already been set up? */
@@ -475,12 +523,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
475 /* Setup register to interrupt and write completion status on error */ 523 /* Setup register to interrupt and write completion status on error */
476 writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); 524 writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
477 525
478 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
479 if (chanerr) {
480 dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
481 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
482 }
483
484 /* allocate a completion writeback area */ 526 /* allocate a completion writeback area */
485 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ 527 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
486 chan->completion = pci_pool_alloc(chan->device->completion_pool, 528 chan->completion = pci_pool_alloc(chan->device->completion_pool,
@@ -744,13 +786,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
         tasklet_disable(&chan->cleanup_task);
         del_timer_sync(&chan->timer);
         device->cleanup_tasklet((unsigned long) ioat);
-
-        /* Delay 100ms after reset to allow internal DMA logic to quiesce
-         * before removing DMA descriptor resources.
-         */
-        writeb(IOAT_CHANCMD_RESET,
-               chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
-        mdelay(100);
+        device->reset_hw(chan);
 
         spin_lock_bh(&ioat->ring_lock);
         descs = ioat2_ring_space(ioat);
@@ -837,6 +873,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
         int err;
 
         device->enumerate_channels = ioat2_enumerate_channels;
+        device->reset_hw = ioat2_reset_hw;
         device->cleanup_tasklet = ioat2_cleanup_tasklet;
         device->timer_fn = ioat2_timer_event;
         device->self_test = ioat_dma_self_test;
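
ioat2_quiesce() and ioat2_reset_sync() share one idiom: spin on a hardware condition under a jiffies deadline and hand back -ETIMEDOUT instead of hanging, and reset_hw() then chains them (quiesce for up to 100ms, clear CHANERR, reset and wait up to 200ms). The generic shape of that loop, as an illustrative helper rather than anything in this patch:

static int example_poll_until_clear(bool (*cond)(struct ioat_chan_common *),
                                    struct ioat_chan_common *chan,
                                    unsigned long tmo)
{
        unsigned long end = jiffies + tmo;

        while (cond(chan)) {        /* re-read hardware state each pass */
                if (time_after(jiffies, end))
                        return -ETIMEDOUT;
                cpu_relax();
        }
        return 0;
}

/* e.g. example_poll_until_clear(ioat_reset_pending, chan, msecs_to_jiffies(200)) */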
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 1d849ef74d5f..3afad8da43cc 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -185,6 +185,8 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
 void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
 void ioat2_cleanup_tasklet(unsigned long data);
 void ioat2_timer_event(unsigned long data);
+int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
+int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
 extern struct kobj_type ioat2_ktype;
 extern struct kmem_cache *ioat2_cache;
 #endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 35d1e33afd5b..9908c9e94b2d 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -378,6 +378,8 @@ static void ioat3_timer_event(unsigned long data)
                 u32 chanerr;
 
                 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+                dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+                        __func__, chanerr);
                 BUG_ON(is_ioat_bug(chanerr));
         }
 
383 385
@@ -569,7 +571,7 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
         dump_desc_dbg(ioat, compl_desc);
 
         /* we leave the channel locked to ensure in order submission */
-        return &desc->txd;
+        return &compl_desc->txd;
 }
 
 static struct dma_async_tx_descriptor *
@@ -648,9 +650,11 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 
         num_descs = ioat2_xferlen_to_descs(ioat, len);
         /* we need 2x the number of descriptors to cover greater than 3
-         * sources
+         * sources (we need 1 extra source in the q-only continuation
+         * case and 3 extra sources in the p+q continuation case.
          */
-        if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) {
+        if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
+            (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
                 with_ext = 1;
                 num_descs *= 2;
         } else
@@ -728,7 +732,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
         dump_desc_dbg(ioat, compl_desc);
 
         /* we leave the channel locked to ensure in order submission */
-        return &desc->txd;
+        return &compl_desc->txd;
 }
 
 static struct dma_async_tx_descriptor *
@@ -736,10 +740,16 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
               unsigned int src_cnt, const unsigned char *scf, size_t len,
               unsigned long flags)
 {
+        /* specify valid address for disabled result */
+        if (flags & DMA_PREP_PQ_DISABLE_P)
+                dst[0] = dst[1];
+        if (flags & DMA_PREP_PQ_DISABLE_Q)
+                dst[1] = dst[0];
+
         /* handle the single source multiply case from the raid6
          * recovery path
          */
-        if (unlikely((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1)) {
+        if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
                 dma_addr_t single_source[2];
                 unsigned char single_source_coef[2];
 
@@ -761,6 +771,12 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
                   unsigned int src_cnt, const unsigned char *scf, size_t len,
                   enum sum_check_flags *pqres, unsigned long flags)
 {
+        /* specify valid address for disabled result */
+        if (flags & DMA_PREP_PQ_DISABLE_P)
+                pq[0] = pq[1];
+        if (flags & DMA_PREP_PQ_DISABLE_Q)
+                pq[1] = pq[0];
+
         /* the cleanup routine only sets bits on validate failure, it
          * does not clear bits on validate success... so clear it here
          */
@@ -778,9 +794,9 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
         dma_addr_t pq[2];
 
         memset(scf, 0, src_cnt);
-        flags |= DMA_PREP_PQ_DISABLE_Q;
         pq[0] = dst;
-        pq[1] = ~0;
+        flags |= DMA_PREP_PQ_DISABLE_Q;
+        pq[1] = dst; /* specify valid address for disabled result */
 
         return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
                                     flags);
@@ -800,9 +816,9 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
         *result = 0;
 
         memset(scf, 0, src_cnt);
-        flags |= DMA_PREP_PQ_DISABLE_Q;
         pq[0] = src[0];
-        pq[1] = ~0;
+        flags |= DMA_PREP_PQ_DISABLE_Q;
+        pq[1] = pq[0]; /* specify valid address for disabled result */
 
         return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
                                     len, flags);
@@ -1114,18 +1130,58 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
         return 0;
 }
 
+static int ioat3_reset_hw(struct ioat_chan_common *chan)
+{
+        /* throw away whatever the channel was doing and get it
+         * initialized, with ioat3 specific workarounds
+         */
+        struct ioatdma_device *device = chan->device;
+        struct pci_dev *pdev = device->pdev;
+        u32 chanerr;
+        u16 dev_id;
+        int err;
+
+        ioat2_quiesce(chan, msecs_to_jiffies(100));
+
+        chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+        writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+
+        /* -= IOAT ver.3 workarounds =- */
+        /* Write CHANERRMSK_INT with 3E07h to mask out the errors
+         * that can cause stability issues for IOAT ver.3, and clear any
+         * pending errors
+         */
+        pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+        err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+        if (err) {
+                dev_err(&pdev->dev, "channel error register unreachable\n");
+                return err;
+        }
+        pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
+
+        /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+         * (workaround for spurious config parity error after restart)
+         */
+        pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+        if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
+                pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+
+        return ioat2_reset_sync(chan, msecs_to_jiffies(200));
+}
+
 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
         struct pci_dev *pdev = device->pdev;
+        int dca_en = system_has_dca_enabled(pdev);
         struct dma_device *dma;
         struct dma_chan *c;
         struct ioat_chan_common *chan;
         bool is_raid_device = false;
         int err;
-        u16 dev_id;
         u32 cap;
 
         device->enumerate_channels = ioat2_enumerate_channels;
+        device->reset_hw = ioat3_reset_hw;
         device->self_test = ioat3_dma_self_test;
         dma = &device->common;
         dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
@@ -1137,6 +1193,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
         dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
         cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+
+        /* dca is incompatible with raid operations */
+        if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+                cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+
         if (cap & IOAT_CAP_XOR) {
                 is_raid_device = true;
                 dma->max_xor = 8;
@@ -1186,18 +1247,15 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
                 device->timer_fn = ioat2_timer_event;
         }
 
-        /* -= IOAT ver.3 workarounds =- */
-        /* Write CHANERRMSK_INT with 3E07h to mask out the errors
-         * that can cause stability issues for IOAT ver.3
-         */
-        pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+        #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+        dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
+        dma->device_prep_dma_pq_val = NULL;
+        #endif
 
-        /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
-         * (workaround for spurious config parity error after restart)
-         */
-        pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
-        if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
-                pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+        #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+        dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
+        dma->device_prep_dma_xor_val = NULL;
+        #endif
 
         err = ioat_probe(device);
         if (err)
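
Two conventions settle out of the dma_v3.c changes. First, the descriptor-count math above folds RAID6 continuations into the source count: a q-only continuation carries one implied extra source and a p+q continuation three, so e.g. src_cnt == 3 with a P-disabled continuation counts as 4 sources and spills into the extended descriptor. Second, the hardware wants a valid address in both the P and Q slots even when one result is disabled, so the prep routines now alias the disabled destination to the enabled one instead of planting the old ~0 poison value. From a caller's side that looks roughly like this (illustrative function, not from this patch):

static struct dma_async_tx_descriptor *example_q_only(struct dma_chan *chan,
                                                      dma_addr_t q_dest,
                                                      dma_addr_t *src,
                                                      unsigned char *scf,
                                                      size_t len)
{
        /* pq[0] (P) is disabled; ioat3_prep_pq() aliases it to pq[1] */
        dma_addr_t pq[2] = { 0, q_dest };

        return chan->device->device_prep_dma_pq(chan, pq, src, 2, scf, len,
                                                DMA_PREP_PQ_DISABLE_P);
}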
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 99afb12bd409..60e675455b6a 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -39,6 +39,8 @@
 #define IOAT_VER_3_0            0x30    /* Version 3.0 */
 #define IOAT_VER_3_2            0x32    /* Version 3.2 */
 
+int system_has_dca_enabled(struct pci_dev *pdev);
+
 struct ioat_dma_descriptor {
         uint32_t        size;
         union {
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 63038e18ab03..e8ae63baf588 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -27,6 +27,7 @@
 
 #define IOAT_PCI_DEVICE_ID_OFFSET               0x02
 #define IOAT_PCI_DMAUNCERRSTS_OFFSET            0x148
+#define IOAT_PCI_CHANERR_INT_OFFSET             0x180
 #define IOAT_PCI_CHANERRMASK_INT_OFFSET         0x184
 
 /* MMIO Device Registers */
@@ -92,9 +93,7 @@
 #define IOAT_CHANCTRL_ERR_COMPLETION_EN         0x0004
 #define IOAT_CHANCTRL_INT_REARM                 0x0001
 #define IOAT_CHANCTRL_RUN       (IOAT_CHANCTRL_INT_REARM |\
-                                 IOAT_CHANCTRL_ERR_COMPLETION_EN |\
-                                 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\
-                                 IOAT_CHANCTRL_ERR_INT_EN)
+                                 IOAT_CHANCTRL_ANY_ERR_ABORT_EN)
 
 #define IOAT_DMA_COMP_OFFSET            0x02    /* 16-bit DMA channel compatibility */
 #define IOAT_DMA_COMP_V1                0x0001  /* Compatibility with DMA version 1 */
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 9546f5f356eb..d10cc899c460 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -730,17 +730,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 #endif
         struct sh_dmae_device *shdev;
 
+        /* get platform data */
+        if (!pdev->dev.platform_data)
+                return -ENODEV;
+
         shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
         if (!shdev) {
                 dev_err(&pdev->dev, "No enough memory\n");
-                err = -ENOMEM;
-                goto shdev_err;
+                return -ENOMEM;
         }
 
-        /* get platform data */
-        if (!pdev->dev.platform_data)
-                goto shdev_err;
-
         /* platform data */
         memcpy(&shdev->pdata, pdev->dev.platform_data,
                sizeof(struct sh_dmae_pdata));
@@ -814,7 +813,6 @@ eirq_err:
 rst_err:
         kfree(shdev);
 
-shdev_err:
         return err;
 }
 
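
The shdma probe fix is an ordering change: validate platform_data before the first allocation so the early-exit path needs no cleanup, and drop the shdev_err label, which previously returned an uninitialized err and leaked shdev. The resulting pattern, sketched with illustrative names:

static int example_probe(struct platform_device *pdev)
{
        struct sh_dmae_device *shdev;
        int err;

        /* inputs that need no cleanup are checked before allocating */
        if (!pdev->dev.platform_data)
                return -ENODEV;

        shdev = kzalloc(sizeof(*shdev), GFP_KERNEL);
        if (!shdev)
                return -ENOMEM;

        err = 0;        /* ... later setup failures set err ... */
        if (err)
                goto err_free;        /* ... and unwind only what they own */

        platform_set_drvdata(pdev, shdev);
        return 0;

err_free:
        kfree(shdev);
        return err;
}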