author    Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:42:55 -0400
committer Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:42:55 -0400
commit    bf40a6869c9198bdf56fe173961feb89e9f0d961 (patch)
tree      3d1b6bf44647857997113fe1b036fb46e360d8a7 /drivers/dma
parent    2aec048cdc4a5a81163a42a61df903f76a27e737 (diff)
ioat3: split ioat3 support to its own file, add memset
Up until this point the drivers for Intel(R) QuickData Technology engines, specification versions 2 and 3, were mostly identical save for a few quirks. Version 3.2 hardware adds many new capabilities (like raid offload support) requiring some infrastructure that is not relevant for v2. For better code organization of the new functionality, move v3 and v3.2 support to its own file, dma_v3.c, and export some routines from the base files (dma.c and dma_v2.c) that can be reused directly.

The first new capability included in this code reorganization is support for v3.2 memset operations.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
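[Editor's note, not part of the patch: a channel that advertises DMA_MEMSET is driven through the generic dmaengine prep/submit flow. A minimal client-side sketch against the dmaengine API of this era; fill_one_block() is a hypothetical helper and error handling is trimmed.]

    #include <linux/dmaengine.h>

    /* Hypothetical helper: queue a byte-fill of @len bytes at @dest on a
     * channel that was requested with DMA_MEMSET set in its capability mask.
     */
    static dma_cookie_t fill_one_block(struct dma_chan *chan, dma_addr_t dest,
                                       int value, size_t len)
    {
            struct dma_async_tx_descriptor *tx;

            tx = chan->device->device_prep_dma_memset(chan, dest, value, len,
                                                      DMA_PREP_INTERRUPT);
            if (!tx)
                    return -ENOMEM; /* ring full, caller may retry */

            return tx->tx_submit(tx); /* cookie to poll for completion */
    }

[After submission, dma_async_issue_pending(chan) kicks the channel, and the returned cookie is polled through the device's is_tx_complete hook — ioat3_is_complete in the new file below.]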
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/ioat/Makefile |   2
-rw-r--r--  drivers/dma/ioat/dma.c    |  11
-rw-r--r--  drivers/dma/ioat/dma.h    |  16
-rw-r--r--  drivers/dma/ioat/dma_v2.c |  94
-rw-r--r--  drivers/dma/ioat/dma_v2.h |  13
-rw-r--r--  drivers/dma/ioat/dma_v3.c | 367
-rw-r--r--  drivers/dma/ioat/pci.c    |   2
7 files changed, 421 insertions(+), 84 deletions(-)
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile
index 205a639e84df..8997d3fb9051 100644
--- a/drivers/dma/ioat/Makefile
+++ b/drivers/dma/ioat/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
-ioatdma-objs := pci.o dma.o dma_v2.o dca.o
+ioatdma-objs := pci.o dma.o dma_v2.o dma_v3.o dca.o
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 17a518d0386f..70262c0131d9 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -538,17 +538,6 @@ static void ioat1_cleanup_tasklet(unsigned long data)
 	writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
-static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
-		       int direction, enum dma_ctrl_flags flags, bool dst)
-{
-	if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
-	    (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
-		pci_unmap_single(pdev, addr, len, direction);
-	else
-		pci_unmap_page(pdev, addr, len, direction);
-}
-
-
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 		    size_t len, struct ioat_dma_descriptor *hw)
 {
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 0d94e7804c13..c6d58bf541d1 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -60,6 +60,10 @@
  * @dca: direct cache access context
  * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
  * @enumerate_channels: hw version specific channel enumeration
+ * @cleanup_tasklet: select between the v2 and v3 cleanup routines
+ * @timer_fn: select between the v2 and v3 timer watchdog routines
+ *
+ * Note: the v3 cleanup routine supports raid operations
  */
 
 struct ioatdma_device {
@@ -74,6 +78,8 @@ struct ioatdma_device {
 	struct dca_provider *dca;
 	void (*intr_quirk)(struct ioatdma_device *device);
 	int (*enumerate_channels)(struct ioatdma_device *device);
+	void (*cleanup_tasklet)(unsigned long data);
+	void (*timer_fn)(unsigned long data);
 };
 
 struct ioat_chan_common {
@@ -287,6 +293,16 @@ static inline bool is_ioat_bug(unsigned long err)
 			 IOAT_CHANERR_LENGTH_ERR));
 }
 
+static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
+			      int direction, enum dma_ctrl_flags flags, bool dst)
+{
+	if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
+	    (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
+		pci_unmap_single(pdev, addr, len, direction);
+	else
+		pci_unmap_page(pdev, addr, len, direction);
+}
+
 int __devinit ioat_probe(struct ioatdma_device *device);
 int __devinit ioat_register(struct ioatdma_device *device);
 int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca);
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 568923c5ddec..7492e9165e08 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -39,7 +39,7 @@
 #include "registers.h"
 #include "hw.h"
 
-static int ioat_ring_alloc_order = 8;
+int ioat_ring_alloc_order = 8;
 module_param(ioat_ring_alloc_order, int, 0644);
 MODULE_PARM_DESC(ioat_ring_alloc_order,
 		 "ioat2+: allocate 2^n descriptors per channel (default: n=8)");
@@ -63,7 +63,7 @@ static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
 		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
 }
 
-static void ioat2_issue_pending(struct dma_chan *chan)
+void ioat2_issue_pending(struct dma_chan *chan)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);
 
@@ -214,7 +214,7 @@ static void ioat2_cleanup_tasklet(unsigned long data)
 	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
-static void __restart_chan(struct ioat2_dma_chan *ioat)
+void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 
@@ -255,11 +255,9 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 	if (ioat_cleanup_preamble(chan, &phys_complete))
 		__cleanup(ioat, phys_complete);
 
-	__restart_chan(ioat);
+	__ioat2_restart_chan(ioat);
 }
 
-static bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
-
 static void ioat2_timer_event(unsigned long data)
 {
 	struct ioat2_dma_chan *ioat = (void *) data;
@@ -321,7 +319,7 @@ static void ioat2_timer_event(unsigned long data)
  * ioat2_enumerate_channels - find and initialize the device's channels
  * @device: the device to be enumerated
  */
-static int ioat2_enumerate_channels(struct ioatdma_device *device)
+int ioat2_enumerate_channels(struct ioatdma_device *device)
 {
 	struct ioat2_dma_chan *ioat;
 	struct device *dev = &device->pdev->dev;
@@ -354,8 +352,8 @@ static int ioat2_enumerate_channels(struct ioatdma_device *device)
 			break;
 
 		ioat_init_channel(device, &ioat->base, i,
-				  ioat2_timer_event,
-				  ioat2_cleanup_tasklet,
+				  device->timer_fn,
+				  device->cleanup_tasklet,
 				  (unsigned long) ioat);
 		ioat->xfercap_log = xfercap_log;
 		spin_lock_init(&ioat->ring_lock);
@@ -460,7 +458,7 @@ static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gf
 /* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
  * @chan: channel to be initialized
  */
-static int ioat2_alloc_chan_resources(struct dma_chan *c)
+int ioat2_alloc_chan_resources(struct dma_chan *c)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
 	struct ioat_chan_common *chan = &ioat->base;
@@ -514,7 +512,7 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c)
 	return 1 << ioat->alloc_order;
 }
 
-static bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
+bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
 {
 	/* reshape differs from normal ring allocation in that we want
 	 * to allocate a new software ring while only
@@ -627,7 +625,7 @@ static bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
  * @ioat: ioat2,3 channel (ring) to operate on
  * @num_descs: allocation length
  */
-static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
+int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 
@@ -655,9 +653,11 @@ static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_d
 		spin_lock_bh(&chan->cleanup_lock);
 		if (jiffies > chan->timer.expires &&
 		    timer_pending(&chan->timer)) {
+			struct ioatdma_device *device = chan->device;
+
 			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 			spin_unlock_bh(&chan->cleanup_lock);
-			ioat2_timer_event((unsigned long) ioat);
+			device->timer_fn((unsigned long) ioat);
 		} else
 			spin_unlock_bh(&chan->cleanup_lock);
 		return -ENOMEM;
@@ -670,7 +670,7 @@ static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_d
 	return 0;  /* with ioat->ring_lock held */
 }
 
-static struct dma_async_tx_descriptor *
+struct dma_async_tx_descriptor *
 ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
 			   dma_addr_t dma_src, size_t len, unsigned long flags)
 {
@@ -722,11 +722,11 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
  * ioat2_free_chan_resources - release all the descriptors
  * @chan: the channel to be cleaned
  */
-static void ioat2_free_chan_resources(struct dma_chan *c)
+void ioat2_free_chan_resources(struct dma_chan *c)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
 	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *ioatdma_device = chan->device;
+	struct ioatdma_device *device = chan->device;
 	struct ioat_ring_ent *desc;
 	const u16 total_descs = 1 << ioat->alloc_order;
 	int descs;
@@ -740,7 +740,7 @@ static void ioat2_free_chan_resources(struct dma_chan *c)
 
 	tasklet_disable(&chan->cleanup_task);
 	del_timer_sync(&chan->timer);
-	ioat2_cleanup(ioat);
+	device->cleanup_tasklet((unsigned long) ioat);
 
 	/* Delay 100ms after reset to allow internal DMA logic to quiesce
 	 * before removing DMA descriptor resources.
@@ -770,8 +770,7 @@ static void ioat2_free_chan_resources(struct dma_chan *c)
 	kfree(ioat->ring);
 	ioat->ring = NULL;
 	ioat->alloc_order = 0;
-	pci_pool_free(ioatdma_device->completion_pool,
-		      chan->completion,
+	pci_pool_free(device->completion_pool, chan->completion,
 		      chan->completion_dma);
 	spin_unlock_bh(&ioat->ring_lock);
 
@@ -781,16 +780,17 @@ static void ioat2_free_chan_resources(struct dma_chan *c)
 	ioat->dmacount = 0;
 }
 
-static enum dma_status
+enum dma_status
 ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
 		  dma_cookie_t *done, dma_cookie_t *used)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+	struct ioatdma_device *device = ioat->base.device;
 
 	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
 		return DMA_SUCCESS;
 
-	ioat2_cleanup(ioat);
+	device->cleanup_tasklet((unsigned long) ioat);
 
 	return ioat_is_complete(c, cookie, done, used);
 }
@@ -804,6 +804,8 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
 	int err;
 
 	device->enumerate_channels = ioat2_enumerate_channels;
+	device->cleanup_tasklet = ioat2_cleanup_tasklet;
+	device->timer_fn = ioat2_timer_event;
 	dma = &device->common;
 	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
 	dma->device_issue_pending = ioat2_issue_pending;
@@ -830,53 +832,3 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
 
 	return err;
 }
-
-int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
-{
-	struct pci_dev *pdev = device->pdev;
-	struct dma_device *dma;
-	struct dma_chan *c;
-	struct ioat_chan_common *chan;
-	int err;
-	u16 dev_id;
-
-	device->enumerate_channels = ioat2_enumerate_channels;
-	dma = &device->common;
-	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
-	dma->device_issue_pending = ioat2_issue_pending;
-	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
-	dma->device_free_chan_resources = ioat2_free_chan_resources;
-	dma->device_is_tx_complete = ioat2_is_complete;
-
-	/* -= IOAT ver.3 workarounds =- */
-	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
-	 * that can cause stability issues for IOAT ver.3
-	 */
-	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
-
-	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
-	 * (workaround for spurious config parity error after restart)
-	 */
-	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
-	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
-		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
-
-	err = ioat_probe(device);
-	if (err)
-		return err;
-	ioat_set_tcp_copy_break(262144);
-
-	list_for_each_entry(c, &dma->channels, device_node) {
-		chan = to_chan_common(c);
-		writel(IOAT_DMA_DCA_ANY_CPU,
-		       chan->reg_base + IOAT_DCACTRL_OFFSET);
-	}
-
-	err = ioat_register(device);
-	if (err)
-		return err;
-	if (dca)
-		device->dca = ioat3_dca_init(pdev, device->reg_base);
-
-	return err;
-}
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index ed4bb82a283d..bde57ddf555d 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -27,6 +27,7 @@
 
 
 extern int ioat_pending_level;
+extern int ioat_ring_alloc_order;
 
 /*
  * workaround for IOAT ver.3.0 null descriptor issue
@@ -167,4 +168,16 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
 int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
 struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs);
+int ioat2_enumerate_channels(struct ioatdma_device *device);
+struct dma_async_tx_descriptor *
+ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
+			   dma_addr_t dma_src, size_t len, unsigned long flags);
+void ioat2_issue_pending(struct dma_chan *chan);
+int ioat2_alloc_chan_resources(struct dma_chan *c);
+void ioat2_free_chan_resources(struct dma_chan *c);
+enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+				  dma_cookie_t *done, dma_cookie_t *used);
+void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
+bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
 #endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
new file mode 100644
index 000000000000..b223d66b97e9
--- /dev/null
+++ b/drivers/dma/ioat/dma_v3.c
@@ -0,0 +1,367 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Support routines for v3+ hardware
+ */
+
+#include <linux/pci.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include "registers.h"
+#include "hw.h"
+#include "dma.h"
+#include "dma_v2.h"
+
+static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
+			    struct ioat_ring_ent *desc)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	struct pci_dev *pdev = chan->device->pdev;
+	size_t len = desc->len;
+	size_t offset = len - desc->hw->size;
+	struct dma_async_tx_descriptor *tx = &desc->txd;
+	enum dma_ctrl_flags flags = tx->flags;
+
+	switch (desc->hw->ctl_f.op) {
+	case IOAT_OP_COPY:
+		ioat_dma_unmap(chan, flags, len, desc->hw);
+		break;
+	case IOAT_OP_FILL: {
+		struct ioat_fill_descriptor *hw = desc->fill;
+
+		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
+			ioat_unmap(pdev, hw->dst_addr - offset, len,
+				   PCI_DMA_FROMDEVICE, flags, 1);
+		break;
+	}
+	default:
+		dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
+			__func__, desc->hw->ctl_f.op);
+	}
+}
+
+
+static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	struct ioat_ring_ent *desc;
+	bool seen_current = false;
+	u16 active;
+	int i;
+
+	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
+		__func__, ioat->head, ioat->tail, ioat->issued);
+
+	active = ioat2_ring_active(ioat);
+	for (i = 0; i < active && !seen_current; i++) {
+		struct dma_async_tx_descriptor *tx;
+
+		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
+		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
+		dump_desc_dbg(ioat, desc);
+		tx = &desc->txd;
+		if (tx->cookie) {
+			chan->completed_cookie = tx->cookie;
+			ioat3_dma_unmap(ioat, desc);
+			tx->cookie = 0;
+			if (tx->callback) {
+				tx->callback(tx->callback_param);
+				tx->callback = NULL;
+			}
+		}
+
+		if (tx->phys == phys_complete)
+			seen_current = true;
+	}
+	ioat->tail += i;
+	BUG_ON(!seen_current); /* no active descs have written a completion? */
+	chan->last_completion = phys_complete;
+	if (ioat->head == ioat->tail) {
+		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
+			__func__);
+		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
+		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+	}
+}
+
+static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	unsigned long phys_complete;
+
+	prefetch(chan->completion);
+
+	if (!spin_trylock_bh(&chan->cleanup_lock))
+		return;
+
+	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	}
+
+	if (!spin_trylock_bh(&ioat->ring_lock)) {
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	}
+
+	__cleanup(ioat, phys_complete);
+
+	spin_unlock_bh(&ioat->ring_lock);
+	spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static void ioat3_cleanup_tasklet(unsigned long data)
+{
+	struct ioat2_dma_chan *ioat = (void *) data;
+
+	ioat3_cleanup(ioat);
+	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+}
+
+static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	unsigned long phys_complete;
+	u32 status;
+
+	status = ioat_chansts(chan);
+	if (is_ioat_active(status) || is_ioat_idle(status))
+		ioat_suspend(chan);
+	while (is_ioat_active(status) || is_ioat_idle(status)) {
+		status = ioat_chansts(chan);
+		cpu_relax();
+	}
+
+	if (ioat_cleanup_preamble(chan, &phys_complete))
+		__cleanup(ioat, phys_complete);
+
+	__ioat2_restart_chan(ioat);
+}
+
+static void ioat3_timer_event(unsigned long data)
+{
+	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat_chan_common *chan = &ioat->base;
+
+	spin_lock_bh(&chan->cleanup_lock);
+	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
+		unsigned long phys_complete;
+		u64 status;
+
+		spin_lock_bh(&ioat->ring_lock);
+		status = ioat_chansts(chan);
+
+		/* when halted due to errors check for channel
+		 * programming errors before advancing the completion state
+		 */
+		if (is_ioat_halted(status)) {
+			u32 chanerr;
+
+			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+			BUG_ON(is_ioat_bug(chanerr));
+		}
+
+		/* if we haven't made progress and we have already
+		 * acknowledged a pending completion once, then be more
+		 * forceful with a restart
+		 */
+		if (ioat_cleanup_preamble(chan, &phys_complete))
+			__cleanup(ioat, phys_complete);
+		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+			ioat3_restart_channel(ioat);
+		else {
+			set_bit(IOAT_COMPLETION_ACK, &chan->state);
+			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+		}
+		spin_unlock_bh(&ioat->ring_lock);
+	} else {
+		u16 active;
+
+		/* if the ring is idle, empty, and oversized try to step
+		 * down the size
+		 */
+		spin_lock_bh(&ioat->ring_lock);
+		active = ioat2_ring_active(ioat);
+		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
+			reshape_ring(ioat, ioat->alloc_order-1);
+		spin_unlock_bh(&ioat->ring_lock);
+
+		/* keep shrinking until we get back to our minimum
+		 * default size
+		 */
+		if (ioat->alloc_order > ioat_get_alloc_order())
+			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+	}
+	spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static enum dma_status
+ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+		  dma_cookie_t *done, dma_cookie_t *used)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+
+	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+		return DMA_SUCCESS;
+
+	ioat3_cleanup(ioat);
+
+	return ioat_is_complete(c, cookie, done, used);
+}
+
+static struct dma_async_tx_descriptor *
+ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
+		       size_t len, unsigned long flags)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+	struct ioat_ring_ent *desc;
+	size_t total_len = len;
+	struct ioat_fill_descriptor *fill;
+	int num_descs;
+	u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
+	u16 idx;
+	int i;
+
+	num_descs = ioat2_xferlen_to_descs(ioat, len);
+	if (likely(num_descs) &&
+	    ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
+		/* pass */;
+	else
+		return NULL;
+	for (i = 0; i < num_descs; i++) {
+		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
+
+		desc = ioat2_get_ring_ent(ioat, idx + i);
+		fill = desc->fill;
+
+		fill->size = xfer_size;
+		fill->src_data = src_data;
+		fill->dst_addr = dest;
+		fill->ctl = 0;
+		fill->ctl_f.op = IOAT_OP_FILL;
+
+		len -= xfer_size;
+		dest += xfer_size;
+		dump_desc_dbg(ioat, desc);
+	}
+
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+	fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+	fill->ctl_f.compl_write = 1;
+	dump_desc_dbg(ioat, desc);
+
+	/* we leave the channel locked to ensure in order submission */
+	return &desc->txd;
+}
+
+int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
+{
+	struct pci_dev *pdev = device->pdev;
+	struct dma_device *dma;
+	struct dma_chan *c;
+	struct ioat_chan_common *chan;
+	int err;
+	u16 dev_id;
+	u32 cap;
+
+	device->enumerate_channels = ioat2_enumerate_channels;
+	device->cleanup_tasklet = ioat3_cleanup_tasklet;
+	device->timer_fn = ioat3_timer_event;
+	dma = &device->common;
+	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
+	dma->device_issue_pending = ioat2_issue_pending;
+	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
+	dma->device_free_chan_resources = ioat2_free_chan_resources;
+	dma->device_is_tx_complete = ioat3_is_complete;
+	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+	if (cap & IOAT_CAP_FILL_BLOCK) {
+		dma_cap_set(DMA_MEMSET, dma->cap_mask);
+		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
+	}
+
+	/* -= IOAT ver.3 workarounds =- */
+	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
+	 * that can cause stability issues for IOAT ver.3
+	 */
+	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+
+	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+	 * (workaround for spurious config parity error after restart)
+	 */
+	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
+		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+
+	err = ioat_probe(device);
+	if (err)
+		return err;
+	ioat_set_tcp_copy_break(262144);
+
+	list_for_each_entry(c, &dma->channels, device_node) {
+		chan = to_chan_common(c);
+		writel(IOAT_DMA_DCA_ANY_CPU,
+		       chan->reg_base + IOAT_DCACTRL_OFFSET);
+	}
+
+	err = ioat_register(device);
+	if (err)
+		return err;
+	if (dca)
+		device->dca = ioat3_dca_init(pdev, device->reg_base);
+
+	return 0;
+}
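[Editor's note on ioat3_prep_memset_lock() above: the v3.2 fill descriptor consumes a 64-bit source pattern, so the client's single byte value is replicated into all eight byte lanes by multiplying with 0x0101010101010101. A standalone illustration, plain userspace C, purely for clarity:]

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int value = 0xab; /* byte a client passes to prep_dma_memset */
            uint64_t src_data = 0x0101010101010101ULL * (value & 0xff);

            /* prints abababababababab: one copy of the byte per lane */
            printf("%016llx\n", (unsigned long long)src_data);
            return 0;
    }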
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index c4e432269252..0f3ec6e97fe9 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -36,7 +36,7 @@
 #include "hw.h"
 
 MODULE_VERSION(IOAT_DMA_VERSION);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Intel Corporation");
 
 static struct pci_device_id ioat_pci_tbl[] = {