author		Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:42:55 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:42:55 -0400
commit		bf40a6869c9198bdf56fe173961feb89e9f0d961 (patch)
tree		3d1b6bf44647857997113fe1b036fb46e360d8a7 /drivers/dma/ioat/dma_v2.c
parent		2aec048cdc4a5a81163a42a61df903f76a27e737 (diff)
ioat3: split ioat3 support to its own file, add memset
Up until this point the drivers for Intel(R) QuickData Technology engines, specification versions 2 and 3, were mostly identical save for a few quirks. Version 3.2 hardware adds many new capabilities (like raid offload support) requiring some infrastructure that is not relevant for v2. For better code organization of the new functionality, move v3 and v3.2 support to its own file, dma_v3.c, and export some routines from the base files (dma.c and dma_v2.c) that can be reused directly. The first new capability included in this code reorganization is support for v3.2 memset operations.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma/ioat/dma_v2.c')
-rw-r--r--	drivers/dma/ioat/dma_v2.c | 94
1 file changed, 23 insertions(+), 71 deletions(-)
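The net effect on dma_v2.c is to drop "static" from the routines that dma_v3.c will reuse. Those exports imply a matching set of shared declarations; as a minimal sketch, assuming they land in a common header such as drivers/dma/ioat/dma_v2.h (the header itself is outside this diff):

	/* sketch of the shared declarations implied by the de-static'd
	 * symbols in this diff; the real header contents are not shown here
	 */
	extern int ioat_ring_alloc_order;
	int ioat2_enumerate_channels(struct ioatdma_device *device);
	int ioat2_alloc_chan_resources(struct dma_chan *c);
	void ioat2_free_chan_resources(struct dma_chan *c);
	void ioat2_issue_pending(struct dma_chan *chan);
	void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
	int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat,
				 int num_descs);
	bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
	struct dma_async_tx_descriptor *
	ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
				   dma_addr_t dma_src, size_t len,
				   unsigned long flags);
	enum dma_status
	ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
			  dma_cookie_t *done, dma_cookie_t *used);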
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 568923c5ddec..7492e9165e08 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -39,7 +39,7 @@
 #include "registers.h"
 #include "hw.h"
 
-static int ioat_ring_alloc_order = 8;
+int ioat_ring_alloc_order = 8;
 module_param(ioat_ring_alloc_order, int, 0644);
 MODULE_PARM_DESC(ioat_ring_alloc_order,
 		 "ioat2+: allocate 2^n descriptors per channel (default: n=8)");
@@ -63,7 +63,7 @@ static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
 		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
 }
 
-static void ioat2_issue_pending(struct dma_chan *chan)
+void ioat2_issue_pending(struct dma_chan *chan)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);
 
@@ -214,7 +214,7 @@ static void ioat2_cleanup_tasklet(unsigned long data)
 	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
-static void __restart_chan(struct ioat2_dma_chan *ioat)
+void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 
@@ -255,11 +255,9 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 	if (ioat_cleanup_preamble(chan, &phys_complete))
 		__cleanup(ioat, phys_complete);
 
-	__restart_chan(ioat);
+	__ioat2_restart_chan(ioat);
 }
 
-static bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
-
 static void ioat2_timer_event(unsigned long data)
 {
 	struct ioat2_dma_chan *ioat = (void *) data;
@@ -321,7 +319,7 @@ static void ioat2_timer_event(unsigned long data)
  * ioat2_enumerate_channels - find and initialize the device's channels
  * @device: the device to be enumerated
  */
-static int ioat2_enumerate_channels(struct ioatdma_device *device)
+int ioat2_enumerate_channels(struct ioatdma_device *device)
 {
 	struct ioat2_dma_chan *ioat;
 	struct device *dev = &device->pdev->dev;
@@ -354,8 +352,8 @@ static int ioat2_enumerate_channels(struct ioatdma_device *device)
 			break;
 
 		ioat_init_channel(device, &ioat->base, i,
-				  ioat2_timer_event,
-				  ioat2_cleanup_tasklet,
+				  device->timer_fn,
+				  device->cleanup_tasklet,
 				  (unsigned long) ioat);
 		ioat->xfercap_log = xfercap_log;
 		spin_lock_init(&ioat->ring_lock);
@@ -460,7 +458,7 @@ static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gf
 /* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
  * @chan: channel to be initialized
  */
-static int ioat2_alloc_chan_resources(struct dma_chan *c)
+int ioat2_alloc_chan_resources(struct dma_chan *c)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
 	struct ioat_chan_common *chan = &ioat->base;
@@ -514,7 +512,7 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c)
 	return 1 << ioat->alloc_order;
 }
 
-static bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
+bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
 {
 	/* reshape differs from normal ring allocation in that we want
 	 * to allocate a new software ring while only
@@ -627,7 +625,7 @@ static bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
  * @ioat: ioat2,3 channel (ring) to operate on
  * @num_descs: allocation length
  */
-static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
+int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 
@@ -655,9 +653,11 @@ static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_d
 		spin_lock_bh(&chan->cleanup_lock);
 		if (jiffies > chan->timer.expires &&
 		    timer_pending(&chan->timer)) {
+			struct ioatdma_device *device = chan->device;
+
 			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 			spin_unlock_bh(&chan->cleanup_lock);
-			ioat2_timer_event((unsigned long) ioat);
+			device->timer_fn((unsigned long) ioat);
 		} else
 			spin_unlock_bh(&chan->cleanup_lock);
 		return -ENOMEM;
@@ -670,7 +670,7 @@ static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_d
 	return 0;  /* with ioat->ring_lock held */
 }
 
-static struct dma_async_tx_descriptor *
+struct dma_async_tx_descriptor *
 ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
 			   dma_addr_t dma_src, size_t len, unsigned long flags)
 {
@@ -722,11 +722,11 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
  * ioat2_free_chan_resources - release all the descriptors
  * @chan: the channel to be cleaned
  */
-static void ioat2_free_chan_resources(struct dma_chan *c)
+void ioat2_free_chan_resources(struct dma_chan *c)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
 	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *ioatdma_device = chan->device;
+	struct ioatdma_device *device = chan->device;
 	struct ioat_ring_ent *desc;
 	const u16 total_descs = 1 << ioat->alloc_order;
 	int descs;
@@ -740,7 +740,7 @@ static void ioat2_free_chan_resources(struct dma_chan *c)
 
 	tasklet_disable(&chan->cleanup_task);
 	del_timer_sync(&chan->timer);
-	ioat2_cleanup(ioat);
+	device->cleanup_tasklet((unsigned long) ioat);
 
 	/* Delay 100ms after reset to allow internal DMA logic to quiesce
 	 * before removing DMA descriptor resources.
@@ -770,8 +770,7 @@ static void ioat2_free_chan_resources(struct dma_chan *c)
 	kfree(ioat->ring);
 	ioat->ring = NULL;
 	ioat->alloc_order = 0;
-	pci_pool_free(ioatdma_device->completion_pool,
-		      chan->completion,
+	pci_pool_free(device->completion_pool, chan->completion,
 		      chan->completion_dma);
 	spin_unlock_bh(&ioat->ring_lock);
 
@@ -781,16 +780,17 @@ static void ioat2_free_chan_resources(struct dma_chan *c)
 	ioat->dmacount = 0;
 }
 
-static enum dma_status
+enum dma_status
 ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
 		  dma_cookie_t *done, dma_cookie_t *used)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+	struct ioatdma_device *device = ioat->base.device;
 
 	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
 		return DMA_SUCCESS;
 
-	ioat2_cleanup(ioat);
+	device->cleanup_tasklet((unsigned long) ioat);
 
 	return ioat_is_complete(c, cookie, done, used);
 }
@@ -804,6 +804,8 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
 	int err;
 
 	device->enumerate_channels = ioat2_enumerate_channels;
+	device->cleanup_tasklet = ioat2_cleanup_tasklet;
+	device->timer_fn = ioat2_timer_event;
 	dma = &device->common;
 	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
 	dma->device_issue_pending = ioat2_issue_pending;
@@ -830,53 +832,3 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
 
 	return err;
 }
-
-int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
-{
-	struct pci_dev *pdev = device->pdev;
-	struct dma_device *dma;
-	struct dma_chan *c;
-	struct ioat_chan_common *chan;
-	int err;
-	u16 dev_id;
-
-	device->enumerate_channels = ioat2_enumerate_channels;
-	dma = &device->common;
-	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
-	dma->device_issue_pending = ioat2_issue_pending;
-	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
-	dma->device_free_chan_resources = ioat2_free_chan_resources;
-	dma->device_is_tx_complete = ioat2_is_complete;
-
-	/* -= IOAT ver.3 workarounds =- */
-	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
-	 * that can cause stability issues for IOAT ver.3
-	 */
-	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
-
-	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
-	 * (workaround for spurious config parity error after restart)
-	 */
-	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
-	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
-		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
-
-	err = ioat_probe(device);
-	if (err)
-		return err;
-	ioat_set_tcp_copy_break(262144);
-
-	list_for_each_entry(c, &dma->channels, device_node) {
-		chan = to_chan_common(c);
-		writel(IOAT_DMA_DCA_ANY_CPU,
-		       chan->reg_base + IOAT_DCACTRL_OFFSET);
-	}
-
-	err = ioat_register(device);
-	if (err)
-		return err;
-	if (dca)
-		device->dca = ioat3_dca_init(pdev, device->reg_base);
-
-	return err;
-}
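The switch above from calling ioat2_timer_event() and ioat2_cleanup_tasklet() directly to dispatching through device->timer_fn and device->cleanup_tasklet is what lets dma_v3.c plug in v3-specific handlers without duplicating this file. A minimal sketch of the hooks this diff assumes on struct ioatdma_device (the struct is defined in dma.h, outside this diff; surrounding members and field order are illustrative only):

	struct ioatdma_device {
		/* ... existing fields elided ... */
		int (*enumerate_channels)(struct ioatdma_device *device);
		void (*cleanup_tasklet)(unsigned long data);
		void (*timer_fn)(unsigned long data);
	};

ioat2_dma_probe() installs the v2 handlers as shown in the @@ -804 hunk; a v3 probe routine in dma_v3.c can assign its own before channels are enumerated.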