Diffstat (limited to 'drivers')
-rw-r--r--	drivers/dma/Makefile		  2
-rw-r--r--	drivers/dma/ioat.c		196
-rw-r--r--	drivers/dma/ioat_dma.c		180
-rw-r--r--	drivers/dma/ioatdma.h		 20
-rw-r--r--	drivers/dma/ioatdma_hw.h	  2
5 files changed, 254 insertions(+), 146 deletions(-)
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 77bee99df02a..cec0c9c5df65 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,5 +1,5 @@
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
-ioatdma-objs := ioat_dma.o
+ioatdma-objs := ioat.o ioat_dma.o
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
new file mode 100644
index 000000000000..ae5817bfc015
--- /dev/null
+++ b/drivers/dma/ioat.c
@@ -0,0 +1,196 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2007 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+/*
+ * This driver supports an Intel I/OAT DMA engine, which does asynchronous
+ * copy operations.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include "ioatdma.h"
+#include "ioatdma_registers.h"
+#include "ioatdma_hw.h"
+
+MODULE_VERSION("1.24");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+static struct pci_device_id ioat_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
+	{ 0, }
+};
+
+struct ioat_device {
+	struct pci_dev *pdev;
+	void __iomem *iobase;
+	struct ioatdma_device *dma;
+};
+
+static int __devinit ioat_probe(struct pci_dev *pdev,
+				const struct pci_device_id *id);
+#ifdef IOAT_DMA_REMOVE
+static void __devexit ioat_remove(struct pci_dev *pdev);
+#endif
+
+static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase)
+{
+	struct ioat_device *device = pci_get_drvdata(pdev);
+	u8 version;
+	int err = 0;
+
+	version = readb(iobase + IOAT_VER_OFFSET);
+	switch (version) {
+	case IOAT_VER_1_2:
+		device->dma = ioat_dma_probe(pdev, iobase);
+		break;
+	default:
+		err = -ENODEV;
+		break;
+	}
+	return err;
+}
+
+static void ioat_shutdown_functionality(struct pci_dev *pdev)
+{
+	struct ioat_device *device = pci_get_drvdata(pdev);
+
+	if (device->dma) {
+		ioat_dma_remove(device->dma);
+		device->dma = NULL;
+	}
+}
+
+static struct pci_driver ioat_pci_drv = {
+	.name = "ioatdma",
+	.id_table = ioat_pci_tbl,
+	.probe = ioat_probe,
+	.shutdown = ioat_shutdown_functionality,
+#ifdef IOAT_DMA_REMOVE
+	.remove = __devexit_p(ioat_remove),
+#endif
+};
+
+static int __devinit ioat_probe(struct pci_dev *pdev,
+				const struct pci_device_id *id)
+{
+	void __iomem *iobase;
+	struct ioat_device *device;
+	unsigned long mmio_start, mmio_len;
+	int err;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		goto err_enable_device;
+
+	err = pci_request_regions(pdev, ioat_pci_drv.name);
+	if (err)
+		goto err_request_regions;
+
+	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+	if (err)
+		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	if (err)
+		goto err_set_dma_mask;
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+	if (err)
+		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+	if (err)
+		goto err_set_dma_mask;
+
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+	iobase = ioremap(mmio_start, mmio_len);
+	if (!iobase) {
+		err = -ENOMEM;
+		goto err_ioremap;
+	}
+
+	device = kzalloc(sizeof(*device), GFP_KERNEL);
+	if (!device) {
+		err = -ENOMEM;
+		goto err_kzalloc;
+	}
+	device->pdev = pdev;
+	pci_set_drvdata(pdev, device);
+	device->iobase = iobase;
+
+	pci_set_master(pdev);
+
+	err = ioat_setup_functionality(pdev, iobase);
+	if (err)
+		goto err_version;
+
+	return 0;
+
+err_version:
+	kfree(device);
+err_kzalloc:
+	iounmap(iobase);
+err_ioremap:
+err_set_dma_mask:
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+err_request_regions:
+err_enable_device:
+	return err;
+}
+
+#ifdef IOAT_DMA_REMOVE
+/*
+ * It is unsafe to remove this module: if removed while a requested
+ * dma is outstanding, esp. from tcp, it is possible to hang while
+ * waiting for something that will never finish, thus hanging at
+ * least one cpu.  However, if you're feeling lucky and need to do
+ * some testing, this usually works just fine.
+ */
+static void __devexit ioat_remove(struct pci_dev *pdev)
+{
+	struct ioat_device *device = pci_get_drvdata(pdev);
+
+	ioat_shutdown_functionality(pdev);
+
+	iounmap(device->iobase);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+
+	kfree(device);
+}
+#endif
+
+static int __init ioat_init_module(void)
+{
+	return pci_register_driver(&ioat_pci_drv);
+}
+module_init(ioat_init_module);
+
+static void __exit ioat_exit_module(void)
+{
+	pci_unregister_driver(&ioat_pci_drv);
+}
+module_exit(ioat_exit_module);
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 2db05f614843..eef83ea291a3 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -39,19 +39,15 @@
 #define INITIAL_IOAT_DESC_COUNT 128
 
 #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
-#define to_ioat_device(dev) container_of(dev, struct ioat_device, common)
+#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
 
 /* internal functions */
 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
-static int __devinit ioat_probe(struct pci_dev *pdev,
-				const struct pci_device_id *ent);
-static void ioat_shutdown(struct pci_dev *pdev);
-static void __devexit ioat_remove(struct pci_dev *pdev);
 
-static int ioat_dma_enumerate_channels(struct ioat_device *device)
+static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 {
 	u8 xfercap_scale;
 	u32 xfercap;
@@ -158,17 +154,17 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 {
 	struct ioat_dma_descriptor *desc;
 	struct ioat_desc_sw *desc_sw;
-	struct ioat_device *ioat_device;
+	struct ioatdma_device *ioatdma_device;
 	dma_addr_t phys;
 
-	ioat_device = to_ioat_device(ioat_chan->common.device);
-	desc = pci_pool_alloc(ioat_device->dma_pool, flags, &phys);
+	ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
+	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
 	if (unlikely(!desc))
 		return NULL;
 
 	desc_sw = kzalloc(sizeof(*desc_sw), flags);
 	if (unlikely(!desc_sw)) {
-		pci_pool_free(ioat_device->dma_pool, desc, phys);
+		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
 		return NULL;
 	}
 
@@ -245,9 +241,8 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-	struct ioat_device *ioat_device = to_ioat_device(chan->device);
+	struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
 	struct ioat_desc_sw *desc, *_desc;
-	u16 chanctrl;
 	int in_use_descs = 0;
 
 	ioat_dma_memcpy_cleanup(ioat_chan);
@@ -258,19 +253,19 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
 		in_use_descs++;
 		list_del(&desc->node);
-		pci_pool_free(ioat_device->dma_pool, desc->hw,
+		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
 			      desc->async_tx.phys);
 		kfree(desc);
 	}
 	list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
 		list_del(&desc->node);
-		pci_pool_free(ioat_device->dma_pool, desc->hw,
+		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
 			      desc->async_tx.phys);
 		kfree(desc);
 	}
 	spin_unlock_bh(&ioat_chan->desc_lock);
 
-	pci_pool_free(ioat_device->completion_pool,
+	pci_pool_free(ioatdma_device->completion_pool,
 		      ioat_chan->completion_virt,
 		      ioat_chan->completion_addr);
 
@@ -514,25 +509,9 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
 
 /* PCI API */
 
-static struct pci_device_id ioat_pci_tbl[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
-	{ 0, }
-};
-
-static struct pci_driver ioat_pci_driver = {
-	.name = "ioatdma",
-	.id_table = ioat_pci_tbl,
-	.probe = ioat_probe,
-	.shutdown = ioat_shutdown,
-	.remove = __devexit_p(ioat_remove),
-};
-
 static irqreturn_t ioat_do_interrupt(int irq, void *data)
 {
-	struct ioat_device *instance = data;
+	struct ioatdma_device *instance = data;
 	unsigned long attnstatus;
 	u8 intrctrl;
 
@@ -592,7 +571,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
  */
 #define IOAT_TEST_SIZE 2000
 
-static int ioat_self_test(struct ioat_device *device)
+static int ioat_self_test(struct ioatdma_device *device)
 {
 	int i;
 	u8 *src;
@@ -660,46 +639,25 @@ out:
 	return err;
 }
 
-static int __devinit ioat_probe(struct pci_dev *pdev,
-				const struct pci_device_id *ent)
+struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
+				      void __iomem *iobase)
 {
 	int err;
-	unsigned long mmio_start, mmio_len;
-	void __iomem *reg_base;
-	struct ioat_device *device;
-
-	err = pci_enable_device(pdev);
-	if (err)
-		goto err_enable_device;
-
-	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
-	if (err)
-		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
-	if (err)
-		goto err_set_dma_mask;
-
-	err = pci_request_regions(pdev, ioat_pci_driver.name);
-	if (err)
-		goto err_request_regions;
-
-	mmio_start = pci_resource_start(pdev, 0);
-	mmio_len = pci_resource_len(pdev, 0);
-
-	reg_base = ioremap(mmio_start, mmio_len);
-	if (!reg_base) {
-		err = -ENOMEM;
-		goto err_ioremap;
-	}
+	struct ioatdma_device *device;
 
 	device = kzalloc(sizeof(*device), GFP_KERNEL);
 	if (!device) {
 		err = -ENOMEM;
 		goto err_kzalloc;
 	}
+	device->pdev = pdev;
+	device->reg_base = iobase;
+	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
 
 	/* DMA coherent memory pool for DMA descriptor allocations */
 	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
-					   sizeof(struct ioat_dma_descriptor), 64, 0);
+					   sizeof(struct ioat_dma_descriptor),
+					   64, 0);
 	if (!device->dma_pool) {
 		err = -ENOMEM;
 		goto err_dma_pool;
@@ -713,26 +671,6 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
 		goto err_completion_pool;
 	}
 
-	device->pdev = pdev;
-	pci_set_drvdata(pdev, device);
-#ifdef CONFIG_PCI_MSI
-	if (pci_enable_msi(pdev) == 0) {
-		device->msi = 1;
-	} else {
-		device->msi = 0;
-	}
-#endif
-	err = request_irq(pdev->irq, &ioat_do_interrupt, IRQF_SHARED, "ioat",
-			  device);
-	if (err)
-		goto err_irq;
-
-	device->reg_base = reg_base;
-
-	writeb(IOAT_INTRCTRL_MASTER_INT_EN,
-	       device->reg_base + IOAT_INTRCTRL_OFFSET);
-	pci_set_master(pdev);
-
 	INIT_LIST_HEAD(&device->common.channels);
 	ioat_dma_enumerate_channels(device);
 
@@ -746,9 +684,19 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
 	device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
 	device->common.device_dependency_added = ioat_dma_dependency_added;
 	device->common.dev = &pdev->dev;
-	printk(KERN_INFO
-	       "ioatdma: Intel(R) I/OAT DMA Engine found, %d channels\n",
-	       device->common.chancnt);
+	printk(KERN_INFO "ioatdma: Intel(R) I/OAT DMA Engine found,"
+	       " %d channels, device version 0x%02x\n",
+	       device->common.chancnt, device->version);
+
+	pci_set_drvdata(pdev, device);
+	err = request_irq(pdev->irq, &ioat_do_interrupt, IRQF_SHARED, "ioat",
+			  device);
+	if (err)
+		goto err_irq;
+
+	writeb(IOAT_INTRCTRL_MASTER_INT_EN,
+	       device->reg_base + IOAT_INTRCTRL_OFFSET);
+	pci_set_master(pdev);
 
 	err = ioat_self_test(device);
 	if (err)
@@ -756,9 +704,10 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
 
 	dma_async_device_register(&device->common);
 
-	return 0;
+	return device;
 
 err_self_test:
+	free_irq(device->pdev->irq, device);
 err_irq:
 	pci_pool_destroy(device->completion_pool);
 err_completion_pool:
@@ -766,47 +715,24 @@ err_completion_pool:
 err_dma_pool:
 	kfree(device);
 err_kzalloc:
-	iounmap(reg_base);
-err_ioremap:
-	pci_release_regions(pdev);
-err_request_regions:
-err_set_dma_mask:
-	pci_disable_device(pdev);
-err_enable_device:
-
-	printk(KERN_INFO
-	       "ioatdma: Intel(R) I/OAT DMA Engine initialization failed\n");
-
-	return err;
-}
-
-static void ioat_shutdown(struct pci_dev *pdev)
-{
-	struct ioat_device *device;
-	device = pci_get_drvdata(pdev);
-
-	dma_async_device_unregister(&device->common);
+	iounmap(iobase);
+	printk(KERN_ERR
+	       "ioatdma: Intel(R) I/OAT DMA Engine initialization failed\n");
+	return NULL;
 }
 
-static void __devexit ioat_remove(struct pci_dev *pdev)
+void ioat_dma_remove(struct ioatdma_device *device)
 {
-	struct ioat_device *device;
 	struct dma_chan *chan, *_chan;
 	struct ioat_dma_chan *ioat_chan;
 
-	device = pci_get_drvdata(pdev);
 	dma_async_device_unregister(&device->common);
 
 	free_irq(device->pdev->irq, device);
-#ifdef CONFIG_PCI_MSI
-	if (device->msi)
-		pci_disable_msi(device->pdev);
-#endif
+
 	pci_pool_destroy(device->dma_pool);
 	pci_pool_destroy(device->completion_pool);
-	iounmap(device->reg_base);
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
+
 	list_for_each_entry_safe(chan, _chan,
 				 &device->common.channels, device_node) {
 		ioat_chan = to_ioat_chan(chan);
@@ -816,25 +742,3 @@ static void __devexit ioat_remove(struct pci_dev *pdev)
 	kfree(device);
 }
 
-/* MODULE API */
-MODULE_VERSION("1.9");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Intel Corporation");
-
-static int __init ioat_init_module(void)
-{
-	/* it's currently unsafe to unload this module */
-	/* if forced, worst case is that rmmod hangs */
-	__unsafe(THIS_MODULE);
-
-	return pci_register_driver(&ioat_pci_driver);
-}
-
-module_init(ioat_init_module);
-
-static void __exit ioat_exit_module(void)
-{
-	pci_unregister_driver(&ioat_pci_driver);
-}
-
-module_exit(ioat_exit_module);
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
index bf4dad70e0f5..0b8ffbde1e61 100644
--- a/drivers/dma/ioatdma.h
+++ b/drivers/dma/ioatdma.h
@@ -31,22 +31,21 @@
 #define IOAT_LOW_COMPLETION_MASK 0xffffffc0
 
 /**
- * struct ioat_device - internal representation of a IOAT device
+ * struct ioatdma_device - internal representation of a IOAT device
  * @pdev: PCI-Express device
  * @reg_base: MMIO register space base address
  * @dma_pool: for allocating DMA descriptors
  * @common: embedded struct dma_device
- * @msi: Message Signaled Interrupt number
+ * @version: version of ioatdma device
  */
 
-struct ioat_device {
+struct ioatdma_device {
 	struct pci_dev *pdev;
 	void __iomem *reg_base;
 	struct pci_pool *dma_pool;
 	struct pci_pool *completion_pool;
-
 	struct dma_device common;
-	u8 msi;
+	u8 version;
 };
 
 /**
@@ -84,7 +83,7 @@ struct ioat_dma_chan {
 
 	int pending;
 
-	struct ioat_device *device;
+	struct ioatdma_device *device;
 	struct dma_chan common;
 
 	dma_addr_t completion_addr;
@@ -117,4 +116,13 @@ struct ioat_desc_sw {
 	struct dma_async_tx_descriptor async_tx;
 };
 
+#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE)
+struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
+				      void __iomem *iobase);
+void ioat_dma_remove(struct ioatdma_device *device);
+#else
+#define ioat_dma_probe(pdev, iobase)	NULL
+#define ioat_dma_remove(device)		do { } while (0)
+#endif
+
 #endif /* IOATDMA_H */
diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h
index 4d7a12880be3..9e7434e1551f 100644
--- a/drivers/dma/ioatdma_hw.h
+++ b/drivers/dma/ioatdma_hw.h
@@ -27,7 +27,7 @@
 #define IOAT_PCI_RID			0x00
 #define IOAT_PCI_SVID			0x8086
 #define IOAT_PCI_SID			0x8086
-#define IOAT_VER			0x12	/* Version 1.2 */
+#define IOAT_VER_1_2			0x12	/* Version 1.2 */
 
 struct ioat_dma_descriptor {
 	uint32_t size;