author    Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:29:44 -0400
committer Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:29:44 -0400
commit    e6c0b69a43150c1a37cf342ce5faedf12583bf79 (patch)
tree      955456982fea62d6557ad5992f19ee3e73e64bc2 /drivers/dma
parent    1f27adc2f050836c12deb4d99afe507636537a0b (diff)
ioat: convert ioat_probe to pcim/devm
The driver currently duplicates much of what these routines offer, so just use the common code. For example, ->irq_mode tracks what interrupt mode was initialized, which duplicates the ->msix_enabled and ->msi_enabled handling in pcim_release.

This also adds a check of the return value of dma_async_device_register, which can fail.

Signed-off-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
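For readers who have not used the managed-resource helpers this patch converts to, below is a minimal sketch of the pcim_*/devm_* probe pattern in isolation. The foo_* names and struct foo_device are hypothetical, not taken from this driver; the calls themselves (pcim_enable_device, pcim_iomap_regions, pcim_iomap_table, devm_kzalloc, devm_request_irq) are the same ones the patch uses. Everything acquired this way is released by the driver core when probe fails or the device is unbound, which is what lets the patch drop the hand-rolled unwind labels and the ->irq_mode bookkeeping.

/* Sketch only: hypothetical foo driver showing the managed pattern. */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

struct foo_device {
	void __iomem *regs;	/* BAR 0 registers */
};

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	/* Acknowledge/handle the interrupt here. */
	return IRQ_HANDLED;
}

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct foo_device *fd;
	int err;

	/* Managed enable: undone automatically on failure or unbind. */
	err = pcim_enable_device(pdev);
	if (err)
		return err;

	/* Request and ioremap BAR 0 in one managed step. */
	err = pcim_iomap_regions(pdev, 1 << 0, "foo");
	if (err)
		return err;

	/* Memory freed by the core; no kfree() or error labels needed. */
	fd = devm_kzalloc(dev, sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return -ENOMEM;
	fd->regs = pcim_iomap_table(pdev)[0];
	pci_set_drvdata(pdev, fd);

	/* IRQ released by the core, so no free_irq() in an unwind path. */
	return devm_request_irq(dev, pdev->irq, foo_irq_handler,
				IRQF_SHARED, "foo", fd);
}

Resources the core does not manage still need explicit handling, which is why the patch keeps calls such as pci_disable_msix()/pci_disable_msi() when falling back to a simpler interrupt mode.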
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/ioat/dma.c | 130
-rw-r--r--  drivers/dma/ioat/dma.h |  11
-rw-r--r--  drivers/dma/ioat/hw.h  |   1
-rw-r--r--  drivers/dma/ioat/pci.c |  67
4 files changed, 68 insertions(+), 141 deletions(-)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 16c080786a65..65f8b7492a4d 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -121,6 +121,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	u32 xfercap;
 	int i;
 	struct ioat_dma_chan *ioat_chan;
+	struct device *dev = &device->pdev->dev;
 
 	/*
 	 * IOAT ver.3 workarounds
@@ -164,7 +165,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	}
 #endif
 	for (i = 0; i < device->common.chancnt; i++) {
-		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
+		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
 		if (!ioat_chan) {
 			device->common.chancnt = i;
 			break;
@@ -1450,7 +1451,11 @@ MODULE_PARM_DESC(ioat_interrupt_style,
 static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
 {
 	struct ioat_dma_chan *ioat_chan;
-	int err, i, j, msixcnt;
+	struct pci_dev *pdev = device->pdev;
+	struct device *dev = &pdev->dev;
+	struct msix_entry *msix;
+	int i, j, msixcnt;
+	int err = -EINVAL;
 	u8 intrctrl = 0;
 
 	if (!strcmp(ioat_interrupt_style, "msix"))
@@ -1461,8 +1466,7 @@ static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
 		goto msi;
 	if (!strcmp(ioat_interrupt_style, "intx"))
 		goto intx;
-	dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
-		ioat_interrupt_style);
+	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
 	goto err_no_irq;
 
 msix:
@@ -1471,55 +1475,55 @@ msix:
 	for (i = 0; i < msixcnt; i++)
 		device->msix_entries[i].entry = i;
 
-	err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
+	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
 	if (err < 0)
 		goto msi;
 	if (err > 0)
 		goto msix_single_vector;
 
 	for (i = 0; i < msixcnt; i++) {
+		msix = &device->msix_entries[i];
 		ioat_chan = ioat_lookup_chan_by_index(device, i);
-		err = request_irq(device->msix_entries[i].vector,
-				  ioat_dma_do_interrupt_msix,
-				  0, "ioat-msix", ioat_chan);
+		err = devm_request_irq(dev, msix->vector,
+				       ioat_dma_do_interrupt_msix, 0,
+				       "ioat-msix", ioat_chan);
 		if (err) {
 			for (j = 0; j < i; j++) {
+				msix = &device->msix_entries[j];
 				ioat_chan =
 					ioat_lookup_chan_by_index(device, j);
-				free_irq(device->msix_entries[j].vector,
-					 ioat_chan);
+				devm_free_irq(dev, msix->vector, ioat_chan);
 			}
 			goto msix_single_vector;
 		}
 	}
 	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
-	device->irq_mode = msix_multi_vector;
 	goto done;
 
 msix_single_vector:
-	device->msix_entries[0].entry = 0;
-	err = pci_enable_msix(device->pdev, device->msix_entries, 1);
+	msix = &device->msix_entries[0];
+	msix->entry = 0;
+	err = pci_enable_msix(pdev, device->msix_entries, 1);
 	if (err)
 		goto msi;
 
-	err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
-			  0, "ioat-msix", device);
+	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
+			       "ioat-msix", device);
 	if (err) {
-		pci_disable_msix(device->pdev);
+		pci_disable_msix(pdev);
 		goto msi;
 	}
-	device->irq_mode = msix_single_vector;
 	goto done;
 
 msi:
-	err = pci_enable_msi(device->pdev);
+	err = pci_enable_msi(pdev);
 	if (err)
 		goto intx;
 
-	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
-			  0, "ioat-msi", device);
+	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
+			       "ioat-msi", device);
 	if (err) {
-		pci_disable_msi(device->pdev);
+		pci_disable_msi(pdev);
 		goto intx;
 	}
 	/*
@@ -1527,21 +1531,17 @@ msi:
 	 */
 	if (device->version == IOAT_VER_1_2) {
 		u32 dmactrl;
-		pci_read_config_dword(device->pdev,
-				      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
+		pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
 		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
-		pci_write_config_dword(device->pdev,
-				       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
+		pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
 	}
-	device->irq_mode = msi;
 	goto done;
 
 intx:
-	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
-			  IRQF_SHARED, "ioat-intx", device);
+	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
+			       IRQF_SHARED, "ioat-intx", device);
 	if (err)
 		goto err_no_irq;
-	device->irq_mode = intx;
 
 done:
 	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
@@ -1551,60 +1551,26 @@ done:
 err_no_irq:
 	/* Disable all interrupt generation */
 	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
-	dev_err(&device->pdev->dev, "no usable interrupts\n");
-	device->irq_mode = none;
-	return -1;
+	dev_err(dev, "no usable interrupts\n");
+	return err;
 }
 
-/**
- * ioat_dma_remove_interrupts - remove whatever interrupts were set
- * @device: ioat device
- */
-static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
+static void ioat_disable_interrupts(struct ioatdma_device *device)
 {
-	struct ioat_dma_chan *ioat_chan;
-	int i;
-
 	/* Disable all interrupt generation */
 	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
-
-	switch (device->irq_mode) {
-	case msix_multi_vector:
-		for (i = 0; i < device->common.chancnt; i++) {
-			ioat_chan = ioat_lookup_chan_by_index(device, i);
-			free_irq(device->msix_entries[i].vector, ioat_chan);
-		}
-		pci_disable_msix(device->pdev);
-		break;
-	case msix_single_vector:
-		free_irq(device->msix_entries[0].vector, device);
-		pci_disable_msix(device->pdev);
-		break;
-	case msi:
-		free_irq(device->pdev->irq, device);
-		pci_disable_msi(device->pdev);
-		break;
-	case intx:
-		free_irq(device->pdev->irq, device);
-		break;
-	case none:
-		dev_warn(&device->pdev->dev,
-			 "call to %s without interrupts setup\n", __func__);
-	}
-	device->irq_mode = none;
 }
 
 struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 				      void __iomem *iobase)
 {
 	int err;
+	struct device *dev = &pdev->dev;
 	struct ioatdma_device *device;
 
-	device = kzalloc(sizeof(*device), GFP_KERNEL);
-	if (!device) {
+	device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
+	if (!device)
 		err = -ENOMEM;
-		goto err_kzalloc;
-	}
 	device->pdev = pdev;
 	device->reg_base = iobase;
 	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
@@ -1651,14 +1617,12 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 		break;
 	}
 
-	dev_err(&device->pdev->dev,
-		"Intel(R) I/OAT DMA Engine found,"
+	dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
 		" %d channels, device version 0x%02x, driver version %s\n",
 		device->common.chancnt, device->version, IOAT_DMA_VERSION);
 
 	if (!device->common.chancnt) {
-		dev_err(&device->pdev->dev,
-			"Intel(R) I/OAT DMA Engine problem found: "
+		dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
 			"zero channels detected\n");
 		goto err_setup_interrupts;
 	}
@@ -1671,9 +1635,11 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_self_test;
 
-	ioat_set_tcp_copy_break(device);
+	err = dma_async_device_register(&device->common);
+	if (err)
+		goto err_self_test;
 
-	dma_async_device_register(&device->common);
+	ioat_set_tcp_copy_break(device);
 
 	if (device->version != IOAT_VER_3_0) {
 		INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
@@ -1684,16 +1650,12 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 	return device;
 
 err_self_test:
-	ioat_dma_remove_interrupts(device);
+	ioat_disable_interrupts(device);
 err_setup_interrupts:
 	pci_pool_destroy(device->completion_pool);
 err_completion_pool:
 	pci_pool_destroy(device->dma_pool);
 err_dma_pool:
-	kfree(device);
-err_kzalloc:
-	dev_err(&pdev->dev,
-		"Intel(R) I/OAT DMA Engine initialization failed\n");
 	return NULL;
 }
 
@@ -1705,23 +1667,17 @@ void ioat_dma_remove(struct ioatdma_device *device)
 	if (device->version != IOAT_VER_3_0)
 		cancel_delayed_work(&device->work);
 
-	ioat_dma_remove_interrupts(device);
+	ioat_disable_interrupts(device);
 
 	dma_async_device_unregister(&device->common);
 
 	pci_pool_destroy(device->dma_pool);
 	pci_pool_destroy(device->completion_pool);
 
-	iounmap(device->reg_base);
-	pci_release_regions(device->pdev);
-	pci_disable_device(device->pdev);
-
 	list_for_each_entry_safe(chan, _chan,
 				 &device->common.channels, device_node) {
 		ioat_chan = to_ioat_chan(chan);
 		list_del(&chan->device_node);
-		kfree(ioat_chan);
 	}
-	kfree(device);
 }
 
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index ccb400f5e279..5e8d7cfabc21 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -31,14 +31,6 @@
 
 #define IOAT_DMA_VERSION "3.64"
 
-enum ioat_interrupt {
-	none = 0,
-	msix_multi_vector = 1,
-	msix_single_vector = 2,
-	msi = 3,
-	intx = 4,
-};
-
 #define IOAT_LOW_COMPLETION_MASK	0xffffffc0
 #define IOAT_DMA_DCA_ANY_CPU		~0
 #define IOAT_WATCHDOG_PERIOD		(2 * HZ)
@@ -59,7 +51,6 @@ enum ioat_interrupt {
  */
 #define NULL_DESC_BUFFER_SIZE 1
 
-
 /**
  * struct ioatdma_device - internal representation of a IOAT device
  * @pdev: PCI-Express device
@@ -67,7 +58,6 @@ enum ioat_interrupt {
  * @dma_pool: for allocating DMA descriptors
  * @common: embedded struct dma_device
  * @version: version of ioatdma device
- * @irq_mode: which style irq to use
  * @msix_entries: irq handlers
  * @idx: per channel data
  */
@@ -79,7 +69,6 @@ struct ioatdma_device {
 	struct pci_pool *completion_pool;
 	struct dma_device common;
 	u8 version;
-	enum ioat_interrupt irq_mode;
 	struct delayed_work work;
 	struct msix_entry msix_entries[4];
 	struct ioat_dma_chan *idx[4];
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index afa57eef86c9..1438fa5c4d1a 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -23,6 +23,7 @@
 
 /* PCI Configuration Space Values */
 #define IOAT_PCI_VID		0x8086
+#define IOAT_MMIO_BAR		0
 
 /* CB device ID's */
 #define IOAT_PCI_DID_5000	0x1A38
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index d7948bfd8fba..982e38fd177c 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -62,7 +62,6 @@ static struct pci_device_id ioat_pci_tbl[] = {
 
 struct ioat_device {
 	struct pci_dev *pdev;
-	void __iomem *iobase;
 	struct ioatdma_device *dma;
 	struct dca_provider *dca;
 };
@@ -75,8 +74,10 @@ static int ioat_dca_enabled = 1;
 module_param(ioat_dca_enabled, int, 0644);
 MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
 
+#define DRV_NAME "ioatdma"
+
 static struct pci_driver ioat_pci_driver = {
-	.name		= "ioatdma",
+	.name		= DRV_NAME,
 	.id_table	= ioat_pci_tbl,
 	.probe		= ioat_probe,
 	.remove		= __devexit_p(ioat_remove),
@@ -85,47 +86,42 @@ static struct pci_driver ioat_pci_driver = {
 static int __devinit ioat_probe(struct pci_dev *pdev,
 				const struct pci_device_id *id)
 {
+	void __iomem * const *iomap;
 	void __iomem *iobase;
+	struct device *dev = &pdev->dev;
 	struct ioat_device *device;
-	unsigned long mmio_start, mmio_len;
 	int err;
 
-	err = pci_enable_device(pdev);
+	err = pcim_enable_device(pdev);
 	if (err)
-		goto err_enable_device;
+		return err;
 
-	err = pci_request_regions(pdev, ioat_pci_driver.name);
+	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
 	if (err)
-		goto err_request_regions;
+		return err;
+	iomap = pcim_iomap_table(pdev);
+	if (!iomap)
+		return -ENOMEM;
 
 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (err)
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (err)
-		goto err_set_dma_mask;
+		return err;
 
 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (err)
 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (err)
-		goto err_set_dma_mask;
+		return err;
 
-	mmio_start = pci_resource_start(pdev, 0);
-	mmio_len = pci_resource_len(pdev, 0);
-	iobase = ioremap(mmio_start, mmio_len);
-	if (!iobase) {
-		err = -ENOMEM;
-		goto err_ioremap;
-	}
+	device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
+	if (!device)
+		return -ENOMEM;
 
-	device = kzalloc(sizeof(*device), GFP_KERNEL);
-	if (!device) {
-		err = -ENOMEM;
-		goto err_kzalloc;
-	}
 	device->pdev = pdev;
 	pci_set_drvdata(pdev, device);
-	device->iobase = iobase;
+	iobase = iomap[IOAT_MMIO_BAR];
 
 	pci_set_master(pdev);
 
@@ -146,28 +142,15 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
 		device->dca = ioat3_dca_init(pdev, iobase);
 		break;
 	default:
-		err = -ENODEV;
-		break;
+		return -ENODEV;
 	}
-	if (!device->dma)
-		err = -ENODEV;
 
-	if (err)
-		goto err_version;
+	if (!device->dma) {
+		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
+		return -ENODEV;
+	}
 
 	return 0;
-
-err_version:
-	kfree(device);
-err_kzalloc:
-	iounmap(iobase);
-err_ioremap:
-err_set_dma_mask:
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
-err_request_regions:
-err_enable_device:
-	return err;
 }
 
 static void __devexit ioat_remove(struct pci_dev *pdev)
@@ -185,8 +168,6 @@ static void __devexit ioat_remove(struct pci_dev *pdev)
 		ioat_dma_remove(device->dma);
 		device->dma = NULL;
 	}
-
-	kfree(device);
 }
 
 static int __init ioat_init_module(void)