author     Dan Williams <dan.j.williams@intel.com>    2009-09-08 20:29:44 -0400
committer  Dan Williams <dan.j.williams@intel.com>    2009-09-08 20:29:44 -0400
commit     e6c0b69a43150c1a37cf342ce5faedf12583bf79
tree       955456982fea62d6557ad5992f19ee3e73e64bc2
parent     1f27adc2f050836c12deb4d99afe507636537a0b
ioat: convert ioat_probe to pcim/devm
The driver currently duplicates much of what these routines offer, so just
use the common code.  For example ->irq_mode tracks what interrupt mode was
initialized, which duplicates the ->msix_enabled and ->msi_enabled handling
in pcim_release.  This also adds a check on the return value of
dma_async_device_register, which can fail.

Signed-off-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma/ioat/dma.c')
-rw-r--r--  drivers/dma/ioat/dma.c  |  130
1 files changed, 43 insertions, 87 deletions
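
For context before the diff, here is a minimal sketch of the pcim/devm pattern this patch converts to. The example_dev/example_probe/example_irq names are illustrative only and are not part of the ioat driver. Resources obtained through devm_kzalloc(), devm_request_irq() and pcim_enable_device() are released by the driver core when probe fails or when the device is unbound, which is why the explicit kfree()/free_irq() unwind paths and the ->irq_mode bookkeeping can be dropped.

	/* Illustrative sketch only -- not ioat code. */
	#include <linux/device.h>
	#include <linux/interrupt.h>
	#include <linux/pci.h>
	#include <linux/slab.h>

	struct example_dev {
		void __iomem *regs;	/* hypothetical register mapping */
	};

	static irqreturn_t example_irq(int irq, void *data)
	{
		/* acknowledge the hardware here */
		return IRQ_HANDLED;
	}

	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		struct device *dev = &pdev->dev;
		struct example_dev *ed;
		int err;

		/* devm allocation: freed automatically on probe failure or unbind */
		ed = devm_kzalloc(dev, sizeof(*ed), GFP_KERNEL);
		if (!ed)
			return -ENOMEM;

		/* managed enable: pcim_release() undoes it, including MSI/MSI-X state */
		err = pcim_enable_device(pdev);
		if (err)
			return err;

		/* managed IRQ: no free_irq() needed in any unwind path */
		err = devm_request_irq(dev, pdev->irq, example_irq, IRQF_SHARED,
				       "example", ed);
		if (err)
			return err;

		pci_set_drvdata(pdev, ed);
		return 0;
	}

Managed resources are released in reverse order of registration, so the interrupt handler is freed before pcim_release() tears down the MSI/MSI-X state it tracks.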
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 16c080786a65..65f8b7492a4d 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -121,6 +121,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	u32 xfercap;
 	int i;
 	struct ioat_dma_chan *ioat_chan;
+	struct device *dev = &device->pdev->dev;
 
 	/*
 	 * IOAT ver.3 workarounds
@@ -164,7 +165,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	}
 #endif
 	for (i = 0; i < device->common.chancnt; i++) {
-		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
+		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
 		if (!ioat_chan) {
 			device->common.chancnt = i;
 			break;
@@ -1450,7 +1451,11 @@ MODULE_PARM_DESC(ioat_interrupt_style,
 static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
 {
 	struct ioat_dma_chan *ioat_chan;
-	int err, i, j, msixcnt;
+	struct pci_dev *pdev = device->pdev;
+	struct device *dev = &pdev->dev;
+	struct msix_entry *msix;
+	int i, j, msixcnt;
+	int err = -EINVAL;
 	u8 intrctrl = 0;
 
 	if (!strcmp(ioat_interrupt_style, "msix"))
@@ -1461,8 +1466,7 @@ static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
 		goto msi;
 	if (!strcmp(ioat_interrupt_style, "intx"))
 		goto intx;
-	dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
-		ioat_interrupt_style);
+	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
 	goto err_no_irq;
 
 msix:
@@ -1471,55 +1475,55 @@ msix:
 	for (i = 0; i < msixcnt; i++)
 		device->msix_entries[i].entry = i;
 
-	err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
+	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
 	if (err < 0)
 		goto msi;
 	if (err > 0)
 		goto msix_single_vector;
 
 	for (i = 0; i < msixcnt; i++) {
+		msix = &device->msix_entries[i];
 		ioat_chan = ioat_lookup_chan_by_index(device, i);
-		err = request_irq(device->msix_entries[i].vector,
-				  ioat_dma_do_interrupt_msix,
-				  0, "ioat-msix", ioat_chan);
+		err = devm_request_irq(dev, msix->vector,
+				       ioat_dma_do_interrupt_msix, 0,
+				       "ioat-msix", ioat_chan);
 		if (err) {
 			for (j = 0; j < i; j++) {
+				msix = &device->msix_entries[j];
 				ioat_chan =
 					ioat_lookup_chan_by_index(device, j);
-				free_irq(device->msix_entries[j].vector,
-					 ioat_chan);
+				devm_free_irq(dev, msix->vector, ioat_chan);
 			}
 			goto msix_single_vector;
 		}
 	}
 	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
-	device->irq_mode = msix_multi_vector;
 	goto done;
 
 msix_single_vector:
-	device->msix_entries[0].entry = 0;
-	err = pci_enable_msix(device->pdev, device->msix_entries, 1);
+	msix = &device->msix_entries[0];
+	msix->entry = 0;
+	err = pci_enable_msix(pdev, device->msix_entries, 1);
 	if (err)
 		goto msi;
 
-	err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
-			  0, "ioat-msix", device);
+	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
+			       "ioat-msix", device);
 	if (err) {
-		pci_disable_msix(device->pdev);
+		pci_disable_msix(pdev);
 		goto msi;
 	}
-	device->irq_mode = msix_single_vector;
 	goto done;
 
 msi:
-	err = pci_enable_msi(device->pdev);
+	err = pci_enable_msi(pdev);
 	if (err)
 		goto intx;
 
-	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
-			  0, "ioat-msi", device);
+	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
+			       "ioat-msi", device);
 	if (err) {
-		pci_disable_msi(device->pdev);
+		pci_disable_msi(pdev);
 		goto intx;
 	}
 	/*
@@ -1527,21 +1531,17 @@ msi:
 	 */
 	if (device->version == IOAT_VER_1_2) {
 		u32 dmactrl;
-		pci_read_config_dword(device->pdev,
-				      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
+		pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
 		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
-		pci_write_config_dword(device->pdev,
-				       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
+		pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
 	}
-	device->irq_mode = msi;
 	goto done;
 
 intx:
-	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
-			  IRQF_SHARED, "ioat-intx", device);
+	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
+			       IRQF_SHARED, "ioat-intx", device);
 	if (err)
 		goto err_no_irq;
-	device->irq_mode = intx;
 
 done:
 	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
@@ -1551,60 +1551,26 @@ done:
 err_no_irq:
 	/* Disable all interrupt generation */
 	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
-	dev_err(&device->pdev->dev, "no usable interrupts\n");
-	device->irq_mode = none;
-	return -1;
+	dev_err(dev, "no usable interrupts\n");
+	return err;
 }
 
-/**
- * ioat_dma_remove_interrupts - remove whatever interrupts were set
- * @device: ioat device
- */
-static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
+static void ioat_disable_interrupts(struct ioatdma_device *device)
 {
-	struct ioat_dma_chan *ioat_chan;
-	int i;
-
 	/* Disable all interrupt generation */
 	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
-
-	switch (device->irq_mode) {
-	case msix_multi_vector:
-		for (i = 0; i < device->common.chancnt; i++) {
-			ioat_chan = ioat_lookup_chan_by_index(device, i);
-			free_irq(device->msix_entries[i].vector, ioat_chan);
-		}
-		pci_disable_msix(device->pdev);
-		break;
-	case msix_single_vector:
-		free_irq(device->msix_entries[0].vector, device);
-		pci_disable_msix(device->pdev);
-		break;
-	case msi:
-		free_irq(device->pdev->irq, device);
-		pci_disable_msi(device->pdev);
-		break;
-	case intx:
-		free_irq(device->pdev->irq, device);
-		break;
-	case none:
-		dev_warn(&device->pdev->dev,
-			 "call to %s without interrupts setup\n", __func__);
-	}
-	device->irq_mode = none;
 }
 
 struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 				      void __iomem *iobase)
 {
 	int err;
+	struct device *dev = &pdev->dev;
 	struct ioatdma_device *device;
 
-	device = kzalloc(sizeof(*device), GFP_KERNEL);
-	if (!device) {
+	device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
+	if (!device)
 		err = -ENOMEM;
-		goto err_kzalloc;
-	}
 	device->pdev = pdev;
 	device->reg_base = iobase;
 	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
@@ -1651,14 +1617,12 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 		break;
 	}
 
-	dev_err(&device->pdev->dev,
-		"Intel(R) I/OAT DMA Engine found,"
+	dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
 		" %d channels, device version 0x%02x, driver version %s\n",
 		device->common.chancnt, device->version, IOAT_DMA_VERSION);
 
 	if (!device->common.chancnt) {
-		dev_err(&device->pdev->dev,
-			"Intel(R) I/OAT DMA Engine problem found: "
+		dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
 			"zero channels detected\n");
 		goto err_setup_interrupts;
 	}
@@ -1671,9 +1635,11 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_self_test;
 
-	ioat_set_tcp_copy_break(device);
+	err = dma_async_device_register(&device->common);
+	if (err)
+		goto err_self_test;
 
-	dma_async_device_register(&device->common);
+	ioat_set_tcp_copy_break(device);
 
 	if (device->version != IOAT_VER_3_0) {
 		INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
@@ -1684,16 +1650,12 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 	return device;
 
 err_self_test:
-	ioat_dma_remove_interrupts(device);
+	ioat_disable_interrupts(device);
 err_setup_interrupts:
 	pci_pool_destroy(device->completion_pool);
 err_completion_pool:
 	pci_pool_destroy(device->dma_pool);
 err_dma_pool:
-	kfree(device);
-err_kzalloc:
-	dev_err(&pdev->dev,
-		"Intel(R) I/OAT DMA Engine initialization failed\n");
 	return NULL;
 }
 
@@ -1705,23 +1667,17 @@ void ioat_dma_remove(struct ioatdma_device *device)
 	if (device->version != IOAT_VER_3_0)
 		cancel_delayed_work(&device->work);
 
-	ioat_dma_remove_interrupts(device);
+	ioat_disable_interrupts(device);
 
 	dma_async_device_unregister(&device->common);
 
 	pci_pool_destroy(device->dma_pool);
 	pci_pool_destroy(device->completion_pool);
 
-	iounmap(device->reg_base);
-	pci_release_regions(device->pdev);
-	pci_disable_device(device->pdev);
-
 	list_for_each_entry_safe(chan, _chan,
 				 &device->common.channels, device_node) {
 		ioat_chan = to_ioat_chan(chan);
 		list_del(&chan->device_node);
-		kfree(ioat_chan);
 	}
-	kfree(device);
 }
 