author     Dan Williams <dan.j.williams@intel.com>    2009-07-28 17:42:38 -0400
committer  Dan Williams <dan.j.williams@intel.com>    2009-09-08 20:29:54 -0400
commit     f2427e276ffec5ce599c6bc116e0927269a360ef
tree       d23b47ad7a00daeba720c25bb900fd96bf226f54
parent     b31b78f1ab7806759622b703357e39a21f757281
ioat: split ioat_dma_probe into core/version-specific routines
Towards the removal of ioatdma_device.version, split the initialization
path into version-specific routines. This conversion:
1/ moves version specific probe code to version specific routines
2/ removes the need for ioat_device
3/ turns off the ioat1 msi quirk if the device is reinitialized for intx
Signed-off-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
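
In outline, the probe path after this patch, condensed from the hunks below
(error handling elided):

	/* pci.c: common PCI setup, then dispatch on the hardware version */
	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
	if (device->version == IOAT_VER_1_2)
		err = ioat1_dma_probe(device, ioat_dca_enabled);
	else if (device->version == IOAT_VER_2_0)
		err = ioat2_dma_probe(device, ioat_dca_enabled);
	else if (device->version >= IOAT_VER_3_0)
		err = ioat3_dma_probe(device, ioat_dca_enabled);

	/* dma.c: each ioatN_dma_probe() installs version-specific ops and
	 * quirks, then calls the shared ioat_probe() + ioat_register() core.
	 */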
-rw-r--r--   drivers/dma/ioat/dma.c   253
-rw-r--r--   drivers/dma/ioat/dma.h    23
-rw-r--r--   drivers/dma/ioat/pci.c    79
3 files changed, 200 insertions(+), 155 deletions(-)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 462dae627191..b7508041c6d7 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -121,52 +121,21 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	int i;
 	struct ioat_dma_chan *ioat_chan;
 	struct device *dev = &device->pdev->dev;
+	struct dma_device *dma = &device->common;
 
-	/*
-	 * IOAT ver.3 workarounds
-	 */
-	if (device->version == IOAT_VER_3_0) {
-		u32 chan_err_mask;
-		u16 dev_id;
-		u32 dmauncerrsts;
-
-		/*
-		 * Write CHANERRMSK_INT with 3E07h to mask out the errors
-		 * that can cause stability issues for IOAT ver.3
-		 */
-		chan_err_mask = 0x3E07;
-		pci_write_config_dword(device->pdev,
-			IOAT_PCI_CHANERRMASK_INT_OFFSET,
-			chan_err_mask);
-
-		/*
-		 * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
-		 * (workaround for spurious config parity error after restart)
-		 */
-		pci_read_config_word(device->pdev,
-			IOAT_PCI_DEVICE_ID_OFFSET,
-			&dev_id);
-		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
-			dmauncerrsts = 0x10;
-			pci_write_config_dword(device->pdev,
-				IOAT_PCI_DMAUNCERRSTS_OFFSET,
-				dmauncerrsts);
-		}
-	}
-
-	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
+	INIT_LIST_HEAD(&dma->channels);
+	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
 	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
 	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
 
 #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
-	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) {
-		device->common.chancnt--;
-	}
+	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
+		dma->chancnt--;
 #endif
-	for (i = 0; i < device->common.chancnt; i++) {
+	for (i = 0; i < dma->chancnt; i++) {
 		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
 		if (!ioat_chan) {
-			device->common.chancnt = i;
+			dma->chancnt = i;
 			break;
 		}
 
@@ -175,28 +144,20 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 		ioat_chan->xfercap = xfercap;
 		ioat_chan->desccount = 0;
 		INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
-		if (ioat_chan->device->version == IOAT_VER_2_0)
-			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE |
-			       IOAT_DMA_DCA_ANY_CPU,
-			       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
-		else if (ioat_chan->device->version == IOAT_VER_3_0)
-			writel(IOAT_DMA_DCA_ANY_CPU,
-			       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
 		spin_lock_init(&ioat_chan->cleanup_lock);
 		spin_lock_init(&ioat_chan->desc_lock);
 		INIT_LIST_HEAD(&ioat_chan->free_desc);
 		INIT_LIST_HEAD(&ioat_chan->used_desc);
 		/* This should be made common somewhere in dmaengine.c */
 		ioat_chan->common.device = &device->common;
-		list_add_tail(&ioat_chan->common.device_node,
-			      &device->common.channels);
+		list_add_tail(&ioat_chan->common.device_node, &dma->channels);
 		device->idx[i] = ioat_chan;
 		tasklet_init(&ioat_chan->cleanup_task,
 			     ioat_dma_cleanup_tasklet,
 			     (unsigned long) ioat_chan);
 		tasklet_disable(&ioat_chan->cleanup_task);
 	}
-	return device->common.chancnt;
+	return dma->chancnt;
 }
 
 /**
@@ -1504,15 +1465,6 @@ msi:
 		pci_disable_msi(pdev);
 		goto intx;
 	}
-	/*
-	 * CB 1.2 devices need a bit set in configuration space to enable MSI
-	 */
-	if (device->version == IOAT_VER_1_2) {
-		u32 dmactrl;
-		pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
-		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
-		pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
-	}
 	goto done;
 
 intx:
@@ -1522,6 +1474,8 @@ intx:
 		goto err_no_irq;
 
 done:
+	if (device->intr_quirk)
+		device->intr_quirk(device);
 	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
 	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
 	return 0;
@@ -1539,21 +1493,12 @@ static void ioat_disable_interrupts(struct ioatdma_device *device)
 	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
 }
 
-struct ioatdma_device *
-ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase)
+static int ioat_probe(struct ioatdma_device *device)
 {
-	int err;
+	int err = -ENODEV;
+	struct dma_device *dma = &device->common;
+	struct pci_dev *pdev = device->pdev;
 	struct device *dev = &pdev->dev;
-	struct ioatdma_device *device;
-	struct dma_device *dma;
-
-	device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
-	if (!device)
-		err = -ENOMEM;
-	device->pdev = pdev;
-	device->reg_base = iobase;
-	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
-	dma = &device->common;
 
 	/* DMA coherent memory pool for DMA descriptor allocations */
 	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
@@ -1572,26 +1517,13 @@ ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase)
 		goto err_completion_pool;
 	}
 
-	INIT_LIST_HEAD(&dma->channels);
 	ioat_dma_enumerate_channels(device);
 
+	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
 	dma->device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat_dma_free_chan_resources;
-	dma->dev = &pdev->dev;
-
-	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
 	dma->device_is_tx_complete = ioat_dma_is_complete;
-	switch (device->version) {
-	case IOAT_VER_1_2:
-		dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
-		dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
-		break;
-	case IOAT_VER_2_0:
-	case IOAT_VER_3_0:
-		dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
-		dma->device_issue_pending = ioat2_dma_memcpy_issue_pending;
-		break;
-	}
+	dma->dev = &pdev->dev;
 
 	dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
 		" %d channels, device version 0x%02x, driver version %s\n",
@@ -1611,19 +1543,7 @@ ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase)
 	if (err)
 		goto err_self_test;
 
-	err = dma_async_device_register(dma);
-	if (err)
-		goto err_self_test;
-
-	ioat_set_tcp_copy_break(device);
-
-	if (device->version != IOAT_VER_3_0) {
-		INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
-		schedule_delayed_work(&device->work,
-				      WATCHDOG_DELAY);
-	}
-
-	return device;
+	return 0;
 
 err_self_test:
 	ioat_disable_interrupts(device);
@@ -1632,7 +1552,142 @@ err_setup_interrupts:
 err_completion_pool:
 	pci_pool_destroy(device->dma_pool);
 err_dma_pool:
-	return NULL;
+	return err;
+}
+
+static int ioat_register(struct ioatdma_device *device)
+{
+	int err = dma_async_device_register(&device->common);
+
+	if (err) {
+		ioat_disable_interrupts(device);
+		pci_pool_destroy(device->completion_pool);
+		pci_pool_destroy(device->dma_pool);
+	}
+
+	return err;
+}
+
+/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
+static void ioat1_intr_quirk(struct ioatdma_device *device)
+{
+	struct pci_dev *pdev = device->pdev;
+	u32 dmactrl;
+
+	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
+	if (pdev->msi_enabled)
+		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
+	else
+		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
+	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
+}
+
+int ioat1_dma_probe(struct ioatdma_device *device, int dca)
+{
+	struct pci_dev *pdev = device->pdev;
+	struct dma_device *dma;
+	int err;
+
+	device->intr_quirk = ioat1_intr_quirk;
+	dma = &device->common;
+	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
+	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
+
+	err = ioat_probe(device);
+	if (err)
+		return err;
+	ioat_set_tcp_copy_break(4096);
+	err = ioat_register(device);
+	if (err)
+		return err;
+	if (dca)
+		device->dca = ioat_dca_init(pdev, device->reg_base);
+
+	INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
+	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
+
+	return err;
+}
+
+int ioat2_dma_probe(struct ioatdma_device *device, int dca)
+{
+	struct pci_dev *pdev = device->pdev;
+	struct dma_device *dma;
+	struct dma_chan *chan;
+	struct ioat_dma_chan *ioat_chan;
+	int err;
+
+	dma = &device->common;
+	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
+	dma->device_issue_pending = ioat2_dma_memcpy_issue_pending;
+
+	err = ioat_probe(device);
+	if (err)
+		return err;
+	ioat_set_tcp_copy_break(2048);
+
+	list_for_each_entry(chan, &dma->channels, device_node) {
+		ioat_chan = to_ioat_chan(chan);
+		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
+		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+	}
+
+	err = ioat_register(device);
+	if (err)
+		return err;
+	if (dca)
+		device->dca = ioat2_dca_init(pdev, device->reg_base);
+
+	INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
+	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
+
+	return err;
+}
+
+int ioat3_dma_probe(struct ioatdma_device *device, int dca)
+{
+	struct pci_dev *pdev = device->pdev;
+	struct dma_device *dma;
+	struct dma_chan *chan;
+	struct ioat_dma_chan *ioat_chan;
+	int err;
+	u16 dev_id;
+
+	dma = &device->common;
+	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
+	dma->device_issue_pending = ioat2_dma_memcpy_issue_pending;
+
+	/* -= IOAT ver.3 workarounds =- */
+	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
+	 * that can cause stability issues for IOAT ver.3
+	 */
+	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+
+	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+	 * (workaround for spurious config parity error after restart)
+	 */
+	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
+		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+
+	err = ioat_probe(device);
+	if (err)
+		return err;
+	ioat_set_tcp_copy_break(262144);
+
+	list_for_each_entry(chan, &dma->channels, device_node) {
+		ioat_chan = to_ioat_chan(chan);
+		writel(IOAT_DMA_DCA_ANY_CPU,
+		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+	}
+
+	err = ioat_register(device);
+	if (err)
+		return err;
+	if (dca)
+		device->dca = ioat3_dca_init(pdev, device->reg_base);
+
+	return err;
 }
 
 void ioat_dma_remove(struct ioatdma_device *device)
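
On item 3/ of the changelog: the old code set IOAT_PCI_DMACTRL_MSI_EN only on
the successful-MSI path and never cleared it, so a CB 1.2 device re-initialized
for intx kept a stale MSI enable bit in config space. The hook now runs at the
common done: label, i.e. on both the msi and intx exits, and syncs the bit with
the final interrupt mode:

	done:
		if (device->intr_quirk)
			device->intr_quirk(device);
	/* ioat1_intr_quirk(): set or clear IOAT_PCI_DMACTRL_MSI_EN
	 * according to pdev->msi_enabled.
	 */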
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 6e27ddb1e98a..1226e35f2709 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -61,6 +61,8 @@
  * @version: version of ioatdma device
  * @msix_entries: irq handlers
  * @idx: per channel data
+ * @dca: direct cache access context
+ * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
  */
 
 struct ioatdma_device {
@@ -73,6 +75,8 @@ struct ioatdma_device {
 	struct delayed_work work;
 	struct msix_entry msix_entries[4];
 	struct ioat_dma_chan *idx[4];
+	struct dca_provider *dca;
+	void (*intr_quirk)(struct ioatdma_device *device);
 };
 
 /**
@@ -136,25 +140,16 @@ struct ioat_desc_sw {
 	struct dma_async_tx_descriptor txd;
 };
 
-static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
+static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
 {
 #ifdef CONFIG_NET_DMA
-	switch (dev->version) {
-	case IOAT_VER_1_2:
-		sysctl_tcp_dma_copybreak = 4096;
-		break;
-	case IOAT_VER_2_0:
-		sysctl_tcp_dma_copybreak = 2048;
-		break;
-	case IOAT_VER_3_0:
-		sysctl_tcp_dma_copybreak = 262144;
-		break;
-	}
+	sysctl_tcp_dma_copybreak = copybreak;
 #endif
 }
 
-struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
-				      void __iomem *iobase);
+int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
+int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
+int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
 void ioat_dma_remove(struct ioatdma_device *device);
 struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
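
The copybreak policy moves out of the header for the same reason:
ioat_set_tcp_copy_break() now takes the threshold directly, and each version
probe in dma.c passes its own value, e.g. in ioat1_dma_probe():

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);	/* 2048 for ioat2, 262144 for ioat3 */
	err = ioat_register(device);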
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 982e38fd177c..55414d88ac1b 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -60,14 +60,8 @@ static struct pci_device_id ioat_pci_tbl[] = {
 	{ 0, }
 };
 
-struct ioat_device {
-	struct pci_dev *pdev;
-	struct ioatdma_device *dma;
-	struct dca_provider *dca;
-};
-
-static int __devinit ioat_probe(struct pci_dev *pdev,
-				const struct pci_device_id *id);
+static int __devinit ioat_pci_probe(struct pci_dev *pdev,
+				    const struct pci_device_id *id);
 static void __devexit ioat_remove(struct pci_dev *pdev);
 
 static int ioat_dca_enabled = 1;
@@ -79,17 +73,28 @@ MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"
 static struct pci_driver ioat_pci_driver = {
 	.name = DRV_NAME,
 	.id_table = ioat_pci_tbl,
-	.probe = ioat_probe,
+	.probe = ioat_pci_probe,
 	.remove = __devexit_p(ioat_remove),
 };
 
-static int __devinit ioat_probe(struct pci_dev *pdev,
-				const struct pci_device_id *id)
+static struct ioatdma_device *
+alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
+{
+	struct device *dev = &pdev->dev;
+	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+
+	if (!d)
+		return NULL;
+	d->pdev = pdev;
+	d->reg_base = iobase;
+	return d;
+}
+
+static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	void __iomem * const *iomap;
-	void __iomem *iobase;
 	struct device *dev = &pdev->dev;
-	struct ioat_device *device;
+	struct ioatdma_device *device;
 	int err;
 
 	err = pcim_enable_device(pdev);
@@ -119,33 +124,24 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
 	if (!device)
 		return -ENOMEM;
 
-	device->pdev = pdev;
-	pci_set_drvdata(pdev, device);
-	iobase = iomap[IOAT_MMIO_BAR];
-
 	pci_set_master(pdev);
 
-	switch (readb(iobase + IOAT_VER_OFFSET)) {
-	case IOAT_VER_1_2:
-		device->dma = ioat_dma_probe(pdev, iobase);
-		if (device->dma && ioat_dca_enabled)
-			device->dca = ioat_dca_init(pdev, iobase);
-		break;
-	case IOAT_VER_2_0:
-		device->dma = ioat_dma_probe(pdev, iobase);
-		if (device->dma && ioat_dca_enabled)
-			device->dca = ioat2_dca_init(pdev, iobase);
-		break;
-	case IOAT_VER_3_0:
-		device->dma = ioat_dma_probe(pdev, iobase);
-		if (device->dma && ioat_dca_enabled)
-			device->dca = ioat3_dca_init(pdev, iobase);
-		break;
-	default:
+	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
+	if (!device)
+		return -ENOMEM;
+	pci_set_drvdata(pdev, device);
+
+	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+	if (device->version == IOAT_VER_1_2)
+		err = ioat1_dma_probe(device, ioat_dca_enabled);
+	else if (device->version == IOAT_VER_2_0)
+		err = ioat2_dma_probe(device, ioat_dca_enabled);
+	else if (device->version >= IOAT_VER_3_0)
+		err = ioat3_dma_probe(device, ioat_dca_enabled);
+	else
 		return -ENODEV;
-	}
 
-	if (!device->dma) {
+	if (err) {
 		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
 		return -ENODEV;
 	}
@@ -155,7 +151,10 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
 
 static void __devexit ioat_remove(struct pci_dev *pdev)
 {
-	struct ioat_device *device = pci_get_drvdata(pdev);
+	struct ioatdma_device *device = pci_get_drvdata(pdev);
+
+	if (!device)
+		return;
 
 	dev_err(&pdev->dev, "Removing dma and dca services\n");
 	if (device->dca) {
@@ -163,11 +162,7 @@ static void __devexit ioat_remove(struct pci_dev *pdev)
 		free_dca_provider(device->dca);
 		device->dca = NULL;
 	}
-
-	if (device->dma) {
-		ioat_dma_remove(device->dma);
-		device->dma = NULL;
-	}
+	ioat_dma_remove(device);
 }
 
 static int __init ioat_init_module(void)
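
Two behavioral details in pci.c are worth noting: the new dispatch selects
ioat3_dma_probe() for any version >= IOAT_VER_3_0 where the old switch matched
IOAT_VER_3_0 exactly (unrecognized older versions still return -ENODEV), and
ioat_remove() now bails out early on a NULL drvdata, a defensive check that
holds because pci_set_drvdata() is only called after alloc_ioatdma() succeeds.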