author:    Dan Williams <dan.j.williams@intel.com>  2009-07-28 17:42:38 -0400
committer: Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:29:54 -0400
commit:    f2427e276ffec5ce599c6bc116e0927269a360ef (patch)
tree:      d23b47ad7a00daeba720c25bb900fd96bf226f54 /drivers/dma/ioat/dma.c
parent:    b31b78f1ab7806759622b703357e39a21f757281 (diff)
ioat: split ioat_dma_probe into core/version-specific routines
Towards the removal of ioatdma_device.version, split the initialization
path into distinct per-version routines. This conversion:
1/ moves version-specific probe code to version-specific routines
2/ removes the need for ioat_device
3/ turns off the ioat1 msi quirk if the device is reinitialized for intx
Signed-off-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
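
[Editor's note] With the old version-agnostic ioat_dma_probe() gone, the bus
glue has to pick ioat1_dma_probe(), ioat2_dma_probe(), or ioat3_dma_probe()
itself. The companion change to drivers/dma/ioat/pci.c is not part of this
diff, so the following is only a sketch of how that caller-side dispatch
might look; ioat_probe_dispatch() is a hypothetical helper name, while the
IOAT_VER_* constants and the per-version probe entry points come from the
diff below.

/* Hypothetical caller-side dispatch; the real version lives in the
 * pci.c half of this series, which this diff does not show.
 */
static int ioat_probe_dispatch(struct ioatdma_device *device, int dca)
{
	u8 version = readb(device->reg_base + IOAT_VER_OFFSET);

	switch (version) {
	case IOAT_VER_1_2:
		return ioat1_dma_probe(device, dca);
	case IOAT_VER_2_0:
		return ioat2_dma_probe(device, dca);
	case IOAT_VER_3_0:
		return ioat3_dma_probe(device, dca);
	default:
		return -ENODEV;	/* unrecognized hardware version */
	}
}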
Diffstat (limited to 'drivers/dma/ioat/dma.c')
-rw-r--r--  drivers/dma/ioat/dma.c  253
1 file changed, 154 insertions(+), 99 deletions(-)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 462dae627191..b7508041c6d7 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -121,52 +121,21 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	int i;
 	struct ioat_dma_chan *ioat_chan;
 	struct device *dev = &device->pdev->dev;
+	struct dma_device *dma = &device->common;
 
-	/*
-	 * IOAT ver.3 workarounds
-	 */
-	if (device->version == IOAT_VER_3_0) {
-		u32 chan_err_mask;
-		u16 dev_id;
-		u32 dmauncerrsts;
-
-		/*
-		 * Write CHANERRMSK_INT with 3E07h to mask out the errors
-		 * that can cause stability issues for IOAT ver.3
-		 */
-		chan_err_mask = 0x3E07;
-		pci_write_config_dword(device->pdev,
-			IOAT_PCI_CHANERRMASK_INT_OFFSET,
-			chan_err_mask);
-
-		/*
-		 * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
-		 * (workaround for spurious config parity error after restart)
-		 */
-		pci_read_config_word(device->pdev,
-			IOAT_PCI_DEVICE_ID_OFFSET,
-			&dev_id);
-		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
-			dmauncerrsts = 0x10;
-			pci_write_config_dword(device->pdev,
-				IOAT_PCI_DMAUNCERRSTS_OFFSET,
-				dmauncerrsts);
-		}
-	}
-
-	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
+	INIT_LIST_HEAD(&dma->channels);
+	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
 	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
 	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
 
 #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
-	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) {
-		device->common.chancnt--;
-	}
+	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
+		dma->chancnt--;
 #endif
-	for (i = 0; i < device->common.chancnt; i++) {
+	for (i = 0; i < dma->chancnt; i++) {
 		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
 		if (!ioat_chan) {
-			device->common.chancnt = i;
+			dma->chancnt = i;
 			break;
 		}
 
@@ -175,28 +144,20 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 		ioat_chan->xfercap = xfercap;
 		ioat_chan->desccount = 0;
 		INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
-		if (ioat_chan->device->version == IOAT_VER_2_0)
-			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE |
-			       IOAT_DMA_DCA_ANY_CPU,
-			       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
-		else if (ioat_chan->device->version == IOAT_VER_3_0)
-			writel(IOAT_DMA_DCA_ANY_CPU,
-			       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
 		spin_lock_init(&ioat_chan->cleanup_lock);
 		spin_lock_init(&ioat_chan->desc_lock);
 		INIT_LIST_HEAD(&ioat_chan->free_desc);
 		INIT_LIST_HEAD(&ioat_chan->used_desc);
 		/* This should be made common somewhere in dmaengine.c */
 		ioat_chan->common.device = &device->common;
-		list_add_tail(&ioat_chan->common.device_node,
-			      &device->common.channels);
+		list_add_tail(&ioat_chan->common.device_node, &dma->channels);
 		device->idx[i] = ioat_chan;
 		tasklet_init(&ioat_chan->cleanup_task,
 			     ioat_dma_cleanup_tasklet,
 			     (unsigned long) ioat_chan);
 		tasklet_disable(&ioat_chan->cleanup_task);
 	}
-	return device->common.chancnt;
+	return dma->chancnt;
 }
 
 /**
@@ -1504,15 +1465,6 @@ msi:
 		pci_disable_msi(pdev);
 		goto intx;
 	}
-	/*
-	 * CB 1.2 devices need a bit set in configuration space to enable MSI
-	 */
-	if (device->version == IOAT_VER_1_2) {
-		u32 dmactrl;
-		pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
-		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
-		pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
-	}
 	goto done;
 
 intx:
@@ -1522,6 +1474,8 @@ intx:
 		goto err_no_irq;
 
 done:
+	if (device->intr_quirk)
+		device->intr_quirk(device);
 	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
 	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
 	return 0;
@@ -1539,21 +1493,12 @@ static void ioat_disable_interrupts(struct ioatdma_device *device)
 	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
 }
 
-struct ioatdma_device *
-ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase)
+static int ioat_probe(struct ioatdma_device *device)
 {
-	int err;
+	int err = -ENODEV;
+	struct dma_device *dma = &device->common;
+	struct pci_dev *pdev = device->pdev;
 	struct device *dev = &pdev->dev;
-	struct ioatdma_device *device;
-	struct dma_device *dma;
-
-	device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
-	if (!device)
-		err = -ENOMEM;
-	device->pdev = pdev;
-	device->reg_base = iobase;
-	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
-	dma = &device->common;
 
 	/* DMA coherent memory pool for DMA descriptor allocations */
 	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
@@ -1572,26 +1517,13 @@ ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase)
 		goto err_completion_pool;
 	}
 
-	INIT_LIST_HEAD(&dma->channels);
 	ioat_dma_enumerate_channels(device);
 
+	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
 	dma->device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat_dma_free_chan_resources;
-	dma->dev = &pdev->dev;
-
-	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
 	dma->device_is_tx_complete = ioat_dma_is_complete;
-	switch (device->version) {
-	case IOAT_VER_1_2:
-		dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
-		dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
-		break;
-	case IOAT_VER_2_0:
-	case IOAT_VER_3_0:
-		dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
-		dma->device_issue_pending = ioat2_dma_memcpy_issue_pending;
-		break;
-	}
+	dma->dev = &pdev->dev;
 
 	dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
 		" %d channels, device version 0x%02x, driver version %s\n",
@@ -1611,19 +1543,7 @@ ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase)
 	if (err)
 		goto err_self_test;
 
-	err = dma_async_device_register(dma);
-	if (err)
-		goto err_self_test;
-
-	ioat_set_tcp_copy_break(device);
-
-	if (device->version != IOAT_VER_3_0) {
-		INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
-		schedule_delayed_work(&device->work,
-				      WATCHDOG_DELAY);
-	}
-
-	return device;
+	return 0;
 
 err_self_test:
 	ioat_disable_interrupts(device);
@@ -1632,7 +1552,142 @@ err_setup_interrupts:
 err_completion_pool:
 	pci_pool_destroy(device->dma_pool);
 err_dma_pool:
-	return NULL;
+	return err;
+}
+
+static int ioat_register(struct ioatdma_device *device)
+{
+	int err = dma_async_device_register(&device->common);
+
+	if (err) {
+		ioat_disable_interrupts(device);
+		pci_pool_destroy(device->completion_pool);
+		pci_pool_destroy(device->dma_pool);
+	}
+
+	return err;
+}
+
+/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
+static void ioat1_intr_quirk(struct ioatdma_device *device)
+{
+	struct pci_dev *pdev = device->pdev;
+	u32 dmactrl;
+
+	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
+	if (pdev->msi_enabled)
+		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
+	else
+		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
+	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
+}
+
+int ioat1_dma_probe(struct ioatdma_device *device, int dca)
+{
+	struct pci_dev *pdev = device->pdev;
+	struct dma_device *dma;
+	int err;
+
+	device->intr_quirk = ioat1_intr_quirk;
+	dma = &device->common;
+	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
+	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
+
+	err = ioat_probe(device);
+	if (err)
+		return err;
+	ioat_set_tcp_copy_break(4096);
+	err = ioat_register(device);
+	if (err)
+		return err;
+	if (dca)
+		device->dca = ioat_dca_init(pdev, device->reg_base);
+
+	INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
+	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
+
+	return err;
+}
+
+int ioat2_dma_probe(struct ioatdma_device *device, int dca)
+{
+	struct pci_dev *pdev = device->pdev;
+	struct dma_device *dma;
+	struct dma_chan *chan;
+	struct ioat_dma_chan *ioat_chan;
+	int err;
+
+	dma = &device->common;
+	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
+	dma->device_issue_pending = ioat2_dma_memcpy_issue_pending;
+
+	err = ioat_probe(device);
+	if (err)
+		return err;
+	ioat_set_tcp_copy_break(2048);
+
+	list_for_each_entry(chan, &dma->channels, device_node) {
+		ioat_chan = to_ioat_chan(chan);
+		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
+		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+	}
+
+	err = ioat_register(device);
+	if (err)
+		return err;
+	if (dca)
+		device->dca = ioat2_dca_init(pdev, device->reg_base);
+
+	INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
+	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
+
+	return err;
+}
+
+int ioat3_dma_probe(struct ioatdma_device *device, int dca)
+{
+	struct pci_dev *pdev = device->pdev;
+	struct dma_device *dma;
+	struct dma_chan *chan;
+	struct ioat_dma_chan *ioat_chan;
+	int err;
+	u16 dev_id;
+
+	dma = &device->common;
+	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
+	dma->device_issue_pending = ioat2_dma_memcpy_issue_pending;
+
+	/* -= IOAT ver.3 workarounds =- */
+	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
+	 * that can cause stability issues for IOAT ver.3
+	 */
+	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+
+	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+	 * (workaround for spurious config parity error after restart)
+	 */
+	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
+		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+
+	err = ioat_probe(device);
+	if (err)
+		return err;
+	ioat_set_tcp_copy_break(262144);
+
+	list_for_each_entry(chan, &dma->channels, device_node) {
+		ioat_chan = to_ioat_chan(chan);
+		writel(IOAT_DMA_DCA_ANY_CPU,
+		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+	}
+
+	err = ioat_register(device);
+	if (err)
+		return err;
+	if (dca)
+		device->dca = ioat3_dca_init(pdev, device->reg_base);
+
+	return err;
 }
 
 void ioat_dma_remove(struct ioatdma_device *device)
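
[Editor's note] The diffstat is limited to dma.c, so the structure change
that makes the device->intr_quirk dereference above valid is not shown.
Presumably ioatdma_device in drivers/dma/ioat/dma.h grows an optional
callback along these lines; this is a sketch, not the verbatim header, and
members unrelated to this patch are omitted.

/* Sketch of the header side of this change; drivers/dma/ioat/dma.h is
 * outside this diff.
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct dma_device common;
	/* optional hook called from the interrupt-setup path once the
	 * final mode (msix/msi/intx) is settled; ioat1 uses it to keep
	 * IOAT_PCI_DMACTRL_MSI_EN in sync with pdev->msi_enabled */
	void (*intr_quirk)(struct ioatdma_device *device);
};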