author		Joerg Roedel <joerg.roedel@amd.com>	2011-04-05 09:31:08 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2011-04-11 03:04:04 -0400
commit		fd7b5535e10ce820f030842da3f289f80ec0d4f3 (patch)
tree		3772c4406efa13dde0b8cd50f62ff896b6159c90 /arch/x86/kernel/amd_iommu.c
parent		60f723b4117507c05c8b0b5c8b98ecc12a76878e (diff)
x86/amd-iommu: Add ATS enable/disable code
This patch adds the necessary code to the AMD IOMMU driver to enable and disable the ATS capability on a device and to set up the IOMMU data structures correctly.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
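In outline, the flow this adds looks like the sketch below, a condensed and hedged illustration of the hunks further down rather than the driver code itself. set_dte_entry(), clear_dte_entry(), get_device_id(), amd_iommu_iotlb_sup, DTE_FLAG_IOTLB and the pci_enable_ats()/pci_ats_enabled()/pci_disable_ats() helpers are the ones used in the diff; example_attach()/example_detach() are made-up names, and locking, reference counting and error handling are left out.

	/* Hedged sketch of the new ATS handling; not the literal driver code. */
	static void example_attach(struct device *dev, struct protection_domain *domain)
	{
		struct pci_dev *pdev = to_pci_dev(dev);
		bool ats = false;

		/* Turn on ATS for the device when the IOMMU can serve remote IOTLBs */
		if (amd_iommu_iotlb_sup) {
			pci_enable_ats(pdev, PAGE_SHIFT);
			ats = pci_ats_enabled(pdev);
		}

		/* Record the IOTLB capability in the device table entry (DTE_FLAG_IOTLB) */
		set_dte_entry(get_device_id(dev), domain, ats);
	}

	static void example_detach(struct device *dev)
	{
		struct pci_dev *pdev = to_pci_dev(dev);

		clear_dte_entry(get_device_id(dev));

		/* ATS is switched off only after the DTE no longer advertises an IOTLB */
		if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev))
			pci_disable_ats(pdev);
	}

Passing PAGE_SHIFT to pci_enable_ats() programs the ATS smallest translation unit to a single page (4K on x86), matching the smallest IOMMU page size.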
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--	arch/x86/kernel/amd_iommu.c	34
1 files changed, 28 insertions, 6 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index f3ce4338dade..e4791f66aa38 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1452,17 +1452,22 @@ static bool dma_ops_domain(struct protection_domain *domain)
 	return domain->flags & PD_DMA_OPS_MASK;
 }
 
-static void set_dte_entry(u16 devid, struct protection_domain *domain)
+static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
 {
 	u64 pte_root = virt_to_phys(domain->pt_root);
+	u32 flags = 0;
 
 	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
 		    << DEV_ENTRY_MODE_SHIFT;
 	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
 
-	amd_iommu_dev_table[devid].data[2] = domain->id;
-	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
-	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
+	if (ats)
+		flags |= DTE_FLAG_IOTLB;
+
+	amd_iommu_dev_table[devid].data[3] |= flags;
+	amd_iommu_dev_table[devid].data[2] = domain->id;
+	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
+	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
 }
 
 static void clear_dte_entry(u16 devid)
@@ -1479,16 +1484,22 @@ static void do_attach(struct device *dev, struct protection_domain *domain)
 {
 	struct iommu_dev_data *dev_data;
 	struct amd_iommu *iommu;
+	struct pci_dev *pdev;
+	bool ats = false;
 	u16 devid;
 
 	devid = get_device_id(dev);
 	iommu = amd_iommu_rlookup_table[devid];
 	dev_data = get_dev_data(dev);
+	pdev = to_pci_dev(dev);
+
+	if (amd_iommu_iotlb_sup)
+		ats = pci_ats_enabled(pdev);
 
 	/* Update data structures */
 	dev_data->domain = domain;
 	list_add(&dev_data->list, &domain->dev_list);
-	set_dte_entry(devid, domain);
+	set_dte_entry(devid, domain, ats);
 
 	/* Do reference counting */
 	domain->dev_iommu[iommu->index] += 1;
@@ -1502,11 +1513,13 @@ static void do_detach(struct device *dev)
 {
 	struct iommu_dev_data *dev_data;
 	struct amd_iommu *iommu;
+	struct pci_dev *pdev;
 	u16 devid;
 
 	devid = get_device_id(dev);
 	iommu = amd_iommu_rlookup_table[devid];
 	dev_data = get_dev_data(dev);
+	pdev = to_pci_dev(dev);
 
 	/* decrease reference counters */
 	dev_data->domain->dev_iommu[iommu->index] -= 1;
@@ -1581,9 +1594,13 @@ out_unlock:
 static int attach_device(struct device *dev,
 			 struct protection_domain *domain)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	unsigned long flags;
 	int ret;
 
+	if (amd_iommu_iotlb_sup)
+		pci_enable_ats(pdev, PAGE_SHIFT);
+
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 	ret = __attach_device(dev, domain);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
@@ -1640,12 +1657,16 @@ static void __detach_device(struct device *dev)
  */
 static void detach_device(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	unsigned long flags;
 
 	/* lock device table */
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 	__detach_device(dev);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+
+	if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev))
+		pci_disable_ats(pdev);
 }
 
 /*
@@ -1795,8 +1816,9 @@ static void update_device_table(struct protection_domain *domain)
 	struct iommu_dev_data *dev_data;
 
 	list_for_each_entry(dev_data, &domain->dev_list, list) {
+		struct pci_dev *pdev = to_pci_dev(dev_data->dev);
 		u16 devid = get_device_id(dev_data->dev);
-		set_dte_entry(devid, domain);
+		set_dte_entry(devid, domain, pci_ats_enabled(pdev));
 	}
 }
 