 arch/x86/include/asm/amd_iommu_types.h |  4 ++++
 arch/x86/kernel/amd_iommu.c            | 44 ++++++++++++++++++++++++--------------------
 2 files changed, 28 insertions(+), 20 deletions(-)
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 2833862311b5..9fc045ee2fcb 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -316,6 +316,10 @@ struct iommu_dev_data {
 	struct protection_domain *domain; /* Domain the device is bound to */
 	atomic_t bind;			  /* Domain attach reverent count */
 	u16 devid;			  /* PCI Device ID */
+	struct {
+		bool enabled;
+		int  qdep;
+	} ats;				  /* ATS state */
 };
 
 /*
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 4e8b176297b5..9e07ef65a016 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -695,17 +695,16 @@ void iommu_flush_all_caches(struct amd_iommu *iommu)
  */
 static int device_flush_iotlb(struct device *dev, u64 address, size_t size)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
+	struct iommu_dev_data *dev_data;
 	struct amd_iommu *iommu;
 	struct iommu_cmd cmd;
-	u16 devid;
 	int qdep;
 
-	qdep  = pci_ats_queue_depth(pdev);
-	devid = get_device_id(dev);
-	iommu = amd_iommu_rlookup_table[devid];
+	dev_data = get_dev_data(dev);
+	qdep     = dev_data->ats.qdep;
+	iommu    = amd_iommu_rlookup_table[dev_data->devid];
 
-	build_inv_iotlb_pages(&cmd, devid, qdep, address, size);
+	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
 
 	return iommu_queue_command(iommu, &cmd);
 }
@@ -728,7 +727,7 @@ static int device_flush_dte(struct device *dev)
 	if (ret)
 		return ret;
 
-	if (pci_ats_enabled(pdev))
+	if (dev_data->ats.enabled)
 		ret = device_flush_iotlb(dev, 0, ~0UL);
 
 	return ret;
@@ -760,9 +759,8 @@ static void __domain_flush_pages(struct protection_domain *domain,
 	}
 
 	list_for_each_entry(dev_data, &domain->dev_list, list) {
-		struct pci_dev *pdev = to_pci_dev(dev_data->dev);
 
-		if (!pci_ats_enabled(pdev))
+		if (!dev_data->ats.enabled)
 			continue;
 
 		ret |= device_flush_iotlb(dev_data->dev, address, size);
@@ -1576,8 +1574,7 @@ static void do_attach(struct device *dev, struct protection_domain *domain)
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
 	pdev  = to_pci_dev(dev);
 
-	if (amd_iommu_iotlb_sup)
-		ats = pci_ats_enabled(pdev);
+	ats   = dev_data->ats.enabled;
 
 	/* Update data structures */
 	dev_data->domain = domain;
@@ -1674,11 +1671,16 @@ static int attach_device(struct device *dev,
 			 struct protection_domain *domain)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
+	struct iommu_dev_data *dev_data;
 	unsigned long flags;
 	int ret;
 
-	if (amd_iommu_iotlb_sup)
-		pci_enable_ats(pdev, PAGE_SHIFT);
+	dev_data = get_dev_data(dev);
+
+	if (amd_iommu_iotlb_sup && pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
+		dev_data->ats.enabled = true;
+		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
+	}
 
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 	ret = __attach_device(dev, domain);
@@ -1736,7 +1738,7 @@ static void __detach_device(struct device *dev)
  */
 static void detach_device(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
+	struct iommu_dev_data *dev_data;
 	unsigned long flags;
 
 	/* lock device table */
@@ -1744,8 +1746,12 @@ static void detach_device(struct device *dev)
 	__detach_device(dev);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
-	if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev))
-		pci_disable_ats(pdev);
+	dev_data = get_dev_data(dev);
+
+	if (dev_data->ats.enabled) {
+		pci_disable_ats(to_pci_dev(dev));
+		dev_data->ats.enabled = false;
+	}
 }
 
 /*
@@ -1890,10 +1896,8 @@ static void update_device_table(struct protection_domain *domain)
 {
 	struct iommu_dev_data *dev_data;
 
-	list_for_each_entry(dev_data, &domain->dev_list, list) {
-		struct pci_dev *pdev = to_pci_dev(dev_data->dev);
-		set_dte_entry(dev_data->devid, domain, pci_ats_enabled(pdev));
-	}
+	list_for_each_entry(dev_data, &domain->dev_list, list)
+		set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
 }
 
 static void update_domain(struct protection_domain *domain)
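
The sketch below is a minimal, stand-alone model of the pattern the hunks above introduce: ATS state is probed once at attach time, cached in the per-device data, and every later consumer (IOTLB flush, DTE update, detach) reads only the cached copy. It is ordinary user-space C, not kernel code and not part of the patch; the *_stub types and helpers are inventions standing in for struct pci_dev and the real pci_enable_ats()/pci_ats_queue_depth()/pci_disable_ats() calls used in the diff.

/* Stand-alone sketch of the ATS-caching pattern; stubs replace the PCI core. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct pci_dev_stub {			/* stand-in for struct pci_dev */
	bool ats_capable;
	int  queue_depth;
};

/* Per-device IOMMU data, mirroring the new 'ats' member from the patch */
struct iommu_dev_data_stub {
	struct {
		bool enabled;
		int  qdep;
	} ats;
};

/* Stubbed PCI helpers: 0 on success, configured queue depth, no-op disable */
static int pci_enable_ats_stub(struct pci_dev_stub *pdev, int page_shift)
{
	(void)page_shift;
	return pdev->ats_capable ? 0 : -1;
}

static int pci_ats_queue_depth_stub(struct pci_dev_stub *pdev)
{
	return pdev->queue_depth;
}

static void pci_disable_ats_stub(struct pci_dev_stub *pdev)
{
	(void)pdev;
}

/* attach: enable ATS once and cache the result, as the patch does */
static void attach_device_stub(struct iommu_dev_data_stub *dev_data,
			       struct pci_dev_stub *pdev, bool iotlb_sup)
{
	if (iotlb_sup && pci_enable_ats_stub(pdev, PAGE_SHIFT) == 0) {
		dev_data->ats.enabled = true;
		dev_data->ats.qdep    = pci_ats_queue_depth_stub(pdev);
	}
}

/* detach: consult only the cached flag before disabling ATS */
static void detach_device_stub(struct iommu_dev_data_stub *dev_data,
			       struct pci_dev_stub *pdev)
{
	if (dev_data->ats.enabled) {
		pci_disable_ats_stub(pdev);
		dev_data->ats.enabled = false;
	}
}

int main(void)
{
	struct pci_dev_stub pdev = { .ats_capable = true, .queue_depth = 32 };
	struct iommu_dev_data_stub dev_data = { { false, 0 } };

	attach_device_stub(&dev_data, &pdev, true);
	printf("ATS enabled=%d qdep=%d\n", dev_data.ats.enabled, dev_data.ats.qdep);

	/* flush paths would now read dev_data.ats.* instead of querying PCI */
	detach_device_stub(&dev_data, &pdev);
	printf("ATS enabled=%d\n", dev_data.ats.enabled);

	return 0;
}

The design point the model captures is the same one motivating the patch: once the state lives in iommu_dev_data, hot paths such as device_flush_iotlb() and __domain_flush_pages() no longer need to reach into PCI config space or convert back to a struct pci_dev on every flush.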