author    Joerg Roedel <joerg.roedel@amd.com>  2011-11-17 11:24:28 -0500
committer Joerg Roedel <joerg.roedel@amd.com>  2011-12-12 09:18:57 -0500
commit    52815b75682e25db45545911fd2b09ef5856e695
tree      a54d7812e6d68d0663e6929f29dadf0ce5f74c59 /drivers/iommu/amd_iommu.c
parent    132bd68f180dd5de9176e20532910503f6393f14
iommu/amd: Add support for IOMMUv2 domain mode
This patch adds support for protection domains that implement
two-level paging for devices.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r--  drivers/iommu/amd_iommu.c | 144
1 file changed, 140 insertions(+), 4 deletions(-)
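
For context, a minimal sketch of the call order this export appears designed
for, mirroring what a consumer such as the companion IOMMUv2 driver layer
would do. The checks visible in the diff below dictate the order:
amd_iommu_domain_enable_v2() must run while the domain has no devices
attached (the -EBUSY check), so enabling v2 mode comes before any attach.
The helper name example_setup_v2_domain and the PASID count are
illustrative, not part of this patch:

#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/amd-iommu.h>

/*
 * Hypothetical consumer: switch a freshly allocated domain into
 * IOMMUv2 mode, then attach one device.
 */
static int example_setup_v2_domain(struct device *dev)
{
	struct iommu_domain *dom;
	int ret;

	dom = iommu_domain_alloc(&pci_bus_type);
	if (!dom)
		return -ENOMEM;

	/* From the parent commit: put the v1 page tables in direct-map mode */
	amd_iommu_domain_direct_map(dom);

	/*
	 * Must be called while no devices are attached, or the -EBUSY
	 * check fires; 1 << 16 PASIDs needs a two-level GCR3 table.
	 */
	ret = amd_iommu_domain_enable_v2(dom, 1 << 16);
	if (ret)
		goto out_free;

	/* The device must be IOMMUv2-capable and in passthrough mode */
	ret = iommu_attach_device(dom, dev);
	if (ret)
		goto out_free;

	return 0;

out_free:
	iommu_domain_free(dom);
	return ret;
}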
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 6ed536769102..7dda0d4a8f8c 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -63,6 +63,7 @@ static struct protection_domain *pt_domain;
 static struct iommu_ops amd_iommu_ops;
 
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
+int amd_iommu_max_glx_val = -1;
 
 /*
  * general struct to manage commands send to an IOMMU
@@ -1598,6 +1599,11 @@ static void free_pagetable(struct protection_domain *domain)
 	domain->pt_root = NULL;
 }
 
+static void free_gcr3_table(struct protection_domain *domain)
+{
+	free_page((unsigned long)domain->gcr3_tbl);
+}
+
 /*
  * Free a domain, only used if something went wrong in the
  * allocation path and we need to free an already allocated page table
@@ -1699,6 +1705,32 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
 	if (ats)
 		flags |= DTE_FLAG_IOTLB;
 
+	if (domain->flags & PD_IOMMUV2_MASK) {
+		u64 gcr3 = __pa(domain->gcr3_tbl);
+		u64 glx  = domain->glx;
+		u64 tmp;
+
+		pte_root |= DTE_FLAG_GV;
+		pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
+
+		/* First mask out possible old values for GCR3 table */
+		tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
+		flags    &= ~tmp;
+
+		tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
+		flags    &= ~tmp;
+
+		/* Encode GCR3 table into DTE */
+		tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
+		pte_root |= tmp;
+
+		tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
+		flags    |= tmp;
+
+		tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
+		flags    |= tmp;
+	}
+
 	flags &= ~(0xffffUL);
 	flags |= domain->id;
 
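
The hunk above scatters the GCR3 table root pointer across three
device-table-entry (DTE) fields. A standalone sketch of that bit-splitting
follows; the masks and shifts are assumed from amd_iommu_types.h, which is
outside this diffstat, so verify them against the header before relying on
them:

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout (from amd_iommu_types.h, not shown in this diff):
 * bits 14:12 of the GCR3 root pointer land in DTE word 0 at bit 58,
 * bits 30:15 in word 1 at bit 16, and bits 51:31 in word 1 at bit 43. */
#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0x1fffffULL)
#define DTE_GCR3_SHIFT_A	58
#define DTE_GCR3_SHIFT_B	16
#define DTE_GCR3_SHIFT_C	43

int main(void)
{
	uint64_t gcr3 = 0x0000123456789000ULL; /* page-aligned table address */
	uint64_t pte_root = 0, dev_flags = 0;  /* DTE words 0 and 1 */

	pte_root  |= DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
	dev_flags |= DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
	dev_flags |= DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;

	printf("word0=%#018llx word1=%#018llx\n",
	       (unsigned long long)pte_root, (unsigned long long)dev_flags);
	return 0;
}

Note why only the B and C fields are masked out in the patch: flags is read
back from the live device table earlier in set_dte_entry() and may carry
stale values, while pte_root is rebuilt from scratch, so the A field needs
no clearing.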
@@ -1803,6 +1835,46 @@ out_unlock:
 	return ret;
 }
 
+
+static void pdev_iommuv2_disable(struct pci_dev *pdev)
+{
+	pci_disable_ats(pdev);
+	pci_disable_pri(pdev);
+	pci_disable_pasid(pdev);
+}
+
+static int pdev_iommuv2_enable(struct pci_dev *pdev)
+{
+	int ret;
+
+	/* Only allow access to user-accessible pages */
+	ret = pci_enable_pasid(pdev, 0);
+	if (ret)
+		goto out_err;
+
+	/* First reset the PRI state of the device */
+	ret = pci_reset_pri(pdev);
+	if (ret)
+		goto out_err;
+
+	/* FIXME: Hardcode number of outstanding requests for now */
+	ret = pci_enable_pri(pdev, 32);
+	if (ret)
+		goto out_err;
+
+	ret = pci_enable_ats(pdev, PAGE_SHIFT);
+	if (ret)
+		goto out_err;
+
+	return 0;
+
+out_err:
+	pci_disable_pri(pdev);
+	pci_disable_pasid(pdev);
+
+	return ret;
+}
+
 /*
  * If a device is not yet associated with a domain, this function does
  * assigns it visible for the hardware
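
The enable path above is deliberately ordered: PASID comes first, the
device's PRI state is reset before PRI is enabled, and ATS goes last;
pdev_iommuv2_disable() unwinds in the reverse order. As a hedged
illustration (not part of this patch), a driver could pre-check that a
device exposes all three PCIe extended capabilities before attempting to
attach it to an IOMMUv2 domain; the capability ID names below are the ones
used by current kernels:

#include <linux/pci.h>

/* Hypothetical helper: all three extended capabilities are required */
static bool example_device_is_v2_capable(struct pci_dev *pdev)
{
	return pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
	       pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI) &&
	       pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
}

Note that the out_err path never has to disable ATS: pci_enable_ats() is
the last step, so if it is the call that fails, ATS was never enabled.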
@@ -1817,7 +1889,17 @@ static int attach_device(struct device *dev,
 
 	dev_data = get_dev_data(dev);
 
-	if (amd_iommu_iotlb_sup && pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
+	if (domain->flags & PD_IOMMUV2_MASK) {
+		if (!dev_data->iommu_v2 || !dev_data->passthrough)
+			return -EINVAL;
+
+		if (pdev_iommuv2_enable(pdev) != 0)
+			return -EINVAL;
+
+		dev_data->ats.enabled = true;
+		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
+	} else if (amd_iommu_iotlb_sup &&
+		   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
 		dev_data->ats.enabled = true;
 		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
 	}
@@ -1877,20 +1959,24 @@ static void __detach_device(struct iommu_dev_data *dev_data)
  */
 static void detach_device(struct device *dev)
 {
+	struct protection_domain *domain;
 	struct iommu_dev_data *dev_data;
 	unsigned long flags;
 
 	dev_data = get_dev_data(dev);
+	domain   = dev_data->domain;
 
 	/* lock device table */
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 	__detach_device(dev_data);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
-	if (dev_data->ats.enabled) {
+	if (domain->flags & PD_IOMMUV2_MASK)
+		pdev_iommuv2_disable(to_pci_dev(dev));
+	else if (dev_data->ats.enabled)
 		pci_disable_ats(to_pci_dev(dev));
-		dev_data->ats.enabled = false;
-	}
+
+	dev_data->ats.enabled = false;
 }
 
 /*
@@ -2788,6 +2874,9 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 	if (domain->mode != PAGE_MODE_NONE)
 		free_pagetable(domain);
 
+	if (domain->flags & PD_IOMMUV2_MASK)
+		free_gcr3_table(domain);
+
 	protection_domain_free(domain);
 
 	dom->priv = NULL;
@@ -3010,3 +3099,50 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
 EXPORT_SYMBOL(amd_iommu_domain_direct_map);
+
+int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+{
+	struct protection_domain *domain = dom->priv;
+	unsigned long flags;
+	int levels, ret;
+
+	if (pasids <= 0 || pasids > (PASID_MASK + 1))
+		return -EINVAL;
+
+	/* Number of GCR3 table levels required */
+	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
+		levels += 1;
+
+	if (levels > amd_iommu_max_glx_val)
+		return -EINVAL;
+
+	spin_lock_irqsave(&domain->lock, flags);
+
+	/*
+	 * Save us all sanity checks whether devices already in the
+	 * domain support IOMMUv2. Just force that the domain has no
+	 * devices attached when it is switched into IOMMUv2 mode.
+	 */
+	ret = -EBUSY;
+	if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
+		goto out;
+
+	ret = -ENOMEM;
+	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
+	if (domain->gcr3_tbl == NULL)
+		goto out;
+
+	domain->glx      = levels;
+	domain->flags   |= PD_IOMMUV2_MASK;
+	domain->updated  = true;
+
+	update_domain(domain);
+
+	ret = 0;
+
+out:
+	spin_unlock_irqrestore(&domain->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
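
The level computation in amd_iommu_domain_enable_v2() is compact enough to
deserve a worked example. A standalone mirror of the loop follows, under
the reading that each GCR3 table level resolves 9 PASID bits (512
eight-byte entries per 4k page) and that glx = N selects N + 1 table
levels:

#include <stdio.h>

/* Mirror of the loop in amd_iommu_domain_enable_v2() */
static int gcr3_levels(int pasids)
{
	int levels = 0;

	for (; (pasids - 1) & ~0x1ff; pasids >>= 9)
		levels += 1;

	return levels;
}

int main(void)
{
	/* Prints "0 1 2": 512 PASIDs fit one table (glx 0), 16-bit
	 * PASIDs need two (glx 1), the 20-bit maximum needs three. */
	printf("%d %d %d\n",
	       gcr3_levels(512), gcr3_levels(1 << 16), gcr3_levels(1 << 20));
	return 0;
}

The function rejects any request above amd_iommu_max_glx_val, which is
initialised to -1 in the first hunk and presumably set from the GLX level
the hardware reports during IOMMU initialisation; until then, no domain can
be switched into IOMMUv2 mode.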