summary refs log tree commit diff stats
path: root/drivers/iommu/intel-iommu.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--  drivers/iommu/intel-iommu.c  136
1 files changed, 106 insertions, 30 deletions
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 13c3c2dd0459..21a6853290cc 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1197,6 +1197,8 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
1197 unsigned long flag; 1197 unsigned long flag;
1198 1198
1199 addr = virt_to_phys(iommu->root_entry); 1199 addr = virt_to_phys(iommu->root_entry);
1200 if (sm_supported(iommu))
1201 addr |= DMA_RTADDR_SMT;
1200 1202
1201 raw_spin_lock_irqsave(&iommu->register_lock, flag); 1203 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1202 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); 1204 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
@@ -1918,6 +1920,56 @@ static void domain_exit(struct dmar_domain *domain)
1918 free_domain_mem(domain); 1920 free_domain_mem(domain);
1919} 1921}
1920 1922
1923/*
1924 * Get the PASID directory size for scalable mode context entry.
1925 * Value of X in the PDTS field of a scalable mode context entry
1926 * indicates PASID directory with 2^(X + 7) entries.
1927 */
1928static inline unsigned long context_get_sm_pds(struct pasid_table *table)
1929{
1930 int pds, max_pde;
1931
1932 max_pde = table->max_pasid >> PASID_PDE_SHIFT;
/* NOTE(review): find_first_bit() treats &max_pde (an int) as an unsigned long
 * bitmap — this presumably relies on little-endian layout and on max_pde's
 * set bits fitting in MAX_NR_PASID_BITS; TODO confirm on big-endian targets. */
1933 pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
/* Values below 7 clamp to an encoding of 0, i.e. the minimum 2^7 entries. */
1934 if (pds < 7)
1935 return 0;
1936
1937 return pds - 7;
1938}
1939
1940/*
1941 * Set the RID_PASID field of a scalable mode context entry. The
1942 * IOMMU hardware will use the PASID value set in this field for
1943 * DMA translations of DMA requests without PASID.
1944 */
1945static inline void
1946context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
1947{
/* Only the low 20 bits of the PASID are stored in the high qword. */
1948 context->hi |= pasid & ((1 << 20) - 1);
/* NOTE(review): bit 20 of ->hi is also set here — presumably the field's
 * valid/qualifier bit; verify against the VT-d scalable-mode context
 * entry format. */
1949 context->hi |= (1 << 20);
1950}
1951
1952/*
1953 * Set the DTE(Device-TLB Enable) field of a scalable mode context
1954 * entry.
1955 */
1956static inline void context_set_sm_dte(struct context_entry *context)
1957{
/* DTE lives at bit 2 of the context entry's low qword. */
1958 context->lo |= (1 << 2);
1959}
1960
1961/*
1962 * Set the PRE(Page Request Enable) field of a scalable mode context
1963 * entry.
1964 */
1965static inline void context_set_sm_pre(struct context_entry *context)
1966{
/* PRE lives at bit 4 of the context entry's low qword. */
1967 context->lo |= (1 << 4);
1968}
1969
1970/* Convert value to context PASID directory size field coding. */
/* PDTS is a 3-bit field at bit offset 9 of the context entry's low qword. */
1971#define context_pdts(pds) (((pds) & 0x7) << 9)
1972
1921static int domain_context_mapping_one(struct dmar_domain *domain, 1973static int domain_context_mapping_one(struct dmar_domain *domain,
1922 struct intel_iommu *iommu, 1974 struct intel_iommu *iommu,
1923 struct pasid_table *table, 1975 struct pasid_table *table,
@@ -1928,8 +1980,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1928 struct device_domain_info *info = NULL; 1980 struct device_domain_info *info = NULL;
1929 struct context_entry *context; 1981 struct context_entry *context;
1930 unsigned long flags; 1982 unsigned long flags;
1931 struct dma_pte *pgd; 1983 int ret;
1932 int ret, agaw;
1933 1984
1934 WARN_ON(did == 0); 1985 WARN_ON(did == 0);
1935 1986
@@ -1975,41 +2026,67 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1975 } 2026 }
1976 } 2027 }
1977 2028
1978 pgd = domain->pgd;
1979
1980 context_clear_entry(context); 2029 context_clear_entry(context);
1981 context_set_domain_id(context, did);
1982 2030
1983 /* 2031 if (sm_supported(iommu)) {
1984 * Skip top levels of page tables for iommu which has less agaw 2032 unsigned long pds;
1985 * than default. Unnecessary for PT mode.
1986 */
1987 if (translation != CONTEXT_TT_PASS_THROUGH) {
1988 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
1989 ret = -ENOMEM;
1990 pgd = phys_to_virt(dma_pte_addr(pgd));
1991 if (!dma_pte_present(pgd))
1992 goto out_unlock;
1993 }
1994 2033
1995 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); 2034 WARN_ON(!table);
1996 if (info && info->ats_supported) 2035
1997 translation = CONTEXT_TT_DEV_IOTLB; 2036 /* Setup the PASID DIR pointer: */
1998 else 2037 pds = context_get_sm_pds(table);
1999 translation = CONTEXT_TT_MULTI_LEVEL; 2038 context->lo = (u64)virt_to_phys(table->table) |
2039 context_pdts(pds);
2040
2041 /* Setup the RID_PASID field: */
2042 context_set_sm_rid2pasid(context, PASID_RID2PASID);
2000 2043
2001 context_set_address_root(context, virt_to_phys(pgd));
2002 context_set_address_width(context, agaw);
2003 } else {
2004 /* 2044 /*
2005 * In pass through mode, AW must be programmed to 2045 * Setup the Device-TLB enable bit and Page request
2006 * indicate the largest AGAW value supported by 2046 * Enable bit:
2007 * hardware. And ASR is ignored by hardware.
2008 */ 2047 */
2009 context_set_address_width(context, iommu->msagaw); 2048 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2049 if (info && info->ats_supported)
2050 context_set_sm_dte(context);
2051 if (info && info->pri_supported)
2052 context_set_sm_pre(context);
2053 } else {
2054 struct dma_pte *pgd = domain->pgd;
2055 int agaw;
2056
2057 context_set_domain_id(context, did);
2058 context_set_translation_type(context, translation);
2059
2060 if (translation != CONTEXT_TT_PASS_THROUGH) {
2061 /*
2062 * Skip top levels of page tables for iommu which has
2063 * less agaw than default. Unnecessary for PT mode.
2064 */
2065 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2066 ret = -ENOMEM;
2067 pgd = phys_to_virt(dma_pte_addr(pgd));
2068 if (!dma_pte_present(pgd))
2069 goto out_unlock;
2070 }
2071
2072 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2073 if (info && info->ats_supported)
2074 translation = CONTEXT_TT_DEV_IOTLB;
2075 else
2076 translation = CONTEXT_TT_MULTI_LEVEL;
2077
2078 context_set_address_root(context, virt_to_phys(pgd));
2079 context_set_address_width(context, agaw);
2080 } else {
2081 /*
2082 * In pass through mode, AW must be programmed to
2083 * indicate the largest AGAW value supported by
2084 * hardware. And ASR is ignored by hardware.
2085 */
2086 context_set_address_width(context, iommu->msagaw);
2087 }
2010 } 2088 }
2011 2089
2012 context_set_translation_type(context, translation);
2013 context_set_fault_enable(context); 2090 context_set_fault_enable(context);
2014 context_set_present(context); 2091 context_set_present(context);
2015 domain_flush_cache(domain, context, sizeof(*context)); 2092 domain_flush_cache(domain, context, sizeof(*context));
@@ -5180,7 +5257,6 @@ static void intel_iommu_put_resv_regions(struct device *dev,
5180} 5257}
5181 5258
5182#ifdef CONFIG_INTEL_IOMMU_SVM 5259#ifdef CONFIG_INTEL_IOMMU_SVM
5183#define MAX_NR_PASID_BITS (20)
5184static inline unsigned long intel_iommu_get_pts(struct device *dev) 5260static inline unsigned long intel_iommu_get_pts(struct device *dev)
5185{ 5261{
5186 int pts, max_pasid; 5262 int pts, max_pasid;