author     Linus Torvalds <torvalds@linux-foundation.org>    2009-09-23 13:06:10 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-09-23 13:06:10 -0400
commit     b09a75fc5e77b7c58d097236f89b1ff72dcdb562 (patch)
tree       8f818f1b3e44d9bc822b13dc7c368077981dd6ea /drivers/pci/intel-iommu.c
parent     cf63ff5fa4399e215cc5ef322ccd8bddfff9afa6 (diff)
parent     b94996c99c8befed9cbbb8804a4625e203913318 (diff)
Merge git://git.infradead.org/iommu-2.6
* git://git.infradead.org/iommu-2.6: (23 commits)
  intel-iommu: Disable PMRs after we enable translation, not before
  intel-iommu: Kill DMAR_BROKEN_GFX_WA option.
  intel-iommu: Fix integer wrap on 32 bit kernels
  intel-iommu: Fix integer overflow in dma_pte_{clear_range,free_pagetable}()
  intel-iommu: Limit DOMAIN_MAX_PFN to fit in an 'unsigned long'
  intel-iommu: Fix kernel hang if interrupt remapping disabled in BIOS
  intel-iommu: Disallow interrupt remapping if not all ioapics covered
  intel-iommu: include linux/dmi.h to use dmi_ routines
  pci/dmar: correct off-by-one error in dmar_fault()
  intel-iommu: Cope with yet another BIOS screwup causing crashes
  intel-iommu: iommu init error path bug fixes
  intel-iommu: Mark functions with __init
  USB: Work around BIOS bugs by quiescing USB controllers earlier
  ia64: IOMMU passthrough mode shouldn't trigger swiotlb init
  intel-iommu: make domain_add_dev_info() call domain_context_mapping()
  intel-iommu: Unify hardware and software passthrough support
  intel-iommu: Cope with broken HP DC7900 BIOS
  iommu=pt is a valid early param
  intel-iommu: double kfree()
  intel-iommu: Kill pointless intel_unmap_single() function
  ...

Fixed up trivial include lines conflict in drivers/pci/intel-iommu.c
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c | 323
1 file changed, 161 insertions(+), 162 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 562221e11917..855dd7ca47f3 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -38,6 +38,7 @@
 #include <linux/intel-iommu.h>
 #include <linux/sysdev.h>
 #include <linux/tboot.h>
+#include <linux/dmi.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"
@@ -56,8 +57,14 @@
 
 #define MAX_AGAW_WIDTH 64
 
-#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
-#define DOMAIN_MAX_PFN(gaw)  ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
+
+/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
+   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
+#define DOMAIN_MAX_PFN(gaw)    ((unsigned long) min_t(uint64_t, \
+                                __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
+#define DOMAIN_MAX_ADDR(gaw)   (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
 
 #define IOVA_PFN(addr)         ((addr) >> PAGE_SHIFT)
 #define DMA_32BIT_PFN          IOVA_PFN(DMA_BIT_MASK(32))
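Aside, not part of the patch: the clamp above matters on 32-bit kernels, where a guest address width such as 48 bits would otherwise yield a DOMAIN_MAX_PFN that no longer fits in an unsigned long. A minimal stand-alone sketch of the same min_t() idea (user-space C, hypothetical domain_max_pfn() helper):

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT 12
#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)

/* Clamp the 64-bit PFN limit so it always fits in an unsigned long,
 * mirroring min_t(uint64_t, __DOMAIN_MAX_PFN(gaw), (unsigned long)-1). */
static unsigned long domain_max_pfn(int gaw)
{
        uint64_t pfn = __DOMAIN_MAX_PFN(gaw);
        uint64_t ulong_max = (unsigned long)-1;

        return (unsigned long)(pfn < ulong_max ? pfn : ulong_max);
}

int main(void)
{
        /* On a 32-bit build (32-bit unsigned long), gaw = 48 would overflow
         * the PFN type without the clamp; here it is capped instead. */
        printf("max pfn for gaw=48: %#lx\n", domain_max_pfn(48));
        return 0;
}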
@@ -252,7 +259,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
  * 2. It maps to each iommu if successful.
  * 3. Each iommu mapps to this domain if successful.
  */
-struct dmar_domain *si_domain;
+static struct dmar_domain *si_domain;
+static int hw_pass_through = 1;
 
 /* devices under the same p2p bridge are owned in one domain */
 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
@@ -728,7 +736,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                        return NULL;
 
                domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
-               pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+               pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
                if (cmpxchg64(&pte->val, 0ULL, pteval)) {
                        /* Someone else set it while we were thinking; use theirs. */
                        free_pgtable_page(tmp_page);
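Aside, not part of the patch: the new (uint64_t) cast matters on 32-bit kernels. virt_to_dma_pfn() returns an unsigned long there, so the left shift by VTD_PAGE_SHIFT would be evaluated in 32-bit arithmetic and silently drop the upper bits of the page-table entry. A minimal stand-alone illustration of that wrap (user-space C, uint32_t standing in for a 32-bit unsigned long):

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT 12

int main(void)
{
        uint32_t pfn = 0x123456;        /* a PFN whose address exceeds 32 bits */

        /* Shift done in 32 bits: the top bits of the address are lost. */
        uint32_t truncated = pfn << VTD_PAGE_SHIFT;

        /* Widen before shifting, as the patched pteval assignment does. */
        uint64_t correct = (uint64_t)pfn << VTD_PAGE_SHIFT;

        printf("truncated=%#x correct=%#llx\n",
               truncated, (unsigned long long)correct);
        return 0;
}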
@@ -778,9 +786,10 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 
        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+       BUG_ON(start_pfn > last_pfn);
 
        /* we don't need lock here; nobody else touches the iova range */
-       while (start_pfn <= last_pfn) {
+       do {
                first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
                if (!pte) {
                        start_pfn = align_to_level(start_pfn + 1, 2);
@@ -794,7 +803,8 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 
                domain_flush_cache(domain, first_pte,
                                   (void *)pte - (void *)first_pte);
-       }
+
+       } while (start_pfn && start_pfn <= last_pfn);
 }
 
 /* free page table pages. last level pte should already be cleared */
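Aside, not part of the patch: converting the loop to do/while with an extra "start_pfn &&" test guards against wrap-around. Once DOMAIN_MAX_PFN is clamped to the top of the unsigned long range, start_pfn can only move past last_pfn by wrapping to zero, so the old "start_pfn <= last_pfn" condition would never become false. A tiny demonstration of the termination condition (user-space C, hypothetical next_chunk() standing in for the PTE walk):

#include <stdio.h>

/* Hypothetical stand-in for advancing to the next PTE chunk. */
static unsigned long next_chunk(unsigned long pfn)
{
        return pfn + 1;         /* wraps to 0 at the very top of the range */
}

int main(void)
{
        unsigned long last_pfn = (unsigned long)-1;     /* clamped maximum */
        unsigned long start_pfn = last_pfn - 2;
        unsigned int iterations = 0;

        /* The "start_pfn &&" clause is what ends the loop after the wrap. */
        do {
                iterations++;
                start_pfn = next_chunk(start_pfn);
        } while (start_pfn && start_pfn <= last_pfn);

        printf("%u iterations\n", iterations);          /* prints 3 */
        return 0;
}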
@@ -810,6 +820,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 
        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+       BUG_ON(start_pfn > last_pfn);
 
        /* We don't need lock here; nobody else touches the iova range */
        level = 2;
@@ -820,7 +831,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
                if (tmp + level_size(level) - 1 > last_pfn)
                        return;
 
-               while (tmp + level_size(level) - 1 <= last_pfn) {
+               do {
                        first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
                        if (!pte) {
                                tmp = align_to_level(tmp + 1, level + 1);
@@ -839,7 +850,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
                        domain_flush_cache(domain, first_pte,
                                           (void *)pte - (void *)first_pte);
 
-               }
+               } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
                level++;
        }
        /* free pgd */
@@ -1158,6 +1169,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
        pr_debug("Number of Domains supportd <%ld>\n", ndomains);
        nlongs = BITS_TO_LONGS(ndomains);
 
+       spin_lock_init(&iommu->lock);
+
        /* TBD: there might be 64K domains,
         * consider other allocation for future chip
         */
@@ -1170,12 +1183,9 @@ static int iommu_init_domains(struct intel_iommu *iommu)
                        GFP_KERNEL);
        if (!iommu->domains) {
                printk(KERN_ERR "Allocating domain array failed\n");
-               kfree(iommu->domain_ids);
                return -ENOMEM;
        }
 
-       spin_lock_init(&iommu->lock);
-
        /*
         * if Caching mode is set, then invalid translations are tagged
         * with domainid 0. Hence we need to pre-allocate it.
@@ -1195,22 +1205,24 @@ void free_dmar_iommu(struct intel_iommu *iommu)
        int i;
        unsigned long flags;
 
-       i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
-       for (; i < cap_ndoms(iommu->cap); ) {
-               domain = iommu->domains[i];
-               clear_bit(i, iommu->domain_ids);
+       if ((iommu->domains) && (iommu->domain_ids)) {
+               i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
+               for (; i < cap_ndoms(iommu->cap); ) {
+                       domain = iommu->domains[i];
+                       clear_bit(i, iommu->domain_ids);
+
+                       spin_lock_irqsave(&domain->iommu_lock, flags);
+                       if (--domain->iommu_count == 0) {
+                               if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+                                       vm_domain_exit(domain);
+                               else
+                                       domain_exit(domain);
+                       }
+                       spin_unlock_irqrestore(&domain->iommu_lock, flags);
 
-               spin_lock_irqsave(&domain->iommu_lock, flags);
-               if (--domain->iommu_count == 0) {
-                       if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
-                               vm_domain_exit(domain);
-                       else
-                               domain_exit(domain);
-               }
-               spin_unlock_irqrestore(&domain->iommu_lock, flags);
-
-               i = find_next_bit(iommu->domain_ids,
-                                 cap_ndoms(iommu->cap), i+1);
+                       i = find_next_bit(iommu->domain_ids,
+                                         cap_ndoms(iommu->cap), i+1);
+               }
        }
 
        if (iommu->gcmd & DMA_GCMD_TE)
@@ -1310,7 +1322,6 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 }
 
 static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_alloc_key;
 static struct lock_class_key reserved_rbtree_key;
 
 static void dmar_init_reserved_ranges(void)
@@ -1321,8 +1332,6 @@ static void dmar_init_reserved_ranges(void)
 
        init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
 
-       lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
-               &reserved_alloc_key);
        lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
                &reserved_rbtree_key);
 
@@ -1959,14 +1968,35 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
        struct dmar_domain *domain;
        int ret;
 
-       printk(KERN_INFO
-              "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
-              pci_name(pdev), start, end);
-
        domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain)
                return -ENOMEM;
 
+       /* For _hardware_ passthrough, don't bother. But for software
+          passthrough, we do it anyway -- it may indicate a memory
+          range which is reserved in E820, so which didn't get set
+          up to start with in si_domain */
+       if (domain == si_domain && hw_pass_through) {
+               printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+                      pci_name(pdev), start, end);
+               return 0;
+       }
+
+       printk(KERN_INFO
+              "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+              pci_name(pdev), start, end);
+
+       if (end >> agaw_to_width(domain->agaw)) {
+               WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
+                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                    agaw_to_width(domain->agaw),
+                    dmi_get_system_info(DMI_BIOS_VENDOR),
+                    dmi_get_system_info(DMI_BIOS_VERSION),
+                    dmi_get_system_info(DMI_PRODUCT_VERSION));
+               ret = -EIO;
+               goto error;
+       }
+
        ret = iommu_domain_identity_map(domain, start, end);
        if (ret)
                goto error;
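Aside, not part of the patch: the new "end >> agaw_to_width(domain->agaw)" test is a plain width check. If any bit of the RMRR end address sits at or above the domain's adjusted guest address width, the range cannot be mapped, so the code warns about the BIOS and bails out. A tiny stand-alone sketch of that check (user-space C, hypothetical addr_fits_width() helper):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when 'addr' is representable in 'width' address bits. */
static bool addr_fits_width(uint64_t addr, int width)
{
        return (addr >> width) == 0;
}

int main(void)
{
        /* A 39-bit domain cannot hold a reserved region ending at 1 TiB. */
        printf("%d\n", addr_fits_width(1ULL << 40, 39));        /* 0 */
        printf("%d\n", addr_fits_width((1ULL << 39) - 1, 39));  /* 1 */
        return 0;
}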
@@ -2017,23 +2047,6 @@ static inline void iommu_prepare_isa(void)
 }
 #endif /* !CONFIG_DMAR_FLPY_WA */
 
-/* Initialize each context entry as pass through.*/
-static int __init init_context_pass_through(void)
-{
-       struct pci_dev *pdev = NULL;
-       struct dmar_domain *domain;
-       int ret;
-
-       for_each_pci_dev(pdev) {
-               domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-               ret = domain_context_mapping(domain, pdev,
-                                            CONTEXT_TT_PASS_THROUGH);
-               if (ret)
-                       return ret;
-       }
-       return 0;
-}
-
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
 static int __init si_domain_work_fn(unsigned long start_pfn,
@@ -2048,7 +2061,7 @@ static int __init si_domain_work_fn(unsigned long start_pfn,
 
 }
 
-static int si_domain_init(void)
+static int __init si_domain_init(int hw)
 {
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
@@ -2075,6 +2088,9 @@ static int si_domain_init(void)
 
        si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
 
+       if (hw)
+               return 0;
+
        for_each_online_node(nid) {
                work_with_active_regions(nid, si_domain_work_fn, &ret);
                if (ret)
@@ -2101,15 +2117,23 @@ static int identity_mapping(struct pci_dev *pdev)
 }
 
 static int domain_add_dev_info(struct dmar_domain *domain,
-                              struct pci_dev *pdev)
+                              struct pci_dev *pdev,
+                              int translation)
 {
        struct device_domain_info *info;
        unsigned long flags;
+       int ret;
 
        info = alloc_devinfo_mem();
        if (!info)
                return -ENOMEM;
 
+       ret = domain_context_mapping(domain, pdev, translation);
+       if (ret) {
+               free_devinfo_mem(info);
+               return ret;
+       }
+
        info->segment = pci_domain_nr(pdev->bus);
        info->bus = pdev->bus->number;
        info->devfn = pdev->devfn;
@@ -2166,27 +2190,25 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
        return 1;
 }
 
-static int iommu_prepare_static_identity_mapping(void)
+static int __init iommu_prepare_static_identity_mapping(int hw)
 {
        struct pci_dev *pdev = NULL;
        int ret;
 
-       ret = si_domain_init();
+       ret = si_domain_init(hw);
        if (ret)
                return -EFAULT;
 
        for_each_pci_dev(pdev) {
                if (iommu_should_identity_map(pdev, 1)) {
-                       printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-                              pci_name(pdev));
+                       printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
+                              hw ? "hardware" : "software", pci_name(pdev));
 
-                       ret = domain_context_mapping(si_domain, pdev,
-                                                    CONTEXT_TT_MULTI_LEVEL);
+                       ret = domain_add_dev_info(si_domain, pdev,
+                                                 hw ? CONTEXT_TT_PASS_THROUGH :
+                                                      CONTEXT_TT_MULTI_LEVEL);
                        if (ret)
                                return ret;
-                       ret = domain_add_dev_info(si_domain, pdev);
-                       if (ret)
-                               return ret;
                }
        }
 
@@ -2200,14 +2222,6 @@ int __init init_dmars(void)
        struct pci_dev *pdev;
        struct intel_iommu *iommu;
        int i, ret;
-       int pass_through = 1;
-
-       /*
-        * In case pass through can not be enabled, iommu tries to use identity
-        * mapping.
-        */
-       if (iommu_pass_through)
-               iommu_identity_mapping = 1;
 
        /*
         * for each drhd
@@ -2235,7 +2249,6 @@ int __init init_dmars(void)
        deferred_flush = kzalloc(g_num_of_iommus *
                sizeof(struct deferred_flush_tables), GFP_KERNEL);
        if (!deferred_flush) {
-               kfree(g_iommus);
                ret = -ENOMEM;
                goto error;
        }
@@ -2262,14 +2275,8 @@ int __init init_dmars(void)
                        goto error;
                }
                if (!ecap_pass_through(iommu->ecap))
-                       pass_through = 0;
+                       hw_pass_through = 0;
        }
-       if (iommu_pass_through)
-               if (!pass_through) {
-                       printk(KERN_INFO
-                              "Pass Through is not supported by hardware.\n");
-                       iommu_pass_through = 0;
-               }
 
        /*
         * Start from the sane iommu hardware state.
@@ -2324,64 +2331,57 @@ int __init init_dmars(void)
                }
        }
 
+       if (iommu_pass_through)
+               iommu_identity_mapping = 1;
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+       else
+               iommu_identity_mapping = 2;
+#endif
        /*
-        * If pass through is set and enabled, context entries of all pci
-        * devices are intialized by pass through translation type.
+        * If pass through is not set or not enabled, setup context entries for
+        * identity mappings for rmrr, gfx, and isa and may fall back to static
+        * identity mapping if iommu_identity_mapping is set.
         */
-       if (iommu_pass_through) {
-               ret = init_context_pass_through();
+       if (iommu_identity_mapping) {
+               ret = iommu_prepare_static_identity_mapping(hw_pass_through);
                if (ret) {
-                       printk(KERN_ERR "IOMMU: Pass through init failed.\n");
-                       iommu_pass_through = 0;
+                       printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+                       goto error;
                }
        }
-
        /*
-        * If pass through is not set or not enabled, setup context entries for
-        * identity mappings for rmrr, gfx, and isa and may fall back to static
-        * identity mapping if iommu_identity_mapping is set.
+        * For each rmrr
+        *   for each dev attached to rmrr
+        *   do
+        *     locate drhd for dev, alloc domain for dev
+        *     allocate free domain
+        *     allocate page table entries for rmrr
+        *     if context not allocated for bus
+        *           allocate and init context
+        *           set present in root table for this bus
+        *     init context with domain, translation etc
+        *    endfor
+        * endfor
         */
-       if (!iommu_pass_through) {
-#ifdef CONFIG_DMAR_BROKEN_GFX_WA
-               if (!iommu_identity_mapping)
-                       iommu_identity_mapping = 2;
-#endif
-               if (iommu_identity_mapping)
-                       iommu_prepare_static_identity_mapping();
-               /*
-                * For each rmrr
-                *   for each dev attached to rmrr
-                *   do
-                *     locate drhd for dev, alloc domain for dev
-                *     allocate free domain
-                *     allocate page table entries for rmrr
-                *     if context not allocated for bus
-                *           allocate and init context
-                *           set present in root table for this bus
-                *     init context with domain, translation etc
-                *    endfor
-                * endfor
-                */
-               printk(KERN_INFO "IOMMU: Setting RMRR:\n");
-               for_each_rmrr_units(rmrr) {
-                       for (i = 0; i < rmrr->devices_cnt; i++) {
-                               pdev = rmrr->devices[i];
-                               /*
-                                * some BIOS lists non-exist devices in DMAR
-                                * table.
-                                */
-                               if (!pdev)
-                                       continue;
-                               ret = iommu_prepare_rmrr_dev(rmrr, pdev);
-                               if (ret)
-                                       printk(KERN_ERR
-                                               "IOMMU: mapping reserved region failed\n");
-                       }
+       printk(KERN_INFO "IOMMU: Setting RMRR:\n");
+       for_each_rmrr_units(rmrr) {
+               for (i = 0; i < rmrr->devices_cnt; i++) {
+                       pdev = rmrr->devices[i];
+                       /*
+                        * some BIOS lists non-exist devices in DMAR
+                        * table.
+                        */
+                       if (!pdev)
+                               continue;
+                       ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+                       if (ret)
+                               printk(KERN_ERR
+                                      "IOMMU: mapping reserved region failed\n");
                }
-
-               iommu_prepare_isa();
        }
 
+       iommu_prepare_isa();
+
        /*
         * for each drhd
         *   enable fault log
@@ -2404,11 +2404,12 @@ int __init init_dmars(void)
 
                iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-               iommu_disable_protect_mem_regions(iommu);
 
                ret = iommu_enable_translation(iommu);
                if (ret)
                        goto error;
+
+               iommu_disable_protect_mem_regions(iommu);
        }
 
        return 0;
@@ -2455,8 +2456,7 @@ static struct iova *intel_alloc_iova(struct device *dev,
        return iova;
 }
 
-static struct dmar_domain *
-get_valid_domain_for_dev(struct pci_dev *pdev)
+static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
 {
        struct dmar_domain *domain;
        int ret;
@@ -2484,6 +2484,18 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
        return domain;
 }
 
+static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
+{
+       struct device_domain_info *info;
+
+       /* No lock here, assumes no domain exit in normal case */
+       info = dev->dev.archdata.iommu;
+       if (likely(info))
+               return info->domain;
+
+       return __get_valid_domain_for_dev(dev);
+}
+
 static int iommu_dummy(struct pci_dev *pdev)
 {
        return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
@@ -2526,10 +2538,10 @@ static int iommu_no_mapping(struct device *dev)
                 */
                if (iommu_should_identity_map(pdev, 0)) {
                        int ret;
-                       ret = domain_add_dev_info(si_domain, pdev);
-                       if (ret)
-                               return 0;
-                       ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+                       ret = domain_add_dev_info(si_domain, pdev,
+                                                 hw_pass_through ?
+                                                 CONTEXT_TT_PASS_THROUGH :
+                                                 CONTEXT_TT_MULTI_LEVEL);
                        if (!ret) {
                                printk(KERN_INFO "64bit %s uses identity mapping\n",
                                       pci_name(pdev));
@@ -2638,10 +2650,9 @@ static void flush_unmaps(void)
                        unsigned long mask;
                        struct iova *iova = deferred_flush[i].iova[j];
 
-                       mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
-                       mask = ilog2(mask >> VTD_PAGE_SHIFT);
+                       mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
                        iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
-                                       iova->pfn_lo << PAGE_SHIFT, mask);
+                                       (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
                        __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
                }
                deferred_flush[i].next = 0;
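Aside, not part of the patch: the old code built an intermediate byte count that could wrap an unsigned long on 32-bit before taking ilog2(), and it shifted pfn_lo in native width. Assuming mm_to_dma_pfn() is the usual pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT) conversion, the new expression computes the same order while staying in page units. A small sketch of that equivalence (user-space C, helper names are assumptions):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define VTD_PAGE_SHIFT  12

/* Assumed definition: convert mm page frames to VT-d page frames. */
static unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static unsigned int ilog2_ul(unsigned long v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned long pages = 16;       /* pfn_hi - pfn_lo + 1 */

        /* Old: up to bytes, then back down to VT-d pages (wide intermediate). */
        unsigned int old_mask = ilog2_ul((pages << PAGE_SHIFT) >> VTD_PAGE_SHIFT);
        /* New: stay in page units the whole time. */
        unsigned int new_mask = ilog2_ul(mm_to_dma_pfn(pages));

        printf("%u %u\n", old_mask, new_mask);  /* both print 4 */
        return 0;
}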
@@ -2734,12 +2745,6 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
        }
 }
 
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
-                              int dir)
-{
-       intel_unmap_page(dev, dev_addr, size, dir, NULL);
-}
-
 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
                                   dma_addr_t *dma_handle, gfp_t flags)
 {
@@ -2772,7 +2777,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
        size = PAGE_ALIGN(size);
        order = get_order(size);
 
-       intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
+       intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
        free_pages((unsigned long)vaddr, order);
 }
 
@@ -2808,11 +2813,18 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
        /* free page tables */
        dma_pte_free_pagetable(domain, start_pfn, last_pfn);
 
-       iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-                             (last_pfn - start_pfn + 1));
-
-       /* free iova */
-       __free_iova(&domain->iovad, iova);
+       if (intel_iommu_strict) {
+               iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+                                     last_pfn - start_pfn + 1);
+               /* free iova */
+               __free_iova(&domain->iovad, iova);
+       } else {
+               add_unmap(domain, iova);
+               /*
+                * queue up the release of the unmap to save the 1/6th of the
+                * cpu used up by the iotlb flush operation...
+                */
+       }
 }
 
 static int intel_nontranslate_map_sg(struct device *hddev,
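Aside, not part of the patch: the non-strict branch defers work through add_unmap() so that many unmaps can share a single IOTLB flush, which is where the quoted CPU saving comes from. A rough, self-contained sketch of that batching idea (user-space C; all names here are hypothetical stand-ins, not the driver's API):

#include <stddef.h>
#include <stdio.h>

#define BATCH 16

struct pending {
        unsigned long start_pfn;
        unsigned long npages;
};

static struct pending queue[BATCH];
static size_t nr_pending;

/* Stand-in for the flush + IOVA release pass: the point is that it runs
 * once per batch instead of once per individual unmap. */
static void flush_pending(void)
{
        printf("flushing %zu queued ranges in one pass\n", nr_pending);
        nr_pending = 0;
}

/* Stand-in for add_unmap(): queue the range, flush only when the batch fills. */
static void queue_unmap(unsigned long start_pfn, unsigned long npages)
{
        queue[nr_pending].start_pfn = start_pfn;
        queue[nr_pending].npages = npages;
        if (++nr_pending == BATCH)
                flush_pending();
}

int main(void)
{
        for (unsigned long pfn = 0; pfn < 64; pfn += 2)
                queue_unmap(pfn, 2);    /* 32 unmaps -> 2 batched flushes */
        if (nr_pending)
                flush_pending();        /* the driver's unmap_timer plays this role */
        return 0;
}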
@@ -3056,8 +3068,8 @@ static int init_iommu_hw(void)
                                           DMA_CCMD_GLOBAL_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0,
                                         DMA_TLB_GLOBAL_FLUSH);
-               iommu_disable_protect_mem_regions(iommu);
                iommu_enable_translation(iommu);
+               iommu_disable_protect_mem_regions(iommu);
        }
 
        return 0;
@@ -3205,7 +3217,7 @@ int __init intel_iommu_init(void)
         * Check the need for DMA-remapping initialization now.
         * Above initialization will also be used by Interrupt-remapping.
         */
-       if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
+       if (no_iommu || swiotlb || dmar_disabled)
                return -ENODEV;
 
        iommu_init_mempool();
@@ -3227,14 +3239,7 @@ int __init intel_iommu_init(void)
 
        init_timer(&unmap_timer);
        force_iommu = 1;
-
-       if (!iommu_pass_through) {
-               printk(KERN_INFO
-                      "Multi-level page-table translation for DMAR.\n");
-               dma_ops = &intel_dma_ops;
-       } else
-               printk(KERN_INFO
-                      "DMAR: Pass through translation for DMAR.\n");
+       dma_ops = &intel_dma_ops;
 
        init_iommu_sysfs();
 
@@ -3517,7 +3522,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
        struct intel_iommu *iommu;
        int addr_width;
        u64 end;
-       int ret;
 
        /* normally pdev is not mapped */
        if (unlikely(domain_context_mapped(pdev))) {
@@ -3549,12 +3553,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
                return -EFAULT;
        }
 
-       ret = domain_add_dev_info(dmar_domain, pdev);
-       if (ret)
-               return ret;
-
-       ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
-       return ret;
+       return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
 }
 
 static void intel_iommu_detach_device(struct iommu_domain *domain,