-rw-r--r--  arch/x86/kernel/pci-dma.c       4
-rw-r--r--  arch/x86/kernel/pci-swiotlb.c   5
-rw-r--r--  drivers/pci/dmar.c             22
-rw-r--r--  drivers/pci/intel-iommu.c     218
-rw-r--r--  drivers/pci/iova.c             16
-rw-r--r--  include/linux/iova.h            1
6 files changed, 125 insertions, 141 deletions
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 1a041bcf506b..ae13e34f7248 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -212,10 +212,8 @@ static __init int iommu_setup(char *p)
 		if (!strncmp(p, "soft", 4))
 			swiotlb = 1;
 #endif
-		if (!strncmp(p, "pt", 2)) {
+		if (!strncmp(p, "pt", 2))
 			iommu_pass_through = 1;
-			return 1;
-		}
 
 		gart_parse_options(p);
 
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 6af96ee44200..1e66b18f45cb 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -71,9 +71,8 @@ void __init pci_swiotlb_init(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
 #ifdef CONFIG_X86_64
-	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
-	    iommu_pass_through)
-	       swiotlb = 1;
+	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN))
+		swiotlb = 1;
 #endif
 	if (swiotlb_force)
 		swiotlb = 1;
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 7b287cb38b7a..380b60e677e0 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -632,20 +632,31 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+		/* Promote an attitude of violence to a BIOS engineer today */
+		WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+		     drhd->reg_base_addr,
+		     dmi_get_system_info(DMI_BIOS_VENDOR),
+		     dmi_get_system_info(DMI_BIOS_VERSION),
+		     dmi_get_system_info(DMI_PRODUCT_VERSION));
+		goto err_unmap;
+	}
+
 #ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
-		goto error;
+		goto err_unmap;
 	}
 	msagaw = iommu_calculate_max_sagaw(iommu);
 	if (msagaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
-		goto error;
+		goto err_unmap;
 	}
 #endif
 	iommu->agaw = agaw;
@@ -665,7 +676,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	}
 
 	ver = readl(iommu->reg + DMAR_VER_REG);
-	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
 		(unsigned long long)drhd->reg_base_addr,
 		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
 		(unsigned long long)iommu->cap,
@@ -675,7 +686,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 
 	drhd->iommu = iommu;
 	return 0;
-error:
+
+ err_unmap:
+	iounmap(iommu->reg);
+ error:
 	kfree(iommu);
 	return -1;
 }
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 2314ad7ee5fe..3f256b8d83c1 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -251,7 +251,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
  * 2. It maps to each iommu if successful.
  * 3. Each iommu mapps to this domain if successful.
  */
-struct dmar_domain *si_domain;
+static struct dmar_domain *si_domain;
+static int hw_pass_through = 1;
 
 /* devices under the same p2p bridge are owned in one domain */
 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
@@ -1309,7 +1310,6 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 }
 
 static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_alloc_key;
 static struct lock_class_key reserved_rbtree_key;
 
 static void dmar_init_reserved_ranges(void)
@@ -1320,8 +1320,6 @@ static void dmar_init_reserved_ranges(void)
 
 	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
 
-	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
-		&reserved_alloc_key);
 	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
 		&reserved_rbtree_key);
 
@@ -1958,14 +1956,24 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
 	struct dmar_domain *domain;
 	int ret;
 
-	printk(KERN_INFO
-	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
-	       pci_name(pdev), start, end);
-
 	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
 	if (!domain)
 		return -ENOMEM;
 
+	/* For _hardware_ passthrough, don't bother. But for software
+	   passthrough, we do it anyway -- it may indicate a memory
+	   range which is reserved in E820, so which didn't get set
+	   up to start with in si_domain */
+	if (domain == si_domain && hw_pass_through) {
+		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+		       pci_name(pdev), start, end);
+		return 0;
+	}
+
+	printk(KERN_INFO
+	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+	       pci_name(pdev), start, end);
+
 	ret = iommu_domain_identity_map(domain, start, end);
 	if (ret)
 		goto error;
@@ -2016,23 +2024,6 @@ static inline void iommu_prepare_isa(void)
 }
 #endif /* !CONFIG_DMAR_FLPY_WA */
 
-/* Initialize each context entry as pass through.*/
-static int __init init_context_pass_through(void)
-{
-	struct pci_dev *pdev = NULL;
-	struct dmar_domain *domain;
-	int ret;
-
-	for_each_pci_dev(pdev) {
-		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-		ret = domain_context_mapping(domain, pdev,
-					     CONTEXT_TT_PASS_THROUGH);
-		if (ret)
-			return ret;
-	}
-	return 0;
-}
-
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
 static int __init si_domain_work_fn(unsigned long start_pfn,
@@ -2047,7 +2038,7 @@ static int __init si_domain_work_fn(unsigned long start_pfn,
 
 }
 
-static int si_domain_init(void)
+static int si_domain_init(int hw)
 {
 	struct dmar_drhd_unit *drhd;
 	struct intel_iommu *iommu;
@@ -2074,6 +2065,9 @@ static int si_domain_init(void)
 
 	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
 
+	if (hw)
+		return 0;
+
 	for_each_online_node(nid) {
 		work_with_active_regions(nid, si_domain_work_fn, &ret);
 		if (ret)
@@ -2165,24 +2159,26 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
 	return 1;
 }
 
-static int iommu_prepare_static_identity_mapping(void)
+static int iommu_prepare_static_identity_mapping(int hw)
 {
 	struct pci_dev *pdev = NULL;
 	int ret;
 
-	ret = si_domain_init();
+	ret = si_domain_init(hw);
 	if (ret)
 		return -EFAULT;
 
 	for_each_pci_dev(pdev) {
 		if (iommu_should_identity_map(pdev, 1)) {
-			printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-			       pci_name(pdev));
+			printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
+			       hw ? "hardware" : "software", pci_name(pdev));
 
 			ret = domain_context_mapping(si_domain, pdev,
+						     hw ? CONTEXT_TT_PASS_THROUGH :
 						     CONTEXT_TT_MULTI_LEVEL);
 			if (ret)
 				return ret;
+
 			ret = domain_add_dev_info(si_domain, pdev);
 			if (ret)
 				return ret;
@@ -2199,14 +2195,6 @@ int __init init_dmars(void)
 	struct pci_dev *pdev;
 	struct intel_iommu *iommu;
 	int i, ret;
-	int pass_through = 1;
-
-	/*
-	 * In case pass through can not be enabled, iommu tries to use identity
-	 * mapping.
-	 */
-	if (iommu_pass_through)
-		iommu_identity_mapping = 1;
 
 	/*
 	 * for each drhd
@@ -2234,7 +2222,6 @@ int __init init_dmars(void)
 	deferred_flush = kzalloc(g_num_of_iommus *
 		sizeof(struct deferred_flush_tables), GFP_KERNEL);
 	if (!deferred_flush) {
-		kfree(g_iommus);
 		ret = -ENOMEM;
 		goto error;
 	}
@@ -2261,14 +2248,8 @@ int __init init_dmars(void)
 			goto error;
 		}
 		if (!ecap_pass_through(iommu->ecap))
-			pass_through = 0;
+			hw_pass_through = 0;
 	}
-	if (iommu_pass_through)
-		if (!pass_through) {
-			printk(KERN_INFO
-			       "Pass Through is not supported by hardware.\n");
-			iommu_pass_through = 0;
-		}
 
 	/*
 	 * Start from the sane iommu hardware state.
@@ -2323,64 +2304,57 @@ int __init init_dmars(void)
 		}
 	}
 
+	if (iommu_pass_through)
+		iommu_identity_mapping = 1;
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+	else
+		iommu_identity_mapping = 2;
+#endif
 	/*
-	 * If pass through is set and enabled, context entries of all pci
-	 * devices are intialized by pass through translation type.
+	 * If pass through is not set or not enabled, setup context entries for
+	 * identity mappings for rmrr, gfx, and isa and may fall back to static
+	 * identity mapping if iommu_identity_mapping is set.
 	 */
-	if (iommu_pass_through) {
-		ret = init_context_pass_through();
+	if (iommu_identity_mapping) {
+		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
 		if (ret) {
-			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
-			iommu_pass_through = 0;
+			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+			goto error;
 		}
 	}
-
 	/*
-	 * If pass through is not set or not enabled, setup context entries for
-	 * identity mappings for rmrr, gfx, and isa and may fall back to static
-	 * identity mapping if iommu_identity_mapping is set.
+	 * For each rmrr
+	 *   for each dev attached to rmrr
+	 *   do
+	 *     locate drhd for dev, alloc domain for dev
+	 *     allocate free domain
+	 *     allocate page table entries for rmrr
+	 *     if context not allocated for bus
+	 *           allocate and init context
+	 *           set present in root table for this bus
+	 *     init context with domain, translation etc
+	 *    endfor
+	 * endfor
 	 */
-	if (!iommu_pass_through) {
-#ifdef CONFIG_DMAR_BROKEN_GFX_WA
-		if (!iommu_identity_mapping)
-			iommu_identity_mapping = 2;
-#endif
-		if (iommu_identity_mapping)
-			iommu_prepare_static_identity_mapping();
-		/*
-		 * For each rmrr
-		 *   for each dev attached to rmrr
-		 *   do
-		 *     locate drhd for dev, alloc domain for dev
-		 *     allocate free domain
-		 *     allocate page table entries for rmrr
-		 *     if context not allocated for bus
-		 *           allocate and init context
-		 *           set present in root table for this bus
-		 *     init context with domain, translation etc
-		 *    endfor
-		 * endfor
-		 */
-		printk(KERN_INFO "IOMMU: Setting RMRR:\n");
-		for_each_rmrr_units(rmrr) {
-			for (i = 0; i < rmrr->devices_cnt; i++) {
-				pdev = rmrr->devices[i];
-				/*
-				 * some BIOS lists non-exist devices in DMAR
-				 * table.
-				 */
-				if (!pdev)
-					continue;
-				ret = iommu_prepare_rmrr_dev(rmrr, pdev);
-				if (ret)
-					printk(KERN_ERR
-						"IOMMU: mapping reserved region failed\n");
-			}
+	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
+	for_each_rmrr_units(rmrr) {
+		for (i = 0; i < rmrr->devices_cnt; i++) {
+			pdev = rmrr->devices[i];
+			/*
+			 * some BIOS lists non-exist devices in DMAR
+			 * table.
+			 */
+			if (!pdev)
+				continue;
+			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+			if (ret)
+				printk(KERN_ERR
+					"IOMMU: mapping reserved region failed\n");
 		}
-
-		iommu_prepare_isa();
 	}
 
+	iommu_prepare_isa();
+
 	/*
 	 * for each drhd
 	 *   enable fault log
@@ -2454,8 +2428,7 @@ static struct iova *intel_alloc_iova(struct device *dev,
 	return iova;
 }
 
-static struct dmar_domain *
-get_valid_domain_for_dev(struct pci_dev *pdev)
+static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
 {
 	struct dmar_domain *domain;
 	int ret;
@@ -2483,6 +2456,18 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
 	return domain;
 }
 
+static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
+{
+	struct device_domain_info *info;
+
+	/* No lock here, assumes no domain exit in normal case */
+	info = dev->dev.archdata.iommu;
+	if (likely(info))
+		return info->domain;
+
+	return __get_valid_domain_for_dev(dev);
+}
+
 static int iommu_dummy(struct pci_dev *pdev)
 {
 	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
@@ -2528,7 +2513,10 @@ static int iommu_no_mapping(struct device *dev)
 		ret = domain_add_dev_info(si_domain, pdev);
 		if (ret)
 			return 0;
-		ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+		ret = domain_context_mapping(si_domain, pdev,
+					     hw_pass_through ?
+					     CONTEXT_TT_PASS_THROUGH :
+					     CONTEXT_TT_MULTI_LEVEL);
 		if (!ret) {
 			printk(KERN_INFO "64bit %s uses identity mapping\n",
 			       pci_name(pdev));
@@ -2733,12 +2721,6 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	}
 }
 
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
-			       int dir)
-{
-	intel_unmap_page(dev, dev_addr, size, dir, NULL);
-}
-
 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags)
 {
@@ -2771,7 +2753,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
+	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long)vaddr, order);
 }
 
@@ -2807,11 +2789,18 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	/* free page tables */
 	dma_pte_free_pagetable(domain, start_pfn, last_pfn);
 
-	iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-			      (last_pfn - start_pfn + 1));
-
-	/* free iova */
-	__free_iova(&domain->iovad, iova);
+	if (intel_iommu_strict) {
+		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+				      last_pfn - start_pfn + 1);
+		/* free iova */
+		__free_iova(&domain->iovad, iova);
+	} else {
+		add_unmap(domain, iova);
+		/*
+		 * queue up the release of the unmap to save the 1/6th of the
+		 * cpu used up by the iotlb flush operation...
+		 */
+	}
 }
 
 static int intel_nontranslate_map_sg(struct device *hddev,
2816 2805
2817static int intel_nontranslate_map_sg(struct device *hddev, 2806static int intel_nontranslate_map_sg(struct device *hddev,
@@ -3194,7 +3183,7 @@ int __init intel_iommu_init(void)
 	 * Check the need for DMA-remapping initialization now.
 	 * Above initialization will also be used by Interrupt-remapping.
 	 */
-	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
+	if (no_iommu || swiotlb || dmar_disabled)
 		return -ENODEV;
 
 	iommu_init_mempool();
@@ -3214,14 +3203,7 @@ int __init intel_iommu_init(void)
 
 	init_timer(&unmap_timer);
 	force_iommu = 1;
-
-	if (!iommu_pass_through) {
-		printk(KERN_INFO
-		       "Multi-level page-table translation for DMAR.\n");
-		dma_ops = &intel_dma_ops;
-	} else
-		printk(KERN_INFO
-		       "DMAR: Pass through translation for DMAR.\n");
+	dma_ops = &intel_dma_ops;
 
 	init_iommu_sysfs();
 
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 46dd440e2315..7914951ef29a 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -22,7 +22,6 @@
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
 {
-	spin_lock_init(&iovad->iova_alloc_lock);
 	spin_lock_init(&iovad->iova_rbtree_lock);
 	iovad->rbroot = RB_ROOT;
 	iovad->cached32_node = NULL;
@@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	unsigned long limit_pfn,
 	bool size_aligned)
 {
-	unsigned long flags;
 	struct iova *new_iova;
 	int ret;
 
@@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (size_aligned)
 		size = __roundup_pow_of_two(size);
 
-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
 			new_iova, size_aligned);
 
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
 	if (ret) {
 		free_iova_mem(new_iova);
 		return NULL;
@@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad,
 	struct iova *iova;
 	unsigned int overlap = 0;
 
-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
-	spin_lock(&iovad->iova_rbtree_lock);
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
 		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
 			iova = container_of(node, struct iova, node);
@@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad,
 	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
 finish:
 
-	spin_unlock(&iovad->iova_rbtree_lock);
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return iova;
 }
 
@@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 	unsigned long flags;
 	struct rb_node *node;
 
-	spin_lock_irqsave(&from->iova_alloc_lock, flags);
-	spin_lock(&from->iova_rbtree_lock);
+	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
 	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
 		struct iova *iova = container_of(node, struct iova, node);
 		struct iova *new_iova;
@@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
 				iova->pfn_lo, iova->pfn_lo);
 	}
-	spin_unlock(&from->iova_rbtree_lock);
-	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
+	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 228f6c94b69c..76a0759e88ec 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -28,7 +28,6 @@ struct iova {
 
 /* holds all the iova translations for a domain */
 struct iova_domain {
-	spinlock_t	iova_alloc_lock;/* Lock to protect iova allocation */
 	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
 	struct rb_root	rbroot;		/* iova domain rbtree root */
 	struct rb_node	*cached32_node; /* Save last alloced node */