path: root/drivers/pci/intel-iommu.c
author      Linus Torvalds <torvalds@linux-foundation.org>    2009-07-06 17:03:59 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>    2009-07-06 17:03:59 -0400
commit      085ff82c9c615d502d1e6754879d8d4ff590d143 (patch)
tree        5c29d4a691064adc6e5d62e29960a4be02a445b5 /drivers/pci/intel-iommu.c
parent      f63bafe55654caf3a62f73500eafd1b89ca6f7ff (diff)
parent      3dfc813d94bba2046c6aed216e0fd69ac93a8e03 (diff)
Merge git://git.infradead.org/iommu-2.6
* git://git.infradead.org/iommu-2.6:
  intel-iommu: Don't use identity mapping for PCI devices behind bridges
  intel-iommu: Use iommu_should_identity_map() at startup time too.
  intel-iommu: No mapping for non-PCI devices
  intel-iommu: Restore DMAR_BROKEN_GFX_WA option for broken graphics drivers
  intel-iommu: Add iommu_should_identity_map() function
  intel-iommu: Fix reattaching of devices to identity mapping domain
  intel-iommu: Don't set identity mapping for bypassed graphics devices
  intel-iommu: Fix dma vs. mm page confusion with aligned_nrpages()
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--    drivers/pci/intel-iommu.c    106
1 file changed, 83 insertions(+), 23 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 53075424a434..360fb67a30d7 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2117,6 +2117,47 @@ static int domain_add_dev_info(struct dmar_domain *domain,
         return 0;
 }
 
+static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+{
+        if (iommu_identity_mapping == 2)
+                return IS_GFX_DEVICE(pdev);
+
+        /*
+         * We want to start off with all devices in the 1:1 domain, and
+         * take them out later if we find they can't access all of memory.
+         *
+         * However, we can't do this for PCI devices behind bridges,
+         * because all PCI devices behind the same bridge will end up
+         * with the same source-id on their transactions.
+         *
+         * Practically speaking, we can't change things around for these
+         * devices at run-time, because we can't be sure there'll be no
+         * DMA transactions in flight for any of their siblings.
+         *
+         * So PCI devices (unless they're on the root bus) as well as
+         * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
+         * the 1:1 domain, just in _case_ one of their siblings turns out
+         * not to be able to map all of memory.
+         */
+        if (!pdev->is_pcie) {
+                if (!pci_is_root_bus(pdev->bus))
+                        return 0;
+                if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+                        return 0;
+        } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+                return 0;
+
+        /*
+         * At boot time, we don't yet know if devices will be 64-bit capable.
+         * Assume that they will -- if they turn out not to be, then we can
+         * take them out of the 1:1 domain later.
+         */
+        if (!startup)
+                return pdev->dma_mask > DMA_BIT_MASK(32);
+
+        return 1;
+}
+
 static int iommu_prepare_static_identity_mapping(void)
 {
         struct pci_dev *pdev = NULL;
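[Annotation: the comment block above carries the whole argument: devices behind the same bridge share a source-id, so they can only be identity-mapped or translated as a group. A rough standalone illustration of the classification, not kernel code -- the struct below is a simplified stand-in for the handful of struct pci_dev fields the check relies on:

    #include <stdio.h>

    /* Hypothetical stand-in for the pci_dev fields used by the check. */
    struct toy_pdev {
            int is_pcie;            /* PCIe device, or legacy PCI? */
            int on_root_bus;        /* sits directly on the root bus? */
            int is_pci_bridge;      /* conventional PCI-PCI bridge? */
            int is_pcie_pci_bridge; /* PCIe-to-PCI bridge? */
    };

    /* Same shape as the bridge checks in iommu_should_identity_map():
     * anything that is, or could hide siblings behind, a PCI bridge is
     * kept out of the 1:1 domain, since a shared source-id makes it
     * impossible to pull one sibling out later. */
    static int safe_for_identity_map(const struct toy_pdev *d)
    {
            if (!d->is_pcie) {
                    if (!d->on_root_bus)
                            return 0;   /* legacy PCI behind a bridge */
                    if (d->is_pci_bridge)
                            return 0;   /* the bridge itself */
            } else if (d->is_pcie_pci_bridge) {
                    return 0;           /* PCIe-to-PCI bridge */
            }
            return 1;
    }

    int main(void)
    {
            struct toy_pdev pcie_nic = { .is_pcie = 1 };
            struct toy_pdev pci_card = { .is_pcie = 0, .on_root_bus = 0 };

            printf("PCIe endpoint: %d, PCI card behind bridge: %d\n",
                   safe_for_identity_map(&pcie_nic),
                   safe_for_identity_map(&pci_card)); /* prints 1, 0 */
            return 0;
    }
]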
@@ -2127,16 +2168,18 @@ static int iommu_prepare_static_identity_mapping(void)
                 return -EFAULT;
 
         for_each_pci_dev(pdev) {
-                printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-                       pci_name(pdev));
+                if (iommu_should_identity_map(pdev, 1)) {
+                        printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
+                               pci_name(pdev));
 
-                ret = domain_context_mapping(si_domain, pdev,
-                                             CONTEXT_TT_MULTI_LEVEL);
-                if (ret)
-                        return ret;
-                ret = domain_add_dev_info(si_domain, pdev);
-                if (ret)
-                        return ret;
+                        ret = domain_context_mapping(si_domain, pdev,
+                                                     CONTEXT_TT_MULTI_LEVEL);
+                        if (ret)
+                                return ret;
+                        ret = domain_add_dev_info(si_domain, pdev);
+                        if (ret)
+                                return ret;
+                }
         }
 
         return 0;
@@ -2291,6 +2334,10 @@ int __init init_dmars(void)
          * identity mapping if iommu_identity_mapping is set.
          */
         if (!iommu_pass_through) {
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+                if (!iommu_identity_mapping)
+                        iommu_identity_mapping = 2;
+#endif
                 if (iommu_identity_mapping)
                         iommu_prepare_static_identity_mapping();
                 /*
@@ -2368,15 +2415,15 @@ error:
         return ret;
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
 static inline unsigned long aligned_nrpages(unsigned long host_addr,
                                             size_t size)
 {
         host_addr &= ~PAGE_MASK;
-        host_addr += size + PAGE_SIZE - 1;
-
-        return host_addr >> VTD_PAGE_SHIFT;
+        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
 }
 
+/* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
                                      struct dmar_domain *domain,
                                      unsigned long nrpages, uint64_t dma_mask)
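[Annotation: the two new comments pin down the unit mismatch this merge fixes: aligned_nrpages() rounds to MM page granularity but counts in VT-d (4KB) pages, while intel_alloc_iova() expects MM pages, hence the dma_to_mm_pfn() conversions at the call sites further down. A standalone sketch of the arithmetic, compiled outside the kernel with a hypothetical 64KB MM page size so the two units actually differ (on x86 both shifts are 12 and the counts coincide):

    #include <stdio.h>
    #include <stddef.h>

    #define VTD_PAGE_SHIFT  12              /* the IOMMU always uses 4KB pages */
    #define PAGE_SHIFT      16              /* hypothetical 64KB MM pages */
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PAGE_MASK       (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(a)   (((a) + PAGE_SIZE - 1) & PAGE_MASK)

    /* Returns a number of VTD pages, but aligned to MM page size */
    static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
    {
            host_addr &= ~PAGE_MASK;        /* keep only the in-page offset */
            return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
    }

    /* Convert a count of VTD (4KB) pages into MM pages, as the callers
     * must do before passing the count to the MM-page-based allocator. */
    static unsigned long dma_to_mm_pfn(unsigned long vtd_pages)
    {
            return vtd_pages >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
    }

    int main(void)
    {
            unsigned long n = aligned_nrpages(0x1234, 8192);

            /* 8KB at offset 0x1234 rounds up to one 64KB MM page:
             * 16 VTD pages, i.e. 1 MM page for the iova allocator. */
            printf("VTD pages: %lu, MM pages: %lu\n", n, dma_to_mm_pfn(n));
            return 0;
    }
]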
@@ -2443,16 +2490,24 @@ static int iommu_dummy(struct pci_dev *pdev)
 }
 
 /* Check if the pdev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct pci_dev *pdev)
+static int iommu_no_mapping(struct device *dev)
 {
+        struct pci_dev *pdev;
         int found;
 
+        if (unlikely(dev->bus != &pci_bus_type))
+                return 1;
+
+        pdev = to_pci_dev(dev);
+        if (iommu_dummy(pdev))
+                return 1;
+
         if (!iommu_identity_mapping)
-                return iommu_dummy(pdev);
+                return 0;
 
         found = identity_mapping(pdev);
         if (found) {
-                if (pdev->dma_mask > DMA_BIT_MASK(32))
+                if (iommu_should_identity_map(pdev, 0))
                         return 1;
                 else {
                         /*
@@ -2469,9 +2524,12 @@ static int iommu_no_mapping(struct pci_dev *pdev)
          * In case of a detached 64 bit DMA device from vm, the device
          * is put into si_domain for identity mapping.
          */
-        if (pdev->dma_mask > DMA_BIT_MASK(32)) {
+        if (iommu_should_identity_map(pdev, 0)) {
                 int ret;
                 ret = domain_add_dev_info(si_domain, pdev);
+                if (ret)
+                        return 0;
+                ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
                 if (!ret) {
                         printk(KERN_INFO "64bit %s uses identity mapping\n",
                                pci_name(pdev));
@@ -2480,7 +2538,7 @@ static int iommu_no_mapping(struct pci_dev *pdev)
                 }
         }
 
-        return iommu_dummy(pdev);
+        return 0;
 }
 
 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
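[Annotation: taken together, the three hunks above turn iommu_no_mapping() from "fall back to iommu_dummy() at the end" into an explicit decision ladder that also handles non-PCI devices and re-attaches qualifying devices to the identity domain. A compilable paraphrase of that ladder -- the predicates are hypothetical stubs, not the kernel helpers, and error handling on the re-attach path is reduced to a boolean:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stubs standing in for the kernel-side checks. */
    static bool dev_is_pci(void)            { return true;  }
    static bool dev_is_dummy(void)          { return false; }
    static bool identity_enabled(void)      { return true;  }
    static bool in_si_domain(void)          { return false; }
    static bool should_identity_map(void)   { return true;  }
    static bool attach_to_si_domain(void)   { return true;  }
    static void detach_from_si_domain(void) { }

    /* Returns true when DMA for the device should bypass translation. */
    static bool no_mapping(void)
    {
            if (!dev_is_pci())
                    return true;   /* non-PCI device: nothing to translate */
            if (dev_is_dummy())
                    return true;   /* explicitly bypassed device */
            if (!identity_enabled())
                    return false;  /* no 1:1 domain at all: always translate */

            if (in_si_domain()) {
                    if (should_identity_map())
                            return true;   /* stay in the 1:1 domain */
                    detach_from_si_domain();
                    return false;          /* e.g. 32-bit-only: translate */
            }

            /* Not in the 1:1 domain yet (e.g. a 64-bit device detached from
             * a VM): re-attach and set up context mapping if it qualifies. */
            if (should_identity_map() && attach_to_si_domain())
                    return true;

            return false;
    }

    int main(void)
    {
            printf("bypass translation: %d\n", no_mapping()); /* 1 with these stubs */
            return 0;
    }
]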
@@ -2496,7 +2554,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 
         BUG_ON(dir == DMA_NONE);
 
-        if (iommu_no_mapping(pdev))
+        if (iommu_no_mapping(hwdev))
                 return paddr;
 
         domain = get_valid_domain_for_dev(pdev);
@@ -2506,7 +2564,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
         iommu = domain_get_iommu(domain);
         size = aligned_nrpages(paddr, size);
 
-        iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+        iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+                                pdev->dma_mask);
         if (!iova)
                 goto error;
 
@@ -2635,7 +2694,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
         struct iova *iova;
         struct intel_iommu *iommu;
 
-        if (iommu_no_mapping(pdev))
+        if (iommu_no_mapping(dev))
                 return;
 
         domain = find_domain(pdev);
@@ -2726,7 +2785,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
         struct iova *iova;
         struct intel_iommu *iommu;
 
-        if (iommu_no_mapping(pdev))
+        if (iommu_no_mapping(hwdev))
                 return;
 
         domain = find_domain(pdev);
@@ -2785,7 +2844,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
         struct intel_iommu *iommu;
 
         BUG_ON(dir == DMA_NONE);
-        if (iommu_no_mapping(pdev))
+        if (iommu_no_mapping(hwdev))
                 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
 
         domain = get_valid_domain_for_dev(pdev);
@@ -2797,7 +2856,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
         for_each_sg(sglist, sg, nelems, i)
                 size += aligned_nrpages(sg->offset, sg->length);
 
-        iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+        iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+                                pdev->dma_mask);
         if (!iova) {
                 sglist->dma_length = 0;
                 return 0;