Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c | 130
1 file changed, 97 insertions(+), 33 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 53075424a434..2314ad7ee5fe 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1505,7 +1505,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
                 }
 
                 set_bit(num, iommu->domain_ids);
-                set_bit(iommu->seq_id, &domain->iommu_bmp);
                 iommu->domains[num] = domain;
                 id = num;
         }
@@ -1648,6 +1647,14 @@ static int domain_context_mapped(struct pci_dev *pdev)
                                      tmp->devfn);
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+                                            size_t size)
+{
+        host_addr &= ~PAGE_MASK;
+        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                             struct scatterlist *sg, unsigned long phys_pfn,
                             unsigned long nr_pages, int prot)
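The helper added above first strips the page-aligned part of host_addr, then rounds the offset plus length up to a whole MM page before converting to VT-d pages, so a buffer that straddles a page boundary is never under-counted. A minimal standalone sketch of the arithmetic, assuming 4KiB pages on both sides (PAGE_SHIFT == VTD_PAGE_SHIFT == 12):

#include <stdio.h>

#define PAGE_SHIFT     12
#define PAGE_SIZE      (1UL << PAGE_SHIFT)
#define PAGE_MASK      (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define VTD_PAGE_SHIFT 12

static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
        host_addr &= ~PAGE_MASK;        /* keep only the offset within the page */
        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

int main(void)
{
        /* 0x400 bytes starting 0xe00 into a page spill into a second page,
         * so two pages are needed even though the length alone fits in one. */
        printf("%lu\n", aligned_nrpages(0xe00, 0x400));        /* prints 2 */
        return 0;
}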
@@ -1675,7 +1682,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                 uint64_t tmp;
 
                 if (!sg_res) {
-                        sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+                        sg_res = aligned_nrpages(sg->offset, sg->length);
                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
                         sg->dma_length = sg->length;
                         pteval = page_to_phys(sg_page(sg)) | prot;
@@ -2117,6 +2124,47 @@ static int domain_add_dev_info(struct dmar_domain *domain,
         return 0;
 }
 
+static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+{
+        if (iommu_identity_mapping == 2)
+                return IS_GFX_DEVICE(pdev);
+
+        /*
+         * We want to start off with all devices in the 1:1 domain, and
+         * take them out later if we find they can't access all of memory.
+         *
+         * However, we can't do this for PCI devices behind bridges,
+         * because all PCI devices behind the same bridge will end up
+         * with the same source-id on their transactions.
+         *
+         * Practically speaking, we can't change things around for these
+         * devices at run-time, because we can't be sure there'll be no
+         * DMA transactions in flight for any of their siblings.
+         *
+         * So PCI devices (unless they're on the root bus) as well as
+         * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
+         * the 1:1 domain, just in _case_ one of their siblings turns out
+         * not to be able to map all of memory.
+         */
+        if (!pdev->is_pcie) {
+                if (!pci_is_root_bus(pdev->bus))
+                        return 0;
+                if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+                        return 0;
+        } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+                return 0;
+
+        /*
+         * At boot time, we don't yet know if devices will be 64-bit capable.
+         * Assume that they will -- if they turn out not to be, then we can
+         * take them out of the 1:1 domain later.
+         */
+        if (!startup)
+                return pdev->dma_mask > DMA_BIT_MASK(32);
+
+        return 1;
+}
+
 static int iommu_prepare_static_identity_mapping(void)
 {
         struct pci_dev *pdev = NULL;
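The comment block above carries the key reasoning: identity mapping is only safe when a device's DMA source-id is unique and the device can address all of memory. A compilable toy model of that decision follows; the struct and its fields are simplified stand-ins for the kernel's struct pci_dev, and the iommu_identity_mapping == 2 graphics work-around case is omitted:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for fields of the kernel's struct pci_dev. */
struct toy_dev {
        bool is_pcie;
        bool on_root_bus;
        bool is_pci_bridge;             /* conventional PCI-PCI bridge */
        bool is_pcie_pci_bridge;        /* PCIe-to-PCI bridge */
        unsigned long long dma_mask;
};

static int should_identity_map(const struct toy_dev *d, int startup)
{
        if (!d->is_pcie) {
                /* Conventional PCI behind a bridge shares its source-id
                 * with all siblings, so it can never be pulled out of
                 * the 1:1 domain individually at run-time. */
                if (!d->on_root_bus || d->is_pci_bridge)
                        return 0;
        } else if (d->is_pcie_pci_bridge) {
                return 0;
        }

        if (!startup)   /* by now the driver has set the real DMA mask */
                return d->dma_mask > 0xffffffffULL;

        return 1;       /* at boot, optimistically assume 64-bit capable */
}

int main(void)
{
        struct toy_dev d = { .is_pcie = false, .on_root_bus = false };

        printf("%d\n", should_identity_map(&d, 1));     /* 0: behind a bridge */
        return 0;
}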
@@ -2127,16 +2175,18 @@ static int iommu_prepare_static_identity_mapping(void)
                 return -EFAULT;
 
         for_each_pci_dev(pdev) {
-                printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-                       pci_name(pdev));
+                if (iommu_should_identity_map(pdev, 1)) {
+                        printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
+                               pci_name(pdev));
 
-                ret = domain_context_mapping(si_domain, pdev,
-                                             CONTEXT_TT_MULTI_LEVEL);
-                if (ret)
-                        return ret;
-                ret = domain_add_dev_info(si_domain, pdev);
-                if (ret)
-                        return ret;
+                        ret = domain_context_mapping(si_domain, pdev,
+                                                     CONTEXT_TT_MULTI_LEVEL);
+                        if (ret)
+                                return ret;
+                        ret = domain_add_dev_info(si_domain, pdev);
+                        if (ret)
+                                return ret;
+                }
         }
 
         return 0;
@@ -2291,6 +2341,10 @@ int __init init_dmars(void)
          * identity mapping if iommu_identity_mapping is set.
          */
         if (!iommu_pass_through) {
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+                if (!iommu_identity_mapping)
+                        iommu_identity_mapping = 2;
+#endif
                 if (iommu_identity_mapping)
                         iommu_prepare_static_identity_mapping();
                 /*
@@ -2368,15 +2422,7 @@ error:
         return ret;
 }
 
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
-                                            size_t size)
-{
-        host_addr &= ~PAGE_MASK;
-        host_addr += size + PAGE_SIZE - 1;
-
-        return host_addr >> VTD_PAGE_SHIFT;
-}
-
+/* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
                                      struct dmar_domain *domain,
                                      unsigned long nrpages, uint64_t dma_mask)
@@ -2443,16 +2489,24 @@ static int iommu_dummy(struct pci_dev *pdev)
 }
 
 /* Check if the pdev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct pci_dev *pdev)
+static int iommu_no_mapping(struct device *dev)
 {
+        struct pci_dev *pdev;
         int found;
 
+        if (unlikely(dev->bus != &pci_bus_type))
+                return 1;
+
+        pdev = to_pci_dev(dev);
+        if (iommu_dummy(pdev))
+                return 1;
+
         if (!iommu_identity_mapping)
-                return iommu_dummy(pdev);
+                return 0;
 
         found = identity_mapping(pdev);
         if (found) {
-                if (pdev->dma_mask > DMA_BIT_MASK(32))
+                if (iommu_should_identity_map(pdev, 0))
                         return 1;
                 else {
                         /*
@@ -2469,9 +2523,12 @@ static int iommu_no_mapping(struct pci_dev *pdev)
                  * In case of a detached 64 bit DMA device from vm, the device
                  * is put into si_domain for identity mapping.
                  */
-                if (pdev->dma_mask > DMA_BIT_MASK(32)) {
+                if (iommu_should_identity_map(pdev, 0)) {
                         int ret;
                         ret = domain_add_dev_info(si_domain, pdev);
+                        if (ret)
+                                return 0;
+                        ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
                         if (!ret) {
                                 printk(KERN_INFO "64bit %s uses identity mapping\n",
                                        pci_name(pdev));
@@ -2480,7 +2537,7 @@ static int iommu_no_mapping(struct pci_dev *pdev)
                 }
         }
 
-        return iommu_dummy(pdev);
+        return 0;
 }
 
 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
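Taken together, the iommu_no_mapping() hunks change its contract: it now takes a bare struct device, treats non-PCI and "dummy" devices as never translated, and itself handles lazy attach/detach of devices to the si_domain. A compilable toy of just that control flow, with booleans standing in for identity_mapping() and iommu_should_identity_map(), and the iommu_identity_mapping == 0 early-out omitted:

#include <stdbool.h>
#include <stdio.h>

static bool in_identity_domain;         /* stand-in for identity_mapping() */
static bool wants_identity;             /* stand-in for iommu_should_identity_map(pdev, 0) */

/* Returns 1 when the caller should bypass DMA translation entirely. */
static int toy_iommu_no_mapping(bool is_pci, bool is_dummy)
{
        if (!is_pci || is_dummy)
                return 1;                       /* never translated */

        if (in_identity_domain) {
                if (wants_identity)
                        return 1;               /* stay in the 1:1 domain */
                in_identity_domain = false;     /* 32-bit device: lazily detach */
                return 0;
        }

        if (wants_identity) {
                in_identity_domain = true;      /* lazily (re)attach, as the added
                                                   domain_context_mapping() call does */
                return 1;
        }

        return 0;                               /* fall through to DMA remapping */
}

int main(void)
{
        wants_identity = true;
        printf("%d\n", toy_iommu_no_mapping(true, false));      /* 1 */
        return 0;
}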
@@ -2493,10 +2550,11 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
         int prot = 0;
         int ret;
         struct intel_iommu *iommu;
+        unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
 
         BUG_ON(dir == DMA_NONE);
 
-        if (iommu_no_mapping(pdev))
+        if (iommu_no_mapping(hwdev))
                 return paddr;
 
         domain = get_valid_domain_for_dev(pdev);
@@ -2506,7 +2564,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
         iommu = domain_get_iommu(domain);
         size = aligned_nrpages(paddr, size);
 
-        iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+        iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+                                pdev->dma_mask);
         if (!iova)
                 goto error;
 
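intel_alloc_iova() counts in MM pages while aligned_nrpages() returns VT-d pages, hence the dma_to_mm_pfn() conversion added here. A sketch of the two pfn conversion helpers this relies on, mirroring their definitions earlier in intel-iommu.c; on x86 with 4KiB pages both shifts are 12 and the conversions are identity operations:

#include <stdio.h>

#define PAGE_SHIFT     12       /* MM page shift (4KiB pages assumed) */
#define VTD_PAGE_SHIFT 12       /* VT-d page shift */

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
        return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
        /* With equal page sizes, a pfn converts to itself in both directions. */
        printf("%lu %lu\n", mm_to_dma_pfn(5), dma_to_mm_pfn(5));        /* 5 5 */
        return 0;
}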
@@ -2526,7 +2585,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
          * is not a big problem
          */
         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
-                                 paddr >> VTD_PAGE_SHIFT, size, prot);
+                                 mm_to_dma_pfn(paddr_pfn), size, prot);
         if (ret)
                 goto error;
 
@@ -2635,7 +2694,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
         struct iova *iova;
         struct intel_iommu *iommu;
 
-        if (iommu_no_mapping(pdev))
+        if (iommu_no_mapping(dev))
                 return;
 
         domain = find_domain(pdev);
@@ -2726,7 +2785,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
         struct iova *iova;
         struct intel_iommu *iommu;
 
-        if (iommu_no_mapping(pdev))
+        if (iommu_no_mapping(hwdev))
                 return;
 
         domain = find_domain(pdev);
@@ -2785,7 +2844,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
         struct intel_iommu *iommu;
 
         BUG_ON(dir == DMA_NONE);
-        if (iommu_no_mapping(pdev))
+        if (iommu_no_mapping(hwdev))
                 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
 
         domain = get_valid_domain_for_dev(pdev);
@@ -2797,7 +2856,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
         for_each_sg(sglist, sg, nelems, i)
                 size += aligned_nrpages(sg->offset, sg->length);
 
-        iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+        iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+                                pdev->dma_mask);
         if (!iova) {
                 sglist->dma_length = 0;
                 return 0;
@@ -2815,7 +2875,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 
         start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
 
-        ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+        ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
         if (unlikely(ret)) {
                 /* clear the page */
                 dma_pte_clear_range(domain, start_vpfn,
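This hunk removes a double conversion: size was accumulated from aligned_nrpages() and is therefore already in VT-d pages, the unit domain_sg_mapping() expects. Wrapping it in mm_to_dma_pfn() again is harmless when MM and VT-d pages are the same size, but over-counts otherwise; a small demonstration under a hypothetical 16KiB MM page size:

#include <stdio.h>

#define PAGE_SHIFT     14       /* hypothetical 16KiB MM pages */
#define VTD_PAGE_SHIFT 12

static unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
        /* One 16KiB MM page is four 4KiB VT-d pages. */
        unsigned long size = 4;         /* VT-d pages, as aligned_nrpages() returns */

        printf("correct: %lu\n", size);                          /* 4  */
        printf("double-converted: %lu\n", mm_to_dma_pfn(size));  /* 16 */
        return 0;
}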
@@ -3348,6 +3408,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 
         domain->iommu_count = 0;
         domain->iommu_coherency = 0;
+        domain->iommu_snooping = 0;
         domain->max_addr = 0;
 
         /* always allocate the top pgd */
@@ -3540,6 +3601,9 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain,
 {
         struct dmar_domain *dmar_domain = domain->priv;
 
+        if (!size)
+                return;
+
         dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
                             (iova + size - 1) >> VTD_PAGE_SHIFT);
 
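The !size guard added above matters because the end of the range is computed as (iova + size - 1): with size == 0 the subtraction wraps and the clear/flush would operate on a nonsense range. A short demonstration of the wrap:

#include <stdio.h>

#define VTD_PAGE_SHIFT 12

int main(void)
{
        unsigned long long iova = 0x1000, size = 0;

        /* last_pfn (0) ends up below start_pfn (1); with iova == 0 the
         * subtraction would wrap all the way to the maximum pfn. */
        printf("start %llu last %llu\n",
               iova >> VTD_PAGE_SHIFT,
               (iova + size - 1) >> VTD_PAGE_SHIFT);
        return 0;
}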