author     David Woodhouse <David.Woodhouse@intel.com>    2009-06-27 17:09:11 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>    2009-06-29 08:28:10 -0400
commit     595badf5d65d50300319e6178e6df005ea501f70
tree       00bb65e865ab8aface12c57634a152b1440364ee /drivers/pci
parent     04b18e65dd5a3e544f07f4bcfa8fb52704a1833b
intel-iommu: Make dma_pte_clear_range() take pfns as argument

Noting that this is now an _inclusive_ range.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/pci')
 -rw-r--r--  drivers/pci/intel-iommu.c | 32
 1 file changed, 19 insertions(+), 13 deletions(-)
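The conversion each caller now performs is mechanical: a byte range [base, base + size) becomes a pfn range whose end is inclusive. The following standalone sketch illustrates it (VTD_PAGE_SHIFT is assumed to be 12, the 4KiB VT-d page size; this is an illustration, not kernel code):

    /* Illustrative sketch: convert a byte range [base, base + size)
     * into the new inclusive pfn-range arguments. */
    #include <stdio.h>
    #include <stdint.h>

    #define VTD_PAGE_SHIFT 12  /* 4KiB VT-d pages assumed */

    int main(void)
    {
            uint64_t base = 0x1000, size = 0x3000;

            unsigned long start_pfn = base >> VTD_PAGE_SHIFT;
            /* the range end is now inclusive, hence the "- 1" */
            unsigned long last_pfn = (base + size - 1) >> VTD_PAGE_SHIFT;

            /* a caller would pass these to dma_pte_clear_range() */
            printf("clear pfns %lu..%lu\n", start_pfn, last_pfn); /* 1..3 */
            return 0;
    }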
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index d4217f737159..ff8b7ce4a013 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -56,6 +56,7 @@
 #define MAX_AGAW_WIDTH 64
 
 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
+#define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
 
 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
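The new DOMAIN_MAX_PFN() is the pfn counterpart of DOMAIN_MAX_ADDR(): for a guest address width gaw it gives the highest page frame number the domain can address, e.g. 2^36 - 1 for gaw = 48 with 4KiB pages. A standalone sanity check (illustrative only, with local copies of the macros):

    /* Illustrative check: DOMAIN_MAX_PFN equals DOMAIN_MAX_ADDR
     * shifted down by the page size. Not kernel code. */
    #include <assert.h>
    #include <stdint.h>

    #define VTD_PAGE_SHIFT 12
    #define DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
    #define DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)

    int main(void)
    {
            /* gaw = 48: highest address 2^48 - 1, highest pfn 2^36 - 1 */
            assert(DOMAIN_MAX_PFN(48) == (DOMAIN_MAX_ADDR(48) >> VTD_PAGE_SHIFT));
            return 0;
    }

The domain_exit() and vm_domain_exit() hunks below pass 0 .. DOMAIN_MAX_PFN(domain->gaw), which is the whole-domain range expressed in the new pfn units.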
@@ -777,17 +778,17 @@ static void dma_pte_clear_one(struct dmar_domain *domain, unsigned long pfn)
 }
 
 /* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
+static void dma_pte_clear_range(struct dmar_domain *domain,
+                                unsigned long start_pfn,
+                                unsigned long last_pfn)
 {
-        unsigned long start_pfn = IOVA_PFN(start);
-        unsigned long end_pfn = IOVA_PFN(end-1);
         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 
         BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
-        BUG_ON(addr_width < BITS_PER_LONG && end_pfn >> addr_width);
+        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
 
         /* we don't need lock here; nobody else touches the iova range */
-        while (start_pfn <= end_pfn) {
+        while (start_pfn <= last_pfn) {
                 dma_pte_clear_one(domain, start_pfn);
                 start_pfn++;
         }
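With the address-to-pfn conversion moved out to the callers, the function body reduces to the inclusive walk itself. A minimal standalone model of that loop, with clear_one() as a stub standing in for dma_pte_clear_one():

    /* Minimal model of the inclusive-range walk; not kernel code. */
    #include <stdio.h>

    static void clear_one(unsigned long pfn) /* stub for dma_pte_clear_one() */
    {
            printf("clear pfn %lu\n", pfn);
    }

    static void clear_range(unsigned long start_pfn, unsigned long last_pfn)
    {
            /* last_pfn is inclusive, so "<=" visits it as well */
            while (start_pfn <= last_pfn) {
                    clear_one(start_pfn);
                    start_pfn++;
            }
    }

    int main(void)
    {
            clear_range(1, 3); /* clears pfns 1, 2 and 3 */
            return 0;
    }

One property of the inclusive convention is that a caller can name the top page of the domain's address space directly, without computing a one-past-the-end value.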
@@ -1424,7 +1425,7 @@ static void domain_exit(struct dmar_domain *domain)
         end = end & (~PAGE_MASK);
 
         /* clear ptes */
-        dma_pte_clear_range(domain, 0, end);
+        dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
 
         /* free page tables */
         dma_pte_free_pagetable(domain, 0, end);
@@ -1890,7 +1891,8 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
          * RMRR range might have overlap with physical memory range,
          * clear it first
          */
-        dma_pte_clear_range(domain, base, base + size);
+        dma_pte_clear_range(domain, base >> VTD_PAGE_SHIFT,
+                            (base + size - 1) >> VTD_PAGE_SHIFT);
 
         return domain_page_mapping(domain, base, base, size,
                         DMA_PTE_READ|DMA_PTE_WRITE);
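This caller-side pattern, base >> VTD_PAGE_SHIFT for the first pfn and (base + size - 1) >> VTD_PAGE_SHIFT for the inclusive last pfn, recurs in the remaining hunks. The "- 1" matters precisely when size is page-aligned, as a quick standalone check shows (illustrative, not kernel code):

    /* Illustrative check of the "- 1" in the inclusive-end conversion. */
    #include <assert.h>
    #include <stdint.h>

    #define VTD_PAGE_SHIFT 12

    int main(void)
    {
            uint64_t base = 0, size = 0x1000; /* exactly one 4KiB page */

            /* correct inclusive end: the range covers only pfn 0 */
            assert(((base + size - 1) >> VTD_PAGE_SHIFT) == 0);

            /* without the "- 1" the inclusive end would be pfn 1,
             * one page beyond the mapped range */
            assert(((base + size) >> VTD_PAGE_SHIFT) == 1);
            return 0;
    }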
@@ -2618,7 +2620,8 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
                 pci_name(pdev), size, (unsigned long long)start_addr);
 
         /* clear the whole page */
-        dma_pte_clear_range(domain, start_addr, start_addr + size);
+        dma_pte_clear_range(domain, start_addr >> VTD_PAGE_SHIFT,
+                            (start_addr + size - 1) >> VTD_PAGE_SHIFT);
         /* free page tables */
         dma_pte_free_pagetable(domain, start_addr, start_addr + size);
         if (intel_iommu_strict) {
@@ -2710,7 +2713,8 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
         start_addr = iova->pfn_lo << PAGE_SHIFT;
 
         /* clear the whole page */
-        dma_pte_clear_range(domain, start_addr, start_addr + size);
+        dma_pte_clear_range(domain, start_addr >> VTD_PAGE_SHIFT,
+                            (start_addr + size - 1) >> VTD_PAGE_SHIFT);
         /* free page tables */
         dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 
@@ -2792,8 +2796,9 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
                                         size, prot);
                 if (ret) {
                         /* clear the page */
-                        dma_pte_clear_range(domain, start_addr,
-                                            start_addr + offset);
+                        dma_pte_clear_range(domain,
+                                            start_addr >> VTD_PAGE_SHIFT,
+                                            (start_addr + offset - 1) >> VTD_PAGE_SHIFT);
                         /* free page tables */
                         dma_pte_free_pagetable(domain, start_addr,
                                                start_addr + offset);
@@ -3382,7 +3387,7 @@ static void vm_domain_exit(struct dmar_domain *domain)
         end = end & (~VTD_PAGE_MASK);
 
         /* clear ptes */
-        dma_pte_clear_range(domain, 0, end);
+        dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
 
         /* free page tables */
         dma_pte_free_pagetable(domain, 0, end);
@@ -3526,7 +3531,8 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain,
         /* The address might not be aligned */
         base = iova & VTD_PAGE_MASK;
         size = VTD_PAGE_ALIGN(size);
-        dma_pte_clear_range(dmar_domain, base, base + size);
+        dma_pte_clear_range(dmar_domain, base >> VTD_PAGE_SHIFT,
+                            (base + size - 1) >> VTD_PAGE_SHIFT);
 
         if (dmar_domain->max_addr == base + size)
                 dmar_domain->max_addr = base;