author    David Woodhouse <David.Woodhouse@intel.com>  2009-06-27 19:27:49 -0400
committer David Woodhouse <David.Woodhouse@intel.com>  2009-06-29 08:30:45 -0400
commit    d794dc9b302c2781c571c10dedb8094e223d31b8 (patch)
tree      60a410f5b37779db3efd946ec6a32aa7bf50e651 /drivers/pci/intel-iommu.c
parent    6660c63a79a639b86e3a709e25a8c4fc3ab24770 (diff)
intel-iommu: Make dma_pte_free_pagetable() take pfns as argument
Pass page frame numbers to dma_pte_free_pagetable() instead of 64-bit byte addresses, matching the earlier conversion of dma_pte_clear_range(). This allows some cleanup of intel_unmap_page(), intel_unmap_sg() and vm_domain_exit(), which no longer need to play with 64-bit addresses.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/pci/intel-iommu.c')
 drivers/pci/intel-iommu.c | 68 ++++++++++++++++++++++++++++----------------------------------
 1 file changed, 28 insertions(+), 40 deletions(-)
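
The interface change at the heart of the diff below, as a minimal sketch (the condensed caller is illustrative, lifted from the new intel_unmap_page(); mm_to_dma_pfn() and VTD_PAGE_SHIFT are helpers from this patch series):

	/* Before: callers passed 64-bit byte addresses, and the callee
	 * re-derived page frame numbers on every call. */
	static void dma_pte_free_pagetable(struct dmar_domain *domain,
					   u64 start, u64 end);

	/* After: callers pass an inclusive range of VT-d page frame numbers. */
	static void dma_pte_free_pagetable(struct dmar_domain *domain,
					   unsigned long start_pfn,
					   unsigned long last_pfn);

	/* Condensed caller: an IOVA held in mm pfns (PAGE_SHIFT units) is
	 * converted once to VT-d pfns (VTD_PAGE_SHIFT units), and the same
	 * inclusive [start_pfn, last_pfn] pair then feeds both teardown
	 * helpers instead of being re-derived from byte addresses twice. */
	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
	dma_pte_clear_range(domain, start_pfn, last_pfn);
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);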
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 1526864a9d6f..fc593adb049a 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -796,11 +796,10 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 
 /* free page table pages. last level pte should already be cleared */
 static void dma_pte_free_pagetable(struct dmar_domain *domain,
-				   u64 start, u64 end)
+				   unsigned long start_pfn,
+				   unsigned long last_pfn)
 {
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-	unsigned long start_pfn = start >> VTD_PAGE_SHIFT;
-	unsigned long last_pfn = (end-1) >> VTD_PAGE_SHIFT;
 	struct dma_pte *pte;
 	int total = agaw_to_level(domain->agaw);
 	int level;
@@ -832,7 +831,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 		level++;
 	}
 	/* free pgd */
-	if (start == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
+	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
 		free_pgtable_page(domain->pgd);
 		domain->pgd = NULL;
 	}
@@ -1416,7 +1415,6 @@ static void domain_exit(struct dmar_domain *domain)
 {
 	struct dmar_drhd_unit *drhd;
 	struct intel_iommu *iommu;
-	u64 end;
 
 	/* Domain 0 is reserved, so dont process it */
 	if (!domain)
@@ -1425,14 +1423,12 @@ static void domain_exit(struct dmar_domain *domain)
 	domain_remove_dev_info(domain);
 	/* destroy iovas */
 	put_iova_domain(&domain->iovad);
-	end = DOMAIN_MAX_ADDR(domain->gaw);
-	end = end & (~PAGE_MASK);
 
 	/* clear ptes */
 	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
 
 	/* free page tables */
-	dma_pte_free_pagetable(domain, 0, end);
+	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
 
 	for_each_active_iommu(iommu, drhd)
 		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
@@ -2601,7 +2597,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dmar_domain *domain;
-	unsigned long start_addr;
+	unsigned long start_pfn, last_pfn;
 	struct iova *iova;
 	struct intel_iommu *iommu;
 
@@ -2617,20 +2613,22 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	if (!iova)
 		return;
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT;
-	size = aligned_size((u64)dev_addr, size);
+	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
+	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
 
-	pr_debug("Device %s unmapping: %zx@%llx\n",
-		 pci_name(pdev), size, (unsigned long long)start_addr);
+	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
+		 pci_name(pdev), start_pfn, last_pfn);
 
 	/* clear the whole page */
-	dma_pte_clear_range(domain, start_addr >> VTD_PAGE_SHIFT,
-			    (start_addr + size - 1) >> VTD_PAGE_SHIFT);
+	dma_pte_clear_range(domain, start_pfn, last_pfn);
+
 	/* free page tables */
-	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
+	dma_pte_free_pagetable(domain, start_pfn, last_pfn);
+
 	if (intel_iommu_strict) {
-		iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
-				      size >> VTD_PAGE_SHIFT);
+		iommu_flush_iotlb_psi(iommu, domain->id,
+				      start_pfn << VTD_PAGE_SHIFT,
+				      last_pfn - start_pfn + 1);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
 	} else {
@@ -2688,14 +2686,10 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 			   int nelems, enum dma_data_direction dir,
 			   struct dma_attrs *attrs)
 {
-	int i;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
-	unsigned long start_addr;
+	unsigned long start_pfn, last_pfn;
 	struct iova *iova;
-	size_t size = 0;
-	phys_addr_t addr;
-	struct scatterlist *sg;
 	struct intel_iommu *iommu;
 
 	if (iommu_no_mapping(pdev))
@@ -2709,21 +2703,19 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
 	if (!iova)
 		return;
-	for_each_sg(sglist, sg, nelems, i) {
-		addr = page_to_phys(sg_page(sg)) + sg->offset;
-		size += aligned_size((u64)addr, sg->length);
-	}
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT;
+	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
+	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
 
 	/* clear the whole page */
-	dma_pte_clear_range(domain, start_addr >> VTD_PAGE_SHIFT,
-			    (start_addr + size - 1) >> VTD_PAGE_SHIFT);
+	dma_pte_clear_range(domain, start_pfn, last_pfn);
+
 	/* free page tables */
-	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
+	dma_pte_free_pagetable(domain, start_pfn, last_pfn);
 
-	iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
-			      size >> VTD_PAGE_SHIFT);
+	iommu_flush_iotlb_psi(iommu, domain->id,
+			      start_pfn << VTD_PAGE_SHIFT,
+			      (last_pfn - start_pfn + 1));
 
 	/* free iova */
 	__free_iova(&domain->iovad, iova);
@@ -2804,8 +2796,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 					    start_addr >> VTD_PAGE_SHIFT,
 					    (start_addr + offset - 1) >> VTD_PAGE_SHIFT);
 			/* free page tables */
-			dma_pte_free_pagetable(domain, start_addr,
-					       start_addr + offset);
+			dma_pte_free_pagetable(domain, start_addr >> VTD_PAGE_SHIFT,
+					       (start_addr + offset - 1) >> VTD_PAGE_SHIFT);
 			/* free iova */
 			__free_iova(&domain->iovad, iova);
 			return 0;
@@ -3378,8 +3370,6 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
 
 static void vm_domain_exit(struct dmar_domain *domain)
 {
-	u64 end;
-
 	/* Domain 0 is reserved, so dont process it */
 	if (!domain)
 		return;
@@ -3387,14 +3377,12 @@ static void vm_domain_exit(struct dmar_domain *domain)
 	vm_domain_remove_all_dev_info(domain);
 	/* destroy iovas */
 	put_iova_domain(&domain->iovad);
-	end = DOMAIN_MAX_ADDR(domain->gaw);
-	end = end & (~VTD_PAGE_MASK);
 
 	/* clear ptes */
 	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
 
 	/* free page tables */
-	dma_pte_free_pagetable(domain, 0, end);
+	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
 
 	iommu_free_vm_domain(domain);
 	free_domain_mem(domain);
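
Why last_pfn is computed as mm_to_dma_pfn(iova->pfn_hi + 1) - 1 rather than mm_to_dma_pfn(iova->pfn_hi): VT-d page tables always use 4KiB pages, while the CPU's PAGE_SHIFT can be larger, so one mm page may span several VT-d pages; converting one-past-the-end and subtracting one yields the correct inclusive last pfn in both cases. A standalone sketch of the arithmetic (the PAGE_SHIFT value and the mm_to_dma_pfn() body below are assumptions modelled on this patch series, not copied from the kernel):

	#include <stdio.h>

	#define VTD_PAGE_SHIFT	12	/* VT-d page tables always use 4KiB pages */
	#define PAGE_SHIFT	14	/* assume a 16KiB-page CPU for illustration */

	/* mm pfn -> VT-d pfn, modelled on the helper this series introduces */
	static unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
	{
		return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
	}

	int main(void)
	{
		/* an IOVA covering mm pfns 0x100-0x103, i.e. four 16KiB pages */
		unsigned long pfn_lo = 0x100, pfn_hi = 0x103;

		unsigned long start_pfn = mm_to_dma_pfn(pfn_lo);
		unsigned long last_pfn = mm_to_dma_pfn(pfn_hi + 1) - 1;

		/* prints "VT-d pfns 400-40f (16 pages)": each 16KiB mm page
		 * covers four 4KiB VT-d pages, and last_pfn is inclusive */
		printf("VT-d pfns %lx-%lx (%lu pages)\n",
		       start_pfn, last_pfn, last_pfn - start_pfn + 1);
		return 0;
	}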