author	David Woodhouse <David.Woodhouse@intel.com>	2009-06-28 09:49:31 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-06-29 08:35:06 -0400
commit	b536d24d212c994a7d98469ea3a8891573d45fd4 (patch)
tree	137751d6969f757e70548bc5ae5abee55e7c6559 /drivers/pci
parent	ad05122162b67f64d5a1c6d35e001f7a88619b88 (diff)
intel-iommu: Clean up intel_map_sg(), remove domain_page_mapping()
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/intel-iommu.c	54
1 file changed, 19 insertions(+), 35 deletions(-)
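The patch deletes the domain_page_mapping() wrapper, which converted a bus address and a physical range into page frame numbers before calling domain_pfn_mapping(), and makes intel_map_sg() do its bookkeeping in page frames directly. For context, here is a minimal sketch of the page-count arithmetic the rewritten loop relies on; the VTD_PAGE_SHIFT value and the rounding behaviour of aligned_size() are assumptions chosen for illustration, not lifted from this patch:

#include <assert.h>
#include <stddef.h>

/* Assumed value: the driver maps in 4KiB VT-d pages. */
#define VTD_PAGE_SHIFT	12
#define VTD_PAGE_SIZE	(1UL << VTD_PAGE_SHIFT)

/*
 * Assumed behaviour of aligned_size(): round offset + length up to a
 * whole number of VT-d pages, so the result shifted right by
 * VTD_PAGE_SHIFT is the number of pages a scatterlist entry spans.
 */
static size_t aligned_size(unsigned long offset, size_t length)
{
	return (offset + length + VTD_PAGE_SIZE - 1) & ~(VTD_PAGE_SIZE - 1);
}

int main(void)
{
	/* An entry starting 0x100 bytes into its first page, 8000 bytes long. */
	unsigned long offset = 0x100;
	size_t length = 8000;

	/* 0x100 + 8000 = 8256 bytes, which rounds up to three 4KiB pages. */
	int nr_pages = aligned_size(offset, length) >> VTD_PAGE_SHIFT;
	assert(nr_pages == 3);
	return 0;
}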
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index a490b39ca3d5..bc49b121c667 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1674,17 +1674,6 @@ static int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 	return 0;
 }
 
-static int domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
-			       u64 hpa, size_t size, int prot)
-{
-	unsigned long first_pfn = hpa >> VTD_PAGE_SHIFT;
-	unsigned long last_pfn = (hpa + size - 1) >> VTD_PAGE_SHIFT;
-
-	return domain_pfn_mapping(domain, iova >> VTD_PAGE_SHIFT, first_pfn,
-				  last_pfn - first_pfn + 1, prot);
-
-}
-
 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
 	if (!iommu)
@@ -2745,17 +2734,16 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 			enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-	phys_addr_t addr;
 	int i;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
 	size_t size = 0;
 	int prot = 0;
-	size_t offset = 0;
+	size_t offset_pfn = 0;
 	struct iova *iova = NULL;
 	int ret;
 	struct scatterlist *sg;
-	unsigned long start_addr;
+	unsigned long start_vpfn;
 	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
@@ -2768,10 +2756,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 
 	iommu = domain_get_iommu(domain);
 
-	for_each_sg(sglist, sg, nelems, i) {
-		addr = page_to_phys(sg_page(sg)) + sg->offset;
-		size += aligned_size((u64)addr, sg->length);
-	}
+	for_each_sg(sglist, sg, nelems, i)
+		size += aligned_size(sg->offset, sg->length);
 
 	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
 	if (!iova) {
@@ -2789,36 +2775,34 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
 		prot |= DMA_PTE_WRITE;
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT;
-	offset = 0;
+	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
+	offset_pfn = 0;
 	for_each_sg(sglist, sg, nelems, i) {
-		addr = page_to_phys(sg_page(sg)) + sg->offset;
-		size = aligned_size((u64)addr, sg->length);
-		ret = domain_page_mapping(domain, start_addr + offset,
-					  ((u64)addr) & PHYSICAL_PAGE_MASK,
-					  size, prot);
+		int nr_pages = aligned_size(sg->offset, sg->length) >> VTD_PAGE_SHIFT;
+		ret = domain_pfn_mapping(domain, start_vpfn + offset_pfn,
+					 page_to_dma_pfn(sg_page(sg)),
+					 nr_pages, prot);
 		if (ret) {
 			/* clear the page */
-			dma_pte_clear_range(domain,
-					    start_addr >> VTD_PAGE_SHIFT,
-					    (start_addr + offset - 1) >> VTD_PAGE_SHIFT);
+			dma_pte_clear_range(domain, start_vpfn,
+					    start_vpfn + offset_pfn);
 			/* free page tables */
-			dma_pte_free_pagetable(domain, start_addr >> VTD_PAGE_SHIFT,
-					       (start_addr + offset - 1) >> VTD_PAGE_SHIFT);
+			dma_pte_free_pagetable(domain, start_vpfn,
+					       start_vpfn + offset_pfn);
 			/* free iova */
 			__free_iova(&domain->iovad, iova);
 			return 0;
 		}
-		sg->dma_address = start_addr + offset +
-				  ((u64)addr & (~PAGE_MASK));
+		sg->dma_address = ((dma_addr_t)(start_vpfn + offset_pfn)
+				   << VTD_PAGE_SHIFT) + sg->offset;
 		sg->dma_length = sg->length;
-		offset += size;
+		offset_pfn += nr_pages;
 	}
 
 	/* it's a non-present to present mapping. Only flush if caching mode */
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, 0, start_addr,
-				      offset >> VTD_PAGE_SHIFT);
+		iommu_flush_iotlb_psi(iommu, 0, start_vpfn << VTD_PAGE_SHIFT,
+				      offset_pfn);
 	else
 		iommu_flush_write_buffer(iommu);
 
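To see that the page-frame bookkeeping produces the same bus addresses as the old byte-based code, here is a worked example of the new dma_address computation, a sketch under the same assumed 4KiB page size; start_vpfn is an arbitrary illustrative value, not one the IOVA allocator would necessarily return:

#include <assert.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT	12

typedef uint64_t dma_addr_t;

int main(void)
{
	/* Illustrative IOVA page frame handed back by the allocator. */
	unsigned long start_vpfn = 0x80000;
	unsigned long offset_pfn = 0;	/* pages consumed so far */

	/* First sg entry: page offset 0x100, spanning three pages. */
	unsigned long sg_offset = 0x100;
	int nr_pages = 3;

	/* The entry's bus address is its IOVA page frame plus the byte offset. */
	dma_addr_t dma_address = ((dma_addr_t)(start_vpfn + offset_pfn)
				  << VTD_PAGE_SHIFT) + sg_offset;
	assert(dma_address == 0x80000100ULL);

	/* The next entry starts after the pages this one consumed. */
	offset_pfn += nr_pages;
	assert(offset_pfn == 3);
	return 0;
}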