author    David Woodhouse <David.Woodhouse@intel.com>  2009-06-28 10:03:06 -0400
committer David Woodhouse <David.Woodhouse@intel.com>  2009-06-29 08:38:04 -0400
commit    88cb6a7424d9465faf6caaaadff5af0766c93991
tree      5c50bec9d13911ea295349de24db46ec732c67c7  /drivers/pci/intel-iommu.c
parent    b536d24d212c994a7d98469ea3a8891573d45fd4
intel-iommu: Change aligned_size() to aligned_nrpages()
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
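
For reference (not part of the patch): the new helper returns the number of VT-d pages a buffer spans rather than a page-aligned byte count, so the ">> VTD_PAGE_SHIFT" that every caller previously applied after aligned_size() moves into the helper itself. Below is a minimal standalone sketch of that arithmetic, assuming 4 KiB pages where PAGE_SHIFT == VTD_PAGE_SHIFT == 12; the stub constants and the main() driver are illustrative, not kernel code.

#include <stdio.h>
#include <stddef.h>

#define VTD_PAGE_SHIFT  12
#define PAGE_SIZE       (1UL << VTD_PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))

static inline unsigned long aligned_nrpages(unsigned long host_addr,
                                            size_t size)
{
        host_addr &= ~PAGE_MASK;                /* keep only the sub-page offset */
        host_addr += size + PAGE_SIZE - 1;      /* offset + size + (PAGE_SIZE - 1), so the shift rounds up */

        return host_addr >> VTD_PAGE_SHIFT;     /* convert bytes to a page count */
}

int main(void)
{
        /* A buffer starting 0x10 bytes into a page and 0x2000 bytes long
           touches three pages. */
        printf("%lu\n", aligned_nrpages(0x1010, 0x2000));      /* prints 3 */
        return 0;
}

This matches what the old aligned_size() >> VTD_PAGE_SHIFT produced for the same arguments: PAGE_ALIGN(0x10 + 0x2000) is 0x3000, i.e. three pages.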
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c | 21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index bc49b121c667..22add36fd731 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2320,11 +2320,13 @@ error:
         return ret;
 }
 
-static inline u64 aligned_size(u64 host_addr, size_t size)
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+                                            size_t size)
 {
-        u64 addr;
-        addr = (host_addr & (~PAGE_MASK)) + size;
-        return PAGE_ALIGN(addr);
+        host_addr &= ~PAGE_MASK;
+        host_addr += size + PAGE_SIZE - 1;
+
+        return host_addr >> VTD_PAGE_SHIFT;
 }
 
 struct iova *
@@ -2466,7 +2468,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
                 return 0;
 
         iommu = domain_get_iommu(domain);
-        size = aligned_size(paddr, size) >> VTD_PAGE_SHIFT;
+        size = aligned_nrpages(paddr, size);
 
         iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT, pdev->dma_mask);
         if (!iova)
@@ -2757,9 +2759,10 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
         iommu = domain_get_iommu(domain);
 
         for_each_sg(sglist, sg, nelems, i)
-                size += aligned_size(sg->offset, sg->length);
+                size += aligned_nrpages(sg->offset, sg->length);
 
-        iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+        iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT,
+                                  pdev->dma_mask);
         if (!iova) {
                 sglist->dma_length = 0;
                 return 0;
@@ -2778,7 +2781,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
         start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
         offset_pfn = 0;
         for_each_sg(sglist, sg, nelems, i) {
-                int nr_pages = aligned_size(sg->offset, sg->length) >> VTD_PAGE_SHIFT;
+                int nr_pages = aligned_nrpages(sg->offset, sg->length);
                 ret = domain_pfn_mapping(domain, start_vpfn + offset_pfn,
                                          page_to_dma_pfn(sg_page(sg)),
                                          nr_pages, prot);
@@ -3502,7 +3505,7 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
         }
         /* Round up size to next multiple of PAGE_SIZE, if it and
            the low bits of hpa would take us onto the next page */
-        size = aligned_size(hpa, size) >> VTD_PAGE_SHIFT;
+        size = aligned_nrpages(hpa, size);
         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
                                  hpa >> VTD_PAGE_SHIFT, size, prot);
         return ret;