aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/pci
diff options
context:
space:
mode:
authorDavid Woodhouse <David.Woodhouse@intel.com>2009-07-04 04:35:44 -0400
committerDavid Woodhouse <David.Woodhouse@intel.com>2009-07-04 04:35:52 -0400
commit5a5e02a614e59db7536cd11029e6674adc41b191 (patch)
tree11f56f4ca2ea02f14e5cdfaae89e034db23edb28 /drivers/pci
parent405d7ca51597645f3be850574d6c10f01be11e8d (diff)
intel-iommu: Fix dma vs. mm page confusion with aligned_nrpages()
The aligned_nrpages() function rounds up to the next VM page, but returns its result as a number of DMA pages. Purely theoretical except on IA64, which doesn't boot with VT-d right now anyway. Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/intel-iommu.c | 12 ++++++------
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 53075424a434..ad85e95d2dcc 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2368,15 +2368,15 @@ error:
 	return ret;
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
 static inline unsigned long aligned_nrpages(unsigned long host_addr,
 					    size_t size)
 {
 	host_addr &= ~PAGE_MASK;
-	host_addr += size + PAGE_SIZE - 1;
-
-	return host_addr >> VTD_PAGE_SHIFT;
+	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
 }
 
+/* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
 				     unsigned long nrpages, uint64_t dma_mask)
@@ -2506,7 +2506,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
 
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova)
 		goto error;
 
@@ -2797,7 +2798,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	for_each_sg(sglist, sg, nelems, i)
 		size += aligned_nrpages(sg->offset, sg->length);
 
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;