author     David Woodhouse <David.Woodhouse@intel.com>  2009-06-28 16:20:51 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>  2009-06-29 08:39:53 -0400
commit     875764de6f0ddb23d270c29357d5a339232a0488
tree       d122b470ffbe6c3651a3ba5b0441b46067f69a3b
parent     6f6a00e40aa3fdd3b29c30e3ef1fc9690506bc03
intel-iommu: Simplify __intel_alloc_iova()
There's no need for the separate iommu_alloc_iova() function, and
certainly not for it to be global. Remove the underscores while we're
at it.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
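For context, the policy this patch folds directly into intel_alloc_iova() is: unless force-DAC is set, a device whose DMA mask exceeds 32 bits first tries to allocate below 4GiB, then falls back to its full (domain-clamped) mask. Below is a minimal userspace sketch of that two-stage policy. The bump allocator, struct iova_domain, and the alloc_iova() signature here are simplified stand-ins for illustration, not the kernel's real iova API.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
#define IOVA_PFN(addr)  ((addr) >> PAGE_SHIFT)

/* Toy stand-in for the kernel's iova allocator: hands out ascending
 * page-frame ranges and fails once a range would cross limit_pfn. */
struct iova_domain { uint64_t next_free_pfn; };

static bool alloc_iova(struct iova_domain *iovad, unsigned long nrpages,
		       uint64_t limit_pfn, uint64_t *pfn_out)
{
	if (iovad->next_free_pfn + nrpages > limit_pfn)
		return false;		/* no room below this limit */
	*pfn_out = iovad->next_free_pfn;
	iovad->next_free_pfn += nrpages;
	return true;
}

/* The two-stage policy: prefer a sub-4GiB IOVA when the device can
 * address more than 32 bits, then retry with the full mask. */
static bool two_stage_alloc(struct iova_domain *iovad, unsigned long nrpages,
			    uint64_t dma_mask, bool forcedac,
			    uint64_t *pfn_out)
{
	if (!forcedac && dma_mask > DMA_BIT_MASK(32) &&
	    alloc_iova(iovad, nrpages, IOVA_PFN(DMA_BIT_MASK(32)), pfn_out))
		return true;
	return alloc_iova(iovad, nrpages, IOVA_PFN(dma_mask), pfn_out);
}

int main(void)
{
	struct iova_domain iovad = { .next_free_pfn = 0x100 };
	uint64_t pfn;

	/* A 40-bit device mask: wider than 32 bits, so the sub-4GiB
	 * range is tried first. */
	if (two_stage_alloc(&iovad, 16, DMA_BIT_MASK(40), false, &pfn))
		printf("allocated 16 pages at pfn 0x%" PRIx64 "\n", pfn);
	return 0;
}

The patch keeps this policy intact; it merely drops the separate iommu_alloc_iova() wrapper, clamps the mask once against the domain's address width, and switches callers to passing page counts instead of byte sizes.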
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/pci/intel-iommu.c | 49
1 file changed, 18 insertions(+), 31 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f8074236bcce..11a23201445a 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2323,43 +2323,31 @@ static inline unsigned long aligned_nrpages(unsigned long host_addr,
 	return host_addr >> VTD_PAGE_SHIFT;
 }
 
-struct iova *
-iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
-{
-	struct iova *piova;
-
-	/* Make sure it's in range */
-	end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
-	if (!size || (IOVA_START_ADDR + size > end))
-		return NULL;
-
-	piova = alloc_iova(&domain->iovad,
-			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
-	return piova;
-}
-
-static struct iova *
-__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
-		   size_t size, u64 dma_mask)
+static struct iova *intel_alloc_iova(struct device *dev,
+				     struct dmar_domain *domain,
+				     unsigned long nrpages, uint64_t dma_mask)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct iova *iova = NULL;
 
-	if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
-		iova = iommu_alloc_iova(domain, size, dma_mask);
-	else {
+	/* Restrict dma_mask to the width that the iommu can handle */
+	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+
+	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
 		/*
 		 * First try to allocate an io virtual address in
 		 * DMA_BIT_MASK(32) and if that fails then try allocating
 		 * from higher range
 		 */
-		iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
-		if (!iova)
-			iova = iommu_alloc_iova(domain, size, dma_mask);
-	}
-
-	if (!iova) {
-		printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
+		iova = alloc_iova(&domain->iovad, nrpages,
+				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
+		if (iova)
+			return iova;
+	}
+	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
+	if (unlikely(!iova)) {
+		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
+		       nrpages, pci_name(pdev));
 		return NULL;
 	}
 
@@ -2464,7 +2452,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
 
-	iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
 	if (!iova)
 		goto error;
 
@@ -2753,8 +2741,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	for_each_sg(sglist, sg, nelems, i)
 		size += aligned_nrpages(sg->offset, sg->length);
 
-	iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT,
-				  pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;