author	Fenghua Yu <fenghua.yu@intel.com>	2009-08-04 18:09:37 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-08-05 03:59:47 -0400
commit	f532959b77e5e567c84c914cb7c7b07d2582448b (patch)
tree	268d0bf9e6660a8fdecc8dcda446745c8464f53a /drivers/pci/intel-iommu.c
parent	90bc1a658a53f8832ee799685703977a450e5af9 (diff)
intel-iommu: Correct sglist size calculation.
In domain_sg_mapping(), use aligned_nrpages() instead of hand-coded
rounding code for calculating the size of each sg elem. This means that
on IA64 we correctly round up to the MM page size, not just to the VT-d
page size.

Also remove the incorrect mm_to_dma_pfn() when intel_map_sg() calls
domain_sg_mapping() -- the 'size' variable is in VT-d pages already.

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
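[Editor's note: the distinction only matters where the MM page size exceeds the VT-d page size. The standalone sketch below shows the two calculations disagreeing; the page-size constants are assumed values for illustration (16KB MM pages, one of the sizes IA64 supports, vs. 4KB VT-d pages), and the test harness is not kernel code -- only aligned_nrpages() itself comes from this patch.]

#include <stdio.h>
#include <stddef.h>

/* Assumed for illustration: 16KB MM pages (an IA64 config), 4KB VT-d pages */
#define PAGE_SHIFT     14
#define PAGE_SIZE      (1UL << PAGE_SHIFT)
#define PAGE_MASK      (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define VTD_PAGE_SHIFT 12
#define VTD_PAGE_SIZE  (1UL << VTD_PAGE_SHIFT)

/* The helper moved by this patch: round the span up to the MM page
 * size first, then express the result as a count of VT-d pages. */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;	/* keep only the offset within the MM page */
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

int main(void)
{
	/* An sg element starting 4KB into its page, 8KB long */
	unsigned long offset = 0x1000;
	size_t length = 0x2000;

	/* Old hand-coded rounding: rounds only to the 4KB VT-d page size */
	unsigned long old_res = (offset + length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;

	/* New: rounds up to the 16KB MM page, covering the whole backing page */
	unsigned long new_res = aligned_nrpages(offset, length);

	printf("old=%lu VT-d pages, new=%lu VT-d pages\n", old_res, new_res);
	/* prints: old=3 VT-d pages, new=4 VT-d pages */
	return 0;
}

With 4KB MM pages (x86) the two formulas agree; the divergence above is why the old code under-counted on IA64.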
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--	drivers/pci/intel-iommu.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index ebc9b8dca881..11b317a78b49 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1648,6 +1648,14 @@ static int domain_context_mapped(struct pci_dev *pdev)
 					     tmp->devfn);
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+					    size_t size)
+{
+	host_addr &= ~PAGE_MASK;
+	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			    struct scatterlist *sg, unsigned long phys_pfn,
 			    unsigned long nr_pages, int prot)
@@ -1675,7 +1683,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 		uint64_t tmp;
 
 		if (!sg_res) {
-			sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
 			pteval = page_to_phys(sg_page(sg)) | prot;
@@ -2415,14 +2423,6 @@ error:
 	return ret;
 }
 
-/* Returns a number of VTD pages, but aligned to MM page size */
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
-					    size_t size)
-{
-	host_addr &= ~PAGE_MASK;
-	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
-}
-
 /* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
@@ -2875,7 +2875,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 
 	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
 
-	ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
 	if (unlikely(ret)) {
 		/* clear the page */
 		dma_pte_clear_range(domain, start_vpfn,
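
[Editor's note: on why the last hunk drops mm_to_dma_pfn(): as the commit message states, intel_map_sg() has already accumulated 'size' as a sum of aligned_nrpages() results, i.e. a count of VT-d pages, so converting it again scales it a second time. A minimal standalone sketch of that double conversion, reusing the assumed 16KB/4KB page sizes from the earlier example; mm_to_dma_pfn() here mirrors the driver's shift-based conversion of this era.]

#include <stdio.h>

#define PAGE_SHIFT     14 /* assumed 16KB MM pages, as in the sketch above */
#define VTD_PAGE_SHIFT 12 /* 4KB VT-d pages */

/* MM-pfn to VT-d-pfn conversion, mirroring intel-iommu.c of this era */
static unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
	/* 'size' as intel_map_sg() computes it: a sum of aligned_nrpages()
	 * results, i.e. already a count of VT-d pages */
	unsigned long size = 4;

	/* The removed call treated that VT-d count as an MM count and
	 * scaled it again, here by PAGE_SIZE/VTD_PAGE_SIZE = 4 */
	printf("correct nr_pages=%lu, double-converted=%lu\n",
	       size, mm_to_dma_pfn(size));
	/* prints: correct nr_pages=4, double-converted=16 */
	return 0;
}

On x86, PAGE_SHIFT equals VTD_PAGE_SHIFT, so the extra conversion was a no-op there; the over-sized mapping only showed up on larger-page architectures such as IA64.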