author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2008-10-15 03:08:28 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2008-10-18 09:46:16 -0400
commit	bb9e6d65078da2f38cfe1067cfd31a896ca867c0 (patch)
tree	1cfd3a2213609aeeee0bcee160ab15645dd29c38 /drivers/pci/intel-iommu.c
parent	f609891f428e1c20e270e7c350daf8c93cc459d7 (diff)
intel-iommu: use coherent_dma_mask in alloc_coherent
This patch fixes intel-iommu to use dev->coherent_dma_mask in alloc_coherent. Currently, intel-iommu uses dev->dma_mask in alloc_coherent, but alloc_coherent is supposed to use coherent_dma_mask. This could break drivers that use a smaller coherent_dma_mask than dma_mask (though the current code works for the majority, which use the same mask for coherent_dma_mask and dma_mask).

[dwmw2: dma_mask can be bigger than 'unsigned long']

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Reviewed-by: Grant Grundler <grundler@parisc-linux.org>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
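For context, the case this change guards against is a device whose streaming DMA mask and coherent DMA mask differ. Below is a minimal, hypothetical probe sketch using the PCI DMA API of this period (pci_set_dma_mask()/pci_set_consistent_dma_mask()); the example_probe() name and the 64-bit/32-bit mask split are illustrative assumptions, not taken from this commit.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical probe routine (illustration only, not from this commit).
 * The device can stream to 64-bit addresses, but its coherent buffers
 * (e.g. descriptor rings) must sit below 4GB, so the two masks differ.
 * Before this patch intel_alloc_coherent() consulted dma_mask (64-bit
 * here) and could return an address above 4GB; after it, coherent
 * allocations are bounded by coherent_dma_mask (32-bit here).
 */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (err)
		return err;

	err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (err)
		return err;

	/* ... the rest of probe: enable the device, map BARs, etc. ... */
	return 0;
}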
Diffstat (limited to 'drivers/pci/intel-iommu.c')
 drivers/pci/intel-iommu.c | 29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 2bf96babbc4f..d315e413fae0 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1762,14 +1762,14 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
 
 static struct iova *
 __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
-		   size_t size)
+		   size_t size, u64 dma_mask)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct iova *iova = NULL;
 
-	if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) {
-		iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
-	} else {
+	if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
+		iova = iommu_alloc_iova(domain, size, dma_mask);
+	else {
 		/*
 		 * First try to allocate an io virtual address in
 		 * DMA_32BIT_MASK and if that fails then try allocating
@@ -1777,7 +1777,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
 		 */
 		iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
 		if (!iova)
-			iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
+			iova = iommu_alloc_iova(domain, size, dma_mask);
 	}
 
 	if (!iova) {
@@ -1816,8 +1816,8 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
 	return domain;
 }
 
-dma_addr_t
-intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
+static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
+				     size_t size, int dir, u64 dma_mask)
 {
 	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
@@ -1836,7 +1836,7 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
 
 	size = aligned_size((u64)paddr, size);
 
-	iova = __intel_alloc_iova(hwdev, domain, size);
+	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
 	if (!iova)
 		goto error;
 
@@ -1878,6 +1878,13 @@ error:
 	return 0;
 }
 
+dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
+			    size_t size, int dir)
+{
+	return __intel_map_single(hwdev, paddr, size, dir,
+				  to_pci_dev(hwdev)->dma_mask);
+}
+
 static void flush_unmaps(void)
 {
 	int i, j;
@@ -1993,7 +2000,9 @@ void *intel_alloc_coherent(struct device *hwdev, size_t size,
 		return NULL;
 	memset(vaddr, 0, size);
 
-	*dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL);
+	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
+					 DMA_BIDIRECTIONAL,
+					 hwdev->coherent_dma_mask);
 	if (*dma_handle)
 		return vaddr;
 	free_pages((unsigned long)vaddr, order);
@@ -2097,7 +2106,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 		size += aligned_size((u64)addr, sg->length);
 	}
 
-	iova = __intel_alloc_iova(hwdev, domain, size);
+	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;
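As a caller-side sketch of the effect (illustration only; example_use(), the buffer names, and the mask setup from the earlier probe sketch are assumptions, not part of this commit), the two DMA API paths on a machine using intel-iommu are now bounded by different masks; note that these mapping paths return 0 on failure, as the *dma_handle check above shows.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/*
 * Illustration only, not part of this commit: with dma_mask = 64-bit
 * and coherent_dma_mask = 32-bit, the two mapping paths now honour
 * different masks.
 */
static int example_use(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t ring_dma, buf_dma;
	void *ring;

	/*
	 * dma_alloc_coherent() reaches intel_alloc_coherent(), which after
	 * this patch calls __intel_map_single() with coherent_dma_mask, so
	 * ring_dma is allocated below 4GB.
	 */
	ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/*
	 * dma_map_single() reaches intel_map_single(), which still passes
	 * the full dma_mask, so buf_dma may legitimately land above 4GB.
	 */
	buf_dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (!buf_dma) {
		dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
		return -ENOMEM;
	}

	/* ... program the device, then tear down the mappings ... */
	dma_unmap_single(&pdev->dev, buf_dma, len, DMA_TO_DEVICE);
	dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}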