author		Ingo Molnar <mingo@elte.hu>	2008-04-22 05:09:04 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-24 17:15:43 -0400
commit		6865f0d19306daf3a3bf28cfcfe74639d1bc0df4 (patch)
tree		f4253aec566d4bd663be11a7276c4353af52affb /drivers/pci/intel-iommu.c
parent		94bc891b00e40cbec375feb4568780af183fd7f4 (diff)
intel-iommu.c: dma ops fix
Stephen Rothwell noticed that:

Commit 2be621498d461b63ca6124f86e3b9582e1a8e722 ("x86: dma-ops on highmem
fix") in Linus' tree introduced a new warning (noticed in the x86_64
allmodconfig build of linux-next):

  drivers/pci/intel-iommu.c:2240: warning: initialization from incompatible pointer type

Which points at an instance of map_single that needs updating. Fix it to
the new prototype.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
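For context, the mismatch looks roughly like this; a minimal sketch assuming
the 2.6.25-era x86 struct dma_mapping_ops layout and the intel_dma_ops
initializer near line 2240 (hooks other than map_single elided):

/*
 * After 2be621498d ("x86: dma-ops on highmem fix"), the map_single
 * hook takes a phys_addr_t rather than a void * (remaining hooks
 * elided from this sketch):
 */
struct dma_mapping_ops {
	dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
				 size_t size, int direction);
	/* ... */
};

/*
 * intel-iommu.c still declared
 *
 *	static dma_addr_t intel_map_single(struct device *hwdev,
 *			void *addr, size_t size, int dir);
 *
 * so this initializer triggered "initialization from incompatible
 * pointer type" until intel_map_single was converted:
 */
static struct dma_mapping_ops intel_dma_ops = {
	.map_single = intel_map_single,
	/* ... */
};

Since intel_map_single() now takes a physical address, its in-file caller
intel_alloc_coherent() passes virt_to_bus(vaddr) instead of the raw pointer;
that is the last hunk below.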
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--	drivers/pci/intel-iommu.c	35
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 301c68fab03b..1fd8bb765702 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1905,32 +1905,31 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
 	return domain;
 }
 
-static dma_addr_t intel_map_single(struct device *hwdev, void *addr,
-	size_t size, int dir)
+static dma_addr_t
+intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
 {
 	struct pci_dev *pdev = to_pci_dev(hwdev);
-	int ret;
 	struct dmar_domain *domain;
-	unsigned long start_addr;
+	unsigned long start_paddr;
 	struct iova *iova;
 	int prot = 0;
+	int ret;
 
 	BUG_ON(dir == DMA_NONE);
 	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
-		return virt_to_bus(addr);
+		return paddr;
 
 	domain = get_valid_domain_for_dev(pdev);
 	if (!domain)
 		return 0;
 
-	addr = (void *)virt_to_phys(addr);
-	size = aligned_size((u64)addr, size);
+	size = aligned_size((u64)paddr, size);
 
 	iova = __intel_alloc_iova(hwdev, domain, size);
 	if (!iova)
 		goto error;
 
-	start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+	start_paddr = iova->pfn_lo << PAGE_SHIFT_4K;
 
 	/*
 	 * Check if DMAR supports zero-length reads on write only
@@ -1942,33 +1941,33 @@ static dma_addr_t intel_map_single(struct device *hwdev, void *addr,
 	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
 		prot |= DMA_PTE_WRITE;
 	/*
-	 * addr - (addr + size) might be partial page, we should map the whole
+	 * paddr - (paddr + size) might be partial page, we should map the whole
 	 * page. Note: if two part of one page are separately mapped, we
-	 * might have two guest_addr mapping to the same host addr, but this
+	 * might have two guest_addr mapping to the same host paddr, but this
 	 * is not a big problem
 	 */
-	ret = domain_page_mapping(domain, start_addr,
-		((u64)addr) & PAGE_MASK_4K, size, prot);
+	ret = domain_page_mapping(domain, start_paddr,
+		((u64)paddr) & PAGE_MASK_4K, size, prot);
 	if (ret)
 		goto error;
 
 	pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
-		pci_name(pdev), size, (u64)addr,
-		size, (u64)start_addr, dir);
+		pci_name(pdev), size, (u64)paddr,
+		size, (u64)start_paddr, dir);
 
 	/* it's a non-present to present mapping */
 	ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
-		start_addr, size >> PAGE_SHIFT_4K, 1);
+		start_paddr, size >> PAGE_SHIFT_4K, 1);
 	if (ret)
 		iommu_flush_write_buffer(domain->iommu);
 
-	return (start_addr + ((u64)addr & (~PAGE_MASK_4K)));
+	return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K)));
 
 error:
 	if (iova)
 		__free_iova(&domain->iovad, iova);
 	printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
-		pci_name(pdev), size, (u64)addr, dir);
+		pci_name(pdev), size, (u64)paddr, dir);
 	return 0;
 }
 
@@ -2082,7 +2081,7 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size,
 		return NULL;
 	memset(vaddr, 0, size);
 
-	*dma_handle = intel_map_single(hwdev, vaddr, size, DMA_BIDIRECTIONAL);
+	*dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL);
 	if (*dma_handle)
 		return vaddr;
 	free_pages((unsigned long)vaddr, order);