author    Christoph Hellwig <hch@lst.de>    2019-04-16 14:23:45 -0400
committer David S. Miller <davem@davemloft.net>    2019-05-08 20:11:57 -0400
commit    7e996890b88078011bfb55ce072712d464207dad (patch)
tree      efe50f3b32fada0819709f90c604a258c7b09e8d /arch/sparc/mm
parent    8668b38c1c7720baf76da15a7a7eef43ae0c65a4 (diff)
sparc/iommu: fix __sbus_iommu_map_page for highmem pages
__sbus_iommu_map_page currently assumes all pages are mapped into the kernel direct mapping. Switch to using physical addresses instead of virtual ones for all the normal mapping operations, and only use the virtual address for cache flushing when not operating on a highmem page.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
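The bug matters because page_address() is only meaningful for pages in the kernel direct mapping; for a highmem page it returns NULL, so the old code computed a bogus virtual address and then handed virt_to_phys() of it to the IOMMU. The pattern the patch adopts can be sketched on its own (the helper name highmem_safe_paddr is illustrative, not from this file; flush_page_for_dma() is the sparc32 cache-flush hook used in the real code):

#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

/*
 * Illustrative sketch of the highmem-safe pattern: derive the DMA
 * target from the physical address, which exists for every page, and
 * only dereference a kernel virtual address for cache maintenance
 * when the page is direct-mapped (i.e. not highmem).
 */
static phys_addr_t highmem_safe_paddr(struct page *page,
		unsigned long offset, size_t len)
{
	/* page_to_phys() is valid for all pages, highmem included. */
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!PageHighMem(page)) {
		/*
		 * page_address() returns NULL for highmem pages, so it
		 * may only be used under this check.
		 */
		unsigned long vaddr = (unsigned long)page_address(page) + offset;
		unsigned long p;

		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
			flush_page_for_dma(p);	/* sparc32 flush hook */
	}

	return paddr;
}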
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--    arch/sparc/mm/iommu.c    15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 7e191c8ae46a..37b5ce7657f6 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -209,24 +209,23 @@ static u32 iommu_get_one(struct device *dev, phys_addr_t paddr, int npages)
 static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t len, bool per_page_flush)
 {
-	void *vaddr = page_address(page) + offset;
-	unsigned long off = (unsigned long)vaddr & ~PAGE_MASK;
+	phys_addr_t paddr = page_to_phys(page) + offset;
+	unsigned long off = paddr & ~PAGE_MASK;
 	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	/* XXX So what is maxphys for us and how do drivers know it? */
 	if (!len || len > 256 * 1024)
 		return DMA_MAPPING_ERROR;
 
-	if (per_page_flush) {
-		unsigned long p = (unsigned long)vaddr & PAGE_MASK;
+	if (per_page_flush && !PageHighMem(page)) {
+		unsigned long vaddr, p;
 
-		while (p < (unsigned long)vaddr + len) {
+		vaddr = (unsigned long)page_address(page) + offset;
+		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
 			flush_page_for_dma(p);
-			p += PAGE_SIZE;
-		}
 	}
 
-	return iommu_get_one(dev, virt_to_phys(vaddr), npages) + off;
+	return iommu_get_one(dev, paddr, npages) + off;
 }
 
 static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
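For context, the per_page_flush flag is set by two thin wrappers in the same file: one flushes the whole cache once up front and passes false, the other passes true so only the pages actually being mapped are flushed per page. A sketch reconstructed from the surrounding code at this commit (signatures follow the .map_page prototype of struct dma_map_ops; treat the exact bodies as an assumption, not part of this hunk):

static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	/* Flush everything once, then skip per-page flushing. */
	flush_page_for_dma(0);
	return __sbus_iommu_map_page(dev, page, offset, len, false);
}

static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	/* Flush only the pages covered by this mapping. */
	return __sbus_iommu_map_page(dev, page, offset, len, true);
}

Splitting the flush strategy into the wrappers keeps __sbus_iommu_map_page itself policy-free, which is why the highmem guard only has to be added in one place.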