author    Benjamin Herrenschmidt <benh@kernel.crashing.org>    2008-01-07 18:34:22 -0500
committer Paul Mackerras <paulus@samba.org>                    2008-01-14 23:39:59 -0500
commit    d262c32a4bcc3e5fda0325a64e53c25fe1e999d7 (patch)
tree      aa09101b13dac2bca27c5bab878c72c9c9015505 /arch/powerpc/kernel/iommu.c
parent    031f2dcd7075e218e74dd7f942ad015cf82dffab (diff)
[POWERPC] Workaround for iommu page alignment
Commit 5d2efba64b231a1733c4048d1708d77e07f26426 changed our iommu code
so that it always uses an iommu page size of 4kB. That means with our
current code, drivers may do a dma_map_sg() of a 64kB page and obtain
a dma_addr_t that is only 4k aligned.

This works fine in most cases except for some infiniband HW it seems,
where they tell the HW about the page size and it ignores the low bits
of the DMA address.

This works around it by making our IOMMU code enforce a PAGE_SIZE alignment
for mappings of objects that are page aligned in the first place and whose
size is larger or equal to a page.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
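[Editor's note: the alignment rule the patch adds can be illustrated with a
minimal standalone C sketch. This is user-space code, not the kernel code
itself; the constants assume a 64kB kernel PAGE_SIZE and the fixed 4kB IOMMU
page size, and iommu_align_order() is a hypothetical helper name invented for
this example.]

#include <stdio.h>

#define IOMMU_PAGE_SHIFT 12                  /* fixed 4kB IOMMU page */
#define PAGE_SHIFT       16                  /* 64kB kernel page */
#define PAGE_SIZE        (1UL << PAGE_SHIFT)
#define PAGE_MASK        (~(PAGE_SIZE - 1))

/* Log2 alignment (in IOMMU pages) to request from the allocator:
 * non-zero only for buffers that are page aligned and at least one
 * kernel page long, mirroring the patch's condition. */
static unsigned int iommu_align_order(unsigned long vaddr, unsigned long len)
{
	if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && len >= PAGE_SIZE &&
	    (vaddr & ~PAGE_MASK) == 0)
		return PAGE_SHIFT - IOMMU_PAGE_SHIFT;    /* 16 - 12 = 4 */
	return 0;
}

int main(void)
{
	/* Page-aligned 64kB buffer: order 4, i.e. the DMA address will
	 * be aligned to 2^4 = 16 IOMMU pages (64kB). */
	printf("aligned 64kB buffer -> order %u\n",
	       iommu_align_order(0x10000, 0x10000));

	/* 4kB-aligned sub-page buffer: order 0, old behaviour. */
	printf("small buffer        -> order %u\n",
	       iommu_align_order(0x11000, 0x1000));
	return 0;
}

The value passed to the allocator is a log2 order in IOMMU pages, so order 4
requests a range aligned to 2^4 * 4kB = 64kB, and a page-aligned buffer now
also gets a page-aligned dma_addr_t.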
Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r--    arch/powerpc/kernel/iommu.c    17
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 2d0c9ef555e9..79a85d656871 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -278,6 +278,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	unsigned long flags;
 	struct scatterlist *s, *outs, *segstart;
 	int outcount, incount, i;
+	unsigned int align;
 	unsigned long handle;
 
 	BUG_ON(direction == DMA_NONE);
@@ -309,7 +310,12 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		/* Allocate iommu entries for that segment */
 		vaddr = (unsigned long) sg_virt(s);
 		npages = iommu_num_pages(vaddr, slen);
-		entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
+		align = 0;
+		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
+		    (vaddr & ~PAGE_MASK) == 0)
+			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+		entry = iommu_range_alloc(tbl, npages, &handle,
+					  mask >> IOMMU_PAGE_SHIFT, align);
 
 		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -572,7 +578,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 {
 	dma_addr_t dma_handle = DMA_ERROR_CODE;
 	unsigned long uaddr;
-	unsigned int npages;
+	unsigned int npages, align;
 
 	BUG_ON(direction == DMA_NONE);
 
@@ -580,8 +586,13 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 	npages = iommu_num_pages(uaddr, size);
 
 	if (tbl) {
+		align = 0;
+		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
+		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
+			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+
 		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
-					 mask >> IOMMU_PAGE_SHIFT, 0);
+					 mask >> IOMMU_PAGE_SHIFT, align);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit()) {
 				printk(KERN_INFO "iommu_alloc failed, "