Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/iommu.c  17
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 2d0c9ef555e..79a85d65687 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -278,6 +278,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	unsigned long flags;
 	struct scatterlist *s, *outs, *segstart;
 	int outcount, incount, i;
+	unsigned int align;
 	unsigned long handle;
 
 	BUG_ON(direction == DMA_NONE);
@@ -309,7 +310,12 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		/* Allocate iommu entries for that segment */
 		vaddr = (unsigned long) sg_virt(s);
 		npages = iommu_num_pages(vaddr, slen);
-		entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
+		align = 0;
+		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
+		    (vaddr & ~PAGE_MASK) == 0)
+			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+		entry = iommu_range_alloc(tbl, npages, &handle,
+					  mask >> IOMMU_PAGE_SHIFT, align);
 
 		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -572,7 +578,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 {
 	dma_addr_t dma_handle = DMA_ERROR_CODE;
 	unsigned long uaddr;
-	unsigned int npages;
+	unsigned int npages, align;
 
 	BUG_ON(direction == DMA_NONE);
 
@@ -580,8 +586,13 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 	npages = iommu_num_pages(uaddr, size);
 
 	if (tbl) {
+		align = 0;
+		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
+		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
+			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+
 		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
-					 mask >> IOMMU_PAGE_SHIFT, 0);
+					 mask >> IOMMU_PAGE_SHIFT, align);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit()) {
 				printk(KERN_INFO "iommu_alloc failed, "
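
Note: the change above makes both iommu_map_sg() and iommu_map_single() request natural alignment to the system page size whenever the IOMMU uses smaller pages than the kernel and the buffer is itself page aligned and at least one page long. The stand-alone sketch below only illustrates that alignment-order arithmetic; the shift values and the helper name iommu_align_order() are assumptions for illustration, not part of the kernel API touched by this patch.

/* Minimal user-space sketch of the alignment-order calculation; the
 * PAGE_SHIFT / IOMMU_PAGE_SHIFT values are assumed for illustration. */
#include <stdio.h>

#define PAGE_SHIFT		16		/* assumed: 64K system pages */
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define IOMMU_PAGE_SHIFT	12		/* assumed: 4K IOMMU pages */

/* Returns the order, in IOMMU pages, to which an allocation should be
 * aligned: nonzero only when IOMMU pages are smaller than system pages
 * and the buffer is page aligned and at least one page long. */
static unsigned int iommu_align_order(unsigned long vaddr, unsigned long size)
{
	unsigned int align = 0;

	if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
	    (vaddr & ~PAGE_MASK) == 0)
		align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

	return align;
}

int main(void)
{
	/* Page-aligned 64K buffer: order 4, i.e. aligned to 16 IOMMU pages. */
	printf("align = %u\n", iommu_align_order(0x20000, 0x10000));
	/* Unaligned or short buffer: no extra alignment requested. */
	printf("align = %u\n", iommu_align_order(0x20123, 0x200));
	return 0;
}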