Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
 -rw-r--r--  arch/powerpc/kernel/iommu.c  36
 1 file changed, 26 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d9a7fdef59b9..4eba60a32890 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -61,6 +61,7 @@ __setup("iommu=", setup_iommu);
 static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                                        unsigned long npages,
                                        unsigned long *handle,
+                                       unsigned long mask,
                                        unsigned int align_order)
 {
         unsigned long n, end, i, start;
@@ -97,9 +98,21 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
          */
         if (start >= limit)
                 start = largealloc ? tbl->it_largehint : tbl->it_hint;
 
 again:
 
+        if (limit + tbl->it_offset > mask) {
+                limit = mask - tbl->it_offset + 1;
+                /* If we're constrained on address range, first try
+                 * at the masked hint to avoid O(n) search complexity,
+                 * but on second pass, start at 0.
+                 */
+                if ((start & mask) >= limit || pass > 0)
+                        start = 0;
+                else
+                        start &= mask;
+        }
+
         n = find_next_zero_bit(tbl->it_map, limit, start);
 
         /* Align allocation */
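The new block above constrains the bitmap search to table entries whose DMA address (entry number plus tbl->it_offset, counted in pages) still fits under the device's mask, and keeps the cached hint on the first pass so the common case stays cheap. A minimal user-space sketch of the same search strategy follows; every name in it (TABLE_SIZE, table_offset, find_zero, range_alloc) is an illustrative stand-in, not the kernel's API.

    /* Minimal sketch of a mask-constrained bitmap range search, modelled
     * on the hunk above. All names are illustrative assumptions.
     * Build: cc -std=c99 -o demo demo.c
     */
    #include <stdio.h>

    #define TABLE_SIZE 64UL                 /* entries in the bitmap */

    static unsigned long bitmap;            /* bit i set => entry i in use */
    static unsigned long table_offset = 8;  /* stands in for tbl->it_offset */

    /* Return the first zero bit in [start, limit), or limit if none. */
    static unsigned long find_zero(unsigned long start, unsigned long limit)
    {
            for (unsigned long i = start; i < limit; i++)
                    if (!(bitmap & (1UL << i)))
                            return i;
            return limit;
    }

    static unsigned long range_alloc(unsigned long mask, unsigned long hint)
    {
            unsigned long limit = TABLE_SIZE;
            unsigned long start = hint;
            int pass = 0;

    again:
            /* Entry n maps to DMA page n + table_offset, so refuse entries
             * whose mapped page would exceed the device's mask. */
            if (limit + table_offset > mask) {
                    limit = mask - table_offset + 1;
                    /* First pass: search near the masked hint; on the
                     * second pass, restart from 0 (as the patch does). */
                    if ((start & mask) >= limit || pass > 0)
                            start = 0;
                    else
                            start &= mask;
            }

            unsigned long n = find_zero(start, limit);
            if (n == limit) {
                    if (pass == 0) {        /* wrap around once, then fail */
                            pass++;
                            start = 0;
                            goto again;
                    }
                    return ~0UL;            /* allocation failure */
            }
            bitmap |= 1UL << n;             /* claim the entry */
            return n;
    }

    int main(void)
    {
            /* Device that can only reach DMA pages 0..31 (mask = 31). */
            unsigned long e = range_alloc(31, 20);
            printf("allocated entry %lu -> dma page %lu\n", e, e + table_offset);
            return 0;
    }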
@@ -150,14 +163,14 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 
 static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
                 unsigned int npages, enum dma_data_direction direction,
-                unsigned int align_order)
+                unsigned long mask, unsigned int align_order)
 {
         unsigned long entry, flags;
         dma_addr_t ret = DMA_ERROR_CODE;
 
         spin_lock_irqsave(&(tbl->it_lock), flags);
 
-        entry = iommu_range_alloc(tbl, npages, NULL, align_order);
+        entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);
 
         if (unlikely(entry == DMA_ERROR_CODE)) {
                 spin_unlock_irqrestore(&(tbl->it_lock), flags);
@@ -236,7 +249,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 
 int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 struct scatterlist *sglist, int nelems,
-                enum dma_data_direction direction)
+                unsigned long mask, enum dma_data_direction direction)
 {
         dma_addr_t dma_next = 0, dma_addr;
         unsigned long flags;
@@ -274,7 +287,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 vaddr = (unsigned long)page_address(s->page) + s->offset;
                 npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
                 npages >>= PAGE_SHIFT;
-                entry = iommu_range_alloc(tbl, npages, &handle, 0);
+                entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);
 
                 DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
 
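Note the unit conversion at this call site: the device mask arrives in bytes, but iommu_range_alloc() reasons in IOMMU pages, so callers shift it down by PAGE_SHIFT first. A tiny worked example, assuming 4 KiB pages (an assumption for illustration; PAGE_SHIFT is platform-defined, and PAGE_SHIFT_ASSUMED below is a stand-in):

    /* Worked example of the mask >> PAGE_SHIFT conversion, assuming
     * 4 KiB pages. PAGE_SHIFT_ASSUMED is an illustrative stand-in. */
    #include <stdio.h>

    #define PAGE_SHIFT_ASSUMED 12

    int main(void)
    {
            unsigned long byte_mask = 0x3fffffffUL;  /* 30-bit DMA mask */
            unsigned long page_mask = byte_mask >> PAGE_SHIFT_ASSUMED;

            /* Prints 0x3ffff: the highest DMA page number reachable */
            printf("page mask = %#lx\n", page_mask);
            return 0;
    }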
@@ -479,7 +492,8 @@ void iommu_free_table(struct device_node *dn)
  * byte within the page as vaddr.
  */
 dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
-                size_t size, enum dma_data_direction direction)
+                size_t size, unsigned long mask,
+                enum dma_data_direction direction)
 {
         dma_addr_t dma_handle = DMA_ERROR_CODE;
         unsigned long uaddr;
@@ -492,7 +506,8 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
         npages >>= PAGE_SHIFT;
 
         if (tbl) {
-                dma_handle = iommu_alloc(tbl, vaddr, npages, direction, 0);
+                dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
+                                         mask >> PAGE_SHIFT, 0);
                 if (dma_handle == DMA_ERROR_CODE) {
                         if (printk_ratelimit()) {
                                 printk(KERN_INFO "iommu_alloc failed, "
@@ -521,7 +536,7 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
  * to the dma address (mapping) of the first page.
  */
 void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-                dma_addr_t *dma_handle, gfp_t flag)
+                dma_addr_t *dma_handle, unsigned long mask, gfp_t flag)
 {
         void *ret = NULL;
         dma_addr_t mapping;
@@ -551,7 +566,8 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
         memset(ret, 0, size);
 
         /* Set up tces to cover the allocated range */
-        mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, order);
+        mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL,
+                              mask >> PAGE_SHIFT, order);
         if (mapping == DMA_ERROR_CODE) {
                 free_pages((unsigned long)ret, order);
                 ret = NULL;
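The remaining hunks all make the same shape of change: each public entry point grows an unsigned long mask parameter and threads it down, shifted into page units, to the range allocator. A hypothetical caller might look like the fragment below. This is a sketch against the new signatures only, not standalone code; the diff does not show how existing callers derive the mask, so dev->coherent_dma_mask here is an assumption.

    /* Hypothetical in-kernel caller, sketched against the new
     * iommu_alloc_coherent() signature in this diff. The types and
     * GFP_ATOMIC come from the kernel headers iommu.c already uses;
     * deriving the mask from dev->coherent_dma_mask is an assumption. */
    static void *alloc_dma_buffer(struct iommu_table *tbl, struct device *dev,
                                  size_t size, dma_addr_t *handle)
    {
            /* The new 'mask' argument lets the table allocator avoid
             * handing out DMA addresses the device cannot reach. */
            return iommu_alloc_coherent(tbl, size, handle,
                                        dev->coherent_dma_mask, GFP_ATOMIC);
    }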