author		FUJITA Tomonori <tomof@acm.org>		2008-02-05 01:28:08 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-05 12:44:11 -0500
commit		fb3475e9b6bfa666107512fbd6006c26014f04b8 (patch)
tree		d845a940c7a118539f58c4f52337d4b49a1222ba /arch/powerpc/kernel/iommu.c
parent		0291df8cc9dac09c303d21d5bcd2ad73762c836a (diff)
iommu sg: powerpc: convert iommu to use the IOMMU helper
This patch converts PPC's IOMMU to use the IOMMU helper functions. The IOMMU
no longer allocates a memory area that spans an LLD's segment boundary.

iseries_hv_alloc and iseries_hv_map don't have a proper device struct, so a
4GB boundary is used for them.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
 arch/powerpc/kernel/iommu.c | 64 ++++++++++++++++++++----------------------
 1 file changed, 31 insertions(+), 33 deletions(-)
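The heavy lifting moves into the generic helper: iommu_area_alloc() scans the
table bitmap for npages free entries whose DMA range does not cross a multiple
of boundary_size. A minimal sketch of that span check follows, using the
helper's parameter names (shift is the table offset in IOMMU pages;
boundary_size is also in pages) but not the verbatim lib/iommu-helper.c code:

/* Sketch: does a candidate range [index, index + npages) cross a
 * boundary_size multiple?  All quantities are in IOMMU pages. */
static int spans_boundary(unsigned long index, unsigned long npages,
			  unsigned long shift, unsigned long boundary_size)
{
	/* Position of the candidate start within its boundary window. */
	unsigned long offset = (shift + index) & (boundary_size - 1);

	/* Crosses iff fewer than npages fit before the next boundary. */
	return offset + npages > boundary_size;
}

Because the helper performs this check (and the bitmap bookkeeping) itself,
the converted iommu_range_alloc() below can drop its hand-rolled
test_bit()/__set_bit() loops.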
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d0e6fac4ef42..c42219c0afda 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -31,6 +31,7 @@
 #include <linux/string.h>
 #include <linux/dma-mapping.h>
 #include <linux/bitops.h>
+#include <linux/iommu-helper.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/iommu.h>
@@ -81,17 +82,19 @@ static int __init setup_iommu(char *str)
 __setup("protect4gb=", setup_protect4gb);
 __setup("iommu=", setup_iommu);
 
-static unsigned long iommu_range_alloc(struct iommu_table *tbl,
+static unsigned long iommu_range_alloc(struct device *dev,
+				       struct iommu_table *tbl,
 				       unsigned long npages,
 				       unsigned long *handle,
 				       unsigned long mask,
 				       unsigned int align_order)
 {
-	unsigned long n, end, i, start;
+	unsigned long n, end, start;
 	unsigned long limit;
 	int largealloc = npages > 15;
 	int pass = 0;
 	unsigned long align_mask;
+	unsigned long boundary_size;
 
 	align_mask = 0xffffffffffffffffl >> (64 - align_order);
 
@@ -136,14 +139,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 		start &= mask;
 	}
 
-	n = find_next_zero_bit(tbl->it_map, limit, start);
-
-	/* Align allocation */
-	n = (n + align_mask) & ~align_mask;
-
-	end = n + npages;
-
-	if (unlikely(end >= limit)) {
+	if (dev)
+		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+				      1 << IOMMU_PAGE_SHIFT);
+	else
+		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
+	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
+
+	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
+			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
+			     align_mask);
+	if (n == -1) {
 		if (likely(pass < 2)) {
 			/* First failure, just rescan the half of the table.
 			 * Second failure, rescan the other half of the table.
@@ -158,14 +164,7 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 		}
 	}
 
-	for (i = n; i < end; i++)
-		if (test_bit(i, tbl->it_map)) {
-			start = i+1;
-			goto again;
-		}
-
-	for (i = n; i < end; i++)
-		__set_bit(i, tbl->it_map);
+	end = n + npages;
 
 	/* Bump the hint to a new block for small allocs. */
 	if (largealloc) {
@@ -184,16 +183,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 	return n;
 }
 
-static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
-		unsigned int npages, enum dma_data_direction direction,
-		unsigned long mask, unsigned int align_order)
+static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
+			      void *page, unsigned int npages,
+			      enum dma_data_direction direction,
+			      unsigned long mask, unsigned int align_order)
 {
 	unsigned long entry, flags;
 	dma_addr_t ret = DMA_ERROR_CODE;
 
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
-	entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);
+	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 
 	if (unlikely(entry == DMA_ERROR_CODE)) {
 		spin_unlock_irqrestore(&(tbl->it_lock), flags);
@@ -224,7 +224,6 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 			 unsigned int npages)
 {
 	unsigned long entry, free_entry;
-	unsigned long i;
 
 	entry = dma_addr >> IOMMU_PAGE_SHIFT;
 	free_entry = entry - tbl->it_offset;
@@ -246,9 +245,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	}
 
 	ppc_md.tce_free(tbl, entry, npages);
-
-	for (i = 0; i < npages; i++)
-		__clear_bit(free_entry+i, tbl->it_map);
+	iommu_area_free(tbl->it_map, free_entry, npages);
 }
 
 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
@@ -317,7 +314,7 @@ int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
 		    (vaddr & ~PAGE_MASK) == 0)
 			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
-		entry = iommu_range_alloc(tbl, npages, &handle,
+		entry = iommu_range_alloc(dev, tbl, npages, &handle,
 					  mask >> IOMMU_PAGE_SHIFT, align);
 
 		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
@@ -574,9 +571,9 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
  * need not be page aligned, the dma_addr_t returned will point to the same
  * byte within the page as vaddr.
  */
-dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
-			    size_t size, unsigned long mask,
-			    enum dma_data_direction direction)
+dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
+			    void *vaddr, size_t size, unsigned long mask,
+			    enum dma_data_direction direction)
 {
 	dma_addr_t dma_handle = DMA_ERROR_CODE;
 	unsigned long uaddr;
@@ -593,7 +590,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 	    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
 		align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
 
-	dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
+	dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
 				 mask >> IOMMU_PAGE_SHIFT, align);
 	if (dma_handle == DMA_ERROR_CODE) {
 		if (printk_ratelimit()) {
@@ -625,8 +622,9 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
  * Returns the virtual address of the buffer and sets dma_handle
  * to the dma address (mapping) of the first page.
  */
-void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
+void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+			   size_t size, dma_addr_t *dma_handle,
+			   unsigned long mask, gfp_t flag, int node)
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
@@ -660,7 +658,7 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
 	/* Set up tces to cover the allocated range */
 	nio_pages = size >> IOMMU_PAGE_SHIFT;
 	io_order = get_iommu_order(size);
-	mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
+	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 			      mask >> IOMMU_PAGE_SHIFT, io_order);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
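For a sense of the new boundary_size arithmetic: dma_get_seg_boundary()
returns an inclusive mask, hence the "+ 1" before aligning, and the byte
count is shifted down to IOMMU pages before being passed to
iommu_area_alloc(). A standalone illustration, assuming 4KB IOMMU pages, a
64-bit host, and a made-up 64KB segment mask (not values from this patch):

#include <stdio.h>

#define IOMMU_PAGE_SHIFT 12	/* assumption: 4KB IOMMU pages */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	/* Example mask an LLD might set: 64KB - 1.  Without a device,
	 * the patch falls back to a 4GB boundary (mask 0xffffffff). */
	unsigned long seg_mask = 0xffffUL;
	unsigned long boundary = ALIGN(seg_mask + 1, 1 << IOMMU_PAGE_SHIFT);

	printf("boundary: %#lx bytes = %#lx IOMMU pages\n",
	       boundary, boundary >> IOMMU_PAGE_SHIFT);
	/* prints: boundary: 0x10000 bytes = 0x10 IOMMU pages */
	return 0;
}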