author	FUJITA Tomonori <tomof@acm.org>	2008-02-05 01:28:10 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-05 12:44:11 -0500
commit	1b39b077789955c8389488d53d075518fdcd582e (patch)
tree	b633d041bdc32959c118ec9ef77ee17bbd0b5b23 /arch
parent	383af9525bb27f927511874f6306247ec13f1c28 (diff)
iommu sg: x86: convert calgary IOMMU to use the IOMMU helper
This patch converts the Calgary IOMMU to use the IOMMU helper functions. The
IOMMU no longer allocates a memory area spanning an LLD's segment boundary.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
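The helper this patch converts to, iommu_area_alloc(), scans the IOMMU
allocation bitmap for a free run of pages and rejects any candidate range
that would straddle a boundary_size-aligned boundary; that is what keeps a
single mapping inside one LLD segment. The userspace sketch below
illustrates only that rejection rule; it is not the lib/iommu-helper.c
implementation, and map[], area_alloc() and crosses_boundary() are names
invented for the illustration.

/*
 * Minimal userspace sketch of boundary-respecting bitmap allocation in
 * the spirit of iommu_area_alloc(). NOT the kernel implementation: the
 * byte-per-page map and the naive scan are illustrative only.
 */
#include <stdio.h>
#include <string.h>

#define MAP_PAGES 64UL

static unsigned char map[MAP_PAGES];	/* one byte per page: 0 = free */

/* Reject ranges that straddle a boundary_size-aligned boundary. */
static int crosses_boundary(unsigned long start, unsigned long npages,
			    unsigned long boundary_size)
{
	unsigned long end = start + npages - 1;

	return (start / boundary_size) != (end / boundary_size);
}

/* Return first free run of npages not crossing a boundary, or ~0UL. */
static unsigned long area_alloc(unsigned long size, unsigned long start,
				unsigned long npages,
				unsigned long boundary_size)
{
	unsigned long i, j;

	for (i = start; i + npages <= size; i++) {
		if (crosses_boundary(i, npages, boundary_size))
			continue;
		for (j = 0; j < npages; j++)
			if (map[i + j])
				break;
		if (j == npages) {	/* found a free run: claim it */
			memset(&map[i], 1, npages);
			return i;
		}
	}
	return ~0UL;
}

int main(void)
{
	memset(map, 1, 4);	/* pages 0-3 already in use */
	printf("allocated at page %lu\n", area_alloc(MAP_PAGES, 0, 6, 8));
	return 0;
}

Built with a plain gcc invocation the demo prints "allocated at page 8":
the free run starting at page 4 would span pages 4-9 and cross the 8-page
boundary, so the allocator skips ahead to page 8.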
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/Kconfig	3
-rw-r--r--	arch/x86/kernel/pci-calgary_64.c	34
2 files changed, 23 insertions(+), 14 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 59eef1c7fdaa..c976eb41c5c8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -465,6 +465,9 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
 	  Calgary anyway, pass 'iommu=calgary' on the kernel command line.
 	  If unsure, say Y.
 
+config IOMMU_HELPER
+	def_bool CALGARY_IOMMU
+
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
 	bool
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 1fe7f043ebde..1b5464c2434f 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -35,6 +35,7 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/scatterlist.h>
+#include <linux/iommu-helper.h>
 #include <asm/gart.h>
 #include <asm/calgary.h>
 #include <asm/tce.h>
@@ -260,22 +261,28 @@ static void iommu_range_reserve(struct iommu_table *tbl,
 	spin_unlock_irqrestore(&tbl->it_lock, flags);
 }
 
-static unsigned long iommu_range_alloc(struct iommu_table *tbl,
-	unsigned int npages)
+static unsigned long iommu_range_alloc(struct device *dev,
+				       struct iommu_table *tbl,
+				       unsigned int npages)
 {
 	unsigned long flags;
 	unsigned long offset;
+	unsigned long boundary_size;
+
+	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+			      PAGE_SIZE) >> PAGE_SHIFT;
 
 	BUG_ON(npages == 0);
 
 	spin_lock_irqsave(&tbl->it_lock, flags);
 
-	offset = find_next_zero_string(tbl->it_map, tbl->it_hint,
-				       tbl->it_size, npages);
+	offset = iommu_area_alloc(tbl->it_map, tbl->it_size, tbl->it_hint,
+				  npages, 0, boundary_size, 0);
 	if (offset == ~0UL) {
 		tbl->chip_ops->tce_cache_blast(tbl);
-		offset = find_next_zero_string(tbl->it_map, 0,
-					       tbl->it_size, npages);
+
+		offset = iommu_area_alloc(tbl->it_map, tbl->it_size, 0,
+					  npages, 0, boundary_size, 0);
 		if (offset == ~0UL) {
 			printk(KERN_WARNING "Calgary: IOMMU full.\n");
 			spin_unlock_irqrestore(&tbl->it_lock, flags);
@@ -286,7 +293,6 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 		}
 	}
 
-	set_bit_string(tbl->it_map, offset, npages);
 	tbl->it_hint = offset + npages;
 	BUG_ON(tbl->it_hint > tbl->it_size);
 
@@ -295,13 +301,13 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 	return offset;
 }
 
-static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
-	unsigned int npages, int direction)
+static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
+			      void *vaddr, unsigned int npages, int direction)
 {
 	unsigned long entry;
 	dma_addr_t ret = bad_dma_address;
 
-	entry = iommu_range_alloc(tbl, npages);
+	entry = iommu_range_alloc(dev, tbl, npages);
 
 	if (unlikely(entry == bad_dma_address))
 		goto error;
@@ -354,7 +360,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 		       badbit, tbl, dma_addr, entry, npages);
 	}
 
-	__clear_bit_string(tbl->it_map, entry, npages);
+	iommu_area_free(tbl->it_map, entry, npages);
 
 	spin_unlock_irqrestore(&tbl->it_lock, flags);
 }
@@ -438,7 +444,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 		vaddr = (unsigned long) sg_virt(s);
 		npages = num_dma_pages(vaddr, s->length);
 
-		entry = iommu_range_alloc(tbl, npages);
+		entry = iommu_range_alloc(dev, tbl, npages);
 		if (entry == bad_dma_address) {
 			/* makes sure unmap knows to stop */
 			s->dma_length = 0;
@@ -476,7 +482,7 @@ static dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
 	npages = num_dma_pages(uaddr, size);
 
 	if (translation_enabled(tbl))
-		dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
+		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction);
 	else
 		dma_handle = virt_to_bus(vaddr);
 
@@ -516,7 +522,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
 
 	if (translation_enabled(tbl)) {
 		/* set up tces to cover the allocated range */
-		mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
+		mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
 		if (mapping == bad_dma_address)
 			goto free;
 
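A closing note on the boundary_size arithmetic introduced in
iommu_range_alloc() above: dma_get_seg_boundary(dev) returns an inclusive
mask (the kernel default is 0xffffffff, i.e. a 4GB segment boundary), so
adding 1, rounding up to PAGE_SIZE and shifting right by PAGE_SHIFT yields
the boundary length in pages. A quick userspace check of that arithmetic,
assuming 4KB pages and a 64-bit unsigned long:

/* Userspace check of the boundary_size computation shown in the
 * iommu_range_alloc() hunk above. PAGE_SHIFT/PAGE_SIZE/ALIGN() are
 * redefined here with their usual x86-64 values; dma_get_seg_boundary()
 * is stood in for by the kernel's default mask of 0xffffffff. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long seg_boundary = 0xffffffffUL;	/* inclusive mask */
	unsigned long boundary_size;

	boundary_size = ALIGN(seg_boundary + 1, PAGE_SIZE) >> PAGE_SHIFT;

	assert(boundary_size == 0x100000UL);	/* 4 GB / 4 KB = 1M pages */
	printf("boundary_size = %#lx pages\n", boundary_size);
	return 0;
}

With the default mask this evaluates to 0x100000 pages (4GB worth of 4KB
pages), so none of the ranges iommu_area_alloc() hands back can cross a
4GB boundary.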