author		FUJITA Tomonori <tomof@acm.org>		2008-02-05 01:28:11 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-05 12:44:11 -0500
commit		fde9a1094ddf2892188a8a0eccda527de47cba8e (patch)
tree		245609ebc4ddc69c3428a59625b63dc2dced7fa0
parent		1b39b077789955c8389488d53d075518fdcd582e (diff)
iommu sg: x86: convert gart IOMMU to use the IOMMU helper
This patch converts the gart IOMMU to use the IOMMU helper functions. The
IOMMU no longer allocates a memory area spanning an LLD's segment boundary.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
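[Editorial note] The helper rejects any candidate area that would cross a
segment boundary. A minimal sketch of that test, modeled on the span check
in lib/iommu-helper.c (the function name and standalone form here are
illustrative, not the kernel source):

/*
 * Illustrative sketch: would an area of "nr" pages starting at bitmap
 * index "index" cross a segment boundary? "shift" is the page index of
 * the aperture within its segment (base_index in the patch below), so
 * the test operates on bus-address pages, not raw bitmap indices.
 */
static inline int spans_seg_boundary(unsigned long index, unsigned long nr,
				     unsigned long shift,
				     unsigned long boundary_size)
{
	/* boundary_size is a power of two, in pages */
	unsigned long offset = (index + shift) & (boundary_size - 1);

	return offset + nr > boundary_size;
}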
-rw-r--r--	arch/x86/Kconfig		2
-rw-r--r--	arch/x86/kernel/pci-gart_64.c	41
2 files changed, 26 insertions(+), 17 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c976eb41c5c8..434821187cfc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -466,7 +466,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
 	  If unsure, say Y.
 
 config IOMMU_HELPER
-	def_bool CALGARY_IOMMU
+	def_bool (CALGARY_IOMMU || GART_IOMMU)
 
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 5ee700f0844d..65f6acb025c8 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -25,6 +25,7 @@
 #include <linux/bitops.h>
 #include <linux/kdebug.h>
 #include <linux/scatterlist.h>
+#include <linux/iommu-helper.h>
 #include <asm/atomic.h>
 #include <asm/io.h>
 #include <asm/mtrr.h>
@@ -82,17 +83,24 @@ AGPEXTERN __u32 *agp_gatt_table;
 static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
 static int need_flush;		/* global flush state. set for each gart wrap */
 
-static unsigned long alloc_iommu(int size)
+static unsigned long alloc_iommu(struct device *dev, int size)
 {
 	unsigned long offset, flags;
+	unsigned long boundary_size;
+	unsigned long base_index;
+
+	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
+			   PAGE_SIZE) >> PAGE_SHIFT;
+	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+			      PAGE_SIZE) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
-	offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
-					iommu_pages, size);
+	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
+				  size, base_index, boundary_size, 0);
 	if (offset == -1) {
 		need_flush = 1;
-		offset = find_next_zero_string(iommu_gart_bitmap, 0,
-						iommu_pages, size);
+		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
+					  size, base_index, boundary_size, 0);
 	}
 	if (offset != -1) {
 		set_bit_string(iommu_gart_bitmap, offset, size);
@@ -114,7 +122,7 @@ static void free_iommu(unsigned long offset, int size)
 	unsigned long flags;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
-	__clear_bit_string(iommu_gart_bitmap, offset, size);
+	iommu_area_free(iommu_gart_bitmap, offset, size);
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
 
@@ -235,7 +243,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 				size_t size, int dir)
 {
 	unsigned long npages = to_pages(phys_mem, size);
-	unsigned long iommu_page = alloc_iommu(npages);
+	unsigned long iommu_page = alloc_iommu(dev, npages);
 	int i;
 
 	if (iommu_page == -1) {
@@ -355,10 +363,11 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 }
 
 /* Map multiple scatterlist entries continuous into the first. */
-static int __dma_map_cont(struct scatterlist *start, int nelems,
-			  struct scatterlist *sout, unsigned long pages)
+static int __dma_map_cont(struct device *dev, struct scatterlist *start,
+			  int nelems, struct scatterlist *sout,
+			  unsigned long pages)
 {
-	unsigned long iommu_start = alloc_iommu(pages);
+	unsigned long iommu_start = alloc_iommu(dev, pages);
 	unsigned long iommu_page = iommu_start;
 	struct scatterlist *s;
 	int i;
@@ -394,8 +403,8 @@ static int __dma_map_cont(struct scatterlist *start, int nelems,
 }
 
 static inline int
-dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
-	     unsigned long pages, int need)
+dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
+	     struct scatterlist *sout, unsigned long pages, int need)
 {
 	if (!need) {
 		BUG_ON(nelems != 1);
@@ -403,7 +412,7 @@ dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
 		sout->dma_length = start->length;
 		return 0;
 	}
-	return __dma_map_cont(start, nelems, sout, pages);
+	return __dma_map_cont(dev, start, nelems, sout, pages);
 }
 
 /*
@@ -449,8 +458,8 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 		if (!iommu_merge || !nextneed || !need || s->offset ||
 		    (s->length + seg_size > max_seg_size) ||
 		    (ps->offset + ps->length) % PAGE_SIZE) {
-			if (dma_map_cont(start_sg, i - start, sgmap,
-					  pages, need) < 0)
+			if (dma_map_cont(dev, start_sg, i - start,
+					 sgmap, pages, need) < 0)
 				goto error;
 			out++;
 			seg_size = 0;
@@ -466,7 +475,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 		pages += to_pages(s->offset, s->length);
 		ps = s;
 	}
-	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
+	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
 		goto error;
 	out++;
 	flush_gart();
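[Editorial note] To make the two new values computed by alloc_iommu()
concrete, here is a small standalone sketch using assumed example values:
the default 4 GB segment boundary mask and a hypothetical aperture bus base
of 0xe0000000, neither of which is taken from this patch.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Assumed values, for illustration only. */
	unsigned long seg_boundary = 0xffffffffUL;	/* default dma_get_seg_boundary() mask */
	unsigned long iommu_bus_base = 0xe0000000UL;	/* hypothetical aperture base */

	/* Same arithmetic as the patched alloc_iommu(). */
	unsigned long base_index = ALIGN(iommu_bus_base & seg_boundary,
					 PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long boundary_size = ALIGN(seg_boundary + 1,
					    PAGE_SIZE) >> PAGE_SHIFT;

	printf("base_index    = %#lx pages\n", base_index);	/* 0xe0000 */
	printf("boundary_size = %#lx pages\n", boundary_size);	/* 0x100000 pages = 4 GB */
	return 0;
}

With these values, iommu_area_alloc() skips any candidate area whose pages
would straddle a multiple of boundary_size relative to base_index, and
retries further into the bitmap instead.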