about summary refs log tree commit diff stats
path: root/arch/x86/kernel/pci-gart_64.c
diff options
context:
space:
mode:
authorFUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>2009-11-15 07:19:52 -0500
committerIngo Molnar <mingo@elte.hu>2009-11-17 01:53:20 -0500
commit42109197eb7c01080eea6d9cd48ca23cbc3c566c (patch)
tree792330fbadd6f277111a0f65bd14a96e284811f5 /arch/x86/kernel/pci-gart_64.c
parent99f4c9de2b707795acb215e2e94df7ea266042b5 (diff)
x86: gart: Add own dma_mapping_error function
GART IOMMU is the only user of the bad_dma_address variable. This patch converts GART to use the newer mechanism — fill in ->mapping_error() in struct dma_map_ops — to make dma_mapping_error() work in an IOMMU-specific way.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: muli@il.ibm.com
Cc: joerg.roedel@amd.com
LKML-Reference: <1258287594-8777-2-git-send-email-fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/pci-gart_64.c')
-rw-r--r--arch/x86/kernel/pci-gart_64.c18
1 files changed, 13 insertions, 5 deletions
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 919182e15d1e..61c4d1e41a6b 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -47,6 +47,8 @@ static unsigned long iommu_pages; /* .. and in pages */
47 47
48static u32 *iommu_gatt_base; /* Remapping table */ 48static u32 *iommu_gatt_base; /* Remapping table */
49 49
50static dma_addr_t bad_dma_addr;
51
50/* 52/*
51 * If this is disabled the IOMMU will use an optimized flushing strategy 53 * If this is disabled the IOMMU will use an optimized flushing strategy
52 * of only flushing when an mapping is reused. With it true the GART is 54 * of only flushing when an mapping is reused. With it true the GART is
@@ -217,7 +219,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
217 if (panic_on_overflow) 219 if (panic_on_overflow)
218 panic("dma_map_area overflow %lu bytes\n", size); 220 panic("dma_map_area overflow %lu bytes\n", size);
219 iommu_full(dev, size, dir); 221 iommu_full(dev, size, dir);
220 return bad_dma_address; 222 return bad_dma_addr;
221 } 223 }
222 224
223 for (i = 0; i < npages; i++) { 225 for (i = 0; i < npages; i++) {
@@ -303,7 +305,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
303 305
304 if (nonforced_iommu(dev, addr, s->length)) { 306 if (nonforced_iommu(dev, addr, s->length)) {
305 addr = dma_map_area(dev, addr, s->length, dir, 0); 307 addr = dma_map_area(dev, addr, s->length, dir, 0);
306 if (addr == bad_dma_address) { 308 if (addr == bad_dma_addr) {
307 if (i > 0) 309 if (i > 0)
308 gart_unmap_sg(dev, sg, i, dir, NULL); 310 gart_unmap_sg(dev, sg, i, dir, NULL);
309 nents = 0; 311 nents = 0;
@@ -456,7 +458,7 @@ error:
456 458
457 iommu_full(dev, pages << PAGE_SHIFT, dir); 459 iommu_full(dev, pages << PAGE_SHIFT, dir);
458 for_each_sg(sg, s, nents, i) 460 for_each_sg(sg, s, nents, i)
459 s->dma_address = bad_dma_address; 461 s->dma_address = bad_dma_addr;
460 return 0; 462 return 0;
461} 463}
462 464
@@ -480,7 +482,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
480 DMA_BIDIRECTIONAL, align_mask); 482 DMA_BIDIRECTIONAL, align_mask);
481 483
482 flush_gart(); 484 flush_gart();
483 if (paddr != bad_dma_address) { 485 if (paddr != bad_dma_addr) {
484 *dma_addr = paddr; 486 *dma_addr = paddr;
485 return page_address(page); 487 return page_address(page);
486 } 488 }
@@ -500,6 +502,11 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr,
500 free_pages((unsigned long)vaddr, get_order(size)); 502 free_pages((unsigned long)vaddr, get_order(size));
501} 503}
502 504
505static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
506{
507 return (dma_addr == bad_dma_addr);
508}
509
503static int no_agp; 510static int no_agp;
504 511
505static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) 512static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -687,6 +694,7 @@ static struct dma_map_ops gart_dma_ops = {
687 .unmap_page = gart_unmap_page, 694 .unmap_page = gart_unmap_page,
688 .alloc_coherent = gart_alloc_coherent, 695 .alloc_coherent = gart_alloc_coherent,
689 .free_coherent = gart_free_coherent, 696 .free_coherent = gart_free_coherent,
697 .mapping_error = gart_mapping_error,
690}; 698};
691 699
692static void gart_iommu_shutdown(void) 700static void gart_iommu_shutdown(void)
@@ -785,7 +793,7 @@ int __init gart_iommu_init(void)
785 793
786 iommu_start = aper_size - iommu_size; 794 iommu_start = aper_size - iommu_size;
787 iommu_bus_base = info.aper_base + iommu_start; 795 iommu_bus_base = info.aper_base + iommu_start;
788 bad_dma_address = iommu_bus_base; 796 bad_dma_addr = iommu_bus_base;
789 iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT); 797 iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
790 798
791 /* 799 /*