aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/pci-nommu.c
diff options
context:
space:
mode:
authorFUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>2008-09-09 12:06:48 -0400
committerIngo Molnar <mingo@elte.hu>2008-09-10 05:33:44 -0400
commit49fbf4e9f982c704dc365698c5b5efa780aadcb5 (patch)
tree64e8a4eb861a40b71ea559743c91b702a9fab95a /arch/x86/kernel/pci-nommu.c
parentac4ff656c07ada78316307b0c0ce8a8eb48aa6dd (diff)
x86: convert pci-nommu to use is_buffer_dma_capable helper function
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/pci-nommu.c')
-rw-r--r-- arch/x86/kernel/pci-nommu.c | 10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 8e398b56f50b..1c1c98a31d57 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -14,7 +14,7 @@
 static int
 check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
 {
-	if (hwdev && bus + size > *hwdev->dma_mask) {
+	if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) {
 		if (*hwdev->dma_mask >= DMA_32BIT_MASK)
 			printk(KERN_ERR
 			    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
@@ -79,6 +79,7 @@ nommu_alloc_coherent(struct device *hwdev, size_t size,
 	unsigned long dma_mask;
 	int node;
 	struct page *page;
+	dma_addr_t addr;

 	dma_mask = dma_alloc_coherent_mask(hwdev, gfp);

@@ -90,14 +91,15 @@ again:
 	if (!page)
 		return NULL;

-	if ((page_to_phys(page) + size > dma_mask) && !(gfp & GFP_DMA)) {
+	addr = page_to_phys(page);
+	if (!is_buffer_dma_capable(dma_mask, addr, size) && !(gfp & GFP_DMA)) {
 		free_pages((unsigned long)page_address(page), get_order(size));
 		gfp |= GFP_DMA;
 		goto again;
 	}

-	*dma_addr = page_to_phys(page);
-	if (check_addr("alloc_coherent", hwdev, *dma_addr, size)) {
+	if (check_addr("alloc_coherent", hwdev, addr, size)) {
+		*dma_addr = addr;
 		flush_write_buffers();
 		return page_address(page);
 	}