author		Glauber Costa <gcosta@redhat.com>	2008-04-09 12:18:05 -0400
committer	Ingo Molnar <mingo@elte.hu>		2008-04-19 13:19:58 -0400
commit		5fa78ca75d8e67063948a01b51594a0904af5710 (patch)
tree		c5ae1dbb74266fe3a321a0bf850a89f9043dc0d8
parent		8779f2fc3b84ebb6c5181fb13d702e9944c16069 (diff)
x86: retry allocation if failed
This patch puts in the code to retry the allocation in case it fails. On
its own it does not make much sense, beyond making the code look like
x86_64's. But later patches in this series will make us try to allocate
from zones other than ZONE_DMA first, which may fail.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
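The retry hinges on a single address check: if the allocated buffer's bus
address range ends at or above the device's coherent DMA mask, the pages are
freed and the allocation is retried with GFP_DMA, i.e. from the 16MB
ZONE_DMA. A minimal, self-contained sketch of that check; the mask and bus
address below are hypothetical values for illustration, not taken from this
patch:

/* Userspace illustration (not kernel code) of the reachability test
 * this patch adds; all values are made up. */
#include <stdio.h>

int main(void)
{
	unsigned long dma_mask = 0x00fffffful;	/* 24-bit, ISA-style mask */
	unsigned long bus = 0x01000000ul;	/* first attempt landed at 16MB */
	unsigned long size = 4096;

	/* Same test as the patch: does the buffer end beyond the mask? */
	int high = (bus + size) >= dma_mask;

	if (high)
		printf("out of reach: free pages, set GFP_DMA, goto again\n");
	else
		printf("reachable: zero the buffer and return it\n");
	return 0;
}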
-rw-r--r--	arch/x86/kernel/pci-dma_32.c	34
1 file changed, 29 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
index debe9119b724..11f100a5f034 100644
--- a/arch/x86/kernel/pci-dma_32.c
+++ b/arch/x86/kernel/pci-dma_32.c
@@ -76,6 +76,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 	struct page *page;
 	dma_addr_t bus;
 	int order = get_order(size);
+	unsigned long dma_mask = 0;
+
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 
@@ -85,15 +87,37 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 	if (!dev)
 		dev = &fallback_dev;
 
+	dma_mask = dev->coherent_dma_mask;
+	if (dma_mask == 0)
+		dma_mask = DMA_32BIT_MASK;
+
+again:
 	page = dma_alloc_pages(dev, gfp, order);
 	if (page == NULL)
 		return NULL;
 
-	ret = page_address(page);
-	bus = page_to_phys(page);
-
-	memset(ret, 0, size);
-	*dma_handle = bus;
+	{
+		int high, mmu;
+		bus = page_to_phys(page);
+		ret = page_address(page);
+		high = (bus + size) >= dma_mask;
+		mmu = high;
+		if (force_iommu && !(gfp & GFP_DMA))
+			mmu = 1;
+		else if (high) {
+			free_pages((unsigned long)ret,
+				   get_order(size));
+
+			/* Don't use the 16MB ZONE_DMA unless absolutely
+			   needed. It's better to use remapping first. */
+			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
+				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+				goto again;
+			}
+		}
+		memset(ret, 0, size);
+		*dma_handle = bus;
+	}
 
 	return ret;
 }
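For context, drivers reach this path through the standard coherent DMA API;
the retry happens transparently inside dma_alloc_coherent(). A hedged usage
sketch follows; the function name, buffer size, and error handling are
illustrative and not part of this patch:

#include <linux/dma-mapping.h>

static int example_setup(struct device *dev)
{
	dma_addr_t bus;		/* bus address programmed into the device */
	void *cpu_addr;		/* kernel virtual address for the CPU */

	/* If the first allocation lands above dev->coherent_dma_mask,
	 * the code above frees it and retries from ZONE_DMA. */
	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand `bus` to the hardware, use `cpu_addr` from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, bus);
	return 0;
}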