author	Jeremy Fitzhardinge <jeremy@goop.org>	2008-12-16 15:17:26 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-16 15:31:38 -0500
commit	8c5df16bec8a60bb8589fc232b9e26cac0ed4b2c (patch)
tree	b85d46d552ba86da332ebad328761998171332c5
parent	a79b7a2a758c39315344f0d86b5adb21d90d786e (diff)
swiotlb: allow architectures to override swiotlb pool allocation
Impact: generalize swiotlb allocation code

Architectures may need to allocate memory specially for use with the
swiotlb. Create the weak functions swiotlb_alloc_boot() and
swiotlb_alloc(), defaulting to the current behaviour.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
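For illustration only, a minimal sketch of how an architecture could take
advantage of this: a strong definition anywhere in the kernel overrides the
__weak defaults added in lib/swiotlb.c at link time. The hook names
swiotlb_alloc_boot() and swiotlb_alloc() come from this patch; the
arch_fixup_swiotlb() helper and the idea of post-processing the buffer are
hypothetical placeholders, not the actual Xen/x86 code.

/*
 * Hypothetical arch override (sketch). Strong definitions like these
 * replace the __weak defaults in lib/swiotlb.c, letting the arch
 * post-process the bounce-buffer memory (e.g. make it suitable for DMA).
 */
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/swiotlb.h>

/* Hypothetical helper supplied by the architecture. */
extern void arch_fixup_swiotlb(void *buf, unsigned long nslabs);

void *swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
	void *buf = alloc_bootmem_low_pages(size);

	if (buf)
		arch_fixup_swiotlb(buf, nslabs);
	return buf;
}

void *swiotlb_alloc(unsigned order, unsigned long nslabs)
{
	void *buf = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);

	if (buf)
		arch_fixup_swiotlb(buf, nslabs);
	return buf;
}

Architectures that are happy with the defaults need no changes, since the
weak symbols resolve to the generic implementations below.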
-rw-r--r--	include/linux/swiotlb.h	3
-rw-r--r--	lib/swiotlb.c	16
2 files changed, 16 insertions, 3 deletions
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index b18ec5533e8c..b8c5fc766a56 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -10,6 +10,9 @@ struct scatterlist;
 extern void
 swiotlb_init(void);
 
+extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
+extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
+
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			dma_addr_t *dma_handle, gfp_t flags);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 5f6c629a924d..abecb2857556 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -21,6 +21,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
+#include <linux/swiotlb.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ctype.h>
@@ -126,6 +127,16 @@ setup_io_tlb_npages(char *str)
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
+void * __weak swiotlb_alloc_boot(size_t size, unsigned long nslabs)
+{
+	return alloc_bootmem_low_pages(size);
+}
+
+void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
+{
+	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
+}
+
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
@@ -145,7 +156,7 @@ swiotlb_init_with_default_size(size_t default_size)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
+	io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 	io_tlb_end = io_tlb_start + bytes;
@@ -202,8 +213,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-		io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
-							order);
+		io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
 		if (io_tlb_start)
 			break;
 		order--;