author		Paul Mackerras <paulus@samba.org>	2005-10-30 21:37:12 -0500
committer	Paul Mackerras <paulus@samba.org>	2005-10-30 21:37:12 -0500
commit		23fd07750a789a66fe88cf173d52a18f1a387da4 (patch)
tree		06fdd6df35fdb835abdaa9b754d62f6b84b97250 /arch/ia64/lib/swiotlb.c
parent		bd787d438a59266af3c9f6351644c85ef1dd21fe (diff)
parent		ed28f96ac1960f30f818374d65be71d2fdf811b0 (diff)
Merge ../linux-2.6 by hand
Diffstat (limited to 'arch/ia64/lib/swiotlb.c')
-rw-r--r--	arch/ia64/lib/swiotlb.c	104
1 files changed, 103 insertions, 1 deletions
diff --git a/arch/ia64/lib/swiotlb.c b/arch/ia64/lib/swiotlb.c
index a604efc7f6c9..96edcc0fdcd9 100644
--- a/arch/ia64/lib/swiotlb.c
+++ b/arch/ia64/lib/swiotlb.c
@@ -49,6 +49,15 @@
  */
 #define IO_TLB_SHIFT 11
 
+#define SLABS_PER_PAGE	(1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+
+/*
+ * Minimum IO TLB size to bother booting with. Systems with mainly
+ * 64bit capable cards will only lightly use the swiotlb. If we can't
+ * allocate a contiguous 1MB, we're probably in trouble anyway.
+ */
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+
 int swiotlb_force;
 
 /*
@@ -154,6 +163,99 @@ swiotlb_init (void)
 	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
 }
 
+/*
+ * Systems with larger DMA zones (those that don't support ISA) can
+ * initialize the swiotlb later using the slab allocator if needed.
+ * This should be just like above, but with some error catching.
+ */
+int
+swiotlb_late_init_with_default_size (size_t default_size)
+{
+	unsigned long i, req_nslabs = io_tlb_nslabs;
+	unsigned int order;
+
+	if (!io_tlb_nslabs) {
+		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	}
+
+	/*
+	 * Get IO TLB memory from the low pages
+	 */
+	order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	io_tlb_nslabs = SLABS_PER_PAGE << order;
+
+	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+		io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+							order);
+		if (io_tlb_start)
+			break;
+		order--;
+	}
+
+	if (!io_tlb_start)
+		goto cleanup1;
+
+	if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
+		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
+		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+		io_tlb_nslabs = SLABS_PER_PAGE << order;
+	}
+	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+	memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+
+	/*
+	 * Allocate and initialize the free list array. This array is used
+	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+	 * between io_tlb_start and io_tlb_end.
+	 */
+	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
+				       get_order(io_tlb_nslabs * sizeof(int)));
+	if (!io_tlb_list)
+		goto cleanup2;
+
+	for (i = 0; i < io_tlb_nslabs; i++)
+		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+	io_tlb_index = 0;
+
+	io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
+				   get_order(io_tlb_nslabs * sizeof(char *)));
+	if (!io_tlb_orig_addr)
+		goto cleanup3;
+
+	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
+
+	/*
+	 * Get the overflow emergency buffer
+	 */
+	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+					  get_order(io_tlb_overflow));
+	if (!io_tlb_overflow_buffer)
+		goto cleanup4;
+
+	printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
+	       "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
+	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+
+	return 0;
+
+cleanup4:
+	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
+							      sizeof(char *)));
+	io_tlb_orig_addr = NULL;
+cleanup3:
+	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+							 sizeof(int)));
+	io_tlb_list = NULL;
+	io_tlb_end = NULL;
+cleanup2:
+	free_pages((unsigned long)io_tlb_start, order);
+	io_tlb_start = NULL;
+cleanup1:
+	io_tlb_nslabs = req_nslabs;
+	return -ENOMEM;
+}
+
 static inline int
 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 {
@@ -314,7 +416,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-		       dma_addr_t *dma_handle, int flags)
+		       dma_addr_t *dma_handle, gfp_t flags)
 {
 	unsigned long dev_addr;
 	void *ret;
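
For orientation, the constants added in the first hunk fix the swiotlb slab geometry. The standalone sketch below simply recomputes them in user-space C; PAGE_SHIFT == 14 (16 KB pages, a common ia64 configuration) is an assumption, not something stated in this diff, so the slabs-per-page figure is illustrative only.

/*
 * Illustrative only: recompute the swiotlb sizing constants from the
 * first hunk.  PAGE_SHIFT == 14 is an assumption (16 KB ia64 pages);
 * the other macros mirror the diff.
 */
#include <stdio.h>

#define PAGE_SHIFT	14				/* assumed, not from the diff */
#define IO_TLB_SHIFT	11				/* 2 KB per swiotlb slab */
#define SLABS_PER_PAGE	(1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1 << 20) >> IO_TLB_SHIFT)

int main(void)
{
	printf("slab size      : %d bytes\n", 1 << IO_TLB_SHIFT);	/* 2048 */
	printf("slabs per page : %d\n", SLABS_PER_PAGE);		/* 8 */
	printf("minimum pool   : %d slabs (1 MB)\n", IO_TLB_MIN_SLABS);	/* 512 */
	printf("64 MB default  : %ld slabs\n",
	       (64L << 20) >> IO_TLB_SHIFT);				/* 32768 */
	return 0;
}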
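The second hunk only adds swiotlb_late_init_with_default_size(); no caller appears in this file's diff. As a non-authoritative sketch of how the entry point is meant to be used, a platform that skipped the bootmem-time swiotlb_init() might invoke it once the page allocator is up, along the following lines. The wrapper name swiotlb_wrapper_late_init() is invented for illustration, and the 64 MB size merely mirrors swiotlb_init()'s default shown in the context lines above; only the function and its -ENOMEM return come from the hunk.

/*
 * Hypothetical caller (not part of this commit).  Only
 * swiotlb_late_init_with_default_size() and its -ENOMEM failure path
 * are taken from the diff above; the wrapper name and the chosen size
 * are assumptions for illustration.
 */
static int __init swiotlb_wrapper_late_init(void)
{
	int rc = swiotlb_late_init_with_default_size(64 * (1 << 20));

	if (rc)	/* pool shrank below IO_TLB_MIN_SLABS or allocation failed */
		printk(KERN_WARNING "swiotlb: late initialization failed (%d)\n", rc);
	return rc;
}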