author     Linus Torvalds <torvalds@g5.osdl.org>  2005-10-29 00:09:26 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-10-29 00:09:26 -0400
commit     8a212ab6b8a4ccc6f3c3d1beba5f92655c576404
tree       525271129ff9c692defdd20566f1f7203b18ff24
parent     1f419cadff55f548e7356ffebdb9e1b5a8c22275
parent     0e1f60609258e18ec0a0477c646101212822d387
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
Diffstat (limited to 'arch/ia64/lib/swiotlb.c')
 arch/ia64/lib/swiotlb.c | 102 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 102 insertions(+), 0 deletions(-)
diff --git a/arch/ia64/lib/swiotlb.c b/arch/ia64/lib/swiotlb.c
index 3ebbb3c8ba36..96edcc0fdcd9 100644
--- a/arch/ia64/lib/swiotlb.c
+++ b/arch/ia64/lib/swiotlb.c
@@ -49,6 +49,15 @@
  */
 #define IO_TLB_SHIFT 11
 
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+
+/*
+ * Minimum IO TLB size to bother booting with.  Systems with mainly
+ * 64bit capable cards will only lightly use the swiotlb.  If we can't
+ * allocate a contiguous 1MB, we're probably in trouble anyway.
+ */
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+
 int swiotlb_force;
 
 /*
@@ -154,6 +163,99 @@ swiotlb_init (void)
 	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
 }
 
+/*
+ * Systems with larger DMA zones (those that don't support ISA) can
+ * initialize the swiotlb later using the slab allocator if needed.
+ * This should be just like above, but with some error catching.
+ */
+int
+swiotlb_late_init_with_default_size (size_t default_size)
+{
+	unsigned long i, req_nslabs = io_tlb_nslabs;
+	unsigned int order;
+
+	if (!io_tlb_nslabs) {
+		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	}
+
+	/*
+	 * Get IO TLB memory from the low pages
+	 */
+	order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	io_tlb_nslabs = SLABS_PER_PAGE << order;
+
+	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+		io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+							order);
+		if (io_tlb_start)
+			break;
+		order--;
+	}
+
+	if (!io_tlb_start)
+		goto cleanup1;
+
+	if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
+		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
+		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+		io_tlb_nslabs = SLABS_PER_PAGE << order;
+	}
+	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+	memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+
+	/*
+	 * Allocate and initialize the free list array.  This array is used
+	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+	 * between io_tlb_start and io_tlb_end.
+	 */
+	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
+				get_order(io_tlb_nslabs * sizeof(int)));
+	if (!io_tlb_list)
+		goto cleanup2;
+
+	for (i = 0; i < io_tlb_nslabs; i++)
+		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+	io_tlb_index = 0;
+
+	io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
+				get_order(io_tlb_nslabs * sizeof(char *)));
+	if (!io_tlb_orig_addr)
+		goto cleanup3;
+
+	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
+
+	/*
+	 * Get the overflow emergency buffer
+	 */
+	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+				get_order(io_tlb_overflow));
+	if (!io_tlb_overflow_buffer)
+		goto cleanup4;
+
+	printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
+	       "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
+	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+
+	return 0;
+
+cleanup4:
+	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
+							      sizeof(char *)));
+	io_tlb_orig_addr = NULL;
+cleanup3:
+	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+							 sizeof(int)));
+	io_tlb_list = NULL;
+	io_tlb_end = NULL;
+cleanup2:
+	free_pages((unsigned long)io_tlb_start, order);
+	io_tlb_start = NULL;
+cleanup1:
+	io_tlb_nslabs = req_nslabs;
+	return -ENOMEM;
+}
+
 static inline int
 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 {