author    Santosh Shilimkar <santosh.shilimkar@ti.com>  2014-01-21 18:50:30 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-01-21 19:19:47 -0500
commit    457ff1de2d247d9b8917c4664c2325321a35e313 (patch)
tree      f9e8c63a0eac966658d76ee9ba616e8c8716e615 /lib/swiotlb.c
parent    c2f69cdafebb3a46e43b5ac57ca12b539a2c790f (diff)
lib/swiotlb.c: use memblock apis for early memory allocations
Switch to memblock interfaces for the early memory allocator instead of the bootmem allocator. No functional change in behavior from the bootmem users' point of view. Archs already converted to NO_BOOTMEM now use memblock interfaces directly instead of bootmem wrappers built on top of memblock; on archs that still use bootmem, these new APIs simply fall back to the existing bootmem APIs.

Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Paul Walmsley <paul@pwsan.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Tejun Heo <tj@kernel.org>
Cc: Tony Lindgren <tony@atomide.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
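For readers new to the memblock wrappers, here is a minimal, illustrative sketch of the substitution pattern the patch applies. The helper below is hypothetical and not part of the patch; only the two allocator calls and their arguments mirror the diff.

	#include <linux/bootmem.h>	/* memblock_virt_alloc*() wrappers */
	#include <linux/mm.h>		/* PAGE_ALIGN, PAGE_SIZE */

	/* Hypothetical helper, for illustration only. */
	static void * __init early_table_alloc(unsigned long nbytes)
	{
		void *buf;

		/*
		 * Old style: bootmem wrapper, implicitly page aligned,
		 * returns NULL instead of panicking on failure:
		 *
		 *	buf = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(nbytes));
		 *
		 * New style: memblock wrapper with an explicit alignment
		 * argument.  On NO_BOOTMEM archs this allocates straight from
		 * memblock; on the rest it falls back to the existing bootmem
		 * allocator.
		 */
		buf = memblock_virt_alloc_nopanic(PAGE_ALIGN(nbytes), PAGE_SIZE);

		return buf;	/* still NULL on failure, so callers keep their checks */
	}

The panicking variant, memblock_virt_alloc(), replaces alloc_bootmem_pages() in the same way.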
Diffstat (limited to 'lib/swiotlb.c')
-rw-r--r--  lib/swiotlb.c  35
1 file changed, 20 insertions, 15 deletions
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index e4399fa65ad6..615f3de4b5ce 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -172,8 +172,9 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	/*
 	 * Get the overflow emergency buffer
 	 */
-	v_overflow_buffer = alloc_bootmem_low_pages_nopanic(
-						PAGE_ALIGN(io_tlb_overflow));
+	v_overflow_buffer = memblock_virt_alloc_nopanic(
+						PAGE_ALIGN(io_tlb_overflow),
+						PAGE_SIZE);
 	if (!v_overflow_buffer)
 		return -ENOMEM;
 
@@ -184,11 +185,15 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
 	 */
-	io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
+	io_tlb_list = memblock_virt_alloc(
+				PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
+				PAGE_SIZE);
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
+	io_tlb_orig_addr = memblock_virt_alloc(
+				PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
+				PAGE_SIZE);
 
 	if (verbose)
 		swiotlb_print_info();
@@ -215,13 +220,13 @@ swiotlb_init(int verbose)
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	/* Get IO TLB memory from the low pages */
-	vstart = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes));
+	vstart = memblock_virt_alloc_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
 	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
 		return;
 
 	if (io_tlb_start)
-		free_bootmem(io_tlb_start,
-			     PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+		memblock_free_early(io_tlb_start,
+				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	pr_warn("Cannot allocate SWIOTLB buffer");
 	no_iotlb_memory = true;
 }
@@ -357,14 +362,14 @@ void __init swiotlb_free(void)
 		free_pages((unsigned long)phys_to_virt(io_tlb_start),
 			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
-		free_bootmem_late(io_tlb_overflow_buffer,
-				  PAGE_ALIGN(io_tlb_overflow));
-		free_bootmem_late(__pa(io_tlb_orig_addr),
-				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
-		free_bootmem_late(__pa(io_tlb_list),
-				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
-		free_bootmem_late(io_tlb_start,
-				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+		memblock_free_late(io_tlb_overflow_buffer,
+				   PAGE_ALIGN(io_tlb_overflow));
+		memblock_free_late(__pa(io_tlb_orig_addr),
+				   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
+		memblock_free_late(__pa(io_tlb_list),
+				   PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
+		memblock_free_late(io_tlb_start,
+				   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
 	io_tlb_nslabs = 0;
 }
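On the free side, a hedged sketch of the two release paths seen in the hunks above. The function and size parameters are placeholders, not swiotlb code; only the memblock calls and the __pa() usage mirror the diff.

	#include <linux/bootmem.h>	/* memblock_free_early(), memblock_free_late() */
	#include <linux/mm.h>		/* PAGE_ALIGN; pulls in __pa() via asm/page.h on most arches */

	/* Hypothetical teardown, for illustration only. */
	static void release_early_tables(phys_addr_t start, int *list,
					 unsigned long nbytes, unsigned long nslabs)
	{
		/*
		 * Error path while still in early boot (cf. swiotlb_init()
		 * above): free_bootmem() becomes memblock_free_early(),
		 * returning the range to the early allocator.
		 *
		 *	memblock_free_early(start, PAGE_ALIGN(nbytes));
		 *
		 * Teardown after boot (cf. swiotlb_free() above):
		 * free_bootmem_late() becomes memblock_free_late(), which
		 * hands the pages to the page allocator.  Both calls take
		 * physical addresses, hence __pa() on the virtually
		 * addressed tables.
		 */
		memblock_free_late(start, PAGE_ALIGN(nbytes));
		memblock_free_late(__pa(list), PAGE_ALIGN(nslabs * sizeof(int)));
	}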