author		Yinghai Lu <yinghai@kernel.org>	2010-10-11 13:40:25 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2010-10-11 17:08:36 -0400
commit		e79f86b2ef9c0a8c47225217c1018b7d3d90101c (patch)
tree		53ba03cad303a566831fa03ebbfc408ab36ae36c /lib/swiotlb.c
parent		03620b2d7545fee29ecd3bb270f206b9cff75e58 (diff)
swiotlb: Use page alignment for early buffer allocation
We could call free_bootmem_late() if swiotlb is not used, and it will
shrink the freed ranges to page boundaries. So allocate the buffers with
page alignment up front, to avoid losing two pages.

before patch:
[ 0.000000] memblock_x86_reserve_range: [00d3600000, 00d7600000] swiotlb buffer
[ 0.000000] memblock_x86_reserve_range: [00d7e7ef40, 00d7e9ef40] swiotlb list
[ 0.000000] memblock_x86_reserve_range: [00d7e3ef40, 00d7e7ef40] swiotlb orig_ad
[ 0.000000] memblock_x86_reserve_range: [000008a000, 0000092000] swiotlb overflo

after patch will get:
[ 0.000000] memblock_x86_reserve_range: [00d3600000, 00d7600000] swiotlb buffer
[ 0.000000] memblock_x86_reserve_range: [00d7e7e000, 00d7e9e000] swiotlb list
[ 0.000000] memblock_x86_reserve_range: [00d7e3e000, 00d7e7e000] swiotlb orig_ad
[ 0.000000] memblock_x86_reserve_range: [000008a000, 0000092000] swiotlb overflo

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Acked-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Becky Bruce <beckyb@kernel.crashing.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
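As an aside, the "lose two pages" arithmetic can be sketched outside the kernel.
The snippet below is a stand-alone userspace illustration, not kernel code:
PAGE_SIZE, PAGE_MASK and PAGE_ALIGN mirror the kernel macros for an assumed
4 KiB page, and the start address and size correspond to the "swiotlb list"
reservation in the "before patch" log above.

/*
 * Stand-alone illustration (userspace, not kernel code) of why page
 * alignment matters here.  PAGE_SIZE, PAGE_MASK and PAGE_ALIGN mirror
 * the kernel macros for an assumed 4 KiB page; start/size correspond to
 * the "swiotlb list" reservation in the "before patch" log above.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long start = 0xd7e7ef40UL;	/* unaligned start from the old log */
	unsigned long size  = 0x20000UL;	/* 128 KiB of bookkeeping data */
	unsigned long end   = start + size;

	/*
	 * free_bootmem_late() can only hand back whole pages, so for an
	 * unaligned reservation it frees [round_up(start), round_down(end))
	 * and the two partial edge pages stay reserved.
	 */
	unsigned long freeable = (end & PAGE_MASK) - PAGE_ALIGN(start);

	printf("unaligned:    %lu bytes reserved, %lu bytes freeable\n",
	       size, freeable);
	printf("page-aligned: %lu bytes reserved, %lu bytes freeable\n",
	       PAGE_ALIGN(size), PAGE_ALIGN(size));
	return 0;
}

With the unaligned start this prints 131072 bytes reserved but only 126976
bytes freeable, because only pages lying entirely inside the reservation can
be returned; allocating page-aligned buffers up front, as the patch does,
lets swiotlb_free() reclaim the whole reservation.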
Diffstat (limited to 'lib/swiotlb.c')
-rw-r--r--	lib/swiotlb.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index b237e649cd5d..7c06ee51a29a 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -147,16 +147,16 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
 	 */
-	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+	io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
+	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 
 	/*
 	 * Get the overflow emergency buffer
 	 */
-	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+	io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
 	if (!io_tlb_overflow_buffer)
 		panic("Cannot allocate SWIOTLB overflow buffer!\n");
 	if (verbose)
@@ -182,7 +182,7 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
+	io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 
@@ -308,13 +308,13 @@ void __init swiotlb_free(void)
 			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
 		free_bootmem_late(__pa(io_tlb_overflow_buffer),
-				  io_tlb_overflow);
+				  PAGE_ALIGN(io_tlb_overflow));
 		free_bootmem_late(__pa(io_tlb_orig_addr),
-				  io_tlb_nslabs * sizeof(phys_addr_t));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 		free_bootmem_late(__pa(io_tlb_list),
-				  io_tlb_nslabs * sizeof(int));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
 		free_bootmem_late(__pa(io_tlb_start),
-				  io_tlb_nslabs << IO_TLB_SHIFT);
+				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
 }
 