author	Yinghai Lu <yinghai@kernel.org>	2013-01-24 15:19:42 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2013-01-29 18:12:23 -0500
commit	c9b3234a6abadaa12684083d39552939baaed1f4
tree	b85facef3c5bfe4b5bc15940a067abf6ee8e236b /arch
parent	de65d816aa44f9ddd79861ae21d75010cc1fd003
x86, mm: Fix page table early allocation offset checking
While debugging kernel loading above 4G, we found that one page in the pre-allocated BRK area for early page-table allocation was never used. pgt_buf_top is the first address that must not be used, so the check should be whether the new end is above that top; with the old check the last page was never handed out. Fix that check, and also print allocations from the pre-allocated BRK area so similar bugs are easier to catch later.

But after we get that page back for page tables, it triggers a bug in page-table allocation with Xen: we must avoid using a page as a page table to map a range that overlaps that page itself. Add a check for this overlap and, when it happens, allocate the page-table buffer from memblock instead.

This fixes the crash on a Xen PV guest with 2G that Stefan found.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1359058816-7615-2-git-send-email-yinghai@kernel.org
Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Tested-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
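The overlap condition the patch installs is a standard empty-intersection test on half-open intervals: the range [start, end) being mapped is disjoint from the BRK page-table window [pgt_buf_end << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT) exactly when the larger of the two starts is >= the smaller of the two ends. A minimal standalone sketch of that test follows; the helper name and the addresses are illustrative, not taken from the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the patch's test: two half-open
 * ranges [a0, a1) and [b0, b1) are disjoint exactly when
 * max(a0, b0) >= min(a1, b1). */
static bool ranges_disjoint(uint64_t a0, uint64_t a1,
			    uint64_t b0, uint64_t b1)
{
	uint64_t lo = a0 > b0 ? a0 : b0;	/* max of the starts */
	uint64_t hi = a1 < b1 ? a1 : b1;	/* min of the ends   */
	return lo >= hi;
}

int main(void)
{
	/* Illustrative BRK page-table window: [0x1000, 0x3000). */
	uint64_t brk_start = 0x1000, brk_top = 0x3000;

	/* A range entirely below the window: BRK pages are safe. */
	printf("%d\n", ranges_disjoint(0x0, 0x1000, brk_start, brk_top));    /* 1 */
	/* A range overlapping the window: fall back to memblock. */
	printf("%d\n", ranges_disjoint(0x2000, 0x4000, brk_start, brk_top)); /* 0 */
	return 0;
}

Note the half-open convention: a range that merely abuts the window (ending at its start, or starting at its top) counts as disjoint, which matches the >= in the patch's can_use_brk_pgt assignment.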
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/mm/init.c	13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 6f85de8a1f28..78d1ef3eab66 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -25,6 +25,8 @@ static unsigned long __initdata pgt_buf_top;
 
 static unsigned long min_pfn_mapped;
 
+static bool __initdata can_use_brk_pgt = true;
+
 /*
  * Pages returned are already directly mapped.
  *
@@ -47,7 +49,7 @@ __ref void *alloc_low_pages(unsigned int num)
 					__GFP_ZERO, order);
 	}
 
-	if ((pgt_buf_end + num) >= pgt_buf_top) {
+	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
 		unsigned long ret;
 		if (min_pfn_mapped >= max_pfn_mapped)
 			panic("alloc_low_page: ran out of memory");
@@ -61,6 +63,8 @@ __ref void *alloc_low_pages(unsigned int num)
 	} else {
 		pfn = pgt_buf_end;
 		pgt_buf_end += num;
+		printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n",
+			pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
 	}
 
 	for (i = 0; i < num; i++) {
@@ -370,8 +374,15 @@ static unsigned long __init init_range_memory_mapping(
 		if (start >= end)
 			continue;
 
+		/*
+		 * if it is overlapping with brk pgt, we need to
+		 * alloc pgt buf from memblock instead.
+		 */
+		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
+				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
 		init_memory_mapping(start, end);
 		mapped_ram_size += end - start;
+		can_use_brk_pgt = true;
 	}
 
 	return mapped_ram_size;
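
The >= to > change in alloc_low_pages() is the "offset checking" fix named in the subject line: pgt_buf_top is exclusive, so an allocation whose end lands exactly on the top still fits. A worked example under assumed pfn values (a 512-page window; none of these numbers come from the patch):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Assumed values: a BRK window of pfns [0, 512) with exactly
	 * one page left; pgt_buf_top is the first pfn that must NOT
	 * be handed out. */
	unsigned long pgt_buf_end = 511, pgt_buf_top = 512, num = 1;

	bool old_falls_back = (pgt_buf_end + num) >= pgt_buf_top; /* true  */
	bool new_falls_back = (pgt_buf_end + num) >  pgt_buf_top; /* false */

	/* The old check rejected an allocation ending exactly at the
	 * top, so the last BRK page was never used; the fixed check
	 * accepts it. */
	printf("old: %d  fixed: %d\n", old_falls_back, new_falls_back);
	return 0;
}

With the fixed comparison, every page of the window can be handed out before the allocator falls back to memblock.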