aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>2009-03-16 15:07:54 -0400
committerH. Peter Anvin <hpa@linux.intel.com>2009-03-17 14:42:05 -0400
commitc090f532db3ab5b7be503f8ac84b0d33a18646c6 (patch)
treeabac222a32abc03730ccfa260d28af88581f6fb6
parent2bd2753ff46346543ab92e80df9d96366e21baa5 (diff)
x86-32: make sure we map enough to fit linear map pagetables
Impact: crash fix. head_32.S needs to map the kernel itself, and enough space so that mm/init.c can allocate space from the e820 allocator for the linear map of low memory. Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
-rw-r--r--arch/x86/kernel/head_32.S29
1 files changed, 19 insertions, 10 deletions
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index c79741cfb078..d383a7c0e49f 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -38,8 +38,8 @@
38#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id 38#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
39 39
40/* 40/*
41 * This is how much memory *in addition to the memory covered up to 41 * This is how much memory in addition to the memory covered up to
42 * and including _end* we need mapped initially. 42 * and including _end we need mapped initially.
43 * We need: 43 * We need:
44 * (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE) 44 * (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
45 * (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE) 45 * (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
@@ -52,16 +52,25 @@
52 * KERNEL_IMAGE_SIZE should be greater than pa(_end) 52 * KERNEL_IMAGE_SIZE should be greater than pa(_end)
53 * and small than max_low_pfn, otherwise will waste some page table entries 53 * and small than max_low_pfn, otherwise will waste some page table entries
54 */ 54 */
55LOW_PAGES = (KERNEL_IMAGE_SIZE + PAGE_SIZE_asm - 1)>>PAGE_SHIFT
56 55
57#if PTRS_PER_PMD > 1 56#if PTRS_PER_PMD > 1
58PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PMD) + PTRS_PER_PGD 57#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
59#else 58#else
60PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PGD) 59#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
61#endif 60#endif
62ALLOCATOR_SLOP = 4 61ALLOCATOR_SLOP = 4
63 62
64INIT_MAP_SIZE = (PAGE_TABLE_SIZE + ALLOCATOR_SLOP) * PAGE_SIZE_asm 63/* Enough space to fit pagetables for the low memory linear map */
64MAPPING_BEYOND_END = (PAGE_TABLE_SIZE(1 << (32 - PAGE_SHIFT)) * PAGE_SIZE)
65
66/*
67 * Worst-case size of the kernel mapping we need to make:
68 * the worst-case size of the kernel itself, plus the extra we need
69 * to map for the linear map.
70 */
71KERNEL_PAGES = (KERNEL_IMAGE_SIZE + MAPPING_BEYOND_END)>>PAGE_SHIFT
72
73INIT_MAP_SIZE = (PAGE_TABLE_SIZE(KERNEL_PAGES) + ALLOCATOR_SLOP) * PAGE_SIZE_asm
65RESERVE_BRK(pagetables, INIT_MAP_SIZE) 74RESERVE_BRK(pagetables, INIT_MAP_SIZE)
66 75
67/* 76/*
@@ -197,9 +206,9 @@ default_entry:
197 loop 11b 206 loop 11b
198 207
199 /* 208 /*
200 * End condition: we must map up to the end. 209 * End condition: we must map up to the end + MAPPING_BEYOND_END.
201 */ 210 */
202 movl $pa(_end) + PTE_IDENT_ATTR, %ebp 211 movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
203 cmpl %ebp,%eax 212 cmpl %ebp,%eax
204 jb 10b 213 jb 10b
2051: 2141:
@@ -229,9 +238,9 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
229 addl $0x1000,%eax 238 addl $0x1000,%eax
230 loop 11b 239 loop 11b
231 /* 240 /*
232 * End condition: we must map up to end 241 * End condition: we must map up to the end + MAPPING_BEYOND_END.
233 */ 242 */
234 movl $pa(_end) + PTE_IDENT_ATTR, %ebp 243 movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
235 cmpl %ebp,%eax 244 cmpl %ebp,%eax
236 jb 10b 245 jb 10b
237 addl $__PAGE_OFFSET, %edi 246 addl $__PAGE_OFFSET, %edi