author      Yinghai Lu <yinghai@kernel.org>           2009-03-09 04:15:57 -0400
committer   H. Peter Anvin <hpa@zytor.com>            2009-03-14 20:23:47 -0400
commit      2bd2753ff46346543ab92e80df9d96366e21baa5 (patch)
tree        4a83c0b4012abe33a95715d9890bf07f81a3d547 /arch
parent      796216a57fe45c04adc35bda1f0782efec78a713 (diff)
x86: put initial_pg_tables into .bss
Impact: makes vmlinux section information more useful
Don't blindly use RAM after _end for the page tables. The initial page tables
now live before _end: put them into .bss.
[Adapted to use brk segment - Jeremy]
v2: keep the initial page tables covering up to 512M only.
v4: put the initial page tables just before _end.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
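As background for the brk segment mentioned in the changelog: the kernel reserves space inside the image (starting at __brk_base) and hands it out with a simple bump allocator, with _brk_end tracking the first free byte. Below is a minimal, self-contained C sketch of that idea only; brk_area and sketch_extend_brk are hypothetical stand-ins, not the kernel's actual symbols or API.

#include <stddef.h>
#include <stdint.h>

/* Stand-in for a region reserved inside the image (size is hypothetical);
 * in the kernel the reservation comes from RESERVE_BRK() and starts at
 * __brk_base. */
static unsigned char brk_area[16 * 4096] __attribute__((aligned(4096)));
static unsigned char *brk_end = brk_area;   /* first unused byte, cf. _brk_end */

/* Hand out 'size' bytes from the reservation by bumping brk_end. */
void *sketch_extend_brk(size_t size, size_t align)
{
        uintptr_t p = ((uintptr_t)brk_end + align - 1) & ~(uintptr_t)(align - 1);

        if (p + size > (uintptr_t)brk_area + sizeof(brk_area))
                return NULL;                /* reservation exhausted */
        brk_end = (unsigned char *)(p + size);
        return (void *)p;
}

With the page tables reserved this way (RESERVE_BRK(pagetables, INIT_MAP_SIZE) in the diff below), they are accounted for inside the image's .bss/brk area and show up in the vmlinux section information, instead of being built in unaccounted RAM past _end.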
Diffstat (limited to 'arch')
 -rw-r--r--  arch/x86/kernel/head_32.S        | 43
 -rw-r--r--  arch/x86/kernel/vmlinux_32.lds.S |  6
 2 files changed, 20 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 9e89f2a14b90..c79741cfb078 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -41,41 +41,28 @@
  * This is how much memory *in addition to the memory covered up to
  * and including _end* we need mapped initially.
  * We need:
- *  - one bit for each possible page, but only in low memory, which means
- *    2^32/4096/8 = 128K worst case (4G/4G split.)
- *  - enough space to map all low memory, which means
- *    (2^32/4096) / 1024 pages (worst case, non PAE)
- *    (2^32/4096) / 512 + 4 pages (worst case for PAE)
- *  - a few pages for allocator use before the kernel pagetable has
- *    been set up
+ *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
+ *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
  *
  * Modulo rounding, each megabyte assigned here requires a kilobyte of
  * memory, which is currently unreclaimed.
  *
  * This should be a multiple of a page.
+ *
+ * KERNEL_IMAGE_SIZE should be greater than pa(_end)
+ * and small than max_low_pfn, otherwise will waste some page table entries
  */
 LOW_PAGES = (KERNEL_IMAGE_SIZE + PAGE_SIZE_asm - 1)>>PAGE_SHIFT
 
-/*
- * To preserve the DMA pool in PAGEALLOC kernels, we'll allocate
- * pagetables from above the 16MB DMA limit, so we'll have to set
- * up pagetables 16MB more (worst-case):
- */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-LOW_PAGES = LOW_PAGES + 0x1000000
-#endif
-
 #if PTRS_PER_PMD > 1
 PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PMD) + PTRS_PER_PGD
 #else
 PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PGD)
 #endif
-BOOTBITMAP_SIZE = LOW_PAGES / 8
 ALLOCATOR_SLOP = 4
 
-INIT_MAP_BEYOND_END = BOOTBITMAP_SIZE + (PAGE_TABLE_SIZE + ALLOCATOR_SLOP)*PAGE_SIZE_asm
-
-RESERVE_BRK(pagetables, PAGE_TABLE_SIZE * PAGE_SIZE)
+INIT_MAP_SIZE = (PAGE_TABLE_SIZE + ALLOCATOR_SLOP) * PAGE_SIZE_asm
+RESERVE_BRK(pagetables, INIT_MAP_SIZE)
 
 /*
  * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
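To make the sizing in the hunk above concrete, here is the same arithmetic written out in C for the non-PAE branch, using the 512M KERNEL_IMAGE_SIZE mentioned in the v2 changelog note. This is a standalone illustration with assumed constants, not kernel code.

#include <stdio.h>

/* Assumed configuration: 512M kernel image limit, 4K pages, non-PAE. */
#define KERNEL_IMAGE_SIZE (512u * 1024 * 1024)
#define PAGE_SIZE_ASM     4096u
#define PTRS_PER_PGD      1024u   /* one PDE maps 1024 pages (4M) */
#define ALLOCATOR_SLOP    4u

int main(void)
{
        /* LOW_PAGES: 4K pages needed to cover the image limit */
        unsigned long low_pages =
                (KERNEL_IMAGE_SIZE + PAGE_SIZE_ASM - 1) / PAGE_SIZE_ASM;

        /* Non-PAE branch: PAGE_TABLE_SIZE = LOW_PAGES / PTRS_PER_PGD */
        unsigned long page_table_size = low_pages / PTRS_PER_PGD;

        /* INIT_MAP_SIZE: bytes reserved via RESERVE_BRK(pagetables, ...) */
        unsigned long init_map_size =
                (page_table_size + ALLOCATOR_SLOP) * PAGE_SIZE_ASM;

        printf("LOW_PAGES       = %lu\n", low_pages);       /* 131072 */
        printf("PAGE_TABLE_SIZE = %lu\n", page_table_size); /* 128 page tables */
        printf("INIT_MAP_SIZE   = %lu\n", init_map_size);   /* 540672 bytes (528K) */
        return 0;
}

With these assumptions the brk reservation for the initial page tables comes to about 528K, whereas the old scheme also had to budget BOOTBITMAP_SIZE and a CONFIG_DEBUG_PAGEALLOC cushion in the memory past _end.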
@@ -168,10 +155,10 @@ num_subarch_entries = (. - subarch_entries) / 4
 
 /*
  * Initialize page tables. This creates a PDE and a set of page
- * tables, which are located immediately beyond _end. The variable
+ * tables, which are located immediately beyond __brk_base. The variable
  * _brk_end is set up to point to the first "safe" location.
  * Mappings are created both at virtual address 0 (identity mapping)
- * and PAGE_OFFSET for up to _end+sizeof(page tables)+INIT_MAP_BEYOND_END.
+ * and PAGE_OFFSET for up to _end.
  *
  * Note that the stack is not yet set up!
  */
@@ -210,10 +197,9 @@ default_entry:
        loop 11b
 
        /*
-        * End condition: we must map up to and including INIT_MAP_BEYOND_END
-        * bytes beyond the end of our own page tables.
+        * End condition: we must map up to the end.
         */
-       leal (INIT_MAP_BEYOND_END+PTE_IDENT_ATTR)(%edi),%ebp
+       movl $pa(_end) + PTE_IDENT_ATTR, %ebp
        cmpl %ebp,%eax
        jb 10b
 1:
@@ -243,11 +229,9 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
        addl $0x1000,%eax
        loop 11b
        /*
-        * End condition: we must map up to and including INIT_MAP_BEYOND_END
-        * bytes beyond the end of our own page tables; the +0x007 is
-        * the attribute bits
+        * End condition: we must map up to end
         */
-       leal (INIT_MAP_BEYOND_END+PTE_IDENT_ATTR)(%edi),%ebp
+       movl $pa(_end) + PTE_IDENT_ATTR, %ebp
        cmpl %ebp,%eax
        jb 10b
        addl $__PAGE_OFFSET, %edi
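The two end-condition hunks above (the PAE and non-PAE setup loops) change the mapping loop to stop once everything up to pa(_end) is covered, instead of running INIT_MAP_BEYOND_END bytes past the page tables themselves. Below is a rough, hypothetical C rendering of the non-PAE loop's shape, for orientation only; build_initial_pagetables and its arguments are invented names, and the attribute-bit folding in the assembly compare is dropped because it does not change the result.

#include <stdint.h>

#define PAGE_SIZE      4096u
#define PTRS_PER_PTE   1024u
#define PTE_IDENT_ATTR 0x003u   /* example attribute bits: present + writable */

/*
 * Rough C rendering of the non-PAE setup loop in head_32.S: 'pte' points at
 * the page tables carved out of the brk reservation, 'pgd' at swapper_pg_dir.
 * All names and types here are simplifications.
 */
void build_initial_pagetables(uint32_t *pgd, uint32_t *pte,
                              uint32_t pa_end, uint32_t page_offset)
{
        uint32_t paddr = 0;

        do {
                /* One PDE covers 1024 PTEs (4M); install it both at the
                 * identity slot and at the PAGE_OFFSET alias, as the
                 * assembly does via page_pde_offset. */
                uint32_t pde = (uint32_t)(uintptr_t)pte | PTE_IDENT_ATTR;

                pgd[paddr >> 22] = pde;
                pgd[(paddr + page_offset) >> 22] = pde;

                for (unsigned int i = 0; i < PTRS_PER_PTE; i++) {
                        *pte++ = paddr | PTE_IDENT_ATTR;
                        paddr += PAGE_SIZE;
                }
                /* New end condition from this patch: stop once everything
                 * up to pa(_end) is mapped. */
        } while (paddr < pa_end);
}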
@@ -638,6 +622,7 @@ swapper_pg_fixmap:
        .fill 1024,4,0
 ENTRY(empty_zero_page)
        .fill 4096,1,0
+
 /*
  * This starts the data section.
  */
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index a1f28b85fb34..98424f33e077 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -210,6 +210,12 @@ SECTIONS
   DWARF_DEBUG
 }
 
+/*
+ * Build-time check on the image size:
+ */
+ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
+       "kernel image bigger than KERNEL_IMAGE_SIZE")
+
 #ifdef CONFIG_KEXEC
 /* Link time checks */
 #include <asm/kexec.h>
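The ASSERT added to vmlinux_32.lds.S makes the link fail if the kernel image outgrows KERNEL_IMAGE_SIZE, since the initial page tables above are sized from that constant. The check has to live in the linker script because _end only exists at link time; for comparison, the analogous fail-the-build-early pattern in C is a static assertion. The structure and limit below are purely hypothetical.

#include <assert.h>     /* static_assert (C11) */

/*
 * Hypothetical illustration of the same fail-the-build-early idea in C.
 * The kernel's check must be a linker-script ASSERT instead, because the
 * value being bounded (_end) is only known to the linker.
 */
#define IMAGE_LIMIT_BYTES (512u * 1024 * 1024)  /* assumed limit */

struct fake_image_header {
        unsigned int load_offset;
        unsigned int payload[64];
};

static_assert(sizeof(struct fake_image_header) <= IMAGE_LIMIT_BYTES,
              "fake_image_header bigger than IMAGE_LIMIT_BYTES");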