author    Rusty Russell <rusty@rustcorp.com.au>    2010-12-16 18:03:15 -0500
committer Rusty Russell <rusty@rustcorp.com.au>    2010-12-16 01:33:15 -0500
commit    da32dac101263fb5b155407507c548e3ac2a6a2a
tree      fdb5fa8e874fdbe59878481b128b026719045523
parent    bb4093deb259ea9c92415796a6a139e35272f8a8

lguest: populate initial_page_table
Two x86 patches broke lguest:

1) v2.6.35-492-g72d7c3b, which changed x86 to use the memblock allocator.
   In lguest, the host places linear page tables at the top of mem, which
   used to be enough to get us up to the swapper_pg_dir page tables.  With
   the first patch, the direct mapping tables used that memory:

	Before: kernel direct mapping tables up to 4000000 @ 7000-1a000
	 After: kernel direct mapping tables up to 4000000 @ 3fed000-4000000

   I initially fixed this by lying about the amount of memory we had, so
   the kernel wouldn't blatt the lguest boot pagetables (yuk!), but then...

2) v2.6.36-rc8-54-gb40827f, which made x86 boot use initial_page_table.
   This was initialized in a part of head_32.S which isn't executed by
   lguest; it is then copied into swapper_pg_dir.  So we have to initialize
   it; and anyway we switch to it before we blatt the old tables, so that
   fixes the previous damage as well.

For the moment, I cut & pasted the code into lguest's boot code, but
next merge window I will merge them.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
To: x86@kernel.org
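
The collision in (1) can be read off the two quoted lines: both allocations
are 0x13000 bytes (76KB), but the second one ends flush against the top of
guest memory, which is where the host keeps its linear boot pagetables. A
standalone C sketch of that arithmetic (the 64MB memory size is inferred
from the 0x4000000 figure above, not stated in the message):

#include <stdio.h>

int main(void)
{
	unsigned long mem_top   = 0x4000000;		/* 64MB guest (inferred) */
	unsigned long before_lo = 0x7000,    before_hi = 0x1a000;
	unsigned long after_lo  = 0x3fed000, after_hi  = 0x4000000;

	/* Pre-memblock: the direct-mapping tables sat near the bottom
	 * of memory, far from the host's pagetables at the top. */
	printf("before: %lu KB at 0x%lx\n",
	       (before_hi - before_lo) >> 10, before_lo);

	/* With memblock allocating top-down, the same 76KB now ends
	 * exactly at the top of mem, clobbering the boot pagetables. */
	printf("after:  %lu KB, ends at top of mem? %s\n",
	       (after_hi - after_lo) >> 10,
	       after_hi == mem_top ? "yes" : "no");
	return 0;
}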
 arch/x86/kernel/head_32.S   |   4 ++--
 arch/x86/lguest/boot.c      |   3 ---
 arch/x86/lguest/i386_head.S | 105 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 107 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index bcece91dd311..f0bea76f6ea5 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -620,13 +620,13 @@ ENTRY(initial_code)
 	__PAGE_ALIGNED_BSS
 	.align PAGE_SIZE_asm
 #ifdef CONFIG_X86_PAE
-initial_pg_pmd:
+ENTRY(initial_pg_pmd)
 	.fill 1024*KPMDS,4,0
 #else
 ENTRY(initial_page_table)
 	.fill 1024,4,0
 #endif
-initial_pg_fixmap:
+ENTRY(initial_pg_fixmap)
 	.fill 1024,4,0
 ENTRY(empty_zero_page)
 	.fill 4096,1,0
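
The head_32.S change is pure linkage: initial_pg_pmd and initial_pg_fixmap
gain ENTRY() so that lguest's i386_head.S (below) can reference them by
name. For reference, ENTRY() in <linux/linkage.h> of this era is
essentially:

/* Sketch of the <linux/linkage.h> definition: make the label a global,
 * aligned symbol instead of a file-local one. */
#define ENTRY(name) \
	.globl name; \
	ALIGN; \
	name:

A plain "initial_pg_pmd:" label stays local to head_32.o, so the copied
pagetable-setup code in lguest could not otherwise resolve it.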
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 24e49737df7a..4996cf5f73a0 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1352,9 +1352,6 @@ __init void lguest_init(void)
 	 */
 	switch_to_new_gdt(0);
 
-	/* We actually boot with all memory mapped, but let's say 128MB. */
-	max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT;
-
 	/*
 	 * The Host<->Guest Switcher lives at the top of our address space, and
 	 * the Host told us how big it is when we made LGUEST_INIT hypercall:
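
The hard-coded 128MB guess can go because init_pagetables (added below)
leaves the real figure in max_pfn_mapped: after the fill loop, %eax holds
the first unmapped physical address plus the PTE attribute bits in its low
12 bits, which the shift discards. In C terms, roughly (the parameter name
is a hypothetical stand-in for the value in %eax):

#define PAGE_SHIFT 12

/* mapping_end: first byte past the mapping built by init_pagetables,
 * i.e. the value left in %eax by the loop (low attr bits included). */
static inline unsigned long lguest_max_pfn_mapped(unsigned long mapping_end)
{
	return mapping_end >> PAGE_SHIFT;	/* shrl $12, %eax */
}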
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index 4f420c2f2d55..e7d5382ef263 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -4,6 +4,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/processor-flags.h>
+#include <asm/pgtable.h>
 
 /*G:020
  * Our story starts with the kernel booting into startup_32 in
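
The new include is what makes the copied code in the next hunk
self-contained: <asm/pgtable.h> pulls in (directly or via its own includes)
PTRS_PER_PGD, PTRS_PER_PMD and the early identity-mapping attribute bits.
For orientation, the 32-bit non-PAE values involved are roughly (a sketch
from pgtable_types.h, quoted from memory):

/* Constants the code below relies on (32-bit, non-PAE). */
#define PTRS_PER_PGD	1024	/* one 4K page of 4-byte PDEs */
#define PTRS_PER_PMD	1	/* PMD folded away; selects the #else branch */
#define PTE_IDENT_ATTR	0x003	/* PRESENT + RW */
#define PDE_IDENT_ATTR	0x067	/* PRESENT + RW + USER + DIRTY + ACCESSED */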
@@ -37,9 +38,113 @@ ENTRY(lguest_entry)
 	/* Set up the initial stack so we can run C code. */
 	movl $(init_thread_union+THREAD_SIZE),%esp
 
+	call init_pagetables
+
 	/* Jumps are relative: we're running __PAGE_OFFSET too low. */
 	jmp lguest_init+__PAGE_OFFSET
 
+/*
+ * Initialize page tables.  This creates a PDE and a set of page
+ * tables, which are located immediately beyond __brk_base.  The variable
+ * _brk_end is set up to point to the first "safe" location.
+ * Mappings are created both at virtual address 0 (identity mapping)
+ * and PAGE_OFFSET for up to _end.
+ *
+ * FIXME: This code is taken verbatim from arch/x86/kernel/head_32.S: they
+ * don't have a stack at this point, so we can't just use call and ret.
+ */
+init_pagetables:
+#if PTRS_PER_PMD > 1
+#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
+#else
+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
+#endif
+#define pa(X) ((X) - __PAGE_OFFSET)
+
+/* Enough space to fit pagetables for the low memory linear map */
+MAPPING_BEYOND_END = \
+	PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT
+#ifdef CONFIG_X86_PAE
+
+	/*
+	 * In PAE mode initial_page_table is statically defined to contain
+	 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
+	 * entries). The identity mapping is handled by pointing two PGD entries
+	 * to the first kernel PMD.
+	 *
+	 * Note the upper half of each PMD or PTE are always zero at this stage.
+	 */
+
+#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
+
+	xorl %ebx,%ebx				/* %ebx is kept at zero */
+
+	movl $pa(__brk_base), %edi
+	movl $pa(initial_pg_pmd), %edx
+	movl $PTE_IDENT_ATTR, %eax
+10:
+	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PMD entry */
+	movl %ecx,(%edx)			/* Store PMD entry */
+						/* Upper half already zero */
+	addl $8,%edx
+	movl $512,%ecx
+11:
+	stosl
+	xchgl %eax,%ebx
+	stosl
+	xchgl %eax,%ebx
+	addl $0x1000,%eax
+	loop 11b
+
+	/*
+	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
+	 */
+	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
+	cmpl %ebp,%eax
+	jb 10b
+1:
+	addl $__PAGE_OFFSET, %edi
+	movl %edi, pa(_brk_end)
+	shrl $12, %eax
+	movl %eax, pa(max_pfn_mapped)
+
+	/* Do early initialization of the fixmap area */
+	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
+#else	/* Not PAE */
+
+page_pde_offset = (__PAGE_OFFSET >> 20);
+
+	movl $pa(__brk_base), %edi
+	movl $pa(initial_page_table), %edx
+	movl $PTE_IDENT_ATTR, %eax
+10:
+	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PDE entry */
+	movl %ecx,(%edx)			/* Store identity PDE entry */
+	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */
+	addl $4,%edx
+	movl $1024, %ecx
+11:
+	stosl
+	addl $0x1000,%eax
+	loop 11b
+	/*
+	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
+	 */
+	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
+	cmpl %ebp,%eax
+	jb 10b
+	addl $__PAGE_OFFSET, %edi
+	movl %edi, pa(_brk_end)
+	shrl $12, %eax
+	movl %eax, pa(max_pfn_mapped)
+
+	/* Do early initialization of the fixmap area */
+	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+	movl %eax,pa(initial_page_table+0xffc)
+#endif
+	ret
+
 /*G:055
  * We create a macro which puts the assembler code between lgstart_ and lgend_
  * markers. These templates are put in the .text section: they can't be
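
To make the non-PAE branch above easier to follow, here is the same loop
modeled in C (a sketch: the flat arrays and names stand in for __brk_base
and the real physical-address arithmetic). Each iteration stores one PDE
twice, once for the identity map and once at the PAGE_OFFSET alias
(page_pde_offset is a byte offset into the PDE page, hence the >> 22 index
here), then fills one page of PTEs; the loop stops once _end +
MAPPING_BEYOND_END is covered. For the common 3G/1G split,
MAPPING_BEYOND_END works out to 1GB of lowmem / 1024 PTEs per page = 256
pagetable pages = 1MB.

#include <stdint.h>

#define PAGE_SHIFT	12
#define PTRS_PER_PGD	1024
#define PTE_IDENT_ATTR	0x003
#define PDE_IDENT_ATTR	0x067

static uint32_t initial_page_table[PTRS_PER_PGD];
static uint32_t brk_area[256 * 1024];	/* stands in for __brk_base */

static unsigned long init_pagetables_model(unsigned long pa_limit,
					   unsigned long page_offset)
{
	uint32_t *pte = brk_area;		/* %edi */
	unsigned int pde = 0;			/* %edx, as an index */
	uint32_t phys = PTE_IDENT_ATTR;		/* %eax: paddr | attrs */

	do {
		/* One PDE covers 4MB; alias it at vaddr 0 and PAGE_OFFSET
		 * (leal PDE_IDENT_ATTR(%edi),%ecx; two movl stores). */
		uint32_t entry = (uint32_t)(uintptr_t)pte | PDE_IDENT_ATTR;
		initial_page_table[pde] = entry;
		initial_page_table[pde + (page_offset >> 22)] = entry;
		pde++;

		for (int i = 0; i < 1024; i++) {	/* the stosl loop */
			*pte++ = phys;
			phys += 0x1000;
		}
	} while (phys < pa_limit + PTE_IDENT_ATTR);	/* cmpl %ebp; jb 10b */

	return phys >> PAGE_SHIFT;	/* stored into max_pfn_mapped */
}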