author     Rusty Russell <rusty@rustcorp.com.au>    2009-06-13 00:27:08 -0400
committer  Rusty Russell <rusty@rustcorp.com.au>    2009-06-12 08:57:08 -0400
commit     92b4d8df8436cdd74d22a2a5b6b23b9abc737a3e (patch)
tree       bba0dce08d1fde432f370b7ef2845e0ae195a34d /drivers
parent     acdd0b6292b282c4511897ac2691a47befbf1c6a (diff)
lguest: PAE fixes
1) j wasn't initialized in setup_pagetables, so the page tables weren't set up
for me, causing immediate guest crashes.
2) gpte_addr should not re-read the pmd from the Guest, and especially should
not BUG_ON() based on that value. If we ever supported SMP guests, they could
trigger it, and the Launcher can also trigger it (though it is currently
root-only).
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
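The first fix is easy to misread in C: in the old loop header "for (i = 0, j; ...)",
the initializer is a comma expression that assigns i and then merely evaluates the
still-uninitialized j, so j never receives a value and the "j < PTRS_PER_PMD" bound
tests stack garbage. Here is a standalone sketch of the two loop forms; the loop
shape mirrors setup_pagetables, but the functions and bounds are invented for
illustration.

#include <stdio.h>

/* Broken form, like the old setup_pagetables() loop header: "i = 0, j" is a
 * comma expression that assigns i, then evaluates the uninitialized j and
 * discards the result (compilers usually warn here), so the "j < 4" bound
 * compares against whatever garbage j happens to hold. */
static void broken(void)
{
        unsigned int i, j;

        for (i = 0, j; i < 8 && j < 4; i += 2, j++)
                printf("broken: i=%u j=%u\n", i, j);
}

/* Fixed form, as in the patch: "i = j = 0" initializes both counters. */
static void fixed(void)
{
        unsigned int i, j;

        for (i = j = 0; i < 8 && j < 4; i += 2, j++)
                printf("fixed:  i=%u j=%u\n", i, j);
}

int main(void)
{
        broken();       /* undefined behaviour: may print nonsense or nothing */
        fixed();        /* always prints exactly four iterations */
        return 0;
}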
Diffstat (limited to 'drivers')
-rw-r--r--    drivers/lguest/page_tables.c    38
1 files changed, 21 insertions(+), 17 deletions(-)
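The second fix removes a double fetch from guest memory: the old gpte_addr()
re-read the guest pmd entry that its caller had already read and validated, and
BUG_ON()'d if the present bit was clear. Anything living in guest memory can be
rewritten between the two reads, so a hostile Launcher, or an SMP guest if one
were ever supported, could steer the host into BUG(). The patch has the caller
pass the already-read pmd value down instead. Below is a minimal userspace
sketch of the two patterns; the names guest_pmd, PMD_PRESENT, pte_base_racy and
pte_base_safe are made up for illustration and stand in for the real
lgread()/pmd helpers.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for memory the untrusted side (guest or Launcher) can rewrite at
 * any time; in lguest this would be a pmd entry fetched with lgread(). */
static volatile uint64_t guest_pmd;

#define PMD_PRESENT 0x1ULL

/* Double-fetch style (what the old gpte_addr() did): read the entry again
 * after the caller already checked it, and assert on the second value. */
static uint64_t pte_base_racy(void)
{
        uint64_t pmd = guest_pmd;               /* second, independent read */

        if (!(pmd & PMD_PRESENT)) {
                fprintf(stderr, "host asserts on guest-controlled data\n");
                abort();                        /* guest-triggerable crash */
        }
        return pmd & ~0xfffULL;
}

/* Single-fetch style (what the patch does): the caller reads and validates the
 * entry once and passes the snapshot down, so later changes by the guest can
 * no longer influence this path. */
static uint64_t pte_base_safe(uint64_t pmd_snapshot)
{
        return pmd_snapshot & ~0xfffULL;
}

int main(void)
{
        guest_pmd = 0x1000 | PMD_PRESENT;

        uint64_t snapshot = guest_pmd;          /* read and validate once */
        if (!(snapshot & PMD_PRESENT))
                return 1;                       /* refuse, don't crash */

        guest_pmd = 0;                          /* guest clears the entry under us */

        printf("safe: %#llx\n", (unsigned long long)pte_base_safe(snapshot));
        pte_base_racy();                        /* aborts: present bit now clear */
        return 0;
}

In the kernel code proper the polite equivalent of that early return is
kill_guest(), which is exactly what the guest_pa() hunk below uses for a bad
address.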
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 5e2c26adcf06..a6fe1abda240 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -154,26 +154,25 @@ static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
         BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
         return gpage + pmd_index(vaddr) * sizeof(pmd_t);
 }
-#endif
 
 static unsigned long gpte_addr(struct lg_cpu *cpu,
-                               pgd_t gpgd, unsigned long vaddr)
+                               pmd_t gpmd, unsigned long vaddr)
 {
-#ifdef CONFIG_X86_PAE
-        pmd_t gpmd;
-#endif
-        unsigned long gpage;
+        unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
 
-        BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
-#ifdef CONFIG_X86_PAE
-        gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
-        gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
         BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
+        return gpage + pte_index(vaddr) * sizeof(pte_t);
+}
 #else
-        gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
-#endif
+static unsigned long gpte_addr(struct lg_cpu *cpu,
+                               pgd_t gpgd, unsigned long vaddr)
+{
+        unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
+
+        BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
         return gpage + pte_index(vaddr) * sizeof(pte_t);
 }
+#endif
 /*:*/
 
 /*M:014 get_pfn is slow: we could probably try to grab batches of pages here as
@@ -339,10 +338,15 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
                  * number in the shadow PMD is the page we just allocated. */
                 native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
         }
-#endif
+
+        /* OK, now we look at the lower level in the Guest page table: keep its
+         * address, because we might update it later. */
+        gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
+#else
         /* OK, now we look at the lower level in the Guest page table: keep its
          * address, because we might update it later. */
         gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
+#endif
         gpte = lgread(cpu, gpte_ptr, pte_t);
 
         /* If this page isn't in the Guest page tables, we can't page it in. */
@@ -522,7 +526,6 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
 {
         pgd_t gpgd;
         pte_t gpte;
-
 #ifdef CONFIG_X86_PAE
         pmd_t gpmd;
 #endif
@@ -534,13 +537,14 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
                 return -1UL;
         }
 
-        gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
 #ifdef CONFIG_X86_PAE
         gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
         if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
                 kill_guest(cpu, "Bad address %#lx", vaddr);
-#endif
+        gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
+#else
         gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
+#endif
         if (!(pte_flags(gpte) & _PAGE_PRESENT))
                 kill_guest(cpu, "Bad address %#lx", vaddr);
 
@@ -847,7 +851,7 @@ static unsigned long setup_pagetables(struct lguest *lg,
         /* The top level points to the linear page table pages above.
          * We setup the identity and linear mappings here. */
 #ifdef CONFIG_X86_PAE
-        for (i = 0, j; i < mapped_pages && j < PTRS_PER_PMD;
+        for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD;
              i += PTRS_PER_PTE, j++) {
                 native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i)
                     - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER));