author    Matias Zabaljauregui <zabaljauregui@gmail.com>  2009-06-13 00:27:06 -0400
committer Rusty Russell <rusty@rustcorp.com.au>           2009-06-12 08:57:06 -0400
commit    90603d15fa95605d1d08235b73e220d766f04bb0
tree      669b5a41ebdb368e578898409d8a48021074746a
parent    ed1dc77810159a733240ba6751c1b31023bf8dd7
lguest: use native_set_* macros, which properly handle 64-bit entries when PAE is activated
Some cleanups, and replace direct assignment with native_set_* macros, which
properly handle 64-bit entries when PAE is activated.

Signed-off-by: Matias Zabaljauregui <zabaljauregui@gmail.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
-rw-r--r--  arch/x86/lguest/boot.c        |  8
-rw-r--r--  drivers/lguest/page_tables.c  | 35
2 files changed, 22 insertions(+), 21 deletions(-)
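
Why native_set_* matters here: with CONFIG_X86_PAE a page-table entry is 64 bits
wide, but the guest kernel is 32-bit code, so a plain `*ptep = pteval` is emitted
as two 32-bit stores in whatever order the compiler picks, and a half-written
entry could briefly be visible with its present bit set. The native_set_* helpers
store the two halves in a deliberate order (and degenerate to a direct store on
non-PAE builds, so the change costs nothing there). The snippet below is only a
stand-alone sketch of that idea; pae_pte_t and sketch_set_pte are invented names,
not the kernel's implementation (the real PAE helpers live under
arch/x86/include/asm/).

/* Stand-alone sketch (invented names, not kernel code): mimic the idea behind
 * the PAE native_set_pte() -- write the high half of a 64-bit entry first,
 * then the low half that carries the present bit, so a concurrent walker
 * never sees a half-written entry marked present. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
        volatile uint32_t pte_low;      /* flags, including the present bit */
        volatile uint32_t pte_high;     /* upper physical-address bits under PAE */
} pae_pte_t;

static void sketch_set_pte(pae_pte_t *ptep, uint64_t val)
{
        ptep->pte_high = (uint32_t)(val >> 32);
        __sync_synchronize();           /* stands in for the kernel's smp_wmb() */
        ptep->pte_low = (uint32_t)val;  /* present bit becomes visible last */
}

int main(void)
{
        pae_pte_t pte = { 0, 0 };

        sketch_set_pte(&pte, 0x123456789ULL | 0x1);     /* 0x1 = present */
        printf("low=%#x high=%#x\n",
               (unsigned)pte.pte_low, (unsigned)pte.pte_high);
        return 0;
}
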
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 514f4d0d2bf..4f311e40d0a 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -525,7 +525,7 @@ static void lguest_pte_update(struct mm_struct *mm, unsigned long addr,
 static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
                               pte_t *ptep, pte_t pteval)
 {
-        *ptep = pteval;
+        native_set_pte(ptep, pteval);
         lguest_pte_update(mm, addr, ptep);
 }
 
@@ -534,9 +534,9 @@ static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
  * changed. */
 static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
 {
-        *pmdp = pmdval;
+        native_set_pmd(pmdp, pmdval);
         lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK,
-                    (__pa(pmdp) & (PAGE_SIZE - 1)) / 4);
+                    (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
 }
 
 /* There are a couple of legacy places where the kernel sets a PTE, but we
@@ -550,7 +550,7 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
  * which brings boot back to 0.25 seconds. */
 static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 {
-        *ptep = pteval;
+        native_set_pte(ptep, pteval);
         if (cr3_changed)
                 lazy_hcall1(LHCALL_FLUSH_TLB, 1);
 }
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 496995370fb..ffba723cd98 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -90,7 +90,7 @@ static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr)
         pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
         /* You should never call this if the PGD entry wasn't valid */
         BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
-        return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE];
+        return &page[pte_index(vaddr)];
 }
 
 /* These two functions just like the above two, except they access the Guest
@@ -105,7 +105,7 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
 {
         unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
         BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
-        return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
+        return gpage + pte_index(vaddr) * sizeof(pte_t);
 }
 /*:*/
 
@@ -171,7 +171,7 @@ static void release_pte(pte_t pte)
         /* Remember that get_user_pages_fast() took a reference to the page, in
          * get_pfn()?  We have to put it back now. */
         if (pte_flags(pte) & _PAGE_PRESENT)
-                put_page(pfn_to_page(pte_pfn(pte)));
+                put_page(pte_page(pte));
 }
 /*:*/
 
@@ -273,7 +273,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
                  * table entry, even if the Guest says it's writable.  That way
                  * we will come back here when a write does actually occur, so
                  * we can update the Guest's _PAGE_DIRTY flag. */
-                *spte = gpte_to_spte(cpu, pte_wrprotect(gpte), 0);
+                native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
 
         /* Finally, we write the Guest PTE entry back: we've set the
          * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
@@ -323,7 +323,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
 }
 
 /*H:450 If we chase down the release_pgd() code, it looks like this: */
-static void release_pgd(struct lguest *lg, pgd_t *spgd)
+static void release_pgd(pgd_t *spgd)
 {
         /* If the entry's not present, there's nothing to release. */
         if (pgd_flags(*spgd) & _PAGE_PRESENT) {
@@ -350,7 +350,7 @@ static void flush_user_mappings(struct lguest *lg, int idx)
         unsigned int i;
         /* Release every pgd entry up to the kernel's address. */
         for (i = 0; i < pgd_index(lg->kernel_address); i++)
-                release_pgd(lg, lg->pgdirs[idx].pgdir + i);
+                release_pgd(lg->pgdirs[idx].pgdir + i);
 }
 
 /*H:440 (v) Flushing (throwing away) page tables,
@@ -431,7 +431,7 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
 
 /*H:430 (iv) Switching page tables
  *
- * Now we've seen all the page table setting and manipulation, let's see what
+ * Now we've seen all the page table setting and manipulation, let's see
  * what happens when the Guest changes page tables (ie. changes the top-level
  * pgdir).  This occurs on almost every context switch. */
 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
@@ -463,7 +463,7 @@ static void release_all_pagetables(struct lguest *lg)
                 if (lg->pgdirs[i].pgdir)
                         /* Every PGD entry except the Switcher at the top */
                         for (j = 0; j < SWITCHER_PGD_INDEX; j++)
-                                release_pgd(lg, lg->pgdirs[i].pgdir + j);
+                                release_pgd(lg->pgdirs[i].pgdir + j);
 }
 
 /* We also throw away everything when a Guest tells us it's changed a kernel
@@ -581,7 +581,7 @@ void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
         pgdir = find_pgdir(lg, gpgdir);
         if (pgdir < ARRAY_SIZE(lg->pgdirs))
                 /* ... throw it away. */
-                release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
+                release_pgd(lg->pgdirs[pgdir].pgdir + idx);
 }
 
 /* Once we know how much memory we have we can construct simple identity
@@ -726,8 +726,9 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
          * page is already mapped there, we don't have to copy them out
          * again. */
         pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
-        regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL));
-        switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte;
+        native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL));
+        native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)],
+                       regs_pte);
 }
 /*:*/
 
@@ -752,21 +753,21 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
 
         /* The first entries are easy: they map the Switcher code. */
         for (i = 0; i < pages; i++) {
-                pte[i] = mk_pte(switcher_page[i],
-                                __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
+                native_set_pte(&pte[i], mk_pte(switcher_page[i],
+                                __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
         }
 
         /* The only other thing we map is this CPU's pair of pages. */
         i = pages + cpu*2;
 
         /* First page (Guest registers) is writable from the Guest */
-        pte[i] = pfn_pte(page_to_pfn(switcher_page[i]),
-                         __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW));
+        native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
+                         __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));
 
         /* The second page contains the "struct lguest_ro_state", and is
          * read-only. */
-        pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]),
-                           __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
+        native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
+                           __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
 }
 
 /* We've made it through the page table code.  Perhaps our tired brains are
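
A second, smaller point visible in the hunks above: the `/ 4` in lguest_set_pmd()
assumed 4-byte entries, which stops being true once pmd_t grows to 8 bytes under
PAE, hence sizeof(pmd_t); likewise the open-coded
`(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE` gives way to the generic pte_index()
helper. The following is only a stand-alone illustration of what pte_index()
computes, with constants chosen for the PAE case; it is not kernel source.

#include <stdio.h>

#define PAGE_SHIFT   12
#define PTRS_PER_PTE 512     /* 512 entries per page-table page under PAE, 1024 without */

/* Mirrors what pte_index() computes: the slot of vaddr within one PTE page. */
static unsigned long pte_index_sketch(unsigned long vaddr)
{
        return (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

int main(void)
{
        printf("%lu\n", pte_index_sketch(0xC0101234UL));     /* prints 257 */
        return 0;
}
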