Diffstat (limited to 'drivers/lguest/page_tables.c')
-rw-r--r--   drivers/lguest/page_tables.c   32
1 file changed, 18 insertions, 14 deletions
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index a7f64a9d67e0..d93500f24fbb 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -2,8 +2,8 @@
  * previous encounters. It's functional, and as neat as it can be in the
  * circumstances, but be wary, for these things are subtle and break easily.
  * The Guest provides a virtual to physical mapping, but we can neither trust
- * it nor use it: we verify and convert it here to point the hardware to the
- * actual Guest pages when running the Guest. :*/
+ * it nor use it: we verify and convert it here then point the CPU to the
+ * converted Guest pages when running the Guest. :*/
 
 /* Copyright (C) Rusty Russell IBM Corporation 2006.
  * GPL v2 and any later version */
@@ -106,6 +106,11 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
 	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
 	return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
 }
+/*:*/
+
+/*M:014 get_pfn is slow; it takes the mmap sem and calls get_user_pages. We
+ * could probably try to grab batches of pages here as an optimization
+ * (ie. pre-faulting). :*/
 
 /*H:350 This routine takes a page number given by the Guest and converts it to
  * an actual, physical page number. It can fail for several reasons: the
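As a worked example of the index arithmetic in gpte_addr() above, take a
32-bit non-PAE Guest (so PAGE_SHIFT is 12, PTRS_PER_PTE is 1024 and
sizeof(pte_t) is 4 -- assumptions, since the hunk shows only the one line)
and vaddr = 0xC0101234:

	vaddr >> PAGE_SHIFT           = 0xC0101        /* virtual page number */
	0xC0101 % PTRS_PER_PTE        = 0x101          /* slot within the page table */
	gpage + 0x101 * sizeof(pte_t) = gpage + 0x404  /* address of the Guest PTE */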
@@ -113,8 +118,8 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
  * and the page is read-only, or the write flag was set and the page was
  * shared so had to be copied, but we ran out of memory.
  *
- * This holds a reference to the page, so release_pte() is careful to
- * put that back. */
+ * This holds a reference to the page, so release_pte() is careful to put that
+ * back. */
 static unsigned long get_pfn(unsigned long virtpfn, int write)
 {
 	struct page *page;
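The rest of get_pfn() is not shown in this hunk, but the M:014 comment above
gives its shape: take the mmap semaphore, call get_user_pages(), and turn the
resulting struct page into a frame number. A minimal sketch, assuming the
eight-argument get_user_pages() signature of this kernel era rather than
quoting the file verbatim:

	static unsigned long get_pfn(unsigned long virtpfn, int write)
	{
		struct page *page;
		/* -1UL tells the caller the lookup failed. */
		unsigned long ret = -1UL;

		/* get_user_pages() needs the mmap semaphore held for reading. */
		down_read(&current->mm->mmap_sem);
		if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
				   1, write, 1, &page, NULL) == 1)
			ret = page_to_pfn(page);
		up_read(&current->mm->mmap_sem);
		return ret;
	}

This also shows why the H:350 comment insists that release_pte() put the
reference back: get_user_pages() takes a reference on each page it returns.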
@@ -532,13 +537,13 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
  * all processes. So when the page table above that address changes, we update
  * all the page tables, not just the current one. This is rare.
  *
- * The benefit is that when we have to track a new page table, we can copy keep
- * all the kernel mappings. This speeds up context switch immensely. */
+ * The benefit is that when we have to track a new page table, we can keep all
+ * the kernel mappings. This speeds up context switch immensely. */
 void guest_set_pte(struct lg_cpu *cpu,
 		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
 {
-	/* Kernel mappings must be changed on all top levels. Slow, but
-	 * doesn't happen often. */
+	/* Kernel mappings must be changed on all top levels. Slow, but doesn't
+	 * happen often. */
 	if (vaddr >= cpu->lg->kernel_address) {
 		unsigned int i;
 		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
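The hunk cuts off at the loop over every tracked top level. A sketch of how
the function plausibly continues, assuming a hypothetical helper find_pgdir()
that returns the index of the shadow for a given Guest top level (or
ARRAY_SIZE(cpu->lg->pgdirs) when none is tracked); do_set_pte()'s signature
is visible in the hunk header above:

			/* Only update shadows that are actually allocated. */
			if (cpu->lg->pgdirs[i].pgdir)
				do_set_pte(cpu, i, vaddr, gpte);
	} else {
		/* Not a kernel mapping: at most one shadow page table can
		 * hold it, and only if we're tracking that top level. */
		int pgdir = find_pgdir(cpu->lg, gpgdir);
		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
			do_set_pte(cpu, pgdir, vaddr, gpte);
	}
}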
@@ -704,12 +709,11 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
 /* We've made it through the page table code. Perhaps our tired brains are
  * still processing the details, or perhaps we're simply glad it's over.
  *
- * If nothing else, note that all this complexity in juggling shadow page
- * tables in sync with the Guest's page tables is for one reason: for most
- * Guests this page table dance determines how bad performance will be. This
- * is why Xen uses exotic direct Guest pagetable manipulation, and why both
- * Intel and AMD have implemented shadow page table support directly into
- * hardware.
+ * If nothing else, note that all this complexity in juggling shadow page tables
+ * in sync with the Guest's page tables is for one reason: for most Guests this
+ * page table dance determines how bad performance will be. This is why Xen
+ * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
+ * have implemented shadow page table support directly into hardware.
  *
  * There is just one file remaining in the Host. */
 