author     David S. Miller <davem@davemloft.net>  2008-04-03 17:33:42 -0400
committer  David S. Miller <davem@davemloft.net>  2008-04-03 17:33:42 -0400
commit     3bb5da3837cc1aa17736b05139c9a22c3794851a
tree       c92d5684a866542b1cb20641607ac1643ce03a47  /drivers/lguest/page_tables.c
parent     7feb49c82a74bc7c091b8ab2a3f96baa33d08ece
parent     9597362d354f8655ece324b01d0c640a0e99c077

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
Diffstat (limited to 'drivers/lguest/page_tables.c')
 -rw-r--r--  drivers/lguest/page_tables.c | 32
 1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index a7f64a9d67e0..d93500f24fbb 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -2,8 +2,8 @@
  * previous encounters. It's functional, and as neat as it can be in the
  * circumstances, but be wary, for these things are subtle and break easily.
  * The Guest provides a virtual to physical mapping, but we can neither trust
- * it nor use it: we verify and convert it here to point the hardware to the
- * actual Guest pages when running the Guest. :*/
+ * it nor use it: we verify and convert it here then point the CPU to the
+ * converted Guest pages when running the Guest. :*/
 
 /* Copyright (C) Rusty Russell IBM Corporation 2006.
  * GPL v2 and any later version */
@@ -106,6 +106,11 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
 	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
 	return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
 }
+/*:*/
+
+/*M:014 get_pfn is slow; it takes the mmap sem and calls get_user_pages. We
+ * could probably try to grab batches of pages here as an optimization
+ * (ie. pre-faulting). :*/
 
 /*H:350 This routine takes a page number given by the Guest and converts it to
  * an actual, physical page number. It can fail for several reasons: the
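
The tail of gpte_addr() in the hunk above does the guest-PTE address arithmetic. For orientation, a minimal sketch of the whole helper; only its last three lines appear in this diff, and the pgd_pfn() extraction on the first line is an assumption about the rest of the function:

	static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
	{
		/* Assumed: the guest page holding the PTEs, as a byte address. */
		unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;

		/* The guest's top-level entry must be present to have PTEs at all. */
		BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
		/* (vaddr >> PAGE_SHIFT) is the virtual page number; modulo
		 * PTRS_PER_PTE picks its slot within one PTE page, and
		 * sizeof(pte_t) scales that slot index to a byte offset. */
		return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
	}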
@@ -113,8 +118,8 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
  * and the page is read-only, or the write flag was set and the page was
  * shared so had to be copied, but we ran out of memory.
  *
- * This holds a reference to the page, so release_pte() is careful to
- * put that back. */
+ * This holds a reference to the page, so release_pte() is careful to put that
+ * back. */
 static unsigned long get_pfn(unsigned long virtpfn, int write)
 {
 	struct page *page;
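
The new M:014 comment is borne out by get_pfn()'s body, whose opening lines close the hunk above: every call takes the mmap semaphore and pins a single page. A hedged sketch of that slow path, assuming the 2.6.25-era eight-argument get_user_pages() signature; the exact error handling is illustrative:

	static unsigned long get_pfn(unsigned long virtpfn, int write)
	{
		struct page *page;
		/* -1UL signals failure to the caller. */
		unsigned long ret = -1UL;

		/* get_user_pages() wants mmap_sem held for reading and returns
		 * how many pages it pinned; we ask for exactly one, faulting it
		 * in (and breaking copy-on-write if 'write') as a side effect. */
		down_read(&current->mm->mmap_sem);
		if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
				   1, write, 1, &page, NULL) == 1)
			ret = page_to_pfn(page);
		up_read(&current->mm->mmap_sem);
		return ret;
	}

Batching, as the comment suggests, would amortize the semaphore round-trip by passing a count greater than one and caching the extra pages for the faults that usually follow.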
@@ -532,13 +537,13 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
  * all processes. So when the page table above that address changes, we update
  * all the page tables, not just the current one. This is rare.
  *
- * The benefit is that when we have to track a new page table, we can copy keep
- * all the kernel mappings. This speeds up context switch immensely. */
+ * The benefit is that when we have to track a new page table, we can keep all
+ * the kernel mappings. This speeds up context switch immensely. */
 void guest_set_pte(struct lg_cpu *cpu,
 		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
 {
-	/* Kernel mappings must be changed on all top levels. Slow, but
-	 * doesn't happen often. */
+	/* Kernel mappings must be changed on all top levels. Slow, but doesn't
+	 * happen often. */
 	if (vaddr >= cpu->lg->kernel_address) {
 		unsigned int i;
 		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
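
The hunk above shows only the kernel-mapping arm of guest_set_pte(). For completeness, a sketch of the whole dispatch; the else-branch and the find_pgdir() lookup are assumptions based on this file's conventions, not lines from this diff:

	void guest_set_pte(struct lg_cpu *cpu,
			   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
	{
		if (vaddr >= cpu->lg->kernel_address) {
			unsigned int i;
			/* Kernel mappings are shared, so touch every shadow
			 * top level we are currently tracking. */
			for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
				if (cpu->lg->pgdirs[i].pgdir)
					do_set_pte(cpu, i, vaddr, gpte);
		} else {
			/* A userspace mapping belongs to one page table;
			 * update it only if we have a shadow for it. */
			int pgdir = find_pgdir(cpu->lg, gpgdir);
			if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
				do_set_pte(cpu, pgdir, vaddr, gpte);
		}
	}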
@@ -704,12 +709,11 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
 /* We've made it through the page table code. Perhaps our tired brains are
  * still processing the details, or perhaps we're simply glad it's over.
  *
- * If nothing else, note that all this complexity in juggling shadow page
- * tables in sync with the Guest's page tables is for one reason: for most
- * Guests this page table dance determines how bad performance will be. This
- * is why Xen uses exotic direct Guest pagetable manipulation, and why both
- * Intel and AMD have implemented shadow page table support directly into
- * hardware.
+ * If nothing else, note that all this complexity in juggling shadow page tables
+ * in sync with the Guest's page tables is for one reason: for most Guests this
+ * page table dance determines how bad performance will be. This is why Xen
+ * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
+ * have implemented shadow page table support directly into hardware.
  *
  * There is just one file remaining in the Host. */
 