Diffstat (limited to 'drivers/lguest/page_tables.c'):
 drivers/lguest/page_tables.c | 75 +++++++++++++++++++++++++++++++-----------
 1 file changed, 59 insertions(+), 16 deletions(-)
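
Note (not part of the patch): the diff below makes demand_page() report Guest accesses that fall between pfn_limit and device_limit through a new *iomem out-parameter instead of mapping them, and splits guest_pa() into a non-killing __guest_pa(). As a rough sketch of how a caller outside this file might consume the new convention; handle_guest_fault(), launcher_emulate_mmio() and reflect_page_fault_to_guest() are hypothetical names used only for illustration:

/*
 * Sketch only: a hypothetical page-fault path using the new
 * demand_page() signature introduced by this patch.
 */
static void handle_guest_fault(struct lg_cpu *cpu, unsigned long vaddr,
			       int errcode)
{
	unsigned long iomem;

	/* Returns true if the fault was fixed up by mapping the page. */
	if (demand_page(cpu, vaddr, errcode, &iomem))
		return;

	/*
	 * False with iomem set: the Guest touched the window between
	 * pfn_limit and device_limit, so the Launcher must emulate the
	 * access at that guest-physical address (hypothetical helper).
	 */
	if (iomem) {
		launcher_emulate_mmio(cpu, iomem);
		return;
	}

	/* False with iomem == 0: a real fault to reflect into the Guest. */
	reflect_page_fault_to_guest(cpu, vaddr, errcode);
}
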
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index e8b55c3a6170..e3abebc912c0 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -250,6 +250,16 @@ static void release_pte(pte_t pte)
 }
 /*:*/
 
+static bool gpte_in_iomem(struct lg_cpu *cpu, pte_t gpte)
+{
+	/* We don't handle large pages. */
+	if (pte_flags(gpte) & _PAGE_PSE)
+		return false;
+
+	return (pte_pfn(gpte) >= cpu->lg->pfn_limit
+		&& pte_pfn(gpte) < cpu->lg->device_limit);
+}
+
 static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
 {
 	if ((pte_flags(gpte) & _PAGE_PSE) ||
@@ -374,8 +384,14 @@ static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
  *
  * If we fixed up the fault (ie. we mapped the address), this routine returns
  * true. Otherwise, it was a real fault and we need to tell the Guest.
+ *
+ * There's a corner case: they're trying to access memory between
+ * pfn_limit and device_limit, which is I/O memory. In this case, we
+ * return false and set @iomem to the physical address, so the
+ * Launcher can handle the instruction manually.
  */
-bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
+bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode,
+		 unsigned long *iomem)
 {
 	unsigned long gpte_ptr;
 	pte_t gpte;
@@ -383,6 +399,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 	pmd_t gpmd;
 	pgd_t gpgd;
 
+	*iomem = 0;
+
 	/* We never demand page the Switcher, so trying is a mistake. */
 	if (vaddr >= switcher_addr)
 		return false;
@@ -459,6 +477,12 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
 		return false;
 
+	/* If they're accessing io memory, we expect a fault. */
+	if (gpte_in_iomem(cpu, gpte)) {
+		*iomem = (pte_pfn(gpte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
+		return false;
+	}
+
 	/*
 	 * Check that the Guest PTE flags are OK, and the page number is below
 	 * the pfn_limit (ie. not mapping the Launcher binary).
@@ -553,7 +577,9 @@ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
  */
 void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
 {
-	if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
+	unsigned long iomem;
+
+	if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2, &iomem))
 		kill_guest(cpu, "bad stack page %#lx", vaddr);
 }
 /*:*/
@@ -647,7 +673,7 @@ void guest_pagetable_flush_user(struct lg_cpu *cpu)
 /*:*/
 
 /* We walk down the guest page tables to get a guest-physical address */
-unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
+bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr)
 {
 	pgd_t gpgd;
 	pte_t gpte;
@@ -656,31 +682,47 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
 #endif
 
 	/* Still not set up? Just map 1:1. */
-	if (unlikely(cpu->linear_pages))
-		return vaddr;
+	if (unlikely(cpu->linear_pages)) {
+		*paddr = vaddr;
+		return true;
+	}
 
 	/* First step: get the top-level Guest page table entry. */
 	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
 	/* Toplevel not present? We can't map it in. */
-	if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
-		kill_guest(cpu, "Bad address %#lx", vaddr);
-		return -1UL;
-	}
+	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
+		goto fail;
 
 #ifdef CONFIG_X86_PAE
 	gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
-	if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) {
-		kill_guest(cpu, "Bad address %#lx", vaddr);
-		return -1UL;
-	}
+	if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+		goto fail;
 	gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
 #else
 	gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
 #endif
 	if (!(pte_flags(gpte) & _PAGE_PRESENT))
-		kill_guest(cpu, "Bad address %#lx", vaddr);
+		goto fail;
+
+	*paddr = pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
+	return true;
+
+fail:
+	*paddr = -1UL;
+	return false;
+}
 
-	return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
+/*
+ * This is the version we normally use: kills the Guest if it uses a
+ * bad address
+ */
+unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
+{
+	unsigned long paddr;
+
+	if (!__guest_pa(cpu, vaddr, &paddr))
+		kill_guest(cpu, "Bad address %#lx", vaddr);
+	return paddr;
 }
 
 /*
@@ -912,7 +954,8 @@ static void __guest_set_pte(struct lg_cpu *cpu, int idx,
 			 * now. This shaves 10% off a copy-on-write
 			 * micro-benchmark.
 			 */
-			if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
+			if ((pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED))
+			    && !gpte_in_iomem(cpu, gpte)) {
 				if (!check_gpte(cpu, gpte))
 					return;
 				set_pte(spte,