Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r--   arch/x86/xen/mmu.c   31
1 file changed, 22 insertions, 9 deletions
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index d4d52f5a1cf7..688936044dc9 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -246,11 +246,21 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
 {
         unsigned long address = (unsigned long)vaddr;
         unsigned int level;
-        pte_t *pte = lookup_address(address, &level);
-        unsigned offset = address & ~PAGE_MASK;
+        pte_t *pte;
+        unsigned offset;
 
-        BUG_ON(pte == NULL);
+        /*
+         * if the PFN is in the linear mapped vaddr range, we can just use
+         * the (quick) virt_to_machine() p2m lookup
+         */
+        if (virt_addr_valid(vaddr))
+                return virt_to_machine(vaddr);
 
+        /* otherwise we have to do a (slower) full page-table walk */
+
+        pte = lookup_address(address, &level);
+        BUG_ON(pte == NULL);
+        offset = address & ~PAGE_MASK;
         return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
 }
 
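For orientation between the hunks: the "(quick) virt_to_machine() p2m lookup" that the new fast path takes resolves a linear-map address with __pa() and the phys-to-machine table. A minimal sketch of that path, assuming the pfn_to_mfn()/XMADDR() helpers from asm/xen/page.h (the function name below is made up for illustration and is not part of the patch):

/* Sketch only: linear-mapped kernel vaddr -> machine address.
 * virt_addr_valid() has already established that __pa() is meaningful here. */
static xmaddr_t linear_virt_to_machine(void *vaddr)
{
        unsigned long paddr = __pa(vaddr);                /* linear map: virt -> phys */
        unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr));  /* p2m table lookup */

        return XMADDR(((phys_addr_t)mfn << PAGE_SHIFT) + (paddr & ~PAGE_MASK));
}
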
@@ -410,7 +420,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 
         xen_mc_batch();
 
-        u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
+        u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
         u.val = pte_val_ma(pte);
         xen_extend_mmu_update(&u);
 
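The one-line change above swaps in arbitrary_virt_to_machine(), which (with the earlier hunk) still takes the quick path for linear-mapped pointers and only does the page-table walk when ptep lies outside the linear map, e.g. a kmapped highmem page table on 32-bit; that reading is mine, not spelled out in the diff. For reference, u here is Xen's mmu_update request from the public interface (include/xen/interface/xen.h):

/* ptr carries the machine address of the pte being updated, with low bits
 * reused for flags such as MMU_PT_UPDATE_PRESERVE_AD; val is the new pte. */
struct mmu_update {
        uint64_t ptr;   /* machine address of the pte, plus command flags */
        uint64_t val;   /* new contents for the pte */
};
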
@@ -840,13 +850,16 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
    read-only, and can be pinned. */
 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
+        vm_unmap_aliases();
+
         xen_mc_batch();
 
         if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
-                /* re-enable interrupts for kmap_flush_unused */
+                /* re-enable interrupts for flushing */
                 xen_mc_issue(0);
+
                 kmap_flush_unused();
-                vm_unmap_aliases();
+
                 xen_mc_batch();
         }
 
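Read together, the pinning hunk reorders work rather than adding it: the lazy-vmap alias flush now happens unconditionally before the multicall batch is opened, and the fallback inside the walk is left with only kmap_flush_unused(). A condensed view of the resulting flow, with my reading of the ordering added as comments (the comments are not part of the commit):

/* __xen_pgd_pin() after this change, condensed for orientation. */
vm_unmap_aliases();          /* drop lazily-unmapped vmalloc aliases up front;
                                this can flush TLBs, so keep it out of the batch */
xen_mc_batch();              /* start collecting hypercalls */

if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
        /* the walk hit an unpinned highmem page table, so kmaps need flushing */
        xen_mc_issue(0);     /* re-enable interrupts for flushing */
        kmap_flush_unused();
        xen_mc_batch();      /* then restart the batch */
}
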
@@ -864,7 +877,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 #else /* CONFIG_X86_32 */
 #ifdef CONFIG_X86_PAE
         /* Need to make sure unshared kernel PMD is pinnable */
-        xen_pin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
+        xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
                      PT_PMD);
 #endif
         xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
@@ -981,7 +994,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
 
 #ifdef CONFIG_X86_PAE
         /* Need to make sure unshared kernel PMD is unpinned */
-        xen_unpin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
+        xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
                        PT_PMD);
 #endif
 
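The last two hunks are the same fix applied to pin and unpin: pgd_page() already yields a struct page *, which is exactly what xen_pin_page()/xen_unpin_page() take, so the extra virt_to_page() wrapper was converting the wrong kind of value. Paraphrased from the x86 pgtable headers, types only, as a reminder:

/* pgd_page_vaddr(pgd) -> unsigned long: kernel virtual address of the page
 *                        the pgd entry points to (what virt_to_page() expects).
 * pgd_page(pgd)       -> struct page *:  the page itself, which is what
 *                        xen_pin_page()/xen_unpin_page() operate on. */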