author	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2010-12-01 18:13:34 -0500
committer	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2011-05-20 17:14:31 -0400
commit	a99ac5e8619c27dbb8e7fb5a4e0ca8c8aa214909 (patch)
tree	3ebc55308915871b7589e9a430b7eddc5d1df26e /arch/x86
parent	331468b11b94428a9eb2ed8b3240c17612533a99 (diff)
xen: use mmu_update for xen_set_pte_at()
In principle update_va_mapping is a good match for set_pte_at, since it gets the address being mapped, which allows Xen to use its linear pagetable mapping.

However, that assumes that the pmd for the address is attached to the current pagetable, which may not be true for a given user address space because the kernel pmd is not shared (at least on 32-bit guests). Normally the kernel will automatically sync a missing part of the pagetable with the init_mm pagetable transparently via faults, but that fails when a missing address is passed to Xen.

And while the linear pagetable mapping is very useful for 32-bit Xen (as it avoids an explicit domain mapping), 32-bit Xen is deprecated. 64-bit Xen has all memory mapped all the time, so it makes no real difference.

The upshot is that we should use mmu_update, since it can operate on non-current pagetables or detached pagetables.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
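As a sketch of the distinction (illustrative only, not part of this patch; the helper name is made up, though HYPERVISOR_mmu_update(), virt_to_machine(), MMU_NORMAL_PT_UPDATE and DOMID_SELF are the real Xen interfaces):

#include <asm/xen/hypercall.h>	/* HYPERVISOR_mmu_update() */
#include <asm/xen/page.h>	/* virt_to_machine(), pte_val_ma() */
#include <xen/interface/xen.h>	/* struct mmu_update, MMU_NORMAL_PT_UPDATE */

/*
 * Hypothetical helper: update a PTE via the mmu_update hypercall.
 * mmu_update is keyed on the machine address of the PTE slot itself,
 * so Xen never has to walk the current pagetable to find it -- unlike
 * update_va_mapping, which is keyed on the virtual address being
 * mapped and therefore needs the pmd for that address to be reachable
 * from the current pagetable.
 */
static int set_pte_via_mmu_update(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);

	/* One update, issued synchronously, on our own domain. */
	return HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
}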
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/xen/mmu.c	26
1 file changed, 11 insertions(+), 15 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 4f5e0dc5f6e5..fb3e92e077e2 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -342,22 +342,18 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 	ADD_STATS(set_pte_at_current, mm == current->mm);
 	ADD_STATS(set_pte_at_kernel, mm == &init_mm);
 
-	if (mm == current->mm || mm == &init_mm) {
-		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-			struct multicall_space mcs;
-			mcs = xen_mc_entry(0);
+	if(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+		struct mmu_update u;
+
+		xen_mc_batch();
 
-			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
-			ADD_STATS(set_pte_at_batched, 1);
-			xen_mc_issue(PARAVIRT_LAZY_MMU);
-			goto out;
-		} else
-			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
-				goto out;
-	}
-	xen_set_pte(ptep, pteval);
-
-out:	return;
+		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
+		u.val = pte_val_ma(pteval);
+		xen_extend_mmu_update(&u);
+
+		xen_mc_issue(PARAVIRT_LAZY_MMU);
+	} else
+		native_set_pte(ptep, pteval);
 }
 
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
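For reference, the new code path in xen_set_pte_at() after this patch, reconstructed from the hunk above (the comments are added here for explanation and are not in the source):

	if(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		struct mmu_update u;

		/* Open a multicall batch so the update can be queued
		 * and coalesced with others while in lazy MMU mode. */
		xen_mc_batch();

		/* Key on the machine address of the PTE slot itself,
		 * so this works on non-current or detached pagetables. */
		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);
		xen_extend_mmu_update(&u);

		/* Flushes immediately unless we are in lazy MMU mode,
		 * in which case the update stays queued in the batch. */
		xen_mc_issue(PARAVIRT_LAZY_MMU);
	} else
		native_set_pte(ptep, pteval);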