author    David Vrabel <david.vrabel@citrix.com>    2012-07-09 06:39:05 -0400
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2012-07-19 15:51:43 -0400
commit    d095d43e78dd811d5c02c25e207c3364019b5a77 (patch)
tree      4d104a6b39487e3cddd3ba2490fe257ed3f5793d /arch/x86/xen/mmu.c
parent    37a80bf560786d96c5e8370bff45d867e43fd5c3 (diff)
xen/mm: do direct hypercall in xen_set_pte() if batching is unavailable
In xen_set_pte() if batching is unavailable (because the caller is in
an interrupt context such as handling a page fault) it would fall back
to using native_set_pte() and trapping and emulating the PTE write.
On 32-bit guests this requires two traps for each PTE write (one for
each dword of the PTE).  Instead, do one mmu_update hypercall
directly.

During construction of the initial page tables, continue to use
native_set_pte() because most of the PTEs being set are in writable
and unpinned pages (see phys_pmd_init() in arch/x86/mm/init_64.c) and
using a hypercall for this is very expensive.

This significantly improves page fault performance in 32-bit PV
guests.

lmbench3 test    Before     After      Improvement
--------------------------------------------------
lat_pagefault    3.18 us    2.32 us    27%
lat_proc fork    356 us     313.3 us   11%

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
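[Editor's note: the "two traps for each PTE write" above comes from PAE
storing a 64-bit PTE as two 32-bit words.  A minimal background sketch
of the idea, modelled on the kernel's PAE native_set_pte(); simplified
here, not the exact upstream code:]

/*
 * On a 32-bit PAE guest a 64-bit PTE is written as two 32-bit halves,
 * so a plain store to a read-only page table page traps into Xen twice.
 */
static inline void pae_set_pte_sketch(pte_t *ptep, pte_t pte)
{
        ptep->pte_high = pte.pte_high;  /* first trapped write */
        smp_wmb();                      /* order the halves for other CPUs */
        ptep->pte_low = pte.pte_low;    /* second trapped write */
}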
Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r--  arch/x86/xen/mmu.c | 30 +++++++++++++++++++++++++-----
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3a73785631ce..3f1783a79a3c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -308,8 +308,20 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
 
 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
 {
-        if (!xen_batched_set_pte(ptep, pteval))
-                native_set_pte(ptep, pteval);
+        if (!xen_batched_set_pte(ptep, pteval)) {
+                /*
+                 * Could call native_set_pte() here and trap and
+                 * emulate the PTE write but with 32-bit guests this
+                 * needs two traps (one for each of the two 32-bit
+                 * words in the PTE) so do one hypercall directly
+                 * instead.
+                 */
+                struct mmu_update u;
+
+                u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
+                u.val = pte_val_ma(pteval);
+                HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
+        }
 }
 
 static void xen_set_pte(pte_t *ptep, pte_t pteval)
@@ -1416,13 +1428,21 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 }
 #endif /* CONFIG_X86_64 */
 
-/* Init-time set_pte while constructing initial pagetables, which
-   doesn't allow RO pagetable pages to be remapped RW */
+/*
+ * Init-time set_pte while constructing initial pagetables, which
+ * doesn't allow RO page table pages to be remapped RW.
+ *
+ * Many of these PTE updates are done on unpinned and writable pages
+ * and doing a hypercall for these is unnecessary and expensive. At
+ * this point it is not possible to tell if a page is pinned or not,
+ * so always write the PTE directly and rely on Xen trapping and
+ * emulating any updates as necessary.
+ */
 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
 {
         pte = mask_rw_pte(ptep, pte);
 
-        xen_set_pte(ptep, pte);
+        native_set_pte(ptep, pte);
 }
 
 static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
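
[Editor's note: for context, the batching path that __xen_set_pte()
falls back from is the lazy-MMU multicall queue.  A rough sketch of
what xen_batched_set_pte() does, assuming the multicall helpers
xen_mc_batch(), xen_extend_mmu_update() and xen_mc_issue(); this is a
sketch of the mechanism, not the verbatim upstream function:]

/*
 * Sketch: queue the PTE update on the per-CPU multicall buffer only
 * when the task is in lazy MMU mode; otherwise report failure so the
 * caller falls back to a direct, synchronous hypercall.
 */
static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
        struct mmu_update u;

        /* Batching is only safe inside a lazy MMU section; page-fault
         * and other interrupt-context callers are not in one. */
        if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
                return false;

        xen_mc_batch();

        u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
        u.val = pte_val_ma(pteval);
        xen_extend_mmu_update(&u);

        /* Issued immediately unless still in lazy MMU mode, in which
         * case the queued updates are flushed when the section ends. */
        xen_mc_issue(PARAVIRT_LAZY_MMU);

        return true;
}

[The patch's interrupt-context fallback mirrors this body, but with a
single synchronous HYPERVISOR_mmu_update() call instead of queueing.]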