aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>2010-12-01 18:30:41 -0500
committerJeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>2011-05-20 17:14:32 -0400
commit4a35c13cb808c63dd151bdd507b749e97231ef91 (patch)
tree3e51b4c615cbc3545cdcb27234e4bcc25e642154 /arch/x86
parenta99ac5e8619c27dbb8e7fb5a4e0ca8c8aa214909 (diff)
xen: condense everything onto xen_set_pte
xen_set_pte_at and xen_clear_pte are essentially identical to xen_set_pte, so just make them all common. When batched, set_pte and pte_clear are the same, but the unbatched operations must differ: they need to update the two halves of the pte in different orders. Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/xen/mmu.c73
1 file changed, 27 insertions, 46 deletions
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index fb3e92e077e2..11d7ef07d623 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -108,12 +108,6 @@ static struct {
108 108
109 u32 prot_commit; 109 u32 prot_commit;
110 u32 prot_commit_batched; 110 u32 prot_commit_batched;
111
112 u32 set_pte_at;
113 u32 set_pte_at_batched;
114 u32 set_pte_at_pinned;
115 u32 set_pte_at_current;
116 u32 set_pte_at_kernel;
117} mmu_stats; 111} mmu_stats;
118 112
119static u8 zero_stats; 113static u8 zero_stats;
@@ -334,28 +328,39 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
334 set_pte_vaddr(vaddr, mfn_pte(mfn, flags)); 328 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
335} 329}
336 330
337void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, 331static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
338 pte_t *ptep, pte_t pteval)
339{ 332{
340 ADD_STATS(set_pte_at, 1); 333 struct mmu_update u;
341// ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
342 ADD_STATS(set_pte_at_current, mm == current->mm);
343 ADD_STATS(set_pte_at_kernel, mm == &init_mm);
344 334
345 if(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { 335 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
346 struct mmu_update u; 336 return false;
347 337
348 xen_mc_batch(); 338 xen_mc_batch();
339
340 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
341 u.val = pte_val_ma(pteval);
342 xen_extend_mmu_update(&u);
349 343
350 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE; 344 xen_mc_issue(PARAVIRT_LAZY_MMU);
351 u.val = pte_val_ma(pteval); 345
352 xen_extend_mmu_update(&u); 346 return true;
347}
348
349void xen_set_pte(pte_t *ptep, pte_t pteval)
350{
351 ADD_STATS(pte_update, 1);
352// ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
353 353
354 xen_mc_issue(PARAVIRT_LAZY_MMU); 354 if (!xen_batched_set_pte(ptep, pteval))
355 } else
356 native_set_pte(ptep, pteval); 355 native_set_pte(ptep, pteval);
357} 356}
358 357
358void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
359 pte_t *ptep, pte_t pteval)
360{
361 xen_set_pte(ptep, pteval);
362}
363
359pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, 364pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
360 unsigned long addr, pte_t *ptep) 365 unsigned long addr, pte_t *ptep)
361{ 366{
@@ -611,21 +616,6 @@ void xen_set_pud(pud_t *ptr, pud_t val)
611 xen_set_pud_hyper(ptr, val); 616 xen_set_pud_hyper(ptr, val);
612} 617}
613 618
614void xen_set_pte(pte_t *ptep, pte_t pte)
615{
616 ADD_STATS(pte_update, 1);
617// ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
618 ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
619
620#ifdef CONFIG_X86_PAE
621 ptep->pte_high = pte.pte_high;
622 smp_wmb();
623 ptep->pte_low = pte.pte_low;
624#else
625 *ptep = pte;
626#endif
627}
628
629#ifdef CONFIG_X86_PAE 619#ifdef CONFIG_X86_PAE
630void xen_set_pte_atomic(pte_t *ptep, pte_t pte) 620void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
631{ 621{
@@ -634,9 +624,8 @@ void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
634 624
635void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 625void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
636{ 626{
637 ptep->pte_low = 0; 627 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
638 smp_wmb(); /* make sure low gets written first */ 628 native_pte_clear(mm, addr, ptep);
639 ptep->pte_high = 0;
640} 629}
641 630
642void xen_pmd_clear(pmd_t *pmdp) 631void xen_pmd_clear(pmd_t *pmdp)
@@ -2452,14 +2441,6 @@ static int __init xen_mmu_debugfs(void)
2452 xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug, 2441 xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
2453 mmu_stats.mmu_update_histo, 20); 2442 mmu_stats.mmu_update_histo, 20);
2454 2443
2455 debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
2456 debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
2457 &mmu_stats.set_pte_at_batched);
2458 debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
2459 &mmu_stats.set_pte_at_current);
2460 debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
2461 &mmu_stats.set_pte_at_kernel);
2462
2463 debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit); 2444 debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
2464 debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug, 2445 debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
2465 &mmu_stats.prot_commit_batched); 2446 &mmu_stats.prot_commit_batched);