author	Zachary Amsden <zach@vmware.com>	2007-05-02 13:27:19 -0400
committer	Andi Kleen <andi@basil.nowhere.org>	2007-05-02 13:27:19 -0400
commit	c2c1accd4b2f9c82fb89d40611c7f581948db255 (patch)
tree	0d4c6b8c2c53ff3d5657fdabe029f14f655fdac7 /include/asm-i386/pgtable-2level.h
parent	df3624aa293dfa2d46089747d919711089a702eb (diff)
[PATCH] i386: pte clear optimization
When exiting from an address space, no special hypervisor notification of page table updates needs to occur; direct page table hypervisors, such as Xen, switch to another address space first (init_mm) and unprotect the page tables to avoid the cost of trapping to the hypervisor for each pte_clear.  Shadow mode hypervisors, such as VMI and lhype, don't need to do the extra work of calling through paravirt-ops, and can just directly clear the page table entries without notifying the hypervisor, since all the page tables are about to be freed.

So introduce native_pte_clear functions which bypass any paravirt-ops notification.  This results in a significant performance win for VMI and removes some indirect calls from zap_pte_range.

Note the 3-level paging already had a native_pte_clear function, thus demanding argument conformance and extra args for the 2-level definition.

Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andi Kleen <ak@suse.de>
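For context, a minimal sketch of how a caller tearing down an entire address space could take the non-notifying path. This is not part of the patch: the pte_clear_full() helper name and the explicit fullmm flag are illustrative assumptions only; in the kernel the real call site is zap_pte_range() via the mmu_gather teardown machinery.

/*
 * Illustrative sketch only (not from this patch): choose between the
 * paravirt-aware pte_clear() and the raw native_pte_clear() depending
 * on whether the whole mm is being torn down.  The helper name and the
 * fullmm parameter are hypothetical, for illustration.
 */
static inline void pte_clear_full(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, int fullmm)
{
	if (fullmm)
		/* Whole address space is going away: clear the pte directly,
		 * no hypervisor notification needed. */
		native_pte_clear(mm, addr, ptep);
	else
		/* Normal path: pte_clear() may route through paravirt-ops. */
		pte_clear(mm, addr, ptep);
}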
Diffstat (limited to 'include/asm-i386/pgtable-2level.h')
-rw-r--r--	include/asm-i386/pgtable-2level.h | 5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 781fe4bcc962..85d9005c0cdf 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -36,6 +36,11 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 #define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 #define pmd_clear(xp)		do { set_pmd(xp, __pmd(0)); } while (0)
 
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
+{
+	*xp = __pte(0);
+}
+
 static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 {
 	return __pte(xchg(&xp->pte_low, 0));