author    Jeremy Fitzhardinge <jeremy@goop.org>  2008-05-30 20:24:27 -0400
committer Ingo Molnar <mingo@elte.hu>            2008-06-02 07:24:40 -0400
commit    e2426cf85f8db5891fb5831323d2d0c176c4dadc (patch)
tree      630dbc2032df606566f7249aa326c1e2a120c1ce /arch/x86/xen/mmu.c
parent    15ce60056b24a65b65e28de973a9fd8ac0750a2f (diff)
xen: avoid hypercalls when updating unpinned pud/pmd
When operating on an unpinned pagetable (ie, one under construction or
destruction), it isn't necessary to use a hypercall to update a
pud/pmd entry. Jan Beulich observed that a similar optimisation
avoided many thousands of hypercalls while doing a kernel build.

One tricky part is that early in the kernel boot there's no page
structure, so we can't check to see if the page is pinned. In that
case, we just always use the hypercall.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
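The resulting pattern, condensed from the hunks below (not a line-for-line
copy of the patch): each pmd/pud setter checks whether the page holding the
entry is pinned and, if not, updates it with a plain store; only pinned
pagetables take the hypercall path. The split-out _hyper variants remain
directly callable, presumably so code that cannot use virt_to_page(), such
as early boot, can take the hypercall path unconditionally.

	static bool page_pinned(void *ptr)
	{
		/* Pinned state is tracked in the pagetable page's
		   struct page flags */
		return PagePinned(virt_to_page(ptr));
	}

	void xen_set_pmd(pmd_t *ptr, pmd_t val)
	{
		if (!page_pinned(ptr)) {
			*ptr = val;	/* unpinned: plain store, no hypercall */
			return;
		}
		xen_set_pmd_hyper(ptr, val);	/* pinned: Xen must validate */
	}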
Diffstat (limited to 'arch/x86/xen/mmu.c')
 arch/x86/xen/mmu.c | 39 +++++++++++++++++++++++++++++++++++----
 1 file changed, 35 insertions(+), 4 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 17f374eb1faa..4fa0934db925 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -223,7 +223,14 @@ void make_lowmem_page_readwrite(void *vaddr)
 }
 
 
-void xen_set_pmd(pmd_t *ptr, pmd_t val)
+static bool page_pinned(void *ptr)
+{
+	struct page *page = virt_to_page(ptr);
+
+	return PagePinned(page);
+}
+
+void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 {
 	struct multicall_space mcs;
 	struct mmu_update *u;
@@ -241,6 +248,18 @@ void xen_set_pmd(pmd_t *ptr, pmd_t val)
 	preempt_enable();
 }
 
+void xen_set_pmd(pmd_t *ptr, pmd_t val)
+{
+	/* If page is not pinned, we can just update the entry
+	   directly */
+	if (!page_pinned(ptr)) {
+		*ptr = val;
+		return;
+	}
+
+	xen_set_pmd_hyper(ptr, val);
+}
+
 /*
  * Associate a virtual page frame with a given physical page frame
  * and protection flags for that frame.
@@ -348,7 +367,7 @@ pmdval_t xen_pmd_val(pmd_t pmd)
 	return ret;
 }
 
-void xen_set_pud(pud_t *ptr, pud_t val)
+void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 {
 	struct multicall_space mcs;
 	struct mmu_update *u;
@@ -366,6 +385,18 @@ void xen_set_pud(pud_t *ptr, pud_t val)
 	preempt_enable();
 }
 
+void xen_set_pud(pud_t *ptr, pud_t val)
+{
+	/* If page is not pinned, we can just update the entry
+	   directly */
+	if (!page_pinned(ptr)) {
+		*ptr = val;
+		return;
+	}
+
+	xen_set_pud_hyper(ptr, val);
+}
+
 void xen_set_pte(pte_t *ptep, pte_t pte)
 {
 	ptep->pte_high = pte.pte_high;
@@ -387,7 +418,7 @@ void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 
 void xen_pmd_clear(pmd_t *pmdp)
 {
-	xen_set_pmd(pmdp, __pmd(0));
+	set_pmd(pmdp, __pmd(0));
 }
 
 pmd_t xen_make_pmd(pmdval_t pmd)
@@ -758,7 +789,7 @@ void xen_exit_mmap(struct mm_struct *mm)
 	spin_lock(&mm->page_table_lock);
 
 	/* pgd may not be pinned in the error exit path of execve */
-	if (PagePinned(virt_to_page(mm->pgd)))
+	if (page_pinned(mm->pgd))
 		xen_pgd_unpin(mm->pgd);
 
 	spin_unlock(&mm->page_table_lock);
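For context on what the slow path costs, the first hunk elides the body of
the _hyper variants: they do not trap immediately, but queue an mmu_update
request through Xen's multicall batching machinery. A sketch of
xen_set_pmd_hyper, reconstructed from the declarations visible in that hunk;
the mmu_update field names, pmd_val_ma(), and the PARAVIRT_LAZY_MMU issue
mode are assumptions drawn from the Xen multicall API of this kernel
generation, not shown in this diff:

	void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
	{
		struct multicall_space mcs;
		struct mmu_update *u;

		preempt_disable();

		/* Reserve room for one request in the per-cpu
		   multicall batch */
		mcs = xen_mc_entry(sizeof(*u));
		u = mcs.args;

		/* Machine address of the entry to update, and its
		   new value */
		u->ptr = virt_to_machine(ptr).maddr;
		u->val = pmd_val_ma(val);
		MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

		/* Issue now, or leave queued if inside a lazy-MMU
		   batching region */
		xen_mc_issue(PARAVIRT_LAZY_MMU);

		preempt_enable();
	}

This is why skipping the hypercall for unpinned pagetables pays off: each
avoided update is a batched request that Xen would otherwise have to
validate against the pinned (read-only) pagetable.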