about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy@goop.org>2008-05-30 20:24:27 -0400
committerIngo Molnar <mingo@elte.hu>2008-06-02 07:24:40 -0400
commite2426cf85f8db5891fb5831323d2d0c176c4dadc (patch)
tree630dbc2032df606566f7249aa326c1e2a120c1ce
parent15ce60056b24a65b65e28de973a9fd8ac0750a2f (diff)
xen: avoid hypercalls when updating unpinned pud/pmd
When operating on an unpinned pagetable (ie, one under construction or destruction), it isn't necessary to use a hypercall to update a pud/pmd entry. Jan Beulich observed that a similar optimisation avoided many thousands of hypercalls while doing a kernel build.

One tricky part is that early in the kernel boot there's no page structure, so we can't check to see if the page is pinned. In that case, we just always use the hypercall.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--arch/x86/xen/enlighten.c14
-rw-r--r--arch/x86/xen/mmu.c39
-rw-r--r--arch/x86/xen/mmu.h8
3 files changed, 50 insertions, 11 deletions
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b94f63ac228b..ed9f04b3836d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -901,6 +901,14 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
901 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(base))); 901 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(base)));
902} 902}
903 903
904static __init void xen_post_allocator_init(void)
905{
906 pv_mmu_ops.set_pmd = xen_set_pmd;
907 pv_mmu_ops.set_pud = xen_set_pud;
908
909 xen_mark_init_mm_pinned();
910}
911
904/* This is called once we have the cpu_possible_map */ 912/* This is called once we have the cpu_possible_map */
905void xen_setup_vcpu_info_placement(void) 913void xen_setup_vcpu_info_placement(void)
906{ 914{
@@ -988,7 +996,7 @@ static const struct pv_init_ops xen_init_ops __initdata = {
988 .banner = xen_banner, 996 .banner = xen_banner,
989 .memory_setup = xen_memory_setup, 997 .memory_setup = xen_memory_setup,
990 .arch_setup = xen_arch_setup, 998 .arch_setup = xen_arch_setup,
991 .post_allocator_init = xen_mark_init_mm_pinned, 999 .post_allocator_init = xen_post_allocator_init,
992}; 1000};
993 1001
994static const struct pv_time_ops xen_time_ops __initdata = { 1002static const struct pv_time_ops xen_time_ops __initdata = {
@@ -1100,7 +1108,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
1100 1108
1101 .set_pte = NULL, /* see xen_pagetable_setup_* */ 1109 .set_pte = NULL, /* see xen_pagetable_setup_* */
1102 .set_pte_at = xen_set_pte_at, 1110 .set_pte_at = xen_set_pte_at,
1103 .set_pmd = xen_set_pmd, 1111 .set_pmd = xen_set_pmd_hyper,
1104 1112
1105 .pte_val = xen_pte_val, 1113 .pte_val = xen_pte_val,
1106 .pte_flags = native_pte_val, 1114 .pte_flags = native_pte_val,
@@ -1111,7 +1119,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
1111 1119
1112 .set_pte_atomic = xen_set_pte_atomic, 1120 .set_pte_atomic = xen_set_pte_atomic,
1113 .set_pte_present = xen_set_pte_at, 1121 .set_pte_present = xen_set_pte_at,
1114 .set_pud = xen_set_pud, 1122 .set_pud = xen_set_pud_hyper,
1115 .pte_clear = xen_pte_clear, 1123 .pte_clear = xen_pte_clear,
1116 .pmd_clear = xen_pmd_clear, 1124 .pmd_clear = xen_pmd_clear,
1117 1125
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 17f374eb1faa..4fa0934db925 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -223,7 +223,14 @@ void make_lowmem_page_readwrite(void *vaddr)
223} 223}
224 224
225 225
226void xen_set_pmd(pmd_t *ptr, pmd_t val) 226static bool page_pinned(void *ptr)
227{
228 struct page *page = virt_to_page(ptr);
229
230 return PagePinned(page);
231}
232
233void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
227{ 234{
228 struct multicall_space mcs; 235 struct multicall_space mcs;
229 struct mmu_update *u; 236 struct mmu_update *u;
@@ -241,6 +248,18 @@ void xen_set_pmd(pmd_t *ptr, pmd_t val)
241 preempt_enable(); 248 preempt_enable();
242} 249}
243 250
251void xen_set_pmd(pmd_t *ptr, pmd_t val)
252{
253 /* If page is not pinned, we can just update the entry
254 directly */
255 if (!page_pinned(ptr)) {
256 *ptr = val;
257 return;
258 }
259
260 xen_set_pmd_hyper(ptr, val);
261}
262
244/* 263/*
245 * Associate a virtual page frame with a given physical page frame 264 * Associate a virtual page frame with a given physical page frame
246 * and protection flags for that frame. 265 * and protection flags for that frame.
@@ -348,7 +367,7 @@ pmdval_t xen_pmd_val(pmd_t pmd)
348 return ret; 367 return ret;
349} 368}
350 369
351void xen_set_pud(pud_t *ptr, pud_t val) 370void xen_set_pud_hyper(pud_t *ptr, pud_t val)
352{ 371{
353 struct multicall_space mcs; 372 struct multicall_space mcs;
354 struct mmu_update *u; 373 struct mmu_update *u;
@@ -366,6 +385,18 @@ void xen_set_pud(pud_t *ptr, pud_t val)
366 preempt_enable(); 385 preempt_enable();
367} 386}
368 387
388void xen_set_pud(pud_t *ptr, pud_t val)
389{
390 /* If page is not pinned, we can just update the entry
391 directly */
392 if (!page_pinned(ptr)) {
393 *ptr = val;
394 return;
395 }
396
397 xen_set_pud_hyper(ptr, val);
398}
399
369void xen_set_pte(pte_t *ptep, pte_t pte) 400void xen_set_pte(pte_t *ptep, pte_t pte)
370{ 401{
371 ptep->pte_high = pte.pte_high; 402 ptep->pte_high = pte.pte_high;
@@ -387,7 +418,7 @@ void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
387 418
388void xen_pmd_clear(pmd_t *pmdp) 419void xen_pmd_clear(pmd_t *pmdp)
389{ 420{
390 xen_set_pmd(pmdp, __pmd(0)); 421 set_pmd(pmdp, __pmd(0));
391} 422}
392 423
393pmd_t xen_make_pmd(pmdval_t pmd) 424pmd_t xen_make_pmd(pmdval_t pmd)
@@ -758,7 +789,7 @@ void xen_exit_mmap(struct mm_struct *mm)
758 spin_lock(&mm->page_table_lock); 789 spin_lock(&mm->page_table_lock);
759 790
760 /* pgd may not be pinned in the error exit path of execve */ 791 /* pgd may not be pinned in the error exit path of execve */
761 if (PagePinned(virt_to_page(mm->pgd))) 792 if (page_pinned(mm->pgd))
762 xen_pgd_unpin(mm->pgd); 793 xen_pgd_unpin(mm->pgd);
763 794
764 spin_unlock(&mm->page_table_lock); 795 spin_unlock(&mm->page_table_lock);
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index 5fe961caffd4..e3dd09e25c63 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -25,10 +25,6 @@ enum pt_level {
25 25
26void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); 26void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
27 27
28void xen_set_pte(pte_t *ptep, pte_t pteval);
29void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
30 pte_t *ptep, pte_t pteval);
31void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
32 28
33void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next); 29void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next);
34void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm); 30void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
@@ -45,10 +41,14 @@ pte_t xen_make_pte(pteval_t);
45pmd_t xen_make_pmd(pmdval_t); 41pmd_t xen_make_pmd(pmdval_t);
46pgd_t xen_make_pgd(pgdval_t); 42pgd_t xen_make_pgd(pgdval_t);
47 43
44void xen_set_pte(pte_t *ptep, pte_t pteval);
48void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, 45void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
49 pte_t *ptep, pte_t pteval); 46 pte_t *ptep, pte_t pteval);
50void xen_set_pte_atomic(pte_t *ptep, pte_t pte); 47void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
48void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
51void xen_set_pud(pud_t *ptr, pud_t val); 49void xen_set_pud(pud_t *ptr, pud_t val);
50void xen_set_pmd_hyper(pmd_t *pmdp, pmd_t pmdval);
51void xen_set_pud_hyper(pud_t *ptr, pud_t val);
52void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); 52void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
53void xen_pmd_clear(pmd_t *pmdp); 53void xen_pmd_clear(pmd_t *pmdp);
54 54