author	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2010-12-02 01:57:39 -0500
committer	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2011-05-20 17:25:24 -0400
commit	4c13629f816b1aeff92971a40819b4c25b0622f5 (patch)
tree	aeeea5d4d18fb7bcac8dad54721ae10e5e27f672 /arch/x86
parent	ef691947d8a3d479e67652312783aedcf629320a (diff)
xen: make a pile of mmu pvop functions static
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/xen/mmu.c	46
-rw-r--r--	arch/x86/xen/mmu.h	37
2 files changed, 23 insertions, 60 deletions
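The change works because these hooks are never called by name from outside arch/x86/xen/mmu.c: other code reaches them only through the paravirt function-pointer table that mmu.c itself fills in, so the symbols need no external linkage and the header declarations can go. Below is a stand-alone C sketch of that pattern (illustrative only; struct mmu_ops, demo_set_val and demo_get_val are invented names, not the kernel's API):

/*
 * Minimal sketch of "static functions exposed only via an ops table".
 * All names here are hypothetical and exist only for this example.
 */
#include <stdio.h>

struct mmu_ops {
	void (*set_val)(int *slot, int val);
	int  (*get_val)(const int *slot);
};

/* static: visible only inside this translation unit */
static void demo_set_val(int *slot, int val)
{
	*slot = val;
}

static int demo_get_val(const int *slot)
{
	return *slot;
}

/* The only way other code reaches the hooks is through this table. */
static const struct mmu_ops demo_ops = {
	.set_val = demo_set_val,
	.get_val = demo_get_val,
};

int main(void)
{
	int slot = 0;

	demo_ops.set_val(&slot, 42);
	printf("%d\n", demo_ops.get_val(&slot));
	return 0;
}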
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 11d7ef07d623..a87b6b4caa74 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -283,7 +283,7 @@ static void xen_extend_mmu_update(const struct mmu_update *update)
 	*u = *update;
 }
 
-void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
+static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 {
 	struct mmu_update u;
 
@@ -303,7 +303,7 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 	preempt_enable();
 }
 
-void xen_set_pmd(pmd_t *ptr, pmd_t val)
+static void xen_set_pmd(pmd_t *ptr, pmd_t val)
 {
 	ADD_STATS(pmd_update, 1);
 
@@ -346,7 +346,7 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
 	return true;
 }
 
-void xen_set_pte(pte_t *ptep, pte_t pteval)
+static void xen_set_pte(pte_t *ptep, pte_t pteval)
 {
 	ADD_STATS(pte_update, 1);
 //	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
@@ -355,7 +355,7 @@ void xen_set_pte(pte_t *ptep, pte_t pteval)
 	native_set_pte(ptep, pteval);
 }
 
-void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
+static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 		    pte_t *ptep, pte_t pteval)
 {
 	xen_set_pte(ptep, pteval);
@@ -449,7 +449,7 @@ static pteval_t iomap_pte(pteval_t val)
 	return val;
 }
 
-pteval_t xen_pte_val(pte_t pte)
+static pteval_t xen_pte_val(pte_t pte)
 {
 	pteval_t pteval = pte.pte;
 
@@ -466,7 +466,7 @@ pteval_t xen_pte_val(pte_t pte)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
-pgdval_t xen_pgd_val(pgd_t pgd)
+static pgdval_t xen_pgd_val(pgd_t pgd)
 {
 	return pte_mfn_to_pfn(pgd.pgd);
 }
@@ -497,7 +497,7 @@ void xen_set_pat(u64 pat)
 	WARN_ON(pat != 0x0007010600070106ull);
 }
 
-pte_t xen_make_pte(pteval_t pte)
+static pte_t xen_make_pte(pteval_t pte)
 {
 	phys_addr_t addr = (pte & PTE_PFN_MASK);
 
@@ -567,20 +567,20 @@ pte_t xen_make_pte_debug(pteval_t pte)
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
 #endif
 
-pgd_t xen_make_pgd(pgdval_t pgd)
+static pgd_t xen_make_pgd(pgdval_t pgd)
 {
 	pgd = pte_pfn_to_mfn(pgd);
 	return native_make_pgd(pgd);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
 
-pmdval_t xen_pmd_val(pmd_t pmd)
+static pmdval_t xen_pmd_val(pmd_t pmd)
 {
 	return pte_mfn_to_pfn(pmd.pmd);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
 
-void xen_set_pud_hyper(pud_t *ptr, pud_t val)
+static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 {
 	struct mmu_update u;
 
@@ -600,7 +600,7 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 	preempt_enable();
 }
 
-void xen_set_pud(pud_t *ptr, pud_t val)
+static void xen_set_pud(pud_t *ptr, pud_t val)
 {
 	ADD_STATS(pud_update, 1);
 
@@ -617,24 +617,24 @@ void xen_set_pud(pud_t *ptr, pud_t val)
 }
 
 #ifdef CONFIG_X86_PAE
-void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
+static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
 	set_64bit((u64 *)ptep, native_pte_val(pte));
 }
 
-void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
 		native_pte_clear(mm, addr, ptep);
 }
 
-void xen_pmd_clear(pmd_t *pmdp)
+static void xen_pmd_clear(pmd_t *pmdp)
 {
 	set_pmd(pmdp, __pmd(0));
 }
 #endif	/* CONFIG_X86_PAE */
 
-pmd_t xen_make_pmd(pmdval_t pmd)
+static pmd_t xen_make_pmd(pmdval_t pmd)
 {
 	pmd = pte_pfn_to_mfn(pmd);
 	return native_make_pmd(pmd);
@@ -642,13 +642,13 @@ pmd_t xen_make_pmd(pmdval_t pmd)
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
 #if PAGETABLE_LEVELS == 4
-pudval_t xen_pud_val(pud_t pud)
+static pudval_t xen_pud_val(pud_t pud)
 {
 	return pte_mfn_to_pfn(pud.pud);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
 
-pud_t xen_make_pud(pudval_t pud)
+static pud_t xen_make_pud(pudval_t pud)
 {
 	pud = pte_pfn_to_mfn(pud);
 
@@ -656,7 +656,7 @@ pud_t xen_make_pud(pudval_t pud)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
 
-pgd_t *xen_get_user_pgd(pgd_t *pgd)
+static pgd_t *xen_get_user_pgd(pgd_t *pgd)
 {
 	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
 	unsigned offset = pgd - pgd_page;
@@ -688,7 +688,7 @@ static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
  * 2. It is always pinned
  * 3. It has no user pagetable attached to it
  */
-void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
+static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
 {
 	preempt_disable();
 
@@ -701,7 +701,7 @@ void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
 	preempt_enable();
 }
 
-void xen_set_pgd(pgd_t *ptr, pgd_t val)
+static void xen_set_pgd(pgd_t *ptr, pgd_t val)
 {
 	pgd_t *user_ptr = xen_get_user_pgd(ptr);
 
@@ -1122,14 +1122,14 @@ void xen_mm_unpin_all(void)
 	spin_unlock(&pgd_lock);
 }
 
-void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
+static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
 {
 	spin_lock(&next->page_table_lock);
 	xen_pgd_pin(next);
 	spin_unlock(&next->page_table_lock);
 }
 
-void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
 	spin_lock(&mm->page_table_lock);
 	xen_pgd_pin(mm);
@@ -1216,7 +1216,7 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
  * pagetable because of lazy tlb flushing. This means we need need to
  * switch all CPUs off this pagetable before we can unpin it.
  */
-void xen_exit_mmap(struct mm_struct *mm)
+static void xen_exit_mmap(struct mm_struct *mm)
 {
 	get_cpu();		/* make sure we don't move around */
 	xen_drop_mm_ref(mm);
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index 537bb9aab777..73809bb951b4 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -15,43 +15,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 
 void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
-
-void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next);
-void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
-void xen_exit_mmap(struct mm_struct *mm);
-
-pteval_t xen_pte_val(pte_t);
-pmdval_t xen_pmd_val(pmd_t);
-pgdval_t xen_pgd_val(pgd_t);
-
-pte_t xen_make_pte(pteval_t);
-pmd_t xen_make_pmd(pmdval_t);
-pgd_t xen_make_pgd(pgdval_t);
-
-void xen_set_pte(pte_t *ptep, pte_t pteval);
-void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
-		    pte_t *ptep, pte_t pteval);
-
-#ifdef CONFIG_X86_PAE
-void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
-void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-void xen_pmd_clear(pmd_t *pmdp);
-#endif	/* CONFIG_X86_PAE */
-
-void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
-void xen_set_pud(pud_t *ptr, pud_t val);
-void xen_set_pmd_hyper(pmd_t *pmdp, pmd_t pmdval);
-void xen_set_pud_hyper(pud_t *ptr, pud_t val);
-
-#if PAGETABLE_LEVELS == 4
-pudval_t xen_pud_val(pud_t pud);
-pud_t xen_make_pud(pudval_t pudval);
-void xen_set_pgd(pgd_t *pgdp, pgd_t pgd);
-void xen_set_pgd_hyper(pgd_t *pgdp, pgd_t pgd);
-#endif
-
-pgd_t *xen_get_user_pgd(pgd_t *pgd);
-
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 				  pte_t *ptep, pte_t pte);