diff options
author | Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> | 2010-12-02 01:57:39 -0500 |
---|---|---|
committer | Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> | 2011-05-20 17:25:24 -0400 |
commit | 4c13629f816b1aeff92971a40819b4c25b0622f5 (patch) | |
tree | aeeea5d4d18fb7bcac8dad54721ae10e5e27f672 | |
parent | ef691947d8a3d479e67652312783aedcf629320a (diff) |
xen: make a pile of mmu pvop functions static
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
-rw-r--r-- | arch/x86/xen/mmu.c | 46 | ||||
-rw-r--r-- | arch/x86/xen/mmu.h | 37 |
2 files changed, 23 insertions, 60 deletions
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 11d7ef07d623..a87b6b4caa74 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -283,7 +283,7 @@ static void xen_extend_mmu_update(const struct mmu_update *update) | |||
283 | *u = *update; | 283 | *u = *update; |
284 | } | 284 | } |
285 | 285 | ||
286 | void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) | 286 | static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) |
287 | { | 287 | { |
288 | struct mmu_update u; | 288 | struct mmu_update u; |
289 | 289 | ||
@@ -303,7 +303,7 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) | |||
303 | preempt_enable(); | 303 | preempt_enable(); |
304 | } | 304 | } |
305 | 305 | ||
306 | void xen_set_pmd(pmd_t *ptr, pmd_t val) | 306 | static void xen_set_pmd(pmd_t *ptr, pmd_t val) |
307 | { | 307 | { |
308 | ADD_STATS(pmd_update, 1); | 308 | ADD_STATS(pmd_update, 1); |
309 | 309 | ||
@@ -346,7 +346,7 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval) | |||
346 | return true; | 346 | return true; |
347 | } | 347 | } |
348 | 348 | ||
349 | void xen_set_pte(pte_t *ptep, pte_t pteval) | 349 | static void xen_set_pte(pte_t *ptep, pte_t pteval) |
350 | { | 350 | { |
351 | ADD_STATS(pte_update, 1); | 351 | ADD_STATS(pte_update, 1); |
352 | // ADD_STATS(pte_update_pinned, xen_page_pinned(ptep)); | 352 | // ADD_STATS(pte_update_pinned, xen_page_pinned(ptep)); |
@@ -355,7 +355,7 @@ void xen_set_pte(pte_t *ptep, pte_t pteval) | |||
355 | native_set_pte(ptep, pteval); | 355 | native_set_pte(ptep, pteval); |
356 | } | 356 | } |
357 | 357 | ||
358 | void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, | 358 | static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, |
359 | pte_t *ptep, pte_t pteval) | 359 | pte_t *ptep, pte_t pteval) |
360 | { | 360 | { |
361 | xen_set_pte(ptep, pteval); | 361 | xen_set_pte(ptep, pteval); |
@@ -449,7 +449,7 @@ static pteval_t iomap_pte(pteval_t val) | |||
449 | return val; | 449 | return val; |
450 | } | 450 | } |
451 | 451 | ||
452 | pteval_t xen_pte_val(pte_t pte) | 452 | static pteval_t xen_pte_val(pte_t pte) |
453 | { | 453 | { |
454 | pteval_t pteval = pte.pte; | 454 | pteval_t pteval = pte.pte; |
455 | 455 | ||
@@ -466,7 +466,7 @@ pteval_t xen_pte_val(pte_t pte) | |||
466 | } | 466 | } |
467 | PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); | 467 | PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); |
468 | 468 | ||
469 | pgdval_t xen_pgd_val(pgd_t pgd) | 469 | static pgdval_t xen_pgd_val(pgd_t pgd) |
470 | { | 470 | { |
471 | return pte_mfn_to_pfn(pgd.pgd); | 471 | return pte_mfn_to_pfn(pgd.pgd); |
472 | } | 472 | } |
@@ -497,7 +497,7 @@ void xen_set_pat(u64 pat) | |||
497 | WARN_ON(pat != 0x0007010600070106ull); | 497 | WARN_ON(pat != 0x0007010600070106ull); |
498 | } | 498 | } |
499 | 499 | ||
500 | pte_t xen_make_pte(pteval_t pte) | 500 | static pte_t xen_make_pte(pteval_t pte) |
501 | { | 501 | { |
502 | phys_addr_t addr = (pte & PTE_PFN_MASK); | 502 | phys_addr_t addr = (pte & PTE_PFN_MASK); |
503 | 503 | ||
@@ -567,20 +567,20 @@ pte_t xen_make_pte_debug(pteval_t pte) | |||
567 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug); | 567 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug); |
568 | #endif | 568 | #endif |
569 | 569 | ||
570 | pgd_t xen_make_pgd(pgdval_t pgd) | 570 | static pgd_t xen_make_pgd(pgdval_t pgd) |
571 | { | 571 | { |
572 | pgd = pte_pfn_to_mfn(pgd); | 572 | pgd = pte_pfn_to_mfn(pgd); |
573 | return native_make_pgd(pgd); | 573 | return native_make_pgd(pgd); |
574 | } | 574 | } |
575 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd); | 575 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd); |
576 | 576 | ||
577 | pmdval_t xen_pmd_val(pmd_t pmd) | 577 | static pmdval_t xen_pmd_val(pmd_t pmd) |
578 | { | 578 | { |
579 | return pte_mfn_to_pfn(pmd.pmd); | 579 | return pte_mfn_to_pfn(pmd.pmd); |
580 | } | 580 | } |
581 | PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val); | 581 | PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val); |
582 | 582 | ||
583 | void xen_set_pud_hyper(pud_t *ptr, pud_t val) | 583 | static void xen_set_pud_hyper(pud_t *ptr, pud_t val) |
584 | { | 584 | { |
585 | struct mmu_update u; | 585 | struct mmu_update u; |
586 | 586 | ||
@@ -600,7 +600,7 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val) | |||
600 | preempt_enable(); | 600 | preempt_enable(); |
601 | } | 601 | } |
602 | 602 | ||
603 | void xen_set_pud(pud_t *ptr, pud_t val) | 603 | static void xen_set_pud(pud_t *ptr, pud_t val) |
604 | { | 604 | { |
605 | ADD_STATS(pud_update, 1); | 605 | ADD_STATS(pud_update, 1); |
606 | 606 | ||
@@ -617,24 +617,24 @@ void xen_set_pud(pud_t *ptr, pud_t val) | |||
617 | } | 617 | } |
618 | 618 | ||
619 | #ifdef CONFIG_X86_PAE | 619 | #ifdef CONFIG_X86_PAE |
620 | void xen_set_pte_atomic(pte_t *ptep, pte_t pte) | 620 | static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) |
621 | { | 621 | { |
622 | set_64bit((u64 *)ptep, native_pte_val(pte)); | 622 | set_64bit((u64 *)ptep, native_pte_val(pte)); |
623 | } | 623 | } |
624 | 624 | ||
625 | void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 625 | static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
626 | { | 626 | { |
627 | if (!xen_batched_set_pte(ptep, native_make_pte(0))) | 627 | if (!xen_batched_set_pte(ptep, native_make_pte(0))) |
628 | native_pte_clear(mm, addr, ptep); | 628 | native_pte_clear(mm, addr, ptep); |
629 | } | 629 | } |
630 | 630 | ||
631 | void xen_pmd_clear(pmd_t *pmdp) | 631 | static void xen_pmd_clear(pmd_t *pmdp) |
632 | { | 632 | { |
633 | set_pmd(pmdp, __pmd(0)); | 633 | set_pmd(pmdp, __pmd(0)); |
634 | } | 634 | } |
635 | #endif /* CONFIG_X86_PAE */ | 635 | #endif /* CONFIG_X86_PAE */ |
636 | 636 | ||
637 | pmd_t xen_make_pmd(pmdval_t pmd) | 637 | static pmd_t xen_make_pmd(pmdval_t pmd) |
638 | { | 638 | { |
639 | pmd = pte_pfn_to_mfn(pmd); | 639 | pmd = pte_pfn_to_mfn(pmd); |
640 | return native_make_pmd(pmd); | 640 | return native_make_pmd(pmd); |
@@ -642,13 +642,13 @@ pmd_t xen_make_pmd(pmdval_t pmd) | |||
642 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); | 642 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); |
643 | 643 | ||
644 | #if PAGETABLE_LEVELS == 4 | 644 | #if PAGETABLE_LEVELS == 4 |
645 | pudval_t xen_pud_val(pud_t pud) | 645 | static pudval_t xen_pud_val(pud_t pud) |
646 | { | 646 | { |
647 | return pte_mfn_to_pfn(pud.pud); | 647 | return pte_mfn_to_pfn(pud.pud); |
648 | } | 648 | } |
649 | PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); | 649 | PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); |
650 | 650 | ||
651 | pud_t xen_make_pud(pudval_t pud) | 651 | static pud_t xen_make_pud(pudval_t pud) |
652 | { | 652 | { |
653 | pud = pte_pfn_to_mfn(pud); | 653 | pud = pte_pfn_to_mfn(pud); |
654 | 654 | ||
@@ -656,7 +656,7 @@ pud_t xen_make_pud(pudval_t pud) | |||
656 | } | 656 | } |
657 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud); | 657 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud); |
658 | 658 | ||
659 | pgd_t *xen_get_user_pgd(pgd_t *pgd) | 659 | static pgd_t *xen_get_user_pgd(pgd_t *pgd) |
660 | { | 660 | { |
661 | pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); | 661 | pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); |
662 | unsigned offset = pgd - pgd_page; | 662 | unsigned offset = pgd - pgd_page; |
@@ -688,7 +688,7 @@ static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) | |||
688 | * 2. It is always pinned | 688 | * 2. It is always pinned |
689 | * 3. It has no user pagetable attached to it | 689 | * 3. It has no user pagetable attached to it |
690 | */ | 690 | */ |
691 | void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) | 691 | static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) |
692 | { | 692 | { |
693 | preempt_disable(); | 693 | preempt_disable(); |
694 | 694 | ||
@@ -701,7 +701,7 @@ void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) | |||
701 | preempt_enable(); | 701 | preempt_enable(); |
702 | } | 702 | } |
703 | 703 | ||
704 | void xen_set_pgd(pgd_t *ptr, pgd_t val) | 704 | static void xen_set_pgd(pgd_t *ptr, pgd_t val) |
705 | { | 705 | { |
706 | pgd_t *user_ptr = xen_get_user_pgd(ptr); | 706 | pgd_t *user_ptr = xen_get_user_pgd(ptr); |
707 | 707 | ||
@@ -1122,14 +1122,14 @@ void xen_mm_unpin_all(void) | |||
1122 | spin_unlock(&pgd_lock); | 1122 | spin_unlock(&pgd_lock); |
1123 | } | 1123 | } |
1124 | 1124 | ||
1125 | void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) | 1125 | static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) |
1126 | { | 1126 | { |
1127 | spin_lock(&next->page_table_lock); | 1127 | spin_lock(&next->page_table_lock); |
1128 | xen_pgd_pin(next); | 1128 | xen_pgd_pin(next); |
1129 | spin_unlock(&next->page_table_lock); | 1129 | spin_unlock(&next->page_table_lock); |
1130 | } | 1130 | } |
1131 | 1131 | ||
1132 | void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) | 1132 | static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) |
1133 | { | 1133 | { |
1134 | spin_lock(&mm->page_table_lock); | 1134 | spin_lock(&mm->page_table_lock); |
1135 | xen_pgd_pin(mm); | 1135 | xen_pgd_pin(mm); |
@@ -1216,7 +1216,7 @@ static void xen_drop_mm_ref(struct mm_struct *mm) | |||
1216 | * pagetable because of lazy tlb flushing. This means we need to | 1216 | * pagetable because of lazy tlb flushing. This means we need to |
1217 | * switch all CPUs off this pagetable before we can unpin it. | 1217 | * switch all CPUs off this pagetable before we can unpin it. |
1218 | */ | 1218 | */ |
1219 | void xen_exit_mmap(struct mm_struct *mm) | 1219 | static void xen_exit_mmap(struct mm_struct *mm) |
1220 | { | 1220 | { |
1221 | get_cpu(); /* make sure we don't move around */ | 1221 | get_cpu(); /* make sure we don't move around */ |
1222 | xen_drop_mm_ref(mm); | 1222 | xen_drop_mm_ref(mm); |
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h index 537bb9aab777..73809bb951b4 100644 --- a/arch/x86/xen/mmu.h +++ b/arch/x86/xen/mmu.h | |||
@@ -15,43 +15,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); | |||
15 | 15 | ||
16 | void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); | 16 | void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); |
17 | 17 | ||
18 | |||
19 | void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next); | ||
20 | void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm); | ||
21 | void xen_exit_mmap(struct mm_struct *mm); | ||
22 | |||
23 | pteval_t xen_pte_val(pte_t); | ||
24 | pmdval_t xen_pmd_val(pmd_t); | ||
25 | pgdval_t xen_pgd_val(pgd_t); | ||
26 | |||
27 | pte_t xen_make_pte(pteval_t); | ||
28 | pmd_t xen_make_pmd(pmdval_t); | ||
29 | pgd_t xen_make_pgd(pgdval_t); | ||
30 | |||
31 | void xen_set_pte(pte_t *ptep, pte_t pteval); | ||
32 | void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, | ||
33 | pte_t *ptep, pte_t pteval); | ||
34 | |||
35 | #ifdef CONFIG_X86_PAE | ||
36 | void xen_set_pte_atomic(pte_t *ptep, pte_t pte); | ||
37 | void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | ||
38 | void xen_pmd_clear(pmd_t *pmdp); | ||
39 | #endif /* CONFIG_X86_PAE */ | ||
40 | |||
41 | void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval); | ||
42 | void xen_set_pud(pud_t *ptr, pud_t val); | ||
43 | void xen_set_pmd_hyper(pmd_t *pmdp, pmd_t pmdval); | ||
44 | void xen_set_pud_hyper(pud_t *ptr, pud_t val); | ||
45 | |||
46 | #if PAGETABLE_LEVELS == 4 | ||
47 | pudval_t xen_pud_val(pud_t pud); | ||
48 | pud_t xen_make_pud(pudval_t pudval); | ||
49 | void xen_set_pgd(pgd_t *pgdp, pgd_t pgd); | ||
50 | void xen_set_pgd_hyper(pgd_t *pgdp, pgd_t pgd); | ||
51 | #endif | ||
52 | |||
53 | pgd_t *xen_get_user_pgd(pgd_t *pgd); | ||
54 | |||
55 | pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | 18 | pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep); |
56 | void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, | 19 | void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, |
57 | pte_t *ptep, pte_t pte); | 20 | pte_t *ptep, pte_t pte); |