author     Jeremy Fitzhardinge <jeremy@goop.org>    2008-08-19 16:34:22 -0400
committer  Ingo Molnar <mingo@elte.hu>              2008-08-20 06:40:08 -0400
commit     7708ad64a24a674f7905aa7a5099a50f055debec
tree       e7789c12a8fb0dd1c5d17fc89f6ee541a138211f  /arch/x86/xen/mmu.c
parent     11ad93e59d114f4b218873f1c93261be725d2e22
xen: add xen_ prefixes to make tracing with ftrace easier
It's easier to pattern match on Xen functions if they all start with xen_.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
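
The motivation is easiest to see from the tracing side: once every Xen MMU helper carries the xen_ prefix, a single wildcard in ftrace's function filter picks them all up. The sketch below is illustrative only (it is not part of this patch) and assumes debugfs is mounted at /sys/kernel/debug on a kernel built with CONFIG_FUNCTION_TRACER; it does from userspace what echo 'xen_*' > set_ftrace_filter does from a shell.

/*
 * Illustrative sketch, not part of this patch: select every xen_*
 * function for the ftrace function tracer via the glob support in
 * set_ftrace_filter.  Assumes debugfs is mounted at /sys/kernel/debug
 * and CONFIG_FUNCTION_TRACER is enabled; paths may differ.
 */
#include <stdio.h>
#include <stdlib.h>

static void write_tracing_file(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                exit(EXIT_FAILURE);
        }
        fputs(val, f);
        fclose(f);
}

int main(void)
{
        /* Trace only functions whose names start with "xen_" */
        write_tracing_file("/sys/kernel/debug/tracing/set_ftrace_filter",
                           "xen_*\n");
        /* Turn on the function tracer; output appears in .../trace */
        write_tracing_file("/sys/kernel/debug/tracing/current_tracer",
                           "function\n");
        return 0;
}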
Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r--  arch/x86/xen/mmu.c  66
1 file changed, 34 insertions(+), 32 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index d3752b6ce6e6..d9a35a363095 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -229,14 +229,14 @@ void make_lowmem_page_readwrite(void *vaddr)
 }
 
 
-static bool page_pinned(void *ptr)
+static bool xen_page_pinned(void *ptr)
 {
         struct page *page = virt_to_page(ptr);
 
         return PagePinned(page);
 }
 
-static void extend_mmu_update(const struct mmu_update *update)
+static void xen_extend_mmu_update(const struct mmu_update *update)
 {
         struct multicall_space mcs;
         struct mmu_update *u;
@@ -265,7 +265,7 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
         /* ptr may be ioremapped for 64-bit pagetable setup */
         u.ptr = arbitrary_virt_to_machine(ptr).maddr;
         u.val = pmd_val_ma(val);
-        extend_mmu_update(&u);
+        xen_extend_mmu_update(&u);
 
         xen_mc_issue(PARAVIRT_LAZY_MMU);
 
@@ -276,7 +276,7 @@ void xen_set_pmd(pmd_t *ptr, pmd_t val)
 {
         /* If page is not pinned, we can just update the entry
            directly */
-        if (!page_pinned(ptr)) {
+        if (!xen_page_pinned(ptr)) {
                 *ptr = val;
                 return;
         }
@@ -334,7 +334,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 
         u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
         u.val = pte_val_ma(pte);
-        extend_mmu_update(&u);
+        xen_extend_mmu_update(&u);
 
         xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
@@ -400,7 +400,7 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
         /* ptr may be ioremapped for 64-bit pagetable setup */
         u.ptr = arbitrary_virt_to_machine(ptr).maddr;
         u.val = pud_val_ma(val);
-        extend_mmu_update(&u);
+        xen_extend_mmu_update(&u);
 
         xen_mc_issue(PARAVIRT_LAZY_MMU);
 
@@ -411,7 +411,7 @@ void xen_set_pud(pud_t *ptr, pud_t val)
 {
         /* If page is not pinned, we can just update the entry
            directly */
-        if (!page_pinned(ptr)) {
+        if (!xen_page_pinned(ptr)) {
                 *ptr = val;
                 return;
         }
@@ -490,7 +490,7 @@ static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
 
         u.ptr = virt_to_machine(ptr).maddr;
         u.val = pgd_val_ma(val);
-        extend_mmu_update(&u);
+        xen_extend_mmu_update(&u);
 }
 
 /*
@@ -519,10 +519,10 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
 
         /* If page is not pinned, we can just update the entry
            directly */
-        if (!page_pinned(ptr)) {
+        if (!xen_page_pinned(ptr)) {
                 *ptr = val;
                 if (user_ptr) {
-                        WARN_ON(page_pinned(user_ptr));
+                        WARN_ON(xen_page_pinned(user_ptr));
                         *user_ptr = val;
                 }
                 return;
@@ -555,8 +555,8 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
  * For 64-bit, we must skip the Xen hole in the middle of the address
  * space, just after the big x86-64 virtual hole.
  */
-static int pgd_walk(pgd_t *pgd, int (*func)(struct page *, enum pt_level),
-                    unsigned long limit)
+static int xen_pgd_walk(pgd_t *pgd, int (*func)(struct page *, enum pt_level),
+                        unsigned long limit)
 {
         int flush = 0;
         unsigned hole_low, hole_high;
@@ -644,7 +644,9 @@ out:
         return flush;
 }
 
-static spinlock_t *lock_pte(struct page *page)
+/* If we're using split pte locks, then take the page's lock and
+   return a pointer to it.  Otherwise return NULL. */
+static spinlock_t *xen_pte_lock(struct page *page)
 {
         spinlock_t *ptl = NULL;
 
@@ -656,7 +658,7 @@ static spinlock_t *lock_pte(struct page *page)
         return ptl;
 }
 
-static void do_unlock(void *v)
+static void xen_pte_unlock(void *v)
 {
         spinlock_t *ptl = v;
         spin_unlock(ptl);
@@ -674,7 +676,7 @@ static void xen_do_pin(unsigned level, unsigned long pfn)
         MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 }
 
-static int pin_page(struct page *page, enum pt_level level)
+static int xen_pin_page(struct page *page, enum pt_level level)
 {
         unsigned pgfl = TestSetPagePinned(page);
         int flush;
@@ -715,7 +717,7 @@ static int pin_page(struct page *page, enum pt_level level)
                  */
                 ptl = NULL;
                 if (level == PT_PTE)
-                        ptl = lock_pte(page);
+                        ptl = xen_pte_lock(page);
 
                 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
                                         pfn_pte(pfn, PAGE_KERNEL_RO),
@@ -726,7 +728,7 @@ static int pin_page(struct page *page, enum pt_level level)
 
                         /* Queue a deferred unlock for when this batch
                            is completed. */
-                        xen_mc_callback(do_unlock, ptl);
+                        xen_mc_callback(xen_pte_unlock, ptl);
                 }
         }
 
@@ -740,7 +742,7 @@ void xen_pgd_pin(pgd_t *pgd)
 {
         xen_mc_batch();
 
-        if (pgd_walk(pgd, pin_page, USER_LIMIT)) {
+        if (xen_pgd_walk(pgd, xen_pin_page, USER_LIMIT)) {
                 /* re-enable interrupts for kmap_flush_unused */
                 xen_mc_issue(0);
                 kmap_flush_unused();
@@ -754,14 +756,14 @@ void xen_pgd_pin(pgd_t *pgd)
                 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
 
                 if (user_pgd) {
-                        pin_page(virt_to_page(user_pgd), PT_PGD);
+                        xen_pin_page(virt_to_page(user_pgd), PT_PGD);
                         xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd)));
                 }
         }
 #else /* CONFIG_X86_32 */
 #ifdef CONFIG_X86_PAE
         /* Need to make sure unshared kernel PMD is pinnable */
-        pin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
+        xen_pin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
 #endif
         xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
 #endif /* CONFIG_X86_64 */
@@ -796,7 +798,7 @@ void xen_mm_pin_all(void)
  * that's before we have page structures to store the bits.  So do all
  * the book-keeping now.
  */
-static __init int mark_pinned(struct page *page, enum pt_level level)
+static __init int xen_mark_pinned(struct page *page, enum pt_level level)
 {
         SetPagePinned(page);
         return 0;
@@ -804,10 +806,10 @@ static __init int mark_pinned(struct page *page, enum pt_level level)
 
 void __init xen_mark_init_mm_pinned(void)
 {
-        pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
+        xen_pgd_walk(init_mm.pgd, xen_mark_pinned, FIXADDR_TOP);
 }
 
-static int unpin_page(struct page *page, enum pt_level level)
+static int xen_unpin_page(struct page *page, enum pt_level level)
 {
         unsigned pgfl = TestClearPagePinned(page);
 
@@ -825,7 +827,7 @@ static int unpin_page(struct page *page, enum pt_level level)
                  * partially-pinned state.
                  */
                 if (level == PT_PTE) {
-                        ptl = lock_pte(page);
+                        ptl = xen_pte_lock(page);
 
                         if (ptl)
                                 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
@@ -839,7 +841,7 @@ static int unpin_page(struct page *page, enum pt_level level)
 
                 if (ptl) {
                         /* unlock when batch completed */
-                        xen_mc_callback(do_unlock, ptl);
+                        xen_mc_callback(xen_pte_unlock, ptl);
                 }
         }
 
@@ -859,17 +861,17 @@ static void xen_pgd_unpin(pgd_t *pgd)
 
                 if (user_pgd) {
                         xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(user_pgd)));
-                        unpin_page(virt_to_page(user_pgd), PT_PGD);
+                        xen_unpin_page(virt_to_page(user_pgd), PT_PGD);
                 }
         }
 #endif
 
 #ifdef CONFIG_X86_PAE
         /* Need to make sure unshared kernel PMD is unpinned */
-        unpin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
+        xen_unpin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
 #endif
 
-        pgd_walk(pgd, unpin_page, USER_LIMIT);
+        xen_pgd_walk(pgd, xen_unpin_page, USER_LIMIT);
 
         xen_mc_issue(0);
 }
@@ -936,7 +938,7 @@ static void drop_other_mm_ref(void *info)
         }
 }
 
-static void drop_mm_ref(struct mm_struct *mm)
+static void xen_drop_mm_ref(struct mm_struct *mm)
 {
         cpumask_t mask;
         unsigned cpu;
@@ -966,7 +968,7 @@ static void drop_mm_ref(struct mm_struct *mm)
         smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
 }
 #else
-static void drop_mm_ref(struct mm_struct *mm)
+static void xen_drop_mm_ref(struct mm_struct *mm)
 {
         if (current->active_mm == mm)
                 load_cr3(swapper_pg_dir);
@@ -990,13 +992,13 @@ static void drop_mm_ref(struct mm_struct *mm)
 void xen_exit_mmap(struct mm_struct *mm)
 {
         get_cpu();              /* make sure we don't move around */
-        drop_mm_ref(mm);
+        xen_drop_mm_ref(mm);
         put_cpu();
 
         spin_lock(&mm->page_table_lock);
 
         /* pgd may not be pinned in the error exit path of execve */
-        if (page_pinned(mm->pgd))
+        if (xen_page_pinned(mm->pgd))
                 xen_pgd_unpin(mm->pgd);
 
         spin_unlock(&mm->page_table_lock);