author		Paul Mackerras <paulus@samba.org>	2008-06-18 01:29:12 -0400
committer	Paul Mackerras <paulus@samba.org>	2008-06-30 21:27:57 -0400
commit		3a8247cc2c856930f34eafce33f6a039227ee175 (patch)
tree		aa8599cdf09893f1150a2bc137878d8b8a661780 /arch/powerpc/mm/hash_utils_64.c
parent		e952e6c4d6635b36c212c056a9427bd93460178c (diff)
powerpc: Only demote individual slices rather than whole process
At present, if we have a kernel with a 64kB page size, and some
process maps something that has to be mapped with 4kB pages (such as a
cache-inhibited mapping on POWER5+, or the eHCA InfiniBand queue-pair
pages), we change the process to use 4kB pages everywhere. This hurts
the performance of HPC programs that access eHCA from userspace.
With this patch, the kernel will only demote the slice(s) containing
the eHCA or cache-inhibited mappings, leaving the remaining slices
able to use 64kB hardware pages.
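
For reference, each context packs its per-slice page sizes into 64-bit
masks, four bits per slice; the new get_paca_psize() in the diff below
simply indexes into those masks. The standalone user-space sketch below
mirrors that lookup. The shift constants (256MB low slices below 4GB,
1TB high slices above) and the 4-bit values chosen for 4K and 64K are
assumptions for illustration, not the kernel's definitions.

/* Standalone sketch (not kernel code) of the per-slice psize lookup. */
#include <stdio.h>
#include <stdint.h>

#define SLICE_LOW_SHIFT		28		/* 256MB per low slice (assumed) */
#define SLICE_HIGH_SHIFT	40		/* 1TB per high slice (assumed) */
#define SLICE_LOW_TOP		0x100000000ull	/* low slices end at 4GB */

/* Each 64-bit mask packs sixteen 4-bit page-size indices. */
static unsigned int slice_psize(uint64_t low_mask, uint64_t high_mask,
				uint64_t addr)
{
	uint64_t slices;
	unsigned int index;

	if (addr < SLICE_LOW_TOP) {
		slices = low_mask;
		index = addr >> SLICE_LOW_SHIFT;
	} else {
		slices = high_mask;
		index = addr >> SLICE_HIGH_SHIFT;
	}
	return (slices >> (index * 4)) & 0xF;
}

int main(void)
{
	/* Hypothetical encoding for this example: 0 = 4K, 1 = 64K.
	 * Start with every low slice set to 64K. */
	uint64_t low = 0x1111111111111111ull;

	/* Demote only slice 2 (0x20000000-0x2fffffff) to 4K, the way a
	 * one-slice slice_set_range_psize() call would. */
	low &= ~(0xFull << (2 * 4));

	printf("psize at 0x10000000: %u\n", slice_psize(low, 0, 0x10000000));
	printf("psize at 0x20000000: %u\n", slice_psize(low, 0, 0x20000000));
	return 0;
}

Running this prints 1 (64K) for the untouched slice and 0 (4K) for the
demoted one, while every other slice keeps its 64K encoding.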
This also changes the slice_get_unmapped_area code so that it is
willing to place a 64k-page mapping into (or across) a 4k-page slice
if there is no better alternative, i.e. if the program specified
MAP_FIXED or if there is not sufficient space available in slices that
are either empty or already have 64k-page mappings in them.
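
slice_get_unmapped_area() itself lives in arch/powerpc/mm/slice.c and
is not part of this file's diff, so the following is only a sketch of
the placement policy described above; the function name
slice_candidates() and the one-bit-per-slice mask representation are
hypothetical, not the kernel's implementation.

/* Runnable sketch of the placement policy, 16-bit mask = 16 low slices. */
#include <stdio.h>
#include <stdint.h>

/* Return a mask of slices acceptable for a new 64K mapping. */
static uint16_t slice_candidates(uint16_t free_mask, uint16_t mask_64k,
				 uint16_t mask_4k, int map_fixed)
{
	uint16_t good = free_mask | mask_64k;

	/* MAP_FIXED: the address is mandated, so any slice is fair game,
	 * including 4K ones. */
	if (map_fixed)
		return good | mask_4k;
	/* Otherwise prefer empty or 64K slices; fall back to 4K slices
	 * only when no better alternative exists. */
	return good ? good : mask_4k;
}

int main(void)
{
	/* Slices 0-1 free, slice 2 demoted to 4K, the rest 64K-mapped. */
	uint16_t free_mask = 0x0003, mask_64k = 0xFFF8, mask_4k = 0x0004;

	printf("preferred: 0x%04x\n",
	       slice_candidates(free_mask, mask_64k, mask_4k, 0));
	/* With no free or 64K slices left, the 4K slice becomes eligible. */
	printf("fallback:  0x%04x\n",
	       slice_candidates(0, 0, mask_4k, 0));
	return 0;
}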
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/mm/hash_utils_64.c')
 arch/powerpc/mm/hash_utils_64.c | 36 +++++++++++++++++++++++++++---------
 1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index bf5b6d7ed30f..8d3b58ebd38e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -695,6 +695,28 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
 	return pp;
 }
 
+#ifdef CONFIG_PPC_MM_SLICES
+unsigned int get_paca_psize(unsigned long addr)
+{
+	unsigned long index, slices;
+
+	if (addr < SLICE_LOW_TOP) {
+		slices = get_paca()->context.low_slices_psize;
+		index = GET_LOW_SLICE_INDEX(addr);
+	} else {
+		slices = get_paca()->context.high_slices_psize;
+		index = GET_HIGH_SLICE_INDEX(addr);
+	}
+	return (slices >> (index * 4)) & 0xF;
+}
+
+#else
+unsigned int get_paca_psize(unsigned long addr)
+{
+	return get_paca()->context.user_psize;
+}
+#endif
+
 /*
  * Demote a segment to using 4k pages.
  * For now this makes the whole process use 4k pages.
@@ -702,13 +724,13 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
 #ifdef CONFIG_PPC_64K_PAGES
 void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
 {
-	if (mm->context.user_psize == MMU_PAGE_4K)
+	if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
 		return;
-	slice_set_user_psize(mm, MMU_PAGE_4K);
+	slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
 #ifdef CONFIG_SPU_BASE
 	spu_flush_all_slbs(mm);
 #endif
-	if (get_paca()->context.user_psize != MMU_PAGE_4K) {
+	if (get_paca_psize(addr) != MMU_PAGE_4K) {
 		get_paca()->context = mm->context;
 		slb_flush_and_rebolt();
 	}
@@ -792,11 +814,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		DBG_LOW(" user region with no mm !\n");
 		return 1;
 	}
-#ifdef CONFIG_PPC_MM_SLICES
 	psize = get_slice_psize(mm, ea);
-#else
-	psize = mm->context.user_psize;
-#endif
 	ssize = user_segment_size(ea);
 	vsid = get_vsid(mm->context.id, ea, ssize);
 	break;
@@ -868,7 +886,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	/* Do actual hashing */
 #ifdef CONFIG_PPC_64K_PAGES
 	/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
-	if (pte_val(*ptep) & _PAGE_4K_PFN) {
+	if ((pte_val(*ptep) & _PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
 		demote_segment_4k(mm, ea);
 		psize = MMU_PAGE_4K;
 	}
@@ -897,7 +915,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		}
 	}
 	if (user_region) {
-		if (psize != get_paca()->context.user_psize) {
+		if (psize != get_paca_psize(ea)) {
 			get_paca()->context = mm->context;
 			slb_flush_and_rebolt();
 		}