aboutsummaryrefslogtreecommitdiffstats
path: root/include/asm-powerpc/page_64.h
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2008-06-18 01:29:12 -0400
committerPaul Mackerras <paulus@samba.org>2008-06-30 21:27:57 -0400
commit3a8247cc2c856930f34eafce33f6a039227ee175 (patch)
treeaa8599cdf09893f1150a2bc137878d8b8a661780 /include/asm-powerpc/page_64.h
parente952e6c4d6635b36c212c056a9427bd93460178c (diff)
powerpc: Only demote individual slices rather than whole process
At present, if we have a kernel with a 64kB page size, and some process maps something that has to be mapped with 4kB pages (such as a cache-inhibited mapping on POWER5+, or the eHCA infiniband queue-pair pages), we change the process to use 4kB pages everywhere. This hurts the performance of HPC programs that access eHCA from userspace. With this patch, the kernel will only demote the slice(s) containing the eHCA or cache-inhibited mappings, leaving the remaining slices able to use 64kB hardware pages. This also changes the slice_get_unmapped_area code so that it is willing to place a 64k-page mapping into (or across) a 4k-page slice if there is no better alternative, i.e. if the program specified MAP_FIXED or if there is not sufficient space available in slices that are either empty or already have 64k-page mappings in them. Signed-off-by: Paul Mackerras <paulus@samba.org> Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'include/asm-powerpc/page_64.h')
-rw-r--r--include/asm-powerpc/page_64.h6
1 files changed, 6 insertions, 0 deletions
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 25af4fc8daf4..02fd80710e9d 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -126,16 +126,22 @@ extern unsigned int get_slice_psize(struct mm_struct *mm,
 
 extern void slice_init_context(struct mm_struct *mm, unsigned int psize);
 extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
+extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
+				  unsigned long len, unsigned int psize);
+
 #define slice_mm_new_context(mm)	((mm)->context.id == 0)
 
 #endif /* __ASSEMBLY__ */
 #else
 #define slice_init()
+#define get_slice_psize(mm, addr)	((mm)->context.user_psize)
 #define slice_set_user_psize(mm, psize)	\
 do {						\
 	(mm)->context.user_psize = (psize);	\
 	(mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
 } while (0)
+#define slice_set_range_psize(mm, start, len, psize)	\
+	slice_set_user_psize((mm), (psize))
 #define slice_mm_new_context(mm)	1
 #endif /* CONFIG_PPC_MM_SLICES */
 