about | summary | refs | log | tree | commit | diff | stats
path: root/arch/powerpc/mm
diff options
context:
space:
mode:
author: Michael Neuling <mikey@neuling.org> 2007-12-06 01:24:48 -0500
committer: Paul Mackerras <paulus@samba.org> 2007-12-10 21:45:56 -0500
commit: 584f8b71a2e8abdaeb4b6f4fddaf542b61392453 (patch)
tree: c14f26334e3a3524046f0790f96564a4a4f22d92 /arch/powerpc/mm
parent: 44ef339073f67d4abcc62ae52a5fbc069d7a4d29 (diff)
[POWERPC] Use SLB size from the device tree
Currently we hardwire the number of SLBs to 64, but PAPR says we should use the ibm,slb-size property to obtain the number of SLB entries.

This uses this property instead of assuming 64. If no property is found, we assume 64 entries as before.

This soft patches the SLB handler, so it shouldn't change performance at all.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 1
-rw-r--r--  arch/powerpc/mm/slb.c           | 3
-rw-r--r--  arch/powerpc/mm/slb_low.S       | 5
3 files changed, 7 insertions, 2 deletions
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f09730bf3a33..cbbd8b0bc8f4 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -96,6 +96,7 @@ int mmu_vmalloc_psize = MMU_PAGE_4K;
 int mmu_io_psize = MMU_PAGE_4K;
 int mmu_kernel_ssize = MMU_SEGSIZE_256M;
 int mmu_highuser_ssize = MMU_SEGSIZE_256M;
+u16 mmu_slb_size = 64;
 #ifdef CONFIG_HUGETLB_PAGE
 int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 27922dff8b94..3cf0802cd2b6 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -256,6 +256,7 @@ void slb_initialize(void)
 	static int slb_encoding_inited;
 	extern unsigned int *slb_miss_kernel_load_linear;
 	extern unsigned int *slb_miss_kernel_load_io;
+	extern unsigned int *slb_compare_rr_to_size;

 	/* Prepare our SLB miss handler based on our page size */
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
@@ -269,6 +270,8 @@ void slb_initialize(void)
 				   SLB_VSID_KERNEL | linear_llp);
 		patch_slb_encoding(slb_miss_kernel_load_io,
 				   SLB_VSID_KERNEL | io_llp);
+		patch_slb_encoding(slb_compare_rr_to_size,
+				   mmu_slb_size);

 		DBG("SLB: linear LLP = %04x\n", linear_llp);
 		DBG("SLB: io LLP = %04x\n", io_llp);
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1328a81a84aa..657f6b37e9df 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -227,8 +227,9 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)

7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
-	/* use a cpu feature mask if we ever change our slb size */
-	cmpldi	r10,SLB_NUM_ENTRIES
+	/* This gets soft patched on boot. */
+_GLOBAL(slb_compare_rr_to_size)
+	cmpldi	r10,0

	blt+	4f
	li	r10,SLB_NUM_BOLTED