Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/prom.c              15
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c          1
-rw-r--r--  arch/powerpc/mm/slb.c                    3
-rw-r--r--  arch/powerpc/mm/slb_low.S                5
-rw-r--r--  arch/powerpc/platforms/pasemi/setup.c    3
-rw-r--r--  arch/powerpc/xmon/xmon.c                 2
6 files changed, 25 insertions, 4 deletions
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index acc0d247d3c3..6c2d8836f77d 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -583,6 +583,20 @@ static void __init check_cpu_pa_features(unsigned long node)
 		ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
 }
 
+#ifdef CONFIG_PPC64
+static void __init check_cpu_slb_size(unsigned long node)
+{
+	u32 *slb_size_ptr;
+
+	slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
+	if (slb_size_ptr != NULL) {
+		mmu_slb_size = *slb_size_ptr;
+	}
+}
+#else
+#define check_cpu_slb_size(node) do { } while(0)
+#endif
+
 static struct feature_property {
 	const char *name;
 	u32 min_value;
@@ -713,6 +727,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 
 	check_cpu_feature_properties(node);
 	check_cpu_pa_features(node);
+	check_cpu_slb_size(node);
 
 #ifdef CONFIG_PPC_PSERIES
 	if (nthreads > 1)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f09730bf3a33..cbbd8b0bc8f4 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -96,6 +96,7 @@ int mmu_vmalloc_psize = MMU_PAGE_4K;
 int mmu_io_psize = MMU_PAGE_4K;
 int mmu_kernel_ssize = MMU_SEGSIZE_256M;
 int mmu_highuser_ssize = MMU_SEGSIZE_256M;
+u16 mmu_slb_size = 64;
 #ifdef CONFIG_HUGETLB_PAGE
 int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 27922dff8b94..3cf0802cd2b6 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -256,6 +256,7 @@ void slb_initialize(void)
 	static int slb_encoding_inited;
 	extern unsigned int *slb_miss_kernel_load_linear;
 	extern unsigned int *slb_miss_kernel_load_io;
+	extern unsigned int *slb_compare_rr_to_size;
 
 	/* Prepare our SLB miss handler based on our page size */
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
@@ -269,6 +270,8 @@ void slb_initialize(void)
 				   SLB_VSID_KERNEL | linear_llp);
 		patch_slb_encoding(slb_miss_kernel_load_io,
 				   SLB_VSID_KERNEL | io_llp);
+		patch_slb_encoding(slb_compare_rr_to_size,
+				   mmu_slb_size);
 
 		DBG("SLB: linear LLP = %04x\n", linear_llp);
 		DBG("SLB: io LLP = %04x\n", io_llp);
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1328a81a84aa..657f6b37e9df 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -227,8 +227,9 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 
 7:	ld	r10,PACASTABRR(r13)
 	addi	r10,r10,1
-	/* use a cpu feature mask if we ever change our slb size */
-	cmpldi	r10,SLB_NUM_ENTRIES
+	/* This gets soft patched on boot. */
+_GLOBAL(slb_compare_rr_to_size)
+	cmpldi	r10,0
 
 	blt+	4f
 	li	r10,SLB_NUM_BOLTED
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 5748194a667f..6d7d068ceba0 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -36,6 +36,7 @@
 #include <asm/mpic.h>
 #include <asm/smp.h>
 #include <asm/time.h>
+#include <asm/mmu.h>
 
 #include <pcmcia/ss.h>
 #include <pcmcia/cistpl.h>
@@ -302,7 +303,7 @@ static int pas_machine_check_handler(struct pt_regs *regs)
 	int i;
 
 	printk(KERN_ERR "slb contents:\n");
-	for (i = 0; i < SLB_NUM_ENTRIES; i++) {
+	for (i = 0; i < mmu_slb_size; i++) {
 		asm volatile("slbmfee  %0,%1" : "=r" (e) : "r" (i));
 		asm volatile("slbmfev  %0,%1" : "=r" (v) : "r" (i));
 		printk(KERN_ERR "%02d %016lx %016lx\n", i, e, v);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 381d467cf55b..c60d123e9f1f 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2543,7 +2543,7 @@ static void dump_slb(void)
 
 	printf("SLB contents of cpu %x\n", smp_processor_id());
 
-	for (i = 0; i < SLB_NUM_ENTRIES; i++) {
+	for (i = 0; i < mmu_slb_size; i++) {
 		asm volatile("slbmfee  %0,%1" : "=r" (tmp) : "r" (i));
 		printf("%02d %016lx ", i, tmp);
 