Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/entry_64.S   6
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c  1
-rw-r--r--  arch/powerpc/mm/slb.c            3
3 files changed, 9 insertions, 1 deletion
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 0ec134034899..148a3547c9aa 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -408,6 +408,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	std	r7,SLBSHADOW_STACKVSID(r9)	/* Save VSID */
 	std	r0,SLBSHADOW_STACKESID(r9)	/* Save ESID */
 
+	/* No need to check for CPU_FTR_NO_SLBIE_B here, since when
+	 * we have 1TB segments, the only CPUs known to have the errata
+	 * only support less than 1TB of system memory and we'll never
+	 * actually hit this code path.
+	 */
+
 	slbie	r6
 	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
 	slbmte	r7,r0
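
Review note (not part of the patch): this hunk only adds a comment; the slbie/slbmte sequence itself is unchanged. A toy standalone C sketch of the shape of the argument, with illustrative flags standing in for the kernel's feature bits: a runtime guard on the errata flag would be dead code if the comment's premise (errata CPUs never reach this path when 1TB segments are in play) holds.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's CPU feature bits. */
    static bool reaches_1t_stack_path;	/* would run the slbie/slbmte block */
    static bool has_slbie_b_errata;		/* CPU_FTR_NO_SLBIE_B analogue */

    static void stack_slb_update(void)
    {
        if (!reaches_1t_stack_path)
            return;

        /* Per the patch comment, errata CPUs never get here, so an explicit
         * feature check would never trigger; model that as an assertion. */
        assert(!has_slbie_b_errata);

        puts("slbie; slbie; slbmte  /* refresh kernel-stack SLB entry */");
    }

    int main(void)
    {
        reaches_1t_stack_path = true;	/* a non-errata machine */
        stack_slb_update();
        return 0;
    }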
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 09da90b53850..c78dc912411f 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -212,6 +212,7 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
 			return 1;
 		}
 	}
+	cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
 	return 0;
 }
 
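Review note (not part of the patch): the added line clears CPU_FTR_NO_SLBIE_B on the path where the segment-size scan falls through without finding 1TB support, apparently so the workaround bit is dropped on machines that cannot use 1TB segments anyway. A rough standalone C sketch of that control flow, with the flat-device-tree property replaced by a plain array and made-up flag macros (not the kernel API):

    #include <stdio.h>

    #define SEGSIZE_SHIFT_1T  40	/* 2^40 = 1TB segment */
    #define FTR_NO_SLBIE_B    (1UL << 0)	/* illustrative bit positions */
    #define FTR_1T_SEGMENT    (1UL << 1)

    static unsigned long cpu_features = FTR_NO_SLBIE_B;	/* set by default */

    /* Sketch of htab_dt_scan_seg_sizes(): walk the segment-size shifts the
     * firmware reports; if a 1TB segment size is present, note it and keep
     * the workaround bit, otherwise drop the workaround (mirrors the added
     * line before "return 0"). */
    static int scan_seg_sizes(const unsigned int *sizes, int n)
    {
        for (int i = 0; i < n; i++) {
            if (sizes[i] == SEGSIZE_SHIFT_1T) {
                cpu_features |= FTR_1T_SEGMENT;
                return 1;
            }
        }
        cpu_features &= ~FTR_NO_SLBIE_B;
        return 0;
    }

    int main(void)
    {
        unsigned int reported[] = { 28 };	/* only 256MB (2^28) segments */
        scan_seg_sizes(reported, 1);
        printf("NO_SLBIE_B still set: %s\n",
               (cpu_features & FTR_NO_SLBIE_B) ? "yes" : "no");
        return 0;
    }
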
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 6c164cec9d2c..bbd2c512ee05 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -157,7 +157,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
 
-	if (offset <= SLB_CACHE_ENTRIES) {
+	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
+	    offset <= SLB_CACHE_ENTRIES) {
 		int i;
 		asm volatile("isync" : : : "memory");
 		for (i = 0; i < offset; i++) {
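
Review note (not part of the patch): the widened condition keeps switch_slb() off the per-entry slbie invalidation path whenever CPU_FTR_NO_SLBIE_B is set, so affected CPUs presumably fall through to the existing full-flush branch instead. A compact standalone C sketch of that gating, with dummy routines in place of the real SLB instructions and an illustrative cache size:

    #include <stdbool.h>
    #include <stdio.h>

    #define SLB_CACHE_ENTRIES 8		/* illustrative cache size */

    static bool cpu_ftr_no_slbie_b;	/* CPU_FTR_NO_SLBIE_B stand-in */

    static void slbie_each_cached_entry(int n)
    {
        printf("slbie x%d  /* invalidate only the cached user entries */\n", n);
    }

    static void flush_whole_slb(void)
    {
        puts("slbia      /* full SLB flush fallback */");
    }

    /* Sketch of the patched condition: the cheap slbie loop runs only when
     * the SLB cache did not overflow AND the CPU can safely use slbie;
     * otherwise the whole SLB is flushed, as the overflow case already does. */
    static void switch_slb_sketch(int offset)
    {
        if (!cpu_ftr_no_slbie_b && offset <= SLB_CACHE_ENTRIES)
            slbie_each_cached_entry(offset);
        else
            flush_whole_slb();
    }

    int main(void)
    {
        cpu_ftr_no_slbie_b = true;	/* errata CPU: always take the flush path */
        switch_slb_sketch(3);
        return 0;
    }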