author	Olof Johansson <olof@lixom.net>	2007-10-15 10:58:59 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-10-17 08:30:09 -0400
commit	f66bce5e6aa1388289c04496c3fcae7bebf5f905 (patch)
tree	7e788739a51947f1caff47f9b5226cad739e3805 /arch/powerpc/mm
parent	8129535b6bcf40be62af2ae6b9234494f39725dd (diff)
[POWERPC] Add 1TB workaround for PA6T
PA6T has a bug where the slbie instruction does not honor the large segment bit. As a result, we always have to use slbia when switching context.

We don't have to worry about changing the slbie's during fault processing, since they should never be replacing one VSID with another using the same ESID; i.e. there is no risk of inserting duplicate entries due to a failed slbie of the old entry. So as long as we clear out the SLB on context switch, we should be fine.

Signed-off-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
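The effect of the workaround is easiest to see in switch_slb(): when CPU_FTR_NO_SLBIE_B is set, the per-entry slbie fast path is skipped and the code falls back to a full SLB flush via slbia. Below is a minimal, standalone sketch of that decision, assuming illustrative placeholder names (has_no_slbie_b_bug, invalidate_entry, flush_all_segments) rather than the kernel's own helpers.

#include <stdbool.h>
#include <stdio.h>

#define SLB_CACHE_ENTRIES 8	/* illustrative size; mirrors the role of the kernel constant */

/* Placeholder for cpu_has_feature(CPU_FTR_NO_SLBIE_B); true on PA6T. */
static bool has_no_slbie_b_bug = true;

/* Stand-ins for slbie of one cached entry and slbia of the whole SLB. */
static void invalidate_entry(int i)  { printf("slbie entry %d\n", i); }
static void flush_all_segments(void) { printf("slbia: flush entire SLB\n"); }

/* Models the context-switch decision added by this patch. */
static void switch_context_flush(int cached_entries)
{
	if (!has_no_slbie_b_bug && cached_entries <= SLB_CACHE_ENTRIES) {
		/* Fast path: selectively invalidate the few cached user entries. */
		for (int i = 0; i < cached_entries; i++)
			invalidate_entry(i);
	} else {
		/*
		 * PA6T workaround: slbie does not honor the large segment bit,
		 * so a targeted invalidate could leave a stale 1TB-segment
		 * entry behind.  Wipe the whole SLB instead.
		 */
		flush_all_segments();
	}
}

int main(void)
{
	switch_context_flush(3);	/* on PA6T this takes the full-flush path */
	return 0;
}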
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c	1
-rw-r--r--	arch/powerpc/mm/slb.c	3
2 files changed, 3 insertions, 1 deletion
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 09da90b53850..c78dc912411f 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -212,6 +212,7 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
 			return 1;
 		}
 	}
+	cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
 	return 0;
 }
 
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 6c164cec9d2c..bbd2c512ee05 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -157,7 +157,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
 
-	if (offset <= SLB_CACHE_ENTRIES) {
+	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
+	    offset <= SLB_CACHE_ENTRIES) {
 		int i;
 		asm volatile("isync" : : : "memory");
 		for (i = 0; i < offset; i++) {