Diffstat (limited to 'arch/powerpc/mm/slb.c')
-rw-r--r--  arch/powerpc/mm/slb.c  46
1 file changed, 24 insertions, 22 deletions
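
The diff below does two things: switch_slb() now preloads up to three userspace segments (the PC, the stack, and a fixed exec_base of 0x10000000, where almost all PowerPC executables are linked), skipping duplicate segments via esids_match(); and patch_slb_encoding() now masks out the instruction's 16-bit immediate field before inserting the new value, which lets the new slb_set_size() re-patch the SLB-size comparison at runtime. As a rough sketch of the segment comparison involved (a simplification, not code from this patch):

/* Sketch only: two effective addresses fall in the same SLB segment
 * when their ESIDs match, which is why switch_slb() skips redundant
 * slb_allocate() calls. Assumes 256MB segments (SID_SHIFT = 28); the
 * in-tree esids_match() also handles 1T segments.
 */
#define SID_SHIFT	28
#define GET_ESID(ea)	((ea) >> SID_SHIFT)

static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
	return GET_ESID(addr1) == GET_ESID(addr2);
}
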
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index a685652effeb..1d98ecc8eecd 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -191,7 +191,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	unsigned long slbie_data = 0;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
-	unsigned long unmapped_base;
+	unsigned long exec_base;
 
 	/*
 	 * We need interrupts hard-disabled here, not just soft-disabled,
@@ -227,42 +227,44 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 
 	/*
 	 * preload some userspace segments into the SLB.
+	 * Almost all 32 and 64bit PowerPC executables are linked at
+	 * 0x10000000 so it makes sense to preload this segment.
 	 */
-	if (test_tsk_thread_flag(tsk, TIF_32BIT))
-		unmapped_base = TASK_UNMAPPED_BASE_USER32;
-	else
-		unmapped_base = TASK_UNMAPPED_BASE_USER64;
+	exec_base = 0x10000000;
 
-	if (is_kernel_addr(pc))
-		return;
-	slb_allocate(pc);
-
-	if (esids_match(pc,stack))
+	if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
+	    is_kernel_addr(exec_base))
 		return;
 
-	if (is_kernel_addr(stack))
-		return;
-	slb_allocate(stack);
+	slb_allocate(pc);
 
-	if (esids_match(pc,unmapped_base) || esids_match(stack,unmapped_base))
-		return;
+	if (!esids_match(pc, stack))
+		slb_allocate(stack);
 
-	if (is_kernel_addr(unmapped_base))
-		return;
-	slb_allocate(unmapped_base);
+	if (!esids_match(pc, exec_base) &&
+	    !esids_match(stack, exec_base))
+		slb_allocate(exec_base);
 }
 
 static inline void patch_slb_encoding(unsigned int *insn_addr,
 				      unsigned int immed)
 {
-	/* Assume the instruction had a "0" immediate value, just
-	 * "or" in the new value
-	 */
-	*insn_addr |= immed;
+	*insn_addr = (*insn_addr & 0xffff0000) | immed;
 	flush_icache_range((unsigned long)insn_addr, 4+
 			   (unsigned long)insn_addr);
 }
 
+void slb_set_size(u16 size)
+{
+	extern unsigned int *slb_compare_rr_to_size;
+
+	if (mmu_slb_size == size)
+		return;
+
+	mmu_slb_size = size;
+	patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
+}
+
 void slb_initialize(void)
 {
 	unsigned long linear_llp, vmalloc_llp, io_llp;
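
A note on the patch_slb_encoding() change above: OR-ing the immediate only works while the field is still zero, so the old code could patch an instruction once and never again. Masking with 0xffff0000 first makes the patch repeatable, which slb_set_size() depends on when the SLB size changes, for example across a partition migration. A minimal standalone sketch of the mask-and-insert technique, using a made-up instruction word rather than the kernel's slb_compare_rr_to_size patch site:

#include <stdio.h>
#include <stdint.h>

/* Replace the low 16-bit immediate field of a 32-bit instruction
 * word. Clearing the field first makes the operation idempotent,
 * unlike the old "*insn_addr |= immed", which corrupts the field on
 * any second call.
 */
static void patch_immediate(uint32_t *insn, uint16_t immed)
{
	*insn = (*insn & 0xffff0000) | immed;
	/* The kernel follows this with flush_icache_range() so the
	 * CPU refetches the rewritten instruction; a plain data write
	 * is enough for this userspace demonstration. */
}

int main(void)
{
	uint32_t insn = 0x38000040;	/* immediate field currently 64 */

	patch_immediate(&insn, 32);
	printf("0x%08x\n", insn);	/* prints 0x38000020 */

	patch_immediate(&insn, 256);	/* repatch: still correct */
	printf("0x%08x\n", insn);	/* prints 0x38000100 */
	return 0;
}

With the old OR-based version, the second call would have produced 0x38000120 instead of 0x38000100.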