Diffstat (limited to 'arch/powerpc/mm/slb.c')

 -rw-r--r--  arch/powerpc/mm/slb.c | 83
 1 file changed, 50 insertions(+), 33 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 5b7038f248b6..1d98ecc8eecd 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -92,15 +92,13 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
              : "memory" );
 }
 
-void slb_flush_and_rebolt(void)
+static void __slb_flush_and_rebolt(void)
 {
         /* If you change this make sure you change SLB_NUM_BOLTED
          * appropriately too. */
         unsigned long linear_llp, vmalloc_llp, lflags, vflags;
         unsigned long ksp_esid_data, ksp_vsid_data;
 
-        WARN_ON(!irqs_disabled());
-
         linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
         vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
         lflags = SLB_VSID_KERNEL | linear_llp;
@@ -117,12 +115,6 @@ void slb_flush_and_rebolt(void)
                 ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
         }
 
-        /*
-         * We can't take a PMU exception in the following code, so hard
-         * disable interrupts.
-         */
-        hard_irq_disable();
-
         /* We need to do this all in asm, so we're sure we don't touch
          * the stack between the slbia and rebolting it. */
         asm volatile("isync\n"
@@ -139,6 +131,21 @@ void slb_flush_and_rebolt(void)
              : "memory");
 }
 
+void slb_flush_and_rebolt(void)
+{
+
+        WARN_ON(!irqs_disabled());
+
+        /*
+         * We can't take a PMU exception in the following code, so hard
+         * disable interrupts.
+         */
+        hard_irq_disable();
+
+        __slb_flush_and_rebolt();
+        get_paca()->slb_cache_ptr = 0;
+}
+
 void slb_vmalloc_update(void)
 {
         unsigned long vflags;
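
Taken together, the three hunks above split the old slb_flush_and_rebolt() in two: __slb_flush_and_rebolt() does the actual flush and rebolt and assumes its caller has already hard-disabled interrupts, while the slb_flush_and_rebolt() wrapper keeps the WARN_ON()/hard_irq_disable() discipline for external callers and additionally resets the PACA's slb_cache_ptr, since a full flush leaves the cached user entries stale. A minimal userspace model of that split, purely illustrative: the boolean flag, the variable and the printouts stand in for hard_irq_disable(), the PACA field and the real slbia/rebolt asm.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool irqs_hard_disabled;
    static unsigned long slb_cache_ptr = 7;  /* stand-in for paca->slb_cache_ptr */

    static void __flush_and_rebolt(void)
    {
            /* Worker: assumes the caller already hard-disabled interrupts. */
            assert(irqs_hard_disabled);
            puts("slbia; rebolt kernel segments");
    }

    static void flush_and_rebolt(void)
    {
            irqs_hard_disabled = true;  /* models hard_irq_disable() */
            __flush_and_rebolt();
            slb_cache_ptr = 0;          /* whole SLB was flushed: cache is stale */
    }

    int main(void)
    {
            flush_and_rebolt();
            printf("slb_cache_ptr = %lu\n", slb_cache_ptr);
            return 0;
    }
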
@@ -180,12 +187,20 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
-        unsigned long offset = get_paca()->slb_cache_ptr;
+        unsigned long offset;
         unsigned long slbie_data = 0;
         unsigned long pc = KSTK_EIP(tsk);
         unsigned long stack = KSTK_ESP(tsk);
-        unsigned long unmapped_base;
+        unsigned long exec_base;
 
+        /*
+         * We need interrupts hard-disabled here, not just soft-disabled,
+         * so that a PMU interrupt can't occur, which might try to access
+         * user memory (to get a stack trace) and possibly cause an SLB miss
+         * which would update the slb_cache/slb_cache_ptr fields in the PACA.
+         */
+        hard_irq_disable();
+        offset = get_paca()->slb_cache_ptr;
         if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
             offset <= SLB_CACHE_ENTRIES) {
                 int i;
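
The comment added here carries the central reasoning of the patch: soft-disabling interrupts does not stop a PMU exception from being taken, so the PACA fields could change under switch_slb(). A hypothetical interleaving, with illustrative values only:

    1. switch_slb() reads offset = slb_cache_ptr (say 3) with interrupts
       merely soft-disabled.
    2. A PMU exception arrives anyway and its handler walks the user
       stack to record a sample.
    3. That user access misses in the SLB; the miss handler appends to
       slb_cache[] and advances slb_cache_ptr to 4.
    4. switch_slb() resumes and invalidates only the first 3 cached
       entries, leaving a stale user segment visible to the next task.

Reading slb_cache_ptr only after hard_irq_disable() closes this window.
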
@@ -200,7 +215,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
                 }
                 asm volatile("isync" : : : "memory");
         } else {
-                slb_flush_and_rebolt();
+                __slb_flush_and_rebolt();
         }
 
         /* Workaround POWER5 < DD2.1 issue */
@@ -212,28 +227,22 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 
         /*
          * preload some userspace segments into the SLB.
+         * Almost all 32 and 64bit PowerPC executables are linked at
+         * 0x10000000 so it makes sense to preload this segment.
          */
-        if (test_tsk_thread_flag(tsk, TIF_32BIT))
-                unmapped_base = TASK_UNMAPPED_BASE_USER32;
-        else
-                unmapped_base = TASK_UNMAPPED_BASE_USER64;
+        exec_base = 0x10000000;
 
-        if (is_kernel_addr(pc))
-                return;
-        slb_allocate(pc);
-
-        if (esids_match(pc,stack))
+        if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
+            is_kernel_addr(exec_base))
                 return;
 
-        if (is_kernel_addr(stack))
-                return;
-        slb_allocate(stack);
+        slb_allocate(pc);
 
-        if (esids_match(pc,unmapped_base) || esids_match(stack,unmapped_base))
-                return;
+        if (!esids_match(pc, stack))
+                slb_allocate(stack);
 
-        if (is_kernel_addr(unmapped_base))
-                return;
-        slb_allocate(unmapped_base);
+        if (!esids_match(pc, exec_base) &&
+            !esids_match(stack, exec_base))
+                slb_allocate(exec_base);
 }
 
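
The preload logic is also reworked: rather than choosing a per-ABI TASK_UNMAPPED_BASE, it preloads the segment of the near-universal executable link address 0x10000000, and it replaces the chain of early returns with explicit deduplication via esids_match(). A small userspace sketch of that deduplication, assuming 256MB segments only (the kernel's esids_match() also handles 1TB segments) and invented addresses:

    #include <stdio.h>

    #define SID_SHIFT 28    /* 256MB segment */

    static int esids_match(unsigned long a, unsigned long b)
    {
            return (a >> SID_SHIFT) == (b >> SID_SHIFT);
    }

    int main(void)
    {
            unsigned long exec_base = 0x10000000;   /* common link address */
            unsigned long pc    = 0x10001a2c;       /* same segment as exec_base */
            unsigned long stack = 0xffffa000;       /* its own segment */

            printf("preload pc segment\n");
            if (!esids_match(pc, stack))
                    printf("preload stack segment\n");
            if (!esids_match(pc, exec_base) && !esids_match(stack, exec_base))
                    printf("preload exec_base segment\n");
            return 0;
    }

With these values the third preload is skipped because pc already covers the exec_base segment, which is exactly the redundancy the esids_match() checks exist to avoid.
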
@@ -240,14 +249,22 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 static inline void patch_slb_encoding(unsigned int *insn_addr,
                                       unsigned int immed)
 {
-        /* Assume the instruction had a "0" immediate value, just
-         * "or" in the new value
-         */
-        *insn_addr |= immed;
+        *insn_addr = (*insn_addr & 0xffff0000) | immed;
         flush_icache_range((unsigned long)insn_addr, 4+
                            (unsigned long)insn_addr);
 }
 
+void slb_set_size(u16 size)
+{
+        extern unsigned int *slb_compare_rr_to_size;
+
+        if (mmu_slb_size == size)
+                return;
+
+        mmu_slb_size = size;
+        patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
+}
+
 void slb_initialize(void)
 {
         unsigned long linear_llp, vmalloc_llp, io_llp;
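
Two related fixes close out the patch. patch_slb_encoding() used to assume the immediate field it patched was still zero and simply OR'ed in the new value, which corrupts the instruction the second time it is called; the new version masks the old immediate out first. That idempotence is what lets the new slb_set_size() repatch the SLB-size comparison whenever mmu_slb_size changes (the early return merely avoids a pointless icache flush). A standalone demonstration: 0x28230000 is taken here to be cmpldi r3,0, though only its zero low 16 bits matter, and 64 and 32 are arbitrary sizes.

    #include <stdint.h>
    #include <stdio.h>

    /* Old behaviour: correct only if the immediate field is still zero. */
    static uint32_t patch_or(uint32_t insn, uint16_t immed)
    {
            return insn | immed;
    }

    /* New behaviour: clear the 16-bit immediate field, then insert. */
    static uint32_t patch_mask(uint32_t insn, uint16_t immed)
    {
            return (insn & 0xffff0000) | immed;
    }

    int main(void)
    {
            uint32_t insn = 0x28230000;     /* immediate field is zero */

            insn = patch_or(insn, 64);
            insn = patch_or(insn, 32);      /* 0x40 | 0x20 = 0x60: corrupted */
            printf("or-patched twice:   0x%08x\n", insn);

            insn = 0x28230000;
            insn = patch_mask(insn, 64);
            insn = patch_mask(insn, 32);    /* low half is 0x20, as intended */
            printf("mask-patched twice: 0x%08x\n", insn);
            return 0;
    }

Being able to call slb_set_size() repeatedly matters because the supported SLB size can change over the life of the system, one likely caller being partition migration between machines with different SLB sizes.
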