author     Benjamin Herrenschmidt <benh@kernel.crashing.org>    2007-10-29 18:46:06 -0400
committer  Josh Boyer <jwboyer@linux.vnet.ibm.com>              2007-11-01 08:15:09 -0400
commit     e701d269aa28996f3502780951fe1b12d5d66b49 (patch)
tree       a55db7df5755bf9c69f466432786de7e7e445ba8 /arch/powerpc/kernel/misc_32.S
parent     57d75561be5496289601b2c94787ec38c718fcae (diff)
[POWERPC] 4xx: Fix 4xx flush_tlb_page()
On 4xx CPUs, the current implementation of flush_tlb_page() uses a low level _tlbie() assembly function that only works for the current PID. Thus, invalidations caused by, for example, a COW fault triggered by get_user_pages() from a different context will not work properly, causing, among other things, gdb breakpoints to fail.

This patch adds a "pid" argument to _tlbie() on 4xx processors, and uses it to flush entries in the right context. FSL BookE also gets the argument but it seems they don't need it (their tlbivax form ignores the PID when invalidating, according to the document I have).

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
Diffstat (limited to 'arch/powerpc/kernel/misc_32.S')
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 23
1 file changed, 16 insertions(+), 7 deletions(-)
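The diffstat above is limited to misc_32.S, but the commit message implies a matching change on the C side so that callers pass the owning context's PID into _tlbie(). As a rough sketch of what that interface looks like after this patch (the exact prototype, header placement, and use of vma->vm_mm->context.id are assumptions for illustration, not part of this file's diff; the usual linux/mm.h definitions are assumed):

/* Sketch only: 32-bit tlbflush interface after adding the pid argument. */
extern void _tlbie(unsigned long address, unsigned int pid);

static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long vmaddr)
{
        /* Flush in the context that owns the mapping, not whatever PID
         * happens to be live in SPRN_PID, so that flushes issued from
         * another context (e.g. a COW break via get_user_pages()) hit
         * the right TLB entries. */
        _tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
}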
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 8533de50347d..0ed2c7eddc9e 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -288,7 +288,16 @@ _GLOBAL(_tlbia)
  */
 _GLOBAL(_tlbie)
 #if defined(CONFIG_40x)
+	/* We run the search with interrupts disabled because we have to change
+	 * the PID and I don't want to preempt when that happens.
+	 */
+	mfmsr	r5
+	mfspr	r6,SPRN_PID
+	wrteei	0
+	mtspr	SPRN_PID,r4
 	tlbsx.	r3, 0, r3
+	mtspr	SPRN_PID,r6
+	wrtee	r5
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
@@ -297,23 +306,23 @@ _GLOBAL(_tlbie)
 	tlbwe	r3, r3, TLB_TAG
 	isync
 10:
+
 #elif defined(CONFIG_44x)
-	mfspr	r4,SPRN_MMUCR
-	mfspr	r5,SPRN_PID			/* Get PID */
-	rlwimi	r4,r5,0,24,31			/* Set TID */
+	mfspr	r5,SPRN_MMUCR
+	rlwimi	r5,r4,0,24,31			/* Set TID */
 
 	/* We have to run the search with interrupts disabled, even critical
 	 * and debug interrupts (in fact the only critical exceptions we have
 	 * are debug and machine check). Otherwise an interrupt which causes
 	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
-	mfmsr	r5
+	mfmsr	r4
 	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
 	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
-	andc	r6,r5,r6
+	andc	r6,r4,r6
 	mtmsr	r6
-	mtspr	SPRN_MMUCR,r4
+	mtspr	SPRN_MMUCR,r5
 	tlbsx.	r3, 0, r3
-	mtmsr	r5
+	mtmsr	r4
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64,
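To make the failure mode described in the commit message concrete, here is a small self-contained user-space model (not kernel code; every name in it is invented for illustration) of a TLB whose entries are tagged by (EPN, PID), as on 40x/44x. It shows why an invalidate that implicitly searches with the current PID misses an entry owned by another context, while passing the target PID removes it:

#include <stdio.h>
#include <stdbool.h>

#define TLB_ENTRIES 64

struct tlb_entry {
        unsigned long epn;      /* effective page number */
        unsigned int pid;       /* context tag (TID)     */
        bool valid;
};

static struct tlb_entry tlb[TLB_ENTRIES];

/* tlbsx-like search: matches on both the address and the PID used to search */
static int tlb_search(unsigned long epn, unsigned int search_pid)
{
        for (int i = 0; i < TLB_ENTRIES; i++)
                if (tlb[i].valid && tlb[i].epn == epn && tlb[i].pid == search_pid)
                        return i;
        return -1;
}

/* Model of _tlbie(addr, pid): search in the given context, drop on a hit */
static void tlbie(unsigned long epn, unsigned int pid)
{
        int i = tlb_search(epn, pid);
        if (i >= 0)
                tlb[i].valid = false;
}

int main(void)
{
        unsigned int current_pid = 1, other_pid = 2;
        unsigned long epn = 0x1234;

        /* Stale entry belongs to another context (e.g. the debuggee's mm) */
        tlb[0] = (struct tlb_entry){ .epn = epn, .pid = other_pid, .valid = true };

        /* Old behaviour: search with the current PID -> no match, entry survives */
        tlbie(epn, current_pid);
        printf("flush with current PID: entry still valid = %d\n", tlb[0].valid);

        /* New behaviour: caller passes the owning context's PID -> entry dropped */
        tlbie(epn, other_pid);
        printf("flush with target PID:  entry still valid = %d\n", tlb[0].valid);
        return 0;
}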