Diffstat (limited to 'arch/powerpc/kernel/misc_32.S')

-rw-r--r--  arch/powerpc/kernel/misc_32.S | 32 +++++++++++++++++++++++++-------
 1 file changed, 25 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 8533de50347d..8b642ab26d37 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -288,7 +288,16 @@ _GLOBAL(_tlbia)
  */
 _GLOBAL(_tlbie)
 #if defined(CONFIG_40x)
+	/* We run the search with interrupts disabled because we have to change
+	 * the PID and I don't want to preempt when that happens.
+	 */
+	mfmsr	r5
+	mfspr	r6,SPRN_PID
+	wrteei	0
+	mtspr	SPRN_PID,r4
 	tlbsx.	r3, 0, r3
+	mtspr	SPRN_PID,r6
+	wrtee	r5
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
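The new 40x sequence follows the classic save/disable/switch/restore pattern around a critical section: SPRN_PID is temporarily switched to the PID passed in r4 so the search runs in the right context, and external interrupts stay off so nothing preempts while the wrong PID is live. A minimal C sketch of the same flow, assuming hypothetical mfmsr()/wrteei()/wrtee() and PID-accessor wrappers for the raw instructions (the assembly above is the authoritative code):

	/* Sketch only: every helper below is a hypothetical stand-in for
	 * the corresponding instruction in the 40x hunk above. */
	extern unsigned long mfmsr(void);		/* mfmsr  r5 */
	extern void wrteei(int enable);			/* wrteei 0  */
	extern void wrtee(unsigned long msr);		/* wrtee  r5: restore MSR[EE] */
	extern unsigned long mfspr_pid(void);		/* mfspr  r6,SPRN_PID */
	extern void mtspr_pid(unsigned long pid);	/* mtspr  SPRN_PID,rX */
	extern void tlb_search_invalidate(unsigned long va);	/* tlbsx. + tlbwe */

	void tlbie_40x_sketch(unsigned long va, unsigned long pid)
	{
		unsigned long msr = mfmsr();		/* save MSR before touching EE */
		unsigned long old_pid = mfspr_pid();	/* save the current PID */

		wrteei(0);			/* no preemption while PID is wrong */
		mtspr_pid(pid);			/* search in the caller's context */
		tlb_search_invalidate(va);
		mtspr_pid(old_pid);		/* put the previous PID back */
		wrtee(msr);			/* restore the saved MSR[EE] */
	}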
@@ -297,23 +306,23 @@ _GLOBAL(_tlbie)
 	tlbwe	r3, r3, TLB_TAG
 	isync
 10:
+
 #elif defined(CONFIG_44x)
-	mfspr	r4,SPRN_MMUCR
-	mfspr	r5,SPRN_PID			/* Get PID */
-	rlwimi	r4,r5,0,24,31			/* Set TID */
+	mfspr	r5,SPRN_MMUCR
+	rlwimi	r5,r4,0,24,31			/* Set TID */
 
 	/* We have to run the search with interrupts disabled, even critical
 	 * and debug interrupts (in fact the only critical exceptions we have
 	 * are debug and machine check).  Otherwise an interrupt which causes
 	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
-	mfmsr	r5
+	mfmsr	r4
 	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
 	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
-	andc	r6,r5,r6
+	andc	r6,r4,r6
 	mtmsr	r6
-	mtspr	SPRN_MMUCR,r4
+	mtspr	SPRN_MMUCR,r5
 	tlbsx.	r3, 0, r3
-	mtmsr	r5
+	mtmsr	r4
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64,
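One detail in the 44x path worth spelling out: the lis/addi pair is the standard PowerPC idiom for building a 32-bit constant in a register, and the @ha ("high adjusted") half, rather than the plain high half, compensates for the sign extension that addi applies to its 16-bit immediate. A small self-checking C sketch of that arithmetic, assuming the usual Book-E MSR bit values (MSR_EE=0x8000, MSR_CE=0x20000, MSR_ME=0x1000, MSR_DE=0x200, so the mask is 0x00029200):

	#include <assert.h>

	int main(void)
	{
		unsigned int mask = 0x00029200;	/* MSR_EE|MSR_CE|MSR_ME|MSR_DE */
		unsigned int lo = mask & 0xffff;	/* 0x9200: sign bit is set */
		unsigned int ha = (mask >> 16) + (lo >= 0x8000);	/* 0x0003 */

		/* lis  r6,mask@ha   ->  r6 = 0x00030000
		 * addi r6,r6,mask@l ->  adds sign-extended 0x9200, i.e. 0xffff9200 */
		unsigned int r6 = (ha << 16) + (unsigned int)(int)(short)lo;
		assert(r6 == mask);	/* 0x00030000 + 0xffff9200 == 0x00029200 */
		return 0;
	}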
@@ -534,12 +543,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	addi	r3,r3,L1_CACHE_BYTES
 	bdnz	0b
 	sync
+#ifndef CONFIG_44x
+	/* We don't flush the icache on 44x. Those have a virtual icache
+	 * and we don't have access to the virtual address here (it's
+	 * not the page vaddr but where it's mapped in user space). The
+	 * flushing of the icache on these is handled elsewhere, when
+	 * a change in the address space occurs, before returning to
+	 * user space
+	 */
 	mtctr	r4
1:	icbi	0,r6
 	addi	r6,r6,L1_CACHE_BYTES
 	bdnz	1b
 	sync
 	isync
+#endif /* CONFIG_44x */
 	blr
 
 /*
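For context, the hunk above sits in a page-flush routine: one loop cleans the data cache line by line, then, except on 44x, a second loop invalidates the instruction cache over the same range. A rough C rendering of that control flow, assuming hypothetical dcache_clean()/icbi()/sync()/isync() wrappers for the cache instructions and a 32-byte L1 line (both assumptions, not the kernel's API):

	#define L1_CACHE_BYTES 32	/* assumption: 32-byte lines on these cores */

	extern void dcache_clean(const char *line);	/* the 0: loop's clean op */
	extern void icbi(const char *line);		/* icbi 0,r6 */
	extern void sync(void);
	extern void isync(void);

	void flush_page_sketch(const char *p, unsigned long nlines)
	{
		unsigned long i;

		for (i = 0; i < nlines; i++)	/* 0: ... bdnz 0b */
			dcache_clean(p + i * L1_CACHE_BYTES);
		sync();
	#ifndef CONFIG_44x
		/* 44x: virtual icache, and the address here is not the
		 * user-space mapping it is indexed by; it is flushed on
		 * address-space switch instead, as the new comment says. */
		for (i = 0; i < nlines; i++)	/* 1: icbi ... bdnz 1b */
			icbi(p + i * L1_CACHE_BYTES);
		sync();
		isync();
	#endif /* CONFIG_44x */
	}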