Diffstat (limited to 'arch/ppc/kernel/misc.S')
-rw-r--r--	arch/ppc/kernel/misc.S	31
1 files changed, 24 insertions, 7 deletions
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index a22e1f4d94c8..e0c850d85c53 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -224,7 +224,16 @@ _GLOBAL(_tlbia)
  */
 _GLOBAL(_tlbie)
 #if defined(CONFIG_40x)
+	/* We run the search with interrupts disabled because we have to change
+	 * the PID and I don't want to preempt when that happens.
+	 */
+	mfmsr	r5
+	mfspr	r6,SPRN_PID
+	wrteei	0
+	mtspr	SPRN_PID,r4
 	tlbsx.	r3, 0, r3
+	mtspr	SPRN_PID,r6
+	wrtee	r5
 	bne	10f
 	sync
 /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
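The first hunk closes a window on 40x: tlbsx. matches TLB entries against the current SPRN_PID, so the patch saves the MSR and PID, masks external interrupts with wrteei 0, installs the caller's PID (passed in r4), runs the search, and restores both. What follows is a minimal C model of that save/disable/search/restore shape; mock_msr, mock_pid and search_tlb() are hypothetical stand-ins for the hardware registers and the tlbsx. instruction, not kernel interfaces.

    /* Sketch of the 40x _tlbie pattern above, under the assumptions
     * stated in the lead-in. */
    #include <stdio.h>

    #define MOCK_MSR_EE 0x8000u                 /* external-interrupt enable bit */

    static unsigned int mock_msr = MOCK_MSR_EE; /* pretend MSR, interrupts on */
    static unsigned int mock_pid = 1;           /* pretend current PID        */

    static int search_tlb(unsigned int vaddr)
    {
        /* tlbsx. matches on both the effective address and the PID */
        return (int)((vaddr >> 12) ^ mock_pid) & 63;
    }

    static int tlbie_40x_pattern(unsigned int vaddr, unsigned int pid)
    {
        unsigned int saved_msr = mock_msr;   /* mfmsr  r5          */
        unsigned int saved_pid = mock_pid;   /* mfspr  r6,SPRN_PID */

        mock_msr &= ~MOCK_MSR_EE;            /* wrteei 0           */
        mock_pid = pid;                      /* mtspr  SPRN_PID,r4 */
        int entry = search_tlb(vaddr);       /* tlbsx. r3, 0, r3   */
        mock_pid = saved_pid;                /* mtspr  SPRN_PID,r6 */
        mock_msr = saved_msr;                /* wrtee  r5          */
        return entry;
    }

    int main(void)
    {
        printf("entry %d\n", tlbie_40x_pattern(0x3000, 2));
        return 0;
    }

Restoring the PID before re-enabling interrupts mirrors the assembly's ordering: an interrupt taken while the borrowed PID is live could fault in translations for the wrong context.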
@@ -234,22 +243,21 @@ _GLOBAL(_tlbie)
 	isync
 10:
 #elif defined(CONFIG_44x)
-	mfspr	r4,SPRN_MMUCR
-	mfspr	r5,SPRN_PID			/* Get PID */
-	rlwimi	r4,r5,0,24,31			/* Set TID */
+	mfspr	r5,SPRN_MMUCR
+	rlwimi	r5,r4,0,24,31			/* Set TID */
 
 	/* We have to run the search with interrupts disabled, even critical
 	 * and debug interrupts (in fact the only critical exceptions we have
 	 * are debug and machine check). Otherwise an interrupt which causes
 	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
-	mfmsr	r5
+	mfmsr	r4
 	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
 	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
-	andc	r6,r5,r6
+	andc	r6,r4,r6
 	mtmsr	r6
-	mtspr	SPRN_MMUCR,r4
+	mtspr	SPRN_MMUCR,r5
 	tlbsx.	r3, 0, r3
-	mtmsr	r5
+	mtmsr	r4
 	bne	10f
 	sync
 /* There are only 64 TLB entries, so r3 < 64,
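The second hunk frees r4 to carry the PID argument (previously read back from SPRN_PID), staging MMUCR in r5 and the saved MSR in r4 instead, and it masks all four interrupt-enable bits because even a critical or debug interrupt can take a TLB miss and rewrite MMUCR. Below is a short, self-contained C illustration of that lis/addi/andc masking; the bit positions follow the usual Book E MSR layout and are assumed constants for illustration, not taken from asm/reg.h.

    /* Sketch of the MSR masking in the 44x hunk. */
    #include <stdint.h>
    #include <stdio.h>

    #define MSR_EE (1u << 15)   /* external interrupt enable */
    #define MSR_CE (1u << 17)   /* critical interrupt enable */
    #define MSR_ME (1u << 12)   /* machine check enable      */
    #define MSR_DE (1u << 9)    /* debug interrupt enable    */

    int main(void)
    {
        /* mfmsr r4: live MSR, with some other state bits set too */
        uint32_t msr  = MSR_EE | MSR_CE | MSR_ME | MSR_DE | 0x40u;
        /* lis/addi r6: the full 32-bit mask built in two halves */
        uint32_t mask = MSR_EE | MSR_CE | MSR_ME | MSR_DE;
        /* andc r6,r4,r6: keep every MSR bit except the four masked ones */
        uint32_t off  = msr & ~mask;

        printf("msr=%#x quiesced=%#x\n", (unsigned)msr, (unsigned)off);
        return 0;
    }

The lis/addi pair exists because a PowerPC immediate is only 16 bits wide, so a 32-bit constant is loaded as a high half (@ha) plus a low half (@l).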
@@ -491,12 +499,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	addi	r3,r3,L1_CACHE_BYTES
 	bdnz	0b
 	sync
+#ifndef CONFIG_44x
+	/* We don't flush the icache on 44x. Those have a virtual icache
+	 * and we don't have access to the virtual address here (it's
+	 * not the page vaddr but where it's mapped in user space). The
+	 * flushing of the icache on these is handled elsewhere, when
+	 * a change in the address space occurs, before returning to
+	 * user space
+	 */
 	mtctr	r4
 1:	icbi	0,r6
 	addi	r6,r6,L1_CACHE_BYTES
 	bdnz	1b
 	sync
 	isync
+#endif /* CONFIG_44x */
 	blr
 
 /*
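The last hunk wraps the icbi loop in #ifndef CONFIG_44x: the 440 icache is virtually tagged, this routine only has the kernel mapping of the page rather than the user-space address it executes from, and invalidation is instead done when the address space changes, before returning to user space. Here is a hedged C sketch of the resulting control flow, with flush_dcache_line() and invalidate_icache_line() as hypothetical stand-ins for the dcbst and icbi instructions.

    /* Sketch of the dcbst-then-maybe-icbi structure in the hunk above. */
    #include <stdint.h>

    #define L1_CACHE_BYTES 32u   /* a typical 4xx line size; illustrative */

    static void flush_dcache_line(uintptr_t addr)      { (void)addr; /* dcbst */ }
    static void invalidate_icache_line(uintptr_t addr) { (void)addr; /* icbi  */ }

    static void flush_page(uintptr_t page, unsigned int lines)
    {
        uintptr_t p = page;
        for (unsigned int i = 0; i < lines; i++, p += L1_CACHE_BYTES)
            flush_dcache_line(p);        /* 0: dcbst ...; bdnz 0b */
        /* sync: push the flushed lines out to memory */
    #ifndef CONFIG_44x
        p = page;
        for (unsigned int i = 0; i < lines; i++, p += L1_CACHE_BYTES)
            invalidate_icache_line(p);   /* 1: icbi 0,r6; bdnz 1b */
        /* sync + isync before executing the refreshed code */
    #endif
    }

    int main(void)
    {
        flush_page(0x4000, 4096u / L1_CACHE_BYTES);
        return 0;
    }

On a real 44x build, Kconfig defines CONFIG_44x and the second loop drops out, exactly as in the assembly.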
