author		Benjamin Herrenschmidt <benh@au1.ibm.com>	2006-05-30 00:14:19 -0400
committer	Paul Mackerras <paulus@samba.org>	2006-06-09 07:20:59 -0400
commit		c5cf0e30bf3d8cb56758abb612827647c0a821cf (patch)
tree		54a471818e91204db11f14544df0bb38bd03058f /arch/powerpc
parent		a5bba930d802009c259e56c8d53086d96f63813b (diff)
[PATCH] powerpc: Fix buglet with MMU hash management
Our MMU hash management code would not set the "C" bit (changed bit) in the hardware PTE when updating a RO PTE into a RW PTE. That would cause the hardware to possibly do a write back to the hash table to set it on the first store access, which in addition to being a performance issue, might also hit a bug when running with native hash management (non-HV), as our code is specifically optimized for the case where no write back happens.

Thus there is a very small theoretical window where a hash PTE can become corrupted if that HPTE has just been upgraded to read write, a store access happens on it, and that races with another processor evicting that same slot. Since eviction (caused by an almost full hash) is extremely rare, the bug is fortunately very unlikely to happen.

This is fixed by allowing the update of the protection bits in the native hash handling to also set (but not clear) the "C" bit, and, in order to also improve performance in the general case, by always setting that bit on newly inserted hash PTEs so that writeback really never happens.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
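For illustration, here is a minimal standalone C sketch of the masking idea the native updatepp path uses after this change: the caller may now set, but never clear, the "C" (changed) bit when updating protection. The constant values and the updatepp_mask() helper below are assumptions made for this sketch, not the kernel's actual headers or function names.

#include <stdint.h>
#include <stdio.h>

/* Assumed bit values for the low doubleword of a hash PTE (illustrative only). */
#define HPTE_R_PP  0x0000000000000003ULL  /* page protection bits */
#define HPTE_R_N   0x0000000000000004ULL  /* no-execute */
#define HPTE_R_C   0x0000000000000080ULL  /* changed (dirty) bit */

/* Hypothetical helper mirroring the updated masking logic: only PP and N are
 * cleared from the existing entry, and newpp may additionally carry C, so an
 * RO->RW upgrade marks the HPTE changed up front and the hardware never has
 * to write the hash entry back on the first store. */
static uint64_t updatepp_mask(uint64_t hpte_r, uint64_t newpp)
{
	return (hpte_r & ~(HPTE_R_PP | HPTE_R_N)) |
	       (newpp  &  (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
}

int main(void)
{
	uint64_t ro_hpte  = 0x0000000000001002ULL;   /* an RO entry, C bit clear */
	uint64_t rw_newpp = 0x2 | HPTE_R_C;          /* upgrade to RW, pre-set C */

	printf("before: %#llx\n", (unsigned long long)ro_hpte);
	printf("after:  %#llx\n", (unsigned long long)updatepp_mask(ro_hpte, rw_newpp));
	return 0;
}

With the old mask (HPTE_R_PP | HPTE_R_N only), the C bit passed in by the caller would have been dropped; keeping it in the mask is what lets the RO-to-RW upgrade avoid the hardware write back described above.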
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/mm/hash_low_64.S    | 3 +++
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index e0d02c4a2615..106fba391987 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -136,6 +136,7 @@ _GLOBAL(__hash_page_4K)
 	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
 	andc	r0,r30,r0		/* r0 = pte & ~r0 */
 	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
+	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
 
 	/* We eventually do the icache sync here (maybe inline that
 	 * code rather than call a C function...)
@@ -400,6 +401,7 @@ _GLOBAL(__hash_page_4K)
 	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
 	andc	r0,r30,r0		/* r0 = pte & ~r0 */
 	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
+	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
 
 	/* We eventually do the icache sync here (maybe inline that
 	 * code rather than call a C function...)
@@ -671,6 +673,7 @@ _GLOBAL(__hash_page_64K)
 	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
 	andc	r0,r30,r0		/* r0 = pte & ~r0 */
 	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
+	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
 
 	/* We eventually do the icache sync here (maybe inline that
 	 * code rather than call a C function...)
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 33654d1b1b43..3b8205033f15 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -238,7 +238,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 		DBG_LOW(" -> hit\n");
 		/* Update the HPTE */
 		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
-			(newpp & (HPTE_R_PP | HPTE_R_N));
+			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
 		native_unlock_hpte(hptep);
 	}
 