author      Olof Johansson <olof@lixom.net>              2005-05-01 11:58:45 -0400
committer   Linus Torvalds <torvalds@ppc970.osdl.org>    2005-05-01 11:58:45 -0400
commit      d03853d566fb32c6bb8cab4bf2ecf53e692f001c
tree        6e1865c6e19598978ab50d0c5b72e396a1bec608    /arch/ppc64/mm
parent      66faf9845a05905d75da380767e93455f3e6d620
[PATCH] PPC64: Remove hot busy-wait loop in __hash_page
It turns out that our current __hash_page code will do a very hot busy-wait loop waiting on _PAGE_BUSY to be cleared. It even does ldarx/stdcx in the loop, which will bounce reservations around like crazy if there's more than one CPU spinning on the same PTE (or even another PTE in the same reservation granule). The end result is that each fault takes longer when there's contention, which in turn increases the chance of another thread hitting the same fault and also piling up. Not pretty.

There are two options here:

1. Do an out-of-line busy loop, a la spinlocks, with just loads (no reserves).
2. Just bail and refault if needed.

(2) makes sense here: if the PTE is busy, chances are it's in flux anyway, and the other code path making the change might be just about to hash it. This fixes a stampede seen on a large-ish system where a multithreaded HPC app faults in the same text pages on several CPUs at the same time.

Signed-off-by: Olof Johansson <olof@lixom.net>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
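To make the contrast concrete, here is a minimal conceptual sketch in C of the two strategies described above. It is not the kernel's actual code (the real path is PPC64 assembly in hash_low.S using ldarx/stdcx.); the pte_t type, the PAGE_BUSY bit, and the function names below are invented purely for illustration.

/* Conceptual sketch only: the real implementation is PPC64 assembly.
 * pte_t, PAGE_BUSY and the function names are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define PAGE_BUSY (1UL << 0)            /* hypothetical "PTE is being updated" bit */

typedef _Atomic unsigned long pte_t;    /* hypothetical software PTE word */

/* Old behaviour (simplified): spin until the busy bit clears, then claim it.
 * On real hardware each retry was a ldarx/stdcx pair, so several CPUs
 * spinning on the same PTE kept stealing each other's reservations.
 */
bool hash_page_spin(pte_t *pte)
{
	unsigned long old;

	for (;;) {
		old = atomic_load_explicit(pte, memory_order_acquire);
		if (old & PAGE_BUSY)
			continue;                       /* hot busy-wait */
		if (atomic_compare_exchange_weak(pte, &old, old | PAGE_BUSY))
			return true;                    /* we now own the PTE update */
	}
}

/* New behaviour (simplified): if the PTE is busy, bail out immediately and
 * let the caller return; the access simply refaults if the concurrent
 * update did not already hash the page.
 */
bool hash_page_bail(pte_t *pte)
{
	unsigned long old = atomic_load_explicit(pte, memory_order_acquire);

	if (old & PAGE_BUSY)
		return false;                           /* bail: refault if still needed */

	return atomic_compare_exchange_strong(pte, &old, old | PAGE_BUSY);
}

int main(void)
{
	pte_t pte = 0;

	/* The first faulting CPU claims the PTE; with the new strategy a second
	 * concurrent CPU would see PAGE_BUSY and simply return to refault later.
	 */
	return hash_page_bail(&pte) ? 0 : 1;
}

The point of the second variant is that a fault is cheap to retake, so giving up under contention costs less than having every CPU hammer the same reservation granule.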
Diffstat (limited to 'arch/ppc64/mm')
 arch/ppc64/mm/hash_low.S | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/arch/ppc64/mm/hash_low.S b/arch/ppc64/mm/hash_low.S
index 8c0156a37001..c23d46956dd9 100644
--- a/arch/ppc64/mm/hash_low.S
+++ b/arch/ppc64/mm/hash_low.S
@@ -85,7 +85,10 @@ _GLOBAL(__hash_page)
 	bne-	htab_wrong_access
 	/* Check if PTE is busy */
 	andi.	r0,r31,_PAGE_BUSY
-	bne-	1b
+	/* If so, just bail out and refault if needed. Someone else
+	 * is changing this PTE anyway and might hash it.
+	 */
+	bne-	bail_ok
 	/* Prepare new PTE value (turn access RW into DIRTY, then
 	 * add BUSY,HASHPTE and ACCESSED)
 	 */
@@ -215,6 +218,10 @@ _GLOBAL(htab_call_hpte_remove)
 	/* Try all again */
 	b	htab_insert_pte

+bail_ok:
+	li	r3,0
+	b	bail
+
 htab_pte_insert_ok:
 	/* Insert slot number & secondary bit in PTE */
 	rldimi	r30,r3,12,63-15