path: root/arch/sparc64/kernel/tsb.S
author     David S. Miller <davem@davemloft.net>          2006-03-16 05:02:32 -0500
committer  David S. Miller <davem@sunset.davemloft.net>   2006-03-20 04:16:33 -0500
commit     7a1ac5264108fc3ed22d17a3cdd76212ed1666d1 (patch)
tree       75378a1b470afa54900f1f15a5b41966d301520d /arch/sparc64/kernel/tsb.S
parent     a858f1ca726edc5eb7ed39722f7966d005f1c9ca (diff)
[SPARC64]: Fix and re-enable dynamic TSB sizing.
This is good for up to a 50% performance improvement in some test cases. The problem has been the race conditions, and hopefully I've plugged them all up here.

1) There was a serious race in switch_mm() wrt. lazy TLB switching to and from kernel threads. We could erroneously skip a tsb_context_switch() and thus use a stale TSB across a TSB grow event. There is a big comment now in that function describing exactly how it can happen.

2) All code paths that do something with the TSB need to be guarded with the mm->context.lock spinlock. This makes page table flushing paths properly synchronize with both TSB growing and TLB context changes.

3) TSB growing events are moved to the end of successful fault processing. Previously this was done in update_mmu_cache(), but that is deadlock prone. At the end of do_sparc64_fault() we hold no spinlocks that could deadlock the TSB grow sequence, and we have also dropped the address space semaphore. (A C sketch of points 2 and 3 follows after this message.)

While we're here, add prefetching to the copy_tsb() routine and move it, as assembler, into the tsb.S file. This piece of code is quite time critical.

There are some small negative side effects of this code which can be improved upon. In particular, we now grab mm->context.lock even for the TSB insert done by update_mmu_cache(), and that's a bit excessive. We could get rid of that locking, and the same lock taking in flush_tsb_user(), by disabling PSTATE_IE around the whole operation, including the capturing of the TSB pointer and tsb_nentries value. That would work because anyone growing the TSB won't free up the old TSB until all cpus respond to the TSB change cross call. I'm not quite confident enough in that optimization to put it in right now, but eventually we might be able to, and the description is here for reference.

This code seems very solid now. It passes several parallel GCC bootstrap builds, and our favorite "nut cruncher" stress test, which is a full "make -j8192" build of a "make allmodconfig" kernel. That puts about 256 processes on each cpu's run queue, causes lots of process cpu migrations, lots of page table and TLB flushing activity, many context version number changes, and it swaps the machine far out to disk even though there is 16GB of ram on this test system. :-)

Signed-off-by: David S. Miller <davem@davemloft.net>
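A minimal C sketch of the pattern described in points 2 and 3 above. This is not the actual arch/sparc64 code: the structure and function names (sketch_mm_context, sketch_update_mmu_cache, sketch_fault_tail), SKETCH_PAGE_SHIFT, the two-word entry layout, and the grow trigger are hypothetical simplifications; only the "take the per-mm lock around every TSB touch" and "grow at the end of the fault" ordering come from the commit message.

/* Hedged sketch, not the real implementation.  ctx->lock plays the role of
 * mm->context.lock; tsb_nentries mirrors the field named in the commit
 * message.  The page shift and the tag/TTE entry layout are assumptions
 * made only for illustration.
 */
#include <linux/spinlock.h>

#define SKETCH_PAGE_SHIFT       13      /* sparc64 8K base pages */

struct sketch_mm_context {
        spinlock_t      lock;           /* role of mm->context.lock */
        unsigned long   tsb_nentries;   /* current TSB size, in entries */
        unsigned long   *tsb;           /* tag/TTE pairs, two words per entry */
};

/* 2) Every path that touches the TSB takes the per-mm lock, so TSB inserts,
 *    TSB flushes and TSB grow events all serialize against each other.
 */
static void sketch_update_mmu_cache(struct sketch_mm_context *ctx,
                                    unsigned long vaddr, unsigned long tte)
{
        unsigned long flags, idx;

        spin_lock_irqsave(&ctx->lock, flags);
        idx = (vaddr >> SKETCH_PAGE_SHIFT) & (ctx->tsb_nentries - 1);
        ctx->tsb[2 * idx]     = vaddr >> 22;    /* tag  */
        ctx->tsb[2 * idx + 1] = tte;            /* data */
        spin_unlock_irqrestore(&ctx->lock, flags);
}

/* 3) Growing happens only at the tail of successful fault processing, after
 *    the page table spinlocks and the address space semaphore have been
 *    dropped, so the grow sequence cannot deadlock against them.
 */
static void sketch_fault_tail(struct sketch_mm_context *ctx, unsigned long rss)
{
        if (rss > ctx->tsb_nentries) {
                /* Simplified trigger (assumption).  The real kernel invokes
                 * its grow helper here; per the message above, the old TSB is
                 * copied into the new one and only freed once all cpus have
                 * responded to the TSB-change cross call.
                 */
        }
}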
Diffstat (limited to 'arch/sparc64/kernel/tsb.S')
-rw-r--r--   arch/sparc64/kernel/tsb.S   71
1 file changed, 70 insertions(+), 1 deletion(-)
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index d738910153f6..1b154c863628 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -34,8 +34,9 @@ tsb_miss_itlb:
         ldxa            [%g4] ASI_IMMU, %g4
 
         /* At this point we have:
-         * %g4 -- missing virtual address
          * %g1 -- TSB entry address
+         * %g3 -- FAULT_CODE_{D,I}TLB
+         * %g4 -- missing virtual address
          * %g6 -- TAG TARGET (vaddr >> 22)
          */
 tsb_miss_page_table_walk:
@@ -45,6 +46,12 @@ tsb_miss_page_table_walk:
 tsb_miss_page_table_walk_sun4v_fastpath:
         USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
 
+        /* At this point we have:
+         * %g1 -- TSB entry address
+         * %g3 -- FAULT_CODE_{D,I}TLB
+         * %g5 -- physical address of PTE in Linux page tables
+         * %g6 -- TAG TARGET (vaddr >> 22)
+         */
 tsb_reload:
         TSB_LOCK_TAG(%g1, %g2, %g7)
 
@@ -199,6 +206,7 @@ __tsb_insert:
         wrpr    %o5, %pstate
         retl
          nop
+        .size   __tsb_insert, .-__tsb_insert
 
         /* Flush the given TSB entry if it has the matching
          * tag.
@@ -208,6 +216,7 @@ __tsb_insert:
          */
         .align  32
         .globl  tsb_flush
+        .type   tsb_flush,#function
 tsb_flush:
         sethi   %hi(TSB_TAG_LOCK_HIGH), %g2
 1:      TSB_LOAD_TAG(%o0, %g1)
@@ -225,6 +234,7 @@ tsb_flush:
          nop
 2:      retl
          TSB_MEMBAR
+        .size   tsb_flush, .-tsb_flush
 
         /* Reload MMU related context switch state at
          * schedule() time.
@@ -241,6 +251,7 @@ tsb_flush:
          */
         .align  32
         .globl  __tsb_context_switch
+        .type   __tsb_context_switch,#function
 __tsb_context_switch:
         rdpr    %pstate, %o5
         wrpr    %o5, PSTATE_IE, %pstate
@@ -302,3 +313,61 @@ __tsb_context_switch:
 
         retl
          nop
+        .size   __tsb_context_switch, .-__tsb_context_switch
+
+#define TSB_PASS_BITS           ((1 << TSB_TAG_LOCK_BIT) | \
+                                 (1 << TSB_TAG_INVALID_BIT))
+
+        .align          32
+        .globl          copy_tsb
+        .type           copy_tsb,#function
+copy_tsb:               /* %o0=old_tsb_base, %o1=old_tsb_size
+                         * %o2=new_tsb_base, %o3=new_tsb_size
+                         */
+        sethi           %uhi(TSB_PASS_BITS), %g7
+        srlx            %o3, 4, %o3
+        add             %o0, %o1, %g1   /* end of old tsb */
+        sllx            %g7, 32, %g7
+        sub             %o3, 1, %o3     /* %o3 == new tsb hash mask */
+
+661:    prefetcha       [%o0] ASI_N, #one_read
+        .section        .tsb_phys_patch, "ax"
+        .word           661b
+        prefetcha       [%o0] ASI_PHYS_USE_EC, #one_read
+        .previous
+
+90:     andcc           %o0, (64 - 1), %g0
+        bne             1f
+         add            %o0, 64, %o5
+
+661:    prefetcha       [%o5] ASI_N, #one_read
+        .section        .tsb_phys_patch, "ax"
+        .word           661b
+        prefetcha       [%o5] ASI_PHYS_USE_EC, #one_read
+        .previous
+
+1:      TSB_LOAD_QUAD(%o0, %g2)         /* %g2/%g3 == TSB entry */
+        andcc           %g2, %g7, %g0   /* LOCK or INVALID set? */
+        bne,pn          %xcc, 80f       /* Skip it */
+         sllx           %g2, 22, %o4    /* TAG --> VADDR */
+
+        /* This can definitely be computed faster... */
+        srlx            %o0, 4, %o5     /* Build index */
+        and             %o5, 511, %o5   /* Mask index */
+        sllx            %o5, PAGE_SHIFT, %o5    /* Put into vaddr position */
+        or              %o4, %o5, %o4   /* Full VADDR. */
+        srlx            %o4, PAGE_SHIFT, %o4    /* Shift down to create index */
+        and             %o4, %o3, %o4   /* Mask with new_tsb_nents-1 */
+        sllx            %o4, 4, %o4     /* Shift back up into tsb ent offset */
+        TSB_STORE(%o2 + %o4, %g2)       /* Store TAG */
+        add             %o4, 0x8, %o4   /* Advance to TTE */
+        TSB_STORE(%o2 + %o4, %g3)       /* Store TTE */
+
+80:     add             %o0, 16, %o0
+        cmp             %o0, %g1
+        bne,pt          %xcc, 90b
+         nop
+
+        retl
+         TSB_MEMBAR
+        .size           copy_tsb, .-copy_tsb
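For reference, a hedged C rendering of what the copy_tsb loop above computes: each live entry's virtual address is rebuilt from its tag plus its position in the old table, then re-hashed into the differently sized new table. The names here (sketch_tsb_ent, sketch_copy_tsb), the 8K-page assumption, and the lock/invalid bit positions in SKETCH_PASS_BITS are simplifications for illustration, not the kernel's definitions.

/* Hedged C sketch of the copy_tsb re-hash; not the real implementation. */
#include <stdint.h>
#include <stddef.h>

#define SKETCH_PAGE_SHIFT       13      /* sparc64 8K base pages (assumption) */
/* Skip entries whose tag has the lock or invalid bit set; the bit positions
 * stand in for TSB_TAG_LOCK_BIT/TSB_TAG_INVALID_BIT from the sparc64 headers.
 */
#define SKETCH_PASS_BITS        ((1UL << 47) | (1UL << 46))

struct sketch_tsb_ent {
        uint64_t tag;   /* vaddr >> 22, plus lock/invalid bits */
        uint64_t tte;
};

/* nents counts 16-byte entries, i.e. tsb_size / 16, as in the assembler. */
static void sketch_copy_tsb(const struct sketch_tsb_ent *old_tsb,
                            size_t old_nents,
                            struct sketch_tsb_ent *new_tsb,
                            size_t new_nents)
{
        size_t i;

        for (i = 0; i < old_nents; i++) {
                uint64_t tag = old_tsb[i].tag;
                uint64_t vaddr, new_idx;

                if (tag & SKETCH_PASS_BITS)
                        continue;       /* locked or invalid: skip it */

                /* Rebuild the virtual address.  The tag holds vaddr >> 22;
                 * with 8K pages, the nine page-index bits below bit 22 come
                 * from the low nine bits of the old hash index.  (The old
                 * TSB has at least 512 entries and is naturally aligned, so
                 * the array index matches what the assembler derives from
                 * the entry's address.)
                 */
                vaddr  = tag << 22;
                vaddr |= (uint64_t)(i & 511) << SKETCH_PAGE_SHIFT;

                /* Re-hash into the new, differently sized table. */
                new_idx = (vaddr >> SKETCH_PAGE_SHIFT) & (new_nents - 1);
                new_tsb[new_idx].tag = tag;
                new_tsb[new_idx].tte = old_tsb[i].tte;
        }
}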