about | summary | refs | log | tree | commit | diff | stats
path: root/arch/sparc64/mm
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2006-02-23 17:19:28 -0500
committerDavid S. Miller <davem@sunset.davemloft.net>2006-03-20 04:14:00 -0500
commita0663a79ad4faebe1db4a56e2e767b120b12333a (patch)
tree612a53e387a6aea6116f8a1637050fa13c6d9f80 /arch/sparc64/mm
parent074d82cf688fe2dfa7ba4a2317c56f62d13fb522 (diff)
[SPARC64]: Fix TLB context allocation with SMT style shared TLBs.
The context allocation scheme we use depends upon there being a 1<-->1 mapping from cpu to physical TLB for correctness.  Chips like Niagara break this assumption.

So what we do is notify all cpus with a cross call when the context version number changes, and if necessary this makes them allocate a valid context for the address space they are running at the time.

Stress tested with make -j1024, make -j2048, and make -j4096 kernel builds on a 32-strand, 8 core, T2000 with 16GB of ram.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/mm')
-rw-r--r--arch/sparc64/mm/init.c9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 16f0db38d932..ccf083aecb65 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -629,17 +629,20 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
  * let the user have CTX 0 (nucleus) or we ever use a CTX
  * version of zero (and thus NO_CONTEXT would not be caught
  * by version mis-match tests in mmu_context.h).
+ *
+ * Always invoked with interrupts disabled.
  */
 void get_new_mmu_context(struct mm_struct *mm)
 {
 	unsigned long ctx, new_ctx;
 	unsigned long orig_pgsz_bits;
-
+	int new_version;
 
 	spin_lock(&ctx_alloc_lock);
 	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
+	new_version = 0;
 	if (new_ctx >= (1 << CTX_NR_BITS)) {
 		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
 		if (new_ctx >= ctx) {
@@ -662,6 +665,7 @@ void get_new_mmu_context(struct mm_struct *mm)
 			mmu_context_bmap[i + 2] = 0;
 			mmu_context_bmap[i + 3] = 0;
 		}
+		new_version = 1;
 		goto out;
 	}
 }
@@ -671,6 +675,9 @@ out:
 	tlb_context_cache = new_ctx;
 	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
 	spin_unlock(&ctx_alloc_lock);
+
+	if (unlikely(new_version))
+		smp_new_mmu_context_version();
 }
 
 void sparc_ultra_dump_itlb(void)