-rw-r--r--  arch/sparc64/kernel/smp.c          40
-rw-r--r--  arch/sparc64/mm/init.c              9
-rw-r--r--  include/asm-sparc64/mmu.h           1
-rw-r--r--  include/asm-sparc64/mmu_context.h  25
4 files changed, 50 insertions, 25 deletions
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 0cd9b16612e7..1ce940811492 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -885,26 +885,44 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	put_cpu();
 }
 
+static void __smp_receive_signal_mask(cpumask_t mask)
+{
+	smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
+}
+
 void smp_receive_signal(int cpu)
 {
 	cpumask_t mask = cpumask_of_cpu(cpu);
 
-	if (cpu_online(cpu)) {
-		u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);
-
-		if (tlb_type == spitfire)
-			spitfire_xcall_deliver(data0, 0, 0, mask);
-		else if (tlb_type == cheetah || tlb_type == cheetah_plus)
-			cheetah_xcall_deliver(data0, 0, 0, mask);
-		else if (tlb_type == hypervisor)
-			hypervisor_xcall_deliver(data0, 0, 0, mask);
-	}
+	if (cpu_online(cpu))
+		__smp_receive_signal_mask(mask);
 }
 
 void smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
-	/* Just return, rtrap takes care of the rest. */
+	struct mm_struct *mm;
+
 	clear_softint(1 << irq);
+
+	/* See if we need to allocate a new TLB context because
+	 * the version of the one we are using is now out of date.
+	 */
+	mm = current->active_mm;
+	if (likely(mm)) {
+		if (unlikely(!CTX_VALID(mm->context))) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&mm->context.lock, flags);
+			get_new_mmu_context(mm);
+			load_secondary_context(mm);
+			spin_unlock_irqrestore(&mm->context.lock, flags);
+		}
+	}
+}
+
+void smp_new_mmu_context_version(void)
+{
+	__smp_receive_signal_mask(cpu_online_map);
 }
 
 void smp_report_regs(void)
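
For orientation, the smp.c hunk above reuses the existing "receive signal" cross-call as the broadcast mechanism for a context-version rollover, and adds smp_new_mmu_context_version() to trigger it. A rough sketch of the resulting call flow, using only names that appear in this diff (the layout is illustrative, not kernel source):

/* Illustrative call flow for a context-version rollover (names from this diff):
 *
 *   get_new_mmu_context()                          context numbers exhausted, version bumped
 *     -> smp_new_mmu_context_version()
 *          -> __smp_receive_signal_mask(cpu_online_map)    cross-call to every online CPU
 *               -> smp_receive_signal_client()             runs on each receiving CPU
 *                    -> if (!CTX_VALID(current->active_mm->context))
 *                           get_new_mmu_context() + load_secondary_context(),
 *                           under mm->context.lock with interrupts disabled
 */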
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 16f0db38d932..ccf083aecb65 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -629,17 +629,20 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
  * let the user have CTX 0 (nucleus) or we ever use a CTX
  * version of zero (and thus NO_CONTEXT would not be caught
  * by version mis-match tests in mmu_context.h).
+ *
+ * Always invoked with interrupts disabled.
  */
 void get_new_mmu_context(struct mm_struct *mm)
 {
 	unsigned long ctx, new_ctx;
 	unsigned long orig_pgsz_bits;
-	
+	int new_version;
 
 	spin_lock(&ctx_alloc_lock);
 	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
+	new_version = 0;
 	if (new_ctx >= (1 << CTX_NR_BITS)) {
 		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
 		if (new_ctx >= ctx) {
@@ -662,6 +665,7 @@ void get_new_mmu_context(struct mm_struct *mm)
 				mmu_context_bmap[i + 2] = 0;
 				mmu_context_bmap[i + 3] = 0;
 			}
+			new_version = 1;
 			goto out;
 		}
 	}
@@ -671,6 +675,9 @@ out:
 	tlb_context_cache = new_ctx;
 	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
 	spin_unlock(&ctx_alloc_lock);
+
+	if (unlikely(new_version))
+		smp_new_mmu_context_version();
 }
 
 void sparc_ultra_dump_itlb(void)
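
The init.c hunks record whether the allocation wrapped around and bumped the context version; only in that case, and only after ctx_alloc_lock has been dropped, is the broadcast issued. A condensed sketch of the resulting structure (the bitmap search and version-bump details are elided, as in the hunks above):

void get_new_mmu_context(struct mm_struct *mm)
{
	int new_version = 0;

	spin_lock(&ctx_alloc_lock);
	/* ... find a free context number; if the search wraps, bump the
	 * context version, clear mmu_context_bmap and set new_version = 1;
	 * then store the new context value into mm->context ...
	 */
	spin_unlock(&ctx_alloc_lock);

	/* Notify the other CPUs only on a version rollover, and only once
	 * the allocator lock has been released.
	 */
	if (unlikely(new_version))
		smp_new_mmu_context_version();
}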
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 473d990848ee..1504d303a1d5 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -102,6 +102,7 @@ extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte
 extern void tsb_flush(unsigned long ent, unsigned long tag);
 
 typedef struct {
+	spinlock_t	lock;
 	unsigned long	sparc64_ctx_val;
 	struct tsb	*tsb;
 	unsigned long	tsb_rss_limit;
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index eb660b1609c4..4be40c58e3c1 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -19,6 +19,12 @@ extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
 extern void get_new_mmu_context(struct mm_struct *mm);
+#ifdef CONFIG_SMP
+extern void smp_new_mmu_context_version(void);
+#else
+#define smp_new_mmu_context_version() do { } while (0)
+#endif
+
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 
@@ -58,21 +64,17 @@ extern void smp_tsb_sync(struct mm_struct *mm);
 
 extern void __flush_tlb_mm(unsigned long, unsigned long);
 
-/* Switch the current MM context. */
+/* Switch the current MM context.  Interrupts are disabled. */
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid;
 	int cpu;
 
-	/* Note: page_table_lock is used here to serialize switch_mm
-	 * and activate_mm, and their calls to get_new_mmu_context.
-	 * This use of page_table_lock is unrelated to its other uses.
-	 */
-	spin_lock(&mm->page_table_lock);
+	spin_lock(&mm->context.lock);
 	ctx_valid = CTX_VALID(mm->context);
 	if (!ctx_valid)
 		get_new_mmu_context(mm);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(&mm->context.lock);
 
 	if (!ctx_valid || (old_mm != mm)) {
 		load_secondary_context(mm);
@@ -98,19 +100,16 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 /* Activate a new MM instance for the current task. */
 static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
 {
+	unsigned long flags;
 	int cpu;
 
-	/* Note: page_table_lock is used here to serialize switch_mm
-	 * and activate_mm, and their calls to get_new_mmu_context.
-	 * This use of page_table_lock is unrelated to its other uses.
-	 */
-	spin_lock(&mm->page_table_lock);
+	spin_lock_irqsave(&mm->context.lock, flags);
 	if (!CTX_VALID(mm->context))
 		get_new_mmu_context(mm);
 	cpu = smp_processor_id();
 	if (!cpu_isset(cpu, mm->cpu_vm_mask))
 		cpu_set(cpu, mm->cpu_vm_mask);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock_irqrestore(&mm->context.lock, flags);
 
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
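
In sum, the per-mm context.lock introduced in mmu.h takes over from page_table_lock for serializing context allocation. A summary of the lock's users after this patch, as they appear in the hunks above (illustrative notes, not kernel source):

/* mm->context.lock users after this patch:
 *
 *   switch_mm()                   spin_lock()           interrupts already disabled
 *                                                       (per the updated comment)
 *   activate_mm()                 spin_lock_irqsave()
 *   smp_receive_signal_client()   spin_lock_irqsave()   the cross-call handler
 *
 * In each of these paths get_new_mmu_context() is called with the lock held
 * and, per the new comment in init.c, with interrupts disabled.
 */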