Diffstat (limited to 'include/asm-sparc64/mmu_context.h')
-rw-r--r--	include/asm-sparc64/mmu_context.h	50
1 file changed, 38 insertions(+), 12 deletions(-)
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index ca36ea96f64b..e7974321d052 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -42,7 +42,7 @@ static inline void tsb_context_switch(struct mm_struct *mm)
 			     __pa(&mm->context.tsb_descr));
 }
 
-extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss, gfp_t gfp_flags);
+extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss);
 #ifdef CONFIG_SMP
 extern void smp_tsb_sync(struct mm_struct *mm);
 #else
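The only functional change in this first hunk is the tsb_grow() prototype: the gfp_t argument is dropped, so the allocation policy for the enlarged TSB is decided inside tsb_grow() itself rather than by each caller. A minimal sketch of what a call site would look like before and after, assuming a hypothetical helper that is handed the current RSS count (the helper name and the GFP_KERNEL flag in the old-style call are illustrative, not taken from this diff):

/* Hypothetical call site, for illustration only. */
extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss);

static void example_account_rss(struct mm_struct *mm, unsigned long rss)
{
	/* Old prototype: the caller picked the allocation flags, e.g.
	 *	tsb_grow(mm, rss, GFP_KERNEL);
	 */

	/* New prototype: only the mm and its RSS are passed; how the
	 * larger TSB gets allocated is now tsb_grow()'s own business.
	 */
	tsb_grow(mm, rss);
}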
@@ -74,18 +74,43 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	ctx_valid = CTX_VALID(mm->context);
 	if (!ctx_valid)
 		get_new_mmu_context(mm);
-	spin_unlock_irqrestore(&mm->context.lock, flags);
 
-	if (!ctx_valid || (old_mm != mm)) {
-		load_secondary_context(mm);
-		tsb_context_switch(mm);
-	}
+	/* We have to be extremely careful here or else we will miss
+	 * a TSB grow if we switch back and forth between a kernel
+	 * thread and an address space which has its TSB size increased
+	 * on another processor.
+	 *
+	 * It is possible to play some games in order to optimize the
+	 * switch, but the safest thing to do is to unconditionally
+	 * perform the secondary context load and the TSB context switch.
+	 *
+	 * For reference the bad case is, for address space "A":
+	 *
+	 *	CPU 0				CPU 1
+	 *	run address space A
+	 *	set cpu0's bits in cpu_vm_mask
+	 *					switch to kernel thread, borrow
+	 *					address space A via entry_lazy_tlb
+	 *					run address space A
+	 *					set cpu1's bit in cpu_vm_mask
+	 *					flush_tlb_pending()
+	 *					reset cpu_vm_mask to just cpu1
+	 *					TSB grow
+	 *	run address space A
+	 *	context was valid, so skip
+	 *	TSB context switch
+	 *
+	 * At that point cpu0 continues to use a stale TSB, the one from
+	 * before the TSB grow performed on cpu1.  cpu1 did not cross-call
+	 * cpu0 to update its TSB because at that point the cpu_vm_mask
+	 * only had cpu1 set in it.
+	 */
+	load_secondary_context(mm);
+	tsb_context_switch(mm);
 
-	/* Even if (mm == old_mm) we _must_ check
-	 * the cpu_vm_mask.  If we do not we could
-	 * corrupt the TLB state because of how
-	 * smp_flush_tlb_{page,range,mm} on sparc64
-	 * and lazy tlb switches work. -DaveM
+	/* Any time a processor runs a context on an address space
+	 * for the first time, we must flush that context out of the
+	 * local TLB.
 	 */
 	cpu = smp_processor_id();
 	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
@@ -93,6 +118,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 		__flush_tlb_mm(CTX_HWBITS(mm->context),
 			       SECONDARY_CONTEXT);
 	}
+	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 #define deactivate_mm(tsk,mm) do { } while (0)
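Because switch_mm() is split across the two hunks above, here is a sketch of how its body reads once the patch is applied, stitched together from those hunks. The spin_lock_irqsave() at the top and the cpu_set() call that falls between the two hunks are not part of the diff and are assumed from the surrounding code; the long comment is abbreviated, and the real function also takes old_mm and a task_struct pointer, neither of which appears in the hunks shown.

/* Sketch of the new switch_mm() flow; not a verbatim copy of the header. */
static inline void sketch_switch_mm(struct mm_struct *mm)
{
	unsigned long ctx_valid, flags;
	int cpu;

	/* Assumed: the lock is taken above the first hunk. */
	spin_lock_irqsave(&mm->context.lock, flags);

	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);

	/* Unconditional now: never skip the TSB context switch, or a TSB
	 * grown on another CPU while this CPU lazily borrowed the address
	 * space would keep being used here in its stale, smaller form.
	 */
	load_secondary_context(mm);
	tsb_context_switch(mm);

	/* First time this CPU runs this context: flush it from the local TLB. */
	cpu = smp_processor_id();
	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
		cpu_set(cpu, mm->cpu_vm_mask);	/* falls between the hunks, assumed unchanged */
		__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
	}

	/* The unlock moves from before the context load to after the flush,
	 * so the whole load/switch/flush sequence runs under the lock.
	 */
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

As the added comment in the hunk itself says, this trades a small optimization (skipping the reload when the context was already valid and the mm did not change) for safety: the unconditional load and TSB switch cannot miss a TSB grow performed on another processor.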
@@ -109,11 +135,11 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
 	cpu = smp_processor_id();
 	if (!cpu_isset(cpu, mm->cpu_vm_mask))
 		cpu_set(cpu, mm->cpu_vm_mask);
-	spin_unlock_irqrestore(&mm->context.lock, flags);
 
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
 	tsb_context_switch(mm);
+	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
 #endif /* !(__ASSEMBLY__) */
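The same locking change is applied to activate_mm(): the unlock moves from before the secondary-context load to after the TSB context switch, so the load, the local TLB flush, and the TSB switch all run under mm->context.lock. A sketch of the resulting function, where the lock acquisition and the context allocation that sit above this hunk are assumptions, since the diff only shows the tail:

/* Sketch of activate_mm() after the patch; the first few lines are assumed
 * from context, as the hunk above only shows the tail of the function.
 */
static inline void sketch_activate_mm(struct mm_struct *mm)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&mm->context.lock, flags);	/* assumed: above the hunk */
	if (!CTX_VALID(mm->context))			/* assumed: above the hunk */
		get_new_mmu_context(mm);

	cpu = smp_processor_id();
	if (!cpu_isset(cpu, mm->cpu_vm_mask))
		cpu_set(cpu, mm->cpu_vm_mask);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
	tsb_context_switch(mm);
	/* Unlock moved here from before load_secondary_context(). */
	spin_unlock_irqrestore(&mm->context.lock, flags);
}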