author	Hugh Dickins <hugh@veritas.com>	2005-11-07 17:09:01 -0500
committer	David S. Miller <davem@davemloft.net>	2005-11-07 17:09:01 -0500
commit	dedeb0029b9c83420fc1337d4ee53daa7b2a0ad4 (patch)
tree	d87e66e1d6240cd412c20ecbc12f5b810c9807e4	/include/asm-sparc64/mmu_context.h
parent	b8ae48656db860d4c83a29aa7b0588fc89361935 (diff)
[SPARC64] mm: context switch ptlock
sparc64 is unique among architectures in taking the page_table_lock in its context switch (well, cris does too, but erroneously, and it's not yet SMP anyway).

This seems to be a private affair between switch_mm and activate_mm, using page_table_lock as a per-mm lock, without any relation to its uses elsewhere.  That's fine, but comment it as such; and unlock sooner in switch_mm, more like in activate_mm (preemption is disabled here).

There is a block of "if (0)"ed code in smp_flush_tlb_pending which would have liked to rely on the page_table_lock, in switch_mm and elsewhere; but its comment explains how dup_mmap's flush_tlb_mm defeated it.  And though that could have been changed at any time over the past few years, now the chance vanishes as we push the page_table_lock downwards, and perhaps split it per page table page.  Just delete that block of code.

Which leaves the mysterious spin_unlock_wait(&oldmm->page_table_lock) in kernel/fork.c copy_mm.  Textual analysis (supported by Nick Piggin) suggests that the comment was written by DaveM, and that it relates to the defeated approach in the sparc64 smp_flush_tlb_pending.  Just delete this block too.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
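For orientation only (not part of the commit): a sketch of how switch_mm() reads once the hunk below is applied. The identifiers are those from the diff itself; the comments here summarize the new locking shape and are this sketch's annotations, not text from the patch.

/* Sketch: switch_mm() after this patch (sparc64).  page_table_lock is held
 * only around the context-validity check and get_new_mmu_context(), then
 * dropped before the secondary-context load and TLB flush, which run with
 * preemption disabled -- the same shape activate_mm() already has.
 */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
			     struct task_struct *tsk)
{
	unsigned long ctx_valid;
	int cpu;

	/* Serializes switch_mm/activate_mm calls to get_new_mmu_context(). */
	spin_lock(&mm->page_table_lock);
	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);
	spin_unlock(&mm->page_table_lock);	/* unlocked sooner than before */

	if (!ctx_valid || (old_mm != mm)) {
		load_secondary_context(mm);
		reload_tlbmiss_state(tsk, mm);
	}

	/* cpu_vm_mask must be checked even when mm == old_mm, or lazy TLB
	 * switching can leave stale translations (per DaveM's comment).
	 */
	cpu = smp_processor_id();
	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
		cpu_set(cpu, mm->cpu_vm_mask);
		__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
	}
}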
Diffstat (limited to 'include/asm-sparc64/mmu_context.h')
-rw-r--r--	include/asm-sparc64/mmu_context.h	46
1 file changed, 24 insertions(+), 22 deletions(-)
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 87c43c67866e..08ba72d7722c 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -87,37 +87,35 @@ extern void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid;
+	int cpu;
 
+	/* Note: page_table_lock is used here to serialize switch_mm
+	 * and activate_mm, and their calls to get_new_mmu_context.
+	 * This use of page_table_lock is unrelated to its other uses.
+	 */
 	spin_lock(&mm->page_table_lock);
-	if (CTX_VALID(mm->context))
-		ctx_valid = 1;
-	else
-		ctx_valid = 0;
+	ctx_valid = CTX_VALID(mm->context);
+	if (!ctx_valid)
+		get_new_mmu_context(mm);
+	spin_unlock(&mm->page_table_lock);
 
 	if (!ctx_valid || (old_mm != mm)) {
-		if (!ctx_valid)
-			get_new_mmu_context(mm);
-
 		load_secondary_context(mm);
 		reload_tlbmiss_state(tsk, mm);
 	}
 
-	{
-		int cpu = smp_processor_id();
-
-		/* Even if (mm == old_mm) we _must_ check
-		 * the cpu_vm_mask. If we do not we could
-		 * corrupt the TLB state because of how
-		 * smp_flush_tlb_{page,range,mm} on sparc64
-		 * and lazy tlb switches work. -DaveM
-		 */
-		if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
-			cpu_set(cpu, mm->cpu_vm_mask);
-			__flush_tlb_mm(CTX_HWBITS(mm->context),
-				       SECONDARY_CONTEXT);
-		}
+	/* Even if (mm == old_mm) we _must_ check
+	 * the cpu_vm_mask. If we do not we could
+	 * corrupt the TLB state because of how
+	 * smp_flush_tlb_{page,range,mm} on sparc64
+	 * and lazy tlb switches work. -DaveM
+	 */
+	cpu = smp_processor_id();
+	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
+		cpu_set(cpu, mm->cpu_vm_mask);
+		__flush_tlb_mm(CTX_HWBITS(mm->context),
+			       SECONDARY_CONTEXT);
 	}
-	spin_unlock(&mm->page_table_lock);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
@@ -127,6 +125,10 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
 {
 	int cpu;
 
+	/* Note: page_table_lock is used here to serialize switch_mm
+	 * and activate_mm, and their calls to get_new_mmu_context.
+	 * This use of page_table_lock is unrelated to its other uses.
+	 */
 	spin_lock(&mm->page_table_lock);
 	if (!CTX_VALID(mm->context))
 		get_new_mmu_context(mm);