author    Benjamin Herrenschmidt <benh@kernel.crashing.org> 2009-05-19 12:56:42 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org> 2009-06-09 02:42:21 -0400
commit    3035c8634f0538a0c6946e3191bb6c9284b63798 (patch)
tree      68f81301d2ced6492957c8320a631ad96518bd53 /arch
parent    ec097c84dff17511f2693e6ef6c3064dfbf0a3af (diff)
powerpc/mm: Fix some SMP issues with MMU context handling
This patch fixes a couple of issues that can happen as a result of steal_context() dropping the context_lock when all possible PIDs are ineligible for stealing (hopefully an extremely hard to hit occurrence).

This case exposes the possibility of a stale context_mm[] entry being seen, since destroy_context() doesn't clear it and the free map isn't re-tested. It also means steal_context() will not notice a context freed while the lock was held, and may thus try to steal a context when a free one was available.

This fixes it by always returning to the caller from steal_context() when it dropped the lock, with a return value that causes the caller to re-sample the number of free contexts, along with properly clearing the context_mm[] array for destroyed contexts.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
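For illustration, here is a minimal user-space sketch of the retry pattern this commit introduces, with a pthread mutex standing in for context_lock. Every name here (NSLOTS, NO_SLOT, slots, try_steal, get_slot) is hypothetical and not part of the kernel API; the point is only the shape of the fix: when the stealer has to drop the lock, it returns a sentinel so the caller re-samples its state instead of continuing on stale information.

/*
 * Illustrative user-space sketch of the retry pattern (not the kernel
 * code). A pthread mutex plays the role of context_lock; NO_SLOT plays
 * the role of MMU_NO_CONTEXT. All names are hypothetical.
 */
#include <pthread.h>
#include <sched.h>
#include <stddef.h>

#define NSLOTS  16
#define NO_SLOT ((unsigned int)-1)      /* plays the role of MMU_NO_CONTEXT */

struct slot {
        void *owner;                    /* plays the role of context_mm[] */
        int active;                     /* in use somewhere, not stealable */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct slot slots[NSLOTS];
static unsigned int nr_free = NSLOTS;

/* Called with the lock held; may drop and re-take it. */
static unsigned int try_steal(unsigned int id)
{
        unsigned int max = NSLOTS;

        while (max--) {
                if (!slots[id].active && slots[id].owner != NULL)
                        return id;      /* found an inactive victim */
                id = (id + 1) % NSLOTS;
        }

        /* Everything is active: drop the lock so owners can make
         * progress, then tell the caller to re-sample its state and
         * retry, rather than looping here on stale information. */
        pthread_mutex_unlock(&lock);
        sched_yield();
        pthread_mutex_lock(&lock);
        return NO_SLOT;
}

static unsigned int get_slot(void *owner)
{
        unsigned int id;

        pthread_mutex_lock(&lock);
again:
        if (nr_free == 0) {
                id = try_steal(0);
                if (id == NO_SLOT)
                        goto again;     /* free count may have changed
                                         * while the lock was dropped */
        } else {
                for (id = 0; slots[id].owner != NULL; id++)
                        ;               /* a free slot must exist */
                nr_free--;
        }
        slots[id].owner = owner;
        slots[id].active = 1;
        pthread_mutex_unlock(&lock);
        return id;
}

int main(void)
{
        static int owner_a, owner_b;    /* dummy owner tokens */
        return get_slot(&owner_a) == get_slot(&owner_b);  /* expect 0 */
}

The key line is the goto again after the NO_SLOT return: the caller re-reads nr_free under the re-taken lock, so a context freed while the lock was dropped is picked up from the free path instead of being stolen.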
Diffstat (limited to 'arch')
-rw-r--r-- arch/powerpc/mm/mmu_context_nohash.c | 12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 030d0005b4d2..c42858780cbd 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -73,7 +73,6 @@ static unsigned int steal_context_smp(unsigned int id)
        struct mm_struct *mm;
        unsigned int cpu, max;
 
- again:
        max = last_context - first_context;
 
        /* Attempt to free next_context first and then loop until we manage */
@@ -108,7 +107,9 @@ static unsigned int steal_context_smp(unsigned int id)
        spin_unlock(&context_lock);
        cpu_relax();
        spin_lock(&context_lock);
-       goto again;
+
+       /* This will cause the caller to try again */
+       return MMU_NO_CONTEXT;
 }
 #endif  /* CONFIG_SMP */
 
@@ -194,6 +195,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
                WARN_ON(prev->context.active < 1);
                prev->context.active--;
        }
+
+ again:
 #endif /* CONFIG_SMP */
 
        /* If we already have a valid assigned context, skip all that */
@@ -212,7 +215,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 #ifdef CONFIG_SMP
                if (num_online_cpus() > 1) {
                        id = steal_context_smp(id);
-                       goto stolen;
+                       if (id == MMU_NO_CONTEXT)
+                               goto again;
                }
 #endif /* CONFIG_SMP */
                id = steal_context_up(id);
@@ -286,8 +290,8 @@ void destroy_context(struct mm_struct *mm)
                mm->context.id = MMU_NO_CONTEXT;
 #ifdef DEBUG_MAP_CONSISTENCY
                mm->context.active = 0;
-               context_mm[id] = NULL;
 #endif
+               context_mm[id] = NULL;
                nr_free_contexts++;
        }
        spin_unlock(&context_lock);
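Continuing the hypothetical user-space sketch from above, the destroy_context() half of the fix corresponds to clearing the owner pointer unconditionally when a slot is released, rather than only under a debug option, so a concurrent stealer can never observe a stale entry:

/* Releasing a slot. Clearing slots[id].owner unconditionally mirrors
 * the patch moving context_mm[id] = NULL out of DEBUG_MAP_CONSISTENCY:
 * try_steal() can then never pick up a stale owner pointer. */
static void put_slot(unsigned int id)
{
        pthread_mutex_lock(&lock);
        slots[id].active = 0;
        slots[id].owner = NULL;
        nr_free++;
        pthread_mutex_unlock(&lock);
}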