diff options
author:    Dimitri Sivanich <sivanich@sgi.com>  2010-12-28 14:34:42 -0500
committer: Tony Luck <tony.luck@intel.com>      2010-12-28 17:06:21 -0500
commit:    75c1c91cb92806f960fcd6e53d2a0c21f343081c (patch)
tree:      f87fd2f7cf75b9d2b1180452120f8dd852d2c9b6
parent:    90a8a73c06cc32b609a880d48449d7083327e11a (diff)
[IA64] eliminate race condition in smp_flush_tlb_mm
A race condition exists within smp_call_function_many() when called from
smp_flush_tlb_mm(). On rare occasions the cpu_vm_mask can be cleared
while smp_call_function_many is executing, occasionally resulting in a
hung process.
Make a copy of the mask prior to calling smp_call_function_many().
Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r-- | arch/ia64/kernel/smp.c | 13 |
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index dabeefe21134..be450a3e9871 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -293,6 +293,7 @@ smp_flush_tlb_all (void)
 void
 smp_flush_tlb_mm (struct mm_struct *mm)
 {
+	cpumask_var_t cpus;
 	preempt_disable();
 	/* this happens for the common case of a single-threaded fork(): */
 	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
@@ -301,9 +302,15 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 		preempt_enable();
 		return;
 	}
-
-	smp_call_function_many(mm_cpumask(mm),
-		(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
+		smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
+			mm, 1);
+	} else {
+		cpumask_copy(cpus, mm_cpumask(mm));
+		smp_call_function_many(cpus,
+			(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+		free_cpumask_var(cpus);
+	}
 	local_irq_disable();
 	local_finish_flush_tlb_mm(mm);
 	local_irq_enable();