author		Anton Vorontsov <anton.vorontsov@linaro.org>	2012-05-31 19:26:23 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-31 20:49:29 -0400
commit		73863ab028579ed98c4f1f36d016536b1b415344 (patch)
tree		8ad66593e4b2ebd7f32acdb2e53dc0bb8dd201f5 /arch
parent		3eaa73bde2fb475b731a0fde7dd11c3ecfb8679c (diff)
powerpc: use clear_tasks_mm_cpumask()
Current CPU hotplug code has some task->mm handling issues:

1. Working with task->mm without getting the mm or grabbing the task lock
   is dangerous, as ->mm might disappear (exit_mm() assigns NULL under
   task_lock(), so tasklist_lock is not enough). We can't use the
   get_task_mm()/mmput() pair because mmput() might sleep, so we must take
   the task lock while handling its mm.

2. Checking for process->mm is not enough because the process' main thread
   may exit or detach its mm via use_mm(), while other threads may still
   have a valid mm. To fix this we would need to use find_lock_task_mm(),
   which walks all threads and returns an appropriate task (with the task
   lock held).

clear_tasks_mm_cpumask() has all these issues fixed, so let's use it.

Suggested-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Anton Vorontsov <anton.vorontsov@linaro.org>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
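For context, here is a rough sketch of the generic helper this patch switches to, pieced together from the description above. The authoritative implementation lives in kernel/cpu.c and may differ in detail (sanity checks, exact headers, comments); treat this as an illustration, not the kernel's code.

/* Sketch only -- see kernel/cpu.c for the real clear_tasks_mm_cpumask(). */
#include <linux/cpumask.h>
#include <linux/oom.h>		/* find_lock_task_mm() */
#include <linux/rcupdate.h>
#include <linux/sched.h>

void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * The CPU is already offline at this point, so no task can newly
	 * set this CPU in its mm mask; walking the task list under RCU is
	 * sufficient here.
	 */
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * The main thread may have exited or detached its mm via
		 * use_mm(); find any thread that still has a valid ->mm,
		 * returned with its task lock held.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

This is exactly the pattern the commit message asks for: per-task locking via find_lock_task_mm() instead of an unlocked p->mm check under tasklist_lock.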
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/mm/mmu_context_nohash.c	11
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 5b63bd3da4a9..e779642c25e5 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -333,9 +333,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
 					     unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned int)(long)hcpu;
-#ifdef CONFIG_HOTPLUG_CPU
-	struct task_struct *p;
-#endif
+
 	/* We don't touch CPU 0 map, it's allocated at aboot and kept
 	 * around forever
 	 */
@@ -358,12 +356,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
 		stale_map[cpu] = NULL;
 
 		/* We also clear the cpu_vm_mask bits of CPUs going away */
-		read_lock(&tasklist_lock);
-		for_each_process(p) {
-			if (p->mm)
-				cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
-		}
-		read_unlock(&tasklist_lock);
+		clear_tasks_mm_cpumask(cpu);
 		break;
 #endif /* CONFIG_HOTPLUG_CPU */
 	}