author		Roland McGrath <roland@redhat.com>	2008-07-25 22:45:58 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-26 15:00:09 -0400
commit		85ba2d862e521375a8ee01526c5c46b1f24bb4af (patch)
tree		f098555cb580b2864f145aa845b574f07d2cf694 /kernel/kthread.c
parent		1f5a4ad97a0e7b663d527ecc02aabe203d000d91 (diff)
tracehook: wait_task_inactive
This extends wait_task_inactive() with a new argument so it can be used in a
"soft" mode where it will check for the task changing state unexpectedly and
back off.  There is no change to existing callers.

This lays the groundwork to allow robust, noninvasive tracing that can try to
sample a blocked thread but back off safely if it wakes up.

Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
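[Editor's sketch, not part of this commit: one way a tracer might use the new
soft mode.  The helper name sample_traced_thread() is hypothetical; it assumes
the post-commit signature wait_task_inactive(task, match_state), where a
non-zero match_state makes the call return 0 instead of blocking when the task
is no longer in that state.]

	#include <linux/sched.h>
	#include <linux/errno.h>

	/* Hypothetical helper: sample a thread believed to be in TASK_TRACED. */
	static int sample_traced_thread(struct task_struct *task)
	{
		/*
		 * With a non-zero match_state, wait_task_inactive() checks that
		 * the task is still in that state; if the task has woken up, it
		 * returns 0 rather than waiting, so the caller can back off.
		 */
		if (!wait_task_inactive(task, TASK_TRACED))
			return -ESRCH;	/* thread changed state; back off */

		/* Here the thread is off the CPU and still in TASK_TRACED. */
		return 0;
	}

Existing callers such as kthread_bind() pass 0 for match_state, which keeps
the old unconditional-wait behavior, as the diff below shows.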
Diffstat (limited to 'kernel/kthread.c')
-rw-r--r--	kernel/kthread.c	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 6111c27491b1..96cff2f8710b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -176,7 +176,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
 		return;
 	}
 	/* Must have done schedule() in kthread() before we set_task_cpu */
-	wait_task_inactive(k);
+	wait_task_inactive(k, 0);
 	set_task_cpu(k, cpu);
 	k->cpus_allowed = cpumask_of_cpu(cpu);
 	k->rt.nr_cpus_allowed = 1;