aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMiklos Szeredi <mszeredi@suse.cz>2009-03-23 11:07:24 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-03-23 12:22:31 -0400
commit53da1d9456fe7f87a920a78fdbdcf1225d197cb7 (patch)
treeeccd5357ceff25a9a07be802ac0161c8c1842e64
parentb0dcb4a91ddb79f2e213205cf8d86b467f8559c7 (diff)
fix ptrace slowness
This patch fixes bug #12208:

  Bug-Entry : http://bugzilla.kernel.org/show_bug.cgi?id=12208
  Subject   : uml is very slow on 2.6.28 host

This turned out to be not a scheduler regression, but an already
existing problem in ptrace being triggered by subtle scheduler
changes.

The problem is this:

 - task A is ptracing task B
 - task B stops on a trace event
 - task A is woken up and preempts task B
 - task A calls ptrace on task B, which does ptrace_check_attach()
 - this calls wait_task_inactive(), which sees that task B is still on the runq
 - task A goes to sleep for a jiffy
 - ...

Since UML does lots of the above sequences, those jiffies quickly add
up to make it slow as hell.

This patch solves this by not rescheduling in read_unlock() after
ptrace_stop() has woken up the tracer.

Thanks to Oleg Nesterov and Ingo Molnar for the feedback.

Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
CC: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--kernel/signal.c8
1 file changed, 8 insertions, 0 deletions
diff --git a/kernel/signal.c b/kernel/signal.c
index 2a74fe87c0dd..1c8814481a11 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1575,7 +1575,15 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
 	read_lock(&tasklist_lock);
 	if (may_ptrace_stop()) {
 		do_notify_parent_cldstop(current, CLD_TRAPPED);
+		/*
+		 * Don't want to allow preemption here, because
+		 * sys_ptrace() needs this task to be inactive.
+		 *
+		 * XXX: implement read_unlock_no_resched().
+		 */
+		preempt_disable();
 		read_unlock(&tasklist_lock);
+		preempt_enable_no_resched();
 		schedule();
 	} else {
 		/*