author     Roland McGrath <roland@redhat.com>             2009-09-23 18:56:53 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2009-09-24 10:21:00 -0400
commit     ae6d2ed7bb3877ff35b9569402025f40ea2e1803
tree       80527061ab7615cd890236b777b2be6e909a1573
parent     b6fe2d117e98805ee76352e6468f87d494a97292
signals: tracehook_notify_jctl change
This changes tracehook_notify_jctl() so it's called with the siglock held, and
changes its argument and return value definition.  These clean-ups make it a
better fit for what new tracing hooks need to check.

Tracing needs the siglock here, held from the time TASK_STOPPED was set, to
avoid potential SIGCONT races if it wants to allow any blocking in its
tracing hooks.

This also folds the finish_stop() function into its caller do_signal_stop().
The function is short, called only once and only unconditionally.  It aids
readability to fold it in.

[oleg@redhat.com: do not call tracehook_notify_jctl() in TASK_STOPPED state]
[oleg@redhat.com: introduce tracehook_finish_jctl() helper]

Signed-off-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/tracehook.h | 34
-rw-r--r--  kernel/signal.c           | 97
2 files changed, 72 insertions, 59 deletions
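For illustration only (not part of the commit): a condensed sketch of how a
caller's use of tracehook_notify_jctl() changes under the new contract,
abbreviated from the do_signal_stop() hunks below.  The variables
(stop_count, sig, notify) are those of kernel/signal.c; surrounding code is
elided.

	/* Old contract: called with no locks held, returns a boolean. */
	spin_unlock_irq(&current->sighand->siglock);
	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	/*
	 * New contract: called with ->siglock held; takes and returns a
	 * CLD_* code (or 0), so the caller decides whether to notify the
	 * parent after dropping the lock.
	 */
	notify = tracehook_notify_jctl(sig->group_stop_count == 1 ?
				       CLD_STOPPED : 0, CLD_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	if (notify) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, notify);
		read_unlock(&tasklist_lock);
	}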
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 17ba82efa483..1eb44a924e56 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -1,7 +1,7 @@
 /*
  * Tracing hooks
  *
- * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
@@ -463,22 +463,38 @@ static inline int tracehook_get_signal(struct task_struct *task,
 
 /**
  * tracehook_notify_jctl - report about job control stop/continue
- * @notify:	nonzero if this is the last thread in the group to stop
+ * @notify:	zero, %CLD_STOPPED or %CLD_CONTINUED
  * @why:	%CLD_STOPPED or %CLD_CONTINUED
  *
  * This is called when we might call do_notify_parent_cldstop().
- * It's called when about to stop for job control; we are already in
- * %TASK_STOPPED state, about to call schedule().  It's also called when
- * a delayed %CLD_STOPPED or %CLD_CONTINUED report is ready to be made.
  *
- * Return nonzero to generate a %SIGCHLD with @why, which is
- * normal if @notify is nonzero.
+ * @notify is zero if we would not ordinarily send a %SIGCHLD,
+ * or is the %CLD_STOPPED or %CLD_CONTINUED .si_code for %SIGCHLD.
  *
- * Called with no locks held.
+ * @why is %CLD_STOPPED when about to stop for job control;
+ * we are already in %TASK_STOPPED state, about to call schedule().
+ * It might also be that we have just exited (check %PF_EXITING),
+ * but need to report that a group-wide stop is complete.
+ *
+ * @why is %CLD_CONTINUED when waking up after job control stop and
+ * ready to make a delayed @notify report.
+ *
+ * Return the %CLD_* value for %SIGCHLD, or zero to generate no signal.
+ *
+ * Called with the siglock held.
  */
 static inline int tracehook_notify_jctl(int notify, int why)
 {
-	return notify || (current->ptrace & PT_PTRACED);
+	return notify ?: (current->ptrace & PT_PTRACED) ? why : 0;
+}
+
+/**
+ * tracehook_finish_jctl - report about return from job control stop
+ *
+ * This is called by do_signal_stop() after wakeup.
+ */
+static inline void tracehook_finish_jctl(void)
+{
 }
 
 #define DEATH_REAP	-1
diff --git a/kernel/signal.c b/kernel/signal.c
index 534ea81cde47..5d3b3f8f219b 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -705,7 +705,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
 
 	if (why) {
 		/*
-		 * The first thread which returns from finish_stop()
+		 * The first thread which returns from do_signal_stop()
 		 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 		 * notify its parent. See get_signal_to_deliver().
 		 */
@@ -1664,29 +1664,6 @@ void ptrace_notify(int exit_code)
 	spin_unlock_irq(&current->sighand->siglock);
 }
 
-static void
-finish_stop(int stop_count)
-{
-	/*
-	 * If there are no other threads in the group, or if there is
-	 * a group stop in progress and we are the last to stop,
-	 * report to the parent. When ptraced, every thread reports itself.
-	 */
-	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
-		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(current, CLD_STOPPED);
-		read_unlock(&tasklist_lock);
-	}
-
-	do {
-		schedule();
-	} while (try_to_freeze());
-	/*
-	 * Now we don't run again until continued.
-	 */
-	current->exit_code = 0;
-}
-
 /*
  * This performs the stopping for SIGSTOP and other stop signals.
  * We have to stop all threads in the thread group.
@@ -1696,15 +1673,9 @@ finish_stop(int stop_count)
 static int do_signal_stop(int signr)
 {
 	struct signal_struct *sig = current->signal;
-	int stop_count;
+	int notify;
 
-	if (sig->group_stop_count > 0) {
-		/*
-		 * There is a group stop in progress. We don't need to
-		 * start another one.
-		 */
-		stop_count = --sig->group_stop_count;
-	} else {
+	if (!sig->group_stop_count) {
 		struct task_struct *t;
 
 		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
@@ -1716,7 +1687,7 @@ static int do_signal_stop(int signr)
 		 */
 		sig->group_exit_code = signr;
 
-		stop_count = 0;
+		sig->group_stop_count = 1;
 		for (t = next_thread(current); t != current; t = next_thread(t))
 			/*
 			 * Setting state to TASK_STOPPED for a group
@@ -1725,19 +1696,44 @@ static int do_signal_stop(int signr)
 			 */
 			if (!(t->flags & PF_EXITING) &&
 			    !task_is_stopped_or_traced(t)) {
-				stop_count++;
+				sig->group_stop_count++;
 				signal_wake_up(t, 0);
 			}
-		sig->group_stop_count = stop_count;
 	}
+	/*
+	 * If there are no other threads in the group, or if there is
+	 * a group stop in progress and we are the last to stop, report
+	 * to the parent. When ptraced, every thread reports itself.
+	 */
+	notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
+	notify = tracehook_notify_jctl(notify, CLD_STOPPED);
+	/*
+	 * tracehook_notify_jctl() can drop and reacquire siglock, so
+	 * we keep ->group_stop_count != 0 before the call. If SIGCONT
+	 * or SIGKILL comes in between ->group_stop_count == 0.
+	 */
+	if (sig->group_stop_count) {
+		if (!--sig->group_stop_count)
+			sig->flags = SIGNAL_STOP_STOPPED;
+		current->exit_code = sig->group_exit_code;
+		__set_current_state(TASK_STOPPED);
+	}
+	spin_unlock_irq(&current->sighand->siglock);
 
-	if (stop_count == 0)
-		sig->flags = SIGNAL_STOP_STOPPED;
-	current->exit_code = sig->group_exit_code;
-	__set_current_state(TASK_STOPPED);
+	if (notify) {
+		read_lock(&tasklist_lock);
+		do_notify_parent_cldstop(current, notify);
+		read_unlock(&tasklist_lock);
+	}
+
+	/* Now we don't run again until woken by SIGCONT or SIGKILL */
+	do {
+		schedule();
+	} while (try_to_freeze());
+
+	tracehook_finish_jctl();
+	current->exit_code = 0;
 
-	spin_unlock_irq(&current->sighand->siglock);
-	finish_stop(stop_count);
 	return 1;
 }
 
@@ -1806,14 +1802,15 @@ relock:
 		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
 				? CLD_CONTINUED : CLD_STOPPED;
 		signal->flags &= ~SIGNAL_CLD_MASK;
-		spin_unlock_irq(&sighand->siglock);
 
-		if (unlikely(!tracehook_notify_jctl(1, why)))
-			goto relock;
+		why = tracehook_notify_jctl(why, CLD_CONTINUED);
+		spin_unlock_irq(&sighand->siglock);
 
-		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(current->group_leader, why);
-		read_unlock(&tasklist_lock);
+		if (why) {
+			read_lock(&tasklist_lock);
+			do_notify_parent_cldstop(current->group_leader, why);
+			read_unlock(&tasklist_lock);
+		}
 		goto relock;
 	}
 
@@ -1978,14 +1975,14 @@ void exit_signals(struct task_struct *tsk)
 	if (unlikely(tsk->signal->group_stop_count) &&
 			!--tsk->signal->group_stop_count) {
 		tsk->signal->flags = SIGNAL_STOP_STOPPED;
-		group_stop = 1;
+		group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
 	}
 out:
 	spin_unlock_irq(&tsk->sighand->siglock);
 
-	if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
+	if (unlikely(group_stop)) {
 		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(tsk, CLD_STOPPED);
+		do_notify_parent_cldstop(tsk, group_stop);
 		read_unlock(&tasklist_lock);
 	}
 }