aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorOleg Nesterov <oleg@redhat.com>2012-10-26 13:46:06 -0400
committerTejun Heo <tj@kernel.org>2012-10-26 17:27:49 -0400
commit5d8f72b55c275677865de670fa147ed318191d81 (patch)
tree58f4f571440f412861a232ed0c5753771e5a6e58
parentead5c473712eb26db792b18a4dc98fdb312883fe (diff)
freezer: change ptrace_stop/do_signal_stop to use freezable_schedule()
try_to_freeze_tasks() and cgroup_freezer rely on scheduler locks to ensure that a task doing STOPPED/TRACED -> RUNNING transition can't escape freezing. This mostly works, but ptrace_stop() does not necessarily call schedule(), it can change task->state back to RUNNING and check freezing() without any lock/barrier in between. We could add the necessary barrier, but this patch changes ptrace_stop() and do_signal_stop() to use freezable_schedule(). This fixes the race: freezer_count() and freezer_should_skip() carefully avoid the race. And this simplifies the code, try_to_freeze_tasks/update_if_frozen no longer need to use task_is_stopped_or_traced() checks with the non-trivial assumptions. We can rely on the mechanism which was specially designed to mark the sleeping task as "frozen enough". v2: As Tejun pointed out, we can also change get_signal_to_deliver() and move try_to_freeze() up before 'relock' label. Signed-off-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--include/linux/freezer.h7
-rw-r--r--kernel/cgroup_freezer.c3
-rw-r--r--kernel/freezer.c11
-rw-r--r--kernel/power/process.c13
-rw-r--r--kernel/signal.c20
5 files changed, 13 insertions, 41 deletions
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index ee899329e65a..8039893bc3ec 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -134,10 +134,9 @@ static inline bool freezer_should_skip(struct task_struct *p)
134} 134}
135 135
136/* 136/*
137 * These macros are intended to be used whenever you want to allow a task that's 137 * These macros are intended to be used whenever you want to allow a sleeping
138 * sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen. Note 138 * task to be frozen. Note that neither return any clear indication of
139 * that neither return any clear indication of whether a freeze event happened 139 * whether a freeze event happened while in this function.
140 * while in this function.
141 */ 140 */
142 141
143/* Like schedule(), but should not block the freezer. */ 142/* Like schedule(), but should not block the freezer. */
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 8a92b0e52099..bedefd9a22df 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -198,8 +198,7 @@ static void update_if_frozen(struct cgroup *cgroup, struct freezer *freezer)
198 * completion. Consider it frozen in addition to 198 * completion. Consider it frozen in addition to
199 * the usual frozen condition. 199 * the usual frozen condition.
200 */ 200 */
201 if (!frozen(task) && !task_is_stopped_or_traced(task) && 201 if (!frozen(task) && !freezer_should_skip(task))
202 !freezer_should_skip(task))
203 goto notyet; 202 goto notyet;
204 } 203 }
205 } 204 }
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 11f82a4d4eae..c38893b0efba 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -116,17 +116,10 @@ bool freeze_task(struct task_struct *p)
116 return false; 116 return false;
117 } 117 }
118 118
119 if (!(p->flags & PF_KTHREAD)) { 119 if (!(p->flags & PF_KTHREAD))
120 fake_signal_wake_up(p); 120 fake_signal_wake_up(p);
121 /* 121 else
122 * fake_signal_wake_up() goes through p's scheduler
123 * lock and guarantees that TASK_STOPPED/TRACED ->
124 * TASK_RUNNING transition can't race with task state
125 * testing in try_to_freeze_tasks().
126 */
127 } else {
128 wake_up_state(p, TASK_INTERRUPTIBLE); 122 wake_up_state(p, TASK_INTERRUPTIBLE);
129 }
130 123
131 spin_unlock_irqrestore(&freezer_lock, flags); 124 spin_unlock_irqrestore(&freezer_lock, flags);
132 return true; 125 return true;
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 87da817f9e13..d5a258b60c6f 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -48,18 +48,7 @@ static int try_to_freeze_tasks(bool user_only)
48 if (p == current || !freeze_task(p)) 48 if (p == current || !freeze_task(p))
49 continue; 49 continue;
50 50
51 /* 51 if (!freezer_should_skip(p))
52 * Now that we've done set_freeze_flag, don't
53 * perturb a task in TASK_STOPPED or TASK_TRACED.
54 * It is "frozen enough". If the task does wake
55 * up, it will immediately call try_to_freeze.
56 *
57 * Because freeze_task() goes through p's scheduler lock, it's
58 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
59 * transition can't race with task state testing here.
60 */
61 if (!task_is_stopped_or_traced(p) &&
62 !freezer_should_skip(p))
63 todo++; 52 todo++;
64 } while_each_thread(g, p); 53 } while_each_thread(g, p);
65 read_unlock(&tasklist_lock); 54 read_unlock(&tasklist_lock);
diff --git a/kernel/signal.c b/kernel/signal.c
index 0af8868525d6..5ffb5626e072 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1908,7 +1908,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1908 preempt_disable(); 1908 preempt_disable();
1909 read_unlock(&tasklist_lock); 1909 read_unlock(&tasklist_lock);
1910 preempt_enable_no_resched(); 1910 preempt_enable_no_resched();
1911 schedule(); 1911 freezable_schedule();
1912 } else { 1912 } else {
1913 /* 1913 /*
1914 * By the time we got the lock, our tracer went away. 1914 * By the time we got the lock, our tracer went away.
@@ -1930,13 +1930,6 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1930 } 1930 }
1931 1931
1932 /* 1932 /*
1933 * While in TASK_TRACED, we were considered "frozen enough".
1934 * Now that we woke up, it's crucial if we're supposed to be
1935 * frozen that we freeze now before running anything substantial.
1936 */
1937 try_to_freeze();
1938
1939 /*
1940 * We are back. Now reacquire the siglock before touching 1933 * We are back. Now reacquire the siglock before touching
1941 * last_siginfo, so that we are sure to have synchronized with 1934 * last_siginfo, so that we are sure to have synchronized with
1942 * any signal-sending on another CPU that wants to examine it. 1935 * any signal-sending on another CPU that wants to examine it.
@@ -2092,7 +2085,7 @@ static bool do_signal_stop(int signr)
2092 } 2085 }
2093 2086
2094 /* Now we don't run again until woken by SIGCONT or SIGKILL */ 2087 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2095 schedule(); 2088 freezable_schedule();
2096 return true; 2089 return true;
2097 } else { 2090 } else {
2098 /* 2091 /*
@@ -2200,15 +2193,14 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
2200 if (unlikely(uprobe_deny_signal())) 2193 if (unlikely(uprobe_deny_signal()))
2201 return 0; 2194 return 0;
2202 2195
2203relock:
2204 /* 2196 /*
2205 * We'll jump back here after any time we were stopped in TASK_STOPPED. 2197 * Do this once, we can't return to user-mode if freezing() == T.
2206 * While in TASK_STOPPED, we were considered "frozen enough". 2198 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2207 * Now that we woke up, it's crucial if we're supposed to be 2199 * thus do not need another check after return.
2208 * frozen that we freeze now before running anything substantial.
2209 */ 2200 */
2210 try_to_freeze(); 2201 try_to_freeze();
2211 2202
2203relock:
2212 spin_lock_irq(&sighand->siglock); 2204 spin_lock_irq(&sighand->siglock);
2213 /* 2205 /*
2214 * Every stopped thread goes here after wakeup. Check to see if 2206 * Every stopped thread goes here after wakeup. Check to see if