author    Linus Torvalds <torvalds@linux-foundation.org>    2011-07-22 18:06:50 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-07-22 18:06:50 -0400
commit    8209f53d79444747782a28520187abaf689761f2 (patch)
tree      726270ea29e037f026d77a99787b9d844531ac42
parent    22a3b9771117d566def0150ea787fcc95f16e724 (diff)
parent    eac1b5e57d7abc836e78fd3fbcf77dbeed01edc9 (diff)
Merge branch 'ptrace' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc
* 'ptrace' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc: (39 commits)
  ptrace: do_wait(traced_leader_killed_by_mt_exec) can block forever
  ptrace: fix ptrace_signal() && STOP_DEQUEUED interaction
  connector: add an event for monitoring process tracers
  ptrace: dont send SIGSTOP on auto-attach if PT_SEIZED
  ptrace: mv send-SIGSTOP from do_fork() to ptrace_init_task()
  ptrace_init_task: initialize child->jobctl explicitly
  has_stopped_jobs: s/task_is_stopped/SIGNAL_STOP_STOPPED/
  ptrace: make former thread ID available via PTRACE_GETEVENTMSG after PTRACE_EVENT_EXEC stop
  ptrace: wait_consider_task: s/same_thread_group/ptrace_reparented/
  ptrace: kill real_parent_is_ptracer() in favor of ptrace_reparented()
  ptrace: ptrace_reparented() should check same_thread_group()
  redefine thread_group_leader() as exit_signal >= 0
  do not change dead_task->exit_signal
  kill task_detached()
  reparent_leader: check EXIT_DEAD instead of task_detached()
  make do_notify_parent() __must_check, update the callers
  __ptrace_detach: avoid task_detached(), check do_notify_parent()
  kill tracehook_notify_death()
  make do_notify_parent() return bool
  ptrace: s/tracehook_tracer_task()/ptrace_parent()/
  ...
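Note: the headline feature of this branch is the new PTRACE_SEIZE/PTRACE_INTERRUPT/PTRACE_LISTEN attach machinery (see the include/linux/ptrace.h hunks below). As orientation, a minimal user-space sketch of the intended attach flow follows. It is illustrative only: the request values are copied from this diff, the interface was still gated behind the PTRACE_SEIZE_DEVEL flag at this point and its stop-reporting details were still being settled, so the constants are defined locally rather than taken from libc headers.

    /* Illustrative sketch of the SEIZE-based attach flow added by this
     * branch.  Constants are defined locally because the development-stage
     * requests were not yet in libc headers; the values come from the
     * include/linux/ptrace.h hunk in this merge. */
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    #define PTRACE_SEIZE        0x4206
    #define PTRACE_INTERRUPT    0x4207
    #define PTRACE_SEIZE_DEVEL  0x80000000UL  /* temp flag for development */

    int seize_and_stop(pid_t pid)
    {
        /* SEIZE attaches without stopping the tracee and without the
         * implicit SIGSTOP that PTRACE_ATTACH injects. */
        if (ptrace(PTRACE_SEIZE, pid, NULL, (void *)PTRACE_SEIZE_DEVEL) < 0)
            return -1;

        /* INTERRUPT traps the tracee only when we actually need it
         * stopped; the stop is reported through waitpid() as usual. */
        if (ptrace(PTRACE_INTERRUPT, pid, NULL, NULL) < 0)
            return -1;

        int status;
        if (waitpid(pid, &status, 0) < 0)
            return -1;
        printf("tracee %d stopped, status %#x\n", pid, status);
        return 0;
    }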
-rw-r--r--  arch/s390/kernel/traps.c    |   4
-rw-r--r--  drivers/connector/cn_proc.c |  35
-rw-r--r--  fs/exec.c                   |  27
-rw-r--r--  fs/proc/array.c             |   2
-rw-r--r--  fs/proc/base.c              |   2
-rw-r--r--  include/linux/cn_proc.h     |  13
-rw-r--r--  include/linux/ptrace.h      | 104
-rw-r--r--  include/linux/sched.h       |  52
-rw-r--r--  include/linux/tracehook.h   | 385
-rw-r--r--  kernel/exit.c               |  91
-rw-r--r--  kernel/fork.c               |  33
-rw-r--r--  kernel/ptrace.c             | 197
-rw-r--r--  kernel/signal.c             | 425
-rw-r--r--  mm/nommu.c                  |   3
-rw-r--r--  mm/oom_kill.c               |   3
-rw-r--r--  security/apparmor/domain.c  |   2
-rw-r--r--  security/selinux/hooks.c    |   4
17 files changed, 674 insertions, 708 deletions
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index a65d2e82f61d..a63d34c3611e 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -331,7 +331,7 @@ void __kprobes do_per_trap(struct pt_regs *regs)
 {
     if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
         return;
-    if (tracehook_consider_fatal_signal(current, SIGTRAP))
+    if (current->ptrace)
         force_sig(SIGTRAP, current);
 }
 
@@ -425,7 +425,7 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
     if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
         return;
     if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
-        if (tracehook_consider_fatal_signal(current, SIGTRAP))
+        if (current->ptrace)
             force_sig(SIGTRAP, current);
         else
             signal = SIGILL;
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 2b46a7efa0ac..281902d3f7ec 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -28,6 +28,7 @@
 #include <linux/init.h>
 #include <linux/connector.h>
 #include <linux/gfp.h>
+#include <linux/ptrace.h>
 #include <asm/atomic.h>
 #include <asm/unaligned.h>
 
@@ -166,6 +167,40 @@ void proc_sid_connector(struct task_struct *task)
     cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
+void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
+{
+    struct cn_msg *msg;
+    struct proc_event *ev;
+    struct timespec ts;
+    __u8 buffer[CN_PROC_MSG_SIZE];
+    struct task_struct *tracer;
+
+    if (atomic_read(&proc_event_num_listeners) < 1)
+        return;
+
+    msg = (struct cn_msg *)buffer;
+    ev = (struct proc_event *)msg->data;
+    get_seq(&msg->seq, &ev->cpu);
+    ktime_get_ts(&ts); /* get high res monotonic timestamp */
+    put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+    ev->what = PROC_EVENT_PTRACE;
+    ev->event_data.ptrace.process_pid  = task->pid;
+    ev->event_data.ptrace.process_tgid = task->tgid;
+    if (ptrace_id == PTRACE_ATTACH) {
+        ev->event_data.ptrace.tracer_pid  = current->pid;
+        ev->event_data.ptrace.tracer_tgid = current->tgid;
+    } else if (ptrace_id == PTRACE_DETACH) {
+        ev->event_data.ptrace.tracer_pid  = 0;
+        ev->event_data.ptrace.tracer_tgid = 0;
+    } else
+        return;
+
+    memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+    msg->ack = 0; /* not used */
+    msg->len = sizeof(*ev);
+    cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+}
+
 void proc_exit_connector(struct task_struct *task)
 {
     struct cn_msg *msg;
diff --git a/fs/exec.c b/fs/exec.c
index 6075a1e727ae..d9576f261815 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -963,9 +963,18 @@ static int de_thread(struct task_struct *tsk)
     leader->group_leader = tsk;
 
     tsk->exit_signal = SIGCHLD;
+    leader->exit_signal = -1;
 
     BUG_ON(leader->exit_state != EXIT_ZOMBIE);
     leader->exit_state = EXIT_DEAD;
+
+    /*
+     * We are going to release_task()->ptrace_unlink() silently,
+     * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
+     * the tracer won't block again waiting for this thread.
+     */
+    if (unlikely(leader->ptrace))
+        __wake_up_parent(leader, leader->parent);
     write_unlock_irq(&tasklist_lock);
 
     release_task(leader);
@@ -1225,7 +1234,12 @@ int check_unsafe_exec(struct linux_binprm *bprm)
     unsigned n_fs;
     int res = 0;
 
-    bprm->unsafe = tracehook_unsafe_exec(p);
+    if (p->ptrace) {
+        if (p->ptrace & PT_PTRACE_CAP)
+            bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
+        else
+            bprm->unsafe |= LSM_UNSAFE_PTRACE;
+    }
 
     n_fs = 1;
     spin_lock(&p->fs->lock);
@@ -1353,6 +1367,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
     unsigned int depth = bprm->recursion_depth;
     int try,retval;
     struct linux_binfmt *fmt;
+    pid_t old_pid;
 
     retval = security_bprm_check(bprm);
     if (retval)
@@ -1362,6 +1377,11 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
     if (retval)
         return retval;
 
+    /* Need to fetch pid before load_binary changes it */
+    rcu_read_lock();
+    old_pid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
+    rcu_read_unlock();
+
     retval = -ENOENT;
     for (try=0; try<2; try++) {
         read_lock(&binfmt_lock);
@@ -1381,7 +1401,8 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
             bprm->recursion_depth = depth;
             if (retval >= 0) {
                 if (depth == 0)
-                    tracehook_report_exec(fmt, bprm, regs);
+                    ptrace_event(PTRACE_EVENT_EXEC,
+                            old_pid);
                 put_binfmt(fmt);
                 allow_write_access(bprm->file);
                 if (bprm->file)
@@ -1769,7 +1790,7 @@ static int zap_process(struct task_struct *start, int exit_code)
 
     t = start;
     do {
-        task_clear_group_stop_pending(t);
+        task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
         if (t != current && t->mm) {
             sigaddset(&t->pending.signal, SIGKILL);
             signal_wake_up(t, 1);
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 9b45ee84fbcc..3a1dafd228d1 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -172,7 +172,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
         task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
     tpid = 0;
     if (pid_alive(p)) {
-        struct task_struct *tracer = tracehook_tracer_task(p);
+        struct task_struct *tracer = ptrace_parent(p);
         if (tracer)
             tpid = task_pid_nr_ns(tracer, ns);
     }
diff --git a/fs/proc/base.c b/fs/proc/base.c
index fc5bc2767692..c47719aaadef 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -216,7 +216,7 @@ static struct mm_struct *__check_mem_permission(struct task_struct *task)
     if (task_is_stopped_or_traced(task)) {
         int match;
         rcu_read_lock();
-        match = (tracehook_tracer_task(task) == current);
+        match = (ptrace_parent(task) == current);
         rcu_read_unlock();
         if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
             return mm;
diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h
index 47dac5ea8d3a..12c517b51ca2 100644
--- a/include/linux/cn_proc.h
+++ b/include/linux/cn_proc.h
@@ -53,6 +53,7 @@ struct proc_event {
     PROC_EVENT_UID  = 0x00000004,
     PROC_EVENT_GID  = 0x00000040,
     PROC_EVENT_SID  = 0x00000080,
+    PROC_EVENT_PTRACE = 0x00000100,
     /* "next" should be 0x00000400 */
     /* "last" is the last process event: exit */
     PROC_EVENT_EXIT = 0x80000000
@@ -95,6 +96,13 @@ struct proc_event {
             __kernel_pid_t process_tgid;
         } sid;
 
+        struct ptrace_proc_event {
+            __kernel_pid_t process_pid;
+            __kernel_pid_t process_tgid;
+            __kernel_pid_t tracer_pid;
+            __kernel_pid_t tracer_tgid;
+        } ptrace;
+
         struct exit_proc_event {
             __kernel_pid_t process_pid;
             __kernel_pid_t process_tgid;
@@ -109,6 +117,7 @@ void proc_fork_connector(struct task_struct *task);
 void proc_exec_connector(struct task_struct *task);
 void proc_id_connector(struct task_struct *task, int which_id);
 void proc_sid_connector(struct task_struct *task);
+void proc_ptrace_connector(struct task_struct *task, int which_id);
 void proc_exit_connector(struct task_struct *task);
 #else
 static inline void proc_fork_connector(struct task_struct *task)
@@ -124,6 +133,10 @@ static inline void proc_id_connector(struct task_struct *task,
 static inline void proc_sid_connector(struct task_struct *task)
 {}
 
+static inline void proc_ptrace_connector(struct task_struct *task,
+                     int ptrace_id)
+{}
+
 static inline void proc_exit_connector(struct task_struct *task)
 {}
 #endif /* CONFIG_PROC_EVENTS */
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 9178d5cc0b01..800f113bea66 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -47,6 +47,13 @@
 #define PTRACE_GETREGSET    0x4204
 #define PTRACE_SETREGSET    0x4205
 
+#define PTRACE_SEIZE        0x4206
+#define PTRACE_INTERRUPT    0x4207
+#define PTRACE_LISTEN       0x4208
+
+/* flags in @data for PTRACE_SEIZE */
+#define PTRACE_SEIZE_DEVEL  0x80000000 /* temp flag for development */
+
 /* options set using PTRACE_SETOPTIONS */
 #define PTRACE_O_TRACESYSGOOD   0x00000001
 #define PTRACE_O_TRACEFORK  0x00000002
@@ -65,6 +72,7 @@
 #define PTRACE_EVENT_EXEC   4
 #define PTRACE_EVENT_VFORK_DONE 5
 #define PTRACE_EVENT_EXIT   6
+#define PTRACE_EVENT_STOP   7
 
 #include <asm/ptrace.h>
 
@@ -77,16 +85,22 @@
  * flags. When a task is stopped the ptracer owns task->ptrace.
  */
 
+#define PT_SEIZED   0x00010000  /* SEIZE used, enable new behavior */
 #define PT_PTRACED  0x00000001
 #define PT_DTRACE   0x00000002  /* delayed trace (used on m68k, i386) */
 #define PT_TRACESYSGOOD 0x00000004
 #define PT_PTRACE_CAP   0x00000008  /* ptracer can follow suid-exec */
-#define PT_TRACE_FORK   0x00000010
-#define PT_TRACE_VFORK  0x00000020
-#define PT_TRACE_CLONE  0x00000040
-#define PT_TRACE_EXEC   0x00000080
-#define PT_TRACE_VFORK_DONE 0x00000100
-#define PT_TRACE_EXIT   0x00000200
+
+/* PT_TRACE_* event enable flags */
+#define PT_EVENT_FLAG_SHIFT 4
+#define PT_EVENT_FLAG(event)    (1 << (PT_EVENT_FLAG_SHIFT + (event) - 1))
+
+#define PT_TRACE_FORK       PT_EVENT_FLAG(PTRACE_EVENT_FORK)
+#define PT_TRACE_VFORK      PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
+#define PT_TRACE_CLONE      PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
+#define PT_TRACE_EXEC       PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
+#define PT_TRACE_VFORK_DONE PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
+#define PT_TRACE_EXIT       PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
 
 #define PT_TRACE_MASK   0x000003f4
 
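Note: a quick sanity check of the new event-flag encoding above (my arithmetic, not part of the patch). PT_EVENT_FLAG(event) expands to 1 << (4 + event - 1), so the generated PT_TRACE_* values coincide exactly with the literal constants the removed lines carried, which is why PT_TRACE_MASK can stay unchanged:

    /* Illustrative check: the generated PT_TRACE_* values match the
     * removed literal constants from the hunk above. */
    #include <assert.h>

    #define PT_EVENT_FLAG_SHIFT  4
    #define PT_EVENT_FLAG(event) (1 << (PT_EVENT_FLAG_SHIFT + (event) - 1))

    int main(void)
    {
        assert(PT_EVENT_FLAG(1) == 0x010); /* PTRACE_EVENT_FORK       -> old PT_TRACE_FORK */
        assert(PT_EVENT_FLAG(2) == 0x020); /* PTRACE_EVENT_VFORK      -> old PT_TRACE_VFORK */
        assert(PT_EVENT_FLAG(3) == 0x040); /* PTRACE_EVENT_CLONE      -> old PT_TRACE_CLONE */
        assert(PT_EVENT_FLAG(4) == 0x080); /* PTRACE_EVENT_EXEC       -> old PT_TRACE_EXEC */
        assert(PT_EVENT_FLAG(5) == 0x100); /* PTRACE_EVENT_VFORK_DONE -> old PT_TRACE_VFORK_DONE */
        assert(PT_EVENT_FLAG(6) == 0x200); /* PTRACE_EVENT_EXIT       -> old PT_TRACE_EXIT */
        return 0;
    }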
@@ -105,7 +119,7 @@ extern long arch_ptrace(struct task_struct *child, long request,
 extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
 extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
 extern void ptrace_disable(struct task_struct *);
-extern int ptrace_check_attach(struct task_struct *task, int kill);
+extern int ptrace_check_attach(struct task_struct *task, bool ignore_state);
 extern int ptrace_request(struct task_struct *child, long request,
               unsigned long addr, unsigned long data);
 extern void ptrace_notify(int exit_code);
@@ -122,7 +136,7 @@ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
 
 static inline int ptrace_reparented(struct task_struct *child)
 {
-    return child->real_parent != child->parent;
+    return !same_thread_group(child->real_parent, child->parent);
 }
 
 static inline void ptrace_unlink(struct task_struct *child)
@@ -137,36 +151,56 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
             unsigned long data);
 
 /**
- * task_ptrace - return %PT_* flags that apply to a task
- * @task: pointer to &task_struct in question
+ * ptrace_parent - return the task that is tracing the given task
+ * @task: task to consider
  *
- * Returns the %PT_* flags that apply to @task.
+ * Returns %NULL if no one is tracing @task, or the &struct task_struct
+ * pointer to its tracer.
+ *
+ * Must be called under rcu_read_lock(). The pointer returned might be kept
+ * live only by RCU. During exec, this may be called with task_lock() held
+ * on @task, still held from when check_unsafe_exec() was called.
  */
-static inline int task_ptrace(struct task_struct *task)
+static inline struct task_struct *ptrace_parent(struct task_struct *task)
 {
-    return task->ptrace;
+    if (unlikely(task->ptrace))
+        return rcu_dereference(task->parent);
+    return NULL;
+}
+
+/**
+ * ptrace_event_enabled - test whether a ptrace event is enabled
+ * @task: ptracee of interest
+ * @event: %PTRACE_EVENT_* to test
+ *
+ * Test whether @event is enabled for ptracee @task.
+ *
+ * Returns %true if @event is enabled, %false otherwise.
+ */
+static inline bool ptrace_event_enabled(struct task_struct *task, int event)
+{
+    return task->ptrace & PT_EVENT_FLAG(event);
 }
 
 /**
  * ptrace_event - possibly stop for a ptrace event notification
- * @mask: %PT_* bit to check in @current->ptrace
- * @event: %PTRACE_EVENT_* value to report if @mask is set
+ * @event: %PTRACE_EVENT_* value to report
  * @message: value for %PTRACE_GETEVENTMSG to return
  *
- * This checks the @mask bit to see if ptrace wants stops for this event.
- * If so we stop, reporting @event and @message to the ptrace parent.
- *
- * Returns nonzero if we did a ptrace notification, zero if not.
+ * Check whether @event is enabled and, if so, report @event and @message
+ * to the ptrace parent.
  *
  * Called without locks.
  */
-static inline int ptrace_event(int mask, int event, unsigned long message)
+static inline void ptrace_event(int event, unsigned long message)
 {
-    if (mask && likely(!(current->ptrace & mask)))
-        return 0;
-    current->ptrace_message = message;
-    ptrace_notify((event << 8) | SIGTRAP);
-    return 1;
+    if (unlikely(ptrace_event_enabled(current, event))) {
+        current->ptrace_message = message;
+        ptrace_notify((event << 8) | SIGTRAP);
+    } else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
+        /* legacy EXEC report via SIGTRAP */
+        send_sig(SIGTRAP, current, 0);
+    }
 }
 
 /**
@@ -183,16 +217,24 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
 {
     INIT_LIST_HEAD(&child->ptrace_entry);
     INIT_LIST_HEAD(&child->ptraced);
-    child->parent = child->real_parent;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+    atomic_set(&child->ptrace_bp_refcnt, 1);
+#endif
+    child->jobctl = 0;
     child->ptrace = 0;
-    if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
+    child->parent = child->real_parent;
+
+    if (unlikely(ptrace) && current->ptrace) {
         child->ptrace = current->ptrace;
         __ptrace_link(child, current->parent);
-    }
 
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
-    atomic_set(&child->ptrace_bp_refcnt, 1);
-#endif
+        if (child->ptrace & PT_SEIZED)
+            task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
+        else
+            sigaddset(&child->pending.signal, SIGSTOP);
+
+        set_tsk_thread_flag(child, TIF_SIGPENDING);
+    }
 }
 
 /**
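Note: combined with the fs/exec.c hunks above, ptrace_event(PTRACE_EVENT_EXEC, old_pid) now carries the pre-exec thread ID as the event message, so a tracer can read it back with PTRACE_GETEVENTMSG after the exec stop. A sketch of the tracer side, using only the long-standing PTRACE_SETOPTIONS/PTRACE_GETEVENTMSG interface (the helper name and the bare-bones error handling are mine):

    /* Tracer-side sketch: observe an exec and read the pre-exec thread
     * ID that this merge makes available through PTRACE_GETEVENTMSG. */
    #include <stdio.h>
    #include <signal.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    int trace_exec(pid_t pid)
    {
        int status;
        unsigned long old_tid;

        if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0)
            return -1;
        if (waitpid(pid, &status, 0) < 0)
            return -1;

        /* Ask for a stop at every exec. */
        if (ptrace(PTRACE_SETOPTIONS, pid, NULL,
                   (void *)PTRACE_O_TRACEEXEC) < 0)
            return -1;
        if (ptrace(PTRACE_CONT, pid, NULL, NULL) < 0)
            return -1;

        if (waitpid(pid, &status, 0) < 0)
            return -1;
        if (WIFSTOPPED(status) &&
            status >> 8 == (SIGTRAP | (PTRACE_EVENT_EXEC << 8))) {
            /* New in this merge: the event message is the thread ID the
             * execing thread had before de_thread() changed it. */
            if (ptrace(PTRACE_GETEVENTMSG, pid, NULL, &old_tid) == 0)
                printf("exec by former tid %lu\n", old_tid);
        }
        return 0;
    }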
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 14a6c7b545de..f6ef727ee4fc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1292,7 +1292,7 @@ struct task_struct {
     int exit_state;
     int exit_code, exit_signal;
     int pdeath_signal;  /* The signal sent when the parent dies */
-    unsigned int group_stop;    /* GROUP_STOP_*, siglock protected */
+    unsigned int jobctl;    /* JOBCTL_*, siglock protected */
     /* ??? */
     unsigned int personality;
     unsigned did_exec:1;
@@ -1813,15 +1813,34 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define used_math() tsk_used_math(current)
 
 /*
- * task->group_stop flags
+ * task->jobctl flags
  */
-#define GROUP_STOP_SIGMASK  0xffff    /* signr of the last group stop */
-#define GROUP_STOP_PENDING  (1 << 16) /* task should stop for group stop */
-#define GROUP_STOP_CONSUME  (1 << 17) /* consume group stop count */
-#define GROUP_STOP_TRAPPING (1 << 18) /* switching from STOPPED to TRACED */
-#define GROUP_STOP_DEQUEUED (1 << 19) /* stop signal dequeued */
-
-extern void task_clear_group_stop_pending(struct task_struct *task);
+#define JOBCTL_STOP_SIGMASK 0xffff  /* signr of the last group stop */
+
+#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */
+#define JOBCTL_STOP_PENDING_BIT 17  /* task should stop for group stop */
+#define JOBCTL_STOP_CONSUME_BIT 18  /* consume group stop count */
+#define JOBCTL_TRAP_STOP_BIT    19  /* trap for STOP */
+#define JOBCTL_TRAP_NOTIFY_BIT  20  /* trap for NOTIFY */
+#define JOBCTL_TRAPPING_BIT 21  /* switching to TRACED */
+#define JOBCTL_LISTENING_BIT    22  /* ptracer is listening for events */
+
+#define JOBCTL_STOP_DEQUEUED    (1 << JOBCTL_STOP_DEQUEUED_BIT)
+#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
+#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
+#define JOBCTL_TRAP_STOP    (1 << JOBCTL_TRAP_STOP_BIT)
+#define JOBCTL_TRAP_NOTIFY  (1 << JOBCTL_TRAP_NOTIFY_BIT)
+#define JOBCTL_TRAPPING     (1 << JOBCTL_TRAPPING_BIT)
+#define JOBCTL_LISTENING    (1 << JOBCTL_LISTENING_BIT)
+
+#define JOBCTL_TRAP_MASK    (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
+#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
+
+extern bool task_set_jobctl_pending(struct task_struct *task,
+                    unsigned int mask);
+extern void task_clear_jobctl_trapping(struct task_struct *task);
+extern void task_clear_jobctl_pending(struct task_struct *task,
+                      unsigned int mask);
 
 #ifdef CONFIG_PREEMPT_RCU
 
@@ -2136,7 +2155,7 @@ static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, s
     spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 
     return ret;
 }
 
 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
                   sigset_t *mask);
@@ -2151,7 +2170,7 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_
 extern int kill_pgrp(struct pid *pid, int sig, int priv);
 extern int kill_pid(struct pid *pid, int sig, int priv);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern int do_notify_parent(struct task_struct *, int);
+extern __must_check bool do_notify_parent(struct task_struct *, int);
 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
 extern void force_sig(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
@@ -2275,8 +2294,10 @@ static inline int get_nr_threads(struct task_struct *tsk)
     return tsk->signal->nr_threads;
 }
 
-/* de_thread depends on thread_group_leader not being a pid based check */
-#define thread_group_leader(p)  (p == p->group_leader)
+static inline bool thread_group_leader(struct task_struct *p)
+{
+    return p->exit_signal >= 0;
+}
 
 /* Due to the insanities of de_thread it is possible for a process
  * to have the pid of the thread group leader without actually being
@@ -2309,11 +2330,6 @@ static inline int thread_group_empty(struct task_struct *p)
 #define delay_group_leader(p) \
         (thread_group_leader(p) && !thread_group_empty(p))
 
-static inline int task_detached(struct task_struct *p)
-{
-    return p->exit_signal == -1;
-}
-
 /*
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index e95f5236611f..a71a2927a6a0 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -51,27 +51,12 @@
 #include <linux/security.h>
 struct linux_binprm;
 
-/**
- * tracehook_expect_breakpoints - guess if task memory might be touched
- * @task: current task, making a new mapping
- *
- * Return nonzero if @task is expected to want breakpoint insertion in
- * its memory at some point. A zero return is no guarantee it won't
- * be done, but this is a hint that it's known to be likely.
- *
- * May be called with @task->mm->mmap_sem held for writing.
- */
-static inline int tracehook_expect_breakpoints(struct task_struct *task)
-{
-    return (task_ptrace(task) & PT_PTRACED) != 0;
-}
-
 /*
  * ptrace report for syscall entry and exit looks identical.
  */
 static inline void ptrace_report_syscall(struct pt_regs *regs)
 {
-    int ptrace = task_ptrace(current);
+    int ptrace = current->ptrace;
 
     if (!(ptrace & PT_PTRACED))
         return;
@@ -145,229 +130,6 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
 }
 
 /**
- * tracehook_unsafe_exec - check for exec declared unsafe due to tracing
- * @task: current task doing exec
- *
- * Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
- *
- * @task->signal->cred_guard_mutex is held by the caller through the do_execve().
- */
-static inline int tracehook_unsafe_exec(struct task_struct *task)
-{
-    int unsafe = 0;
-    int ptrace = task_ptrace(task);
-    if (ptrace & PT_PTRACED) {
-        if (ptrace & PT_PTRACE_CAP)
-            unsafe |= LSM_UNSAFE_PTRACE_CAP;
-        else
-            unsafe |= LSM_UNSAFE_PTRACE;
-    }
-    return unsafe;
-}
-
-/**
- * tracehook_tracer_task - return the task that is tracing the given task
- * @tsk: task to consider
- *
- * Returns NULL if no one is tracing @task, or the &struct task_struct
- * pointer to its tracer.
- *
- * Must be called under rcu_read_lock(). The pointer returned might be kept
- * live only by RCU. During exec, this may be called with task_lock()
- * held on @task, still held from when tracehook_unsafe_exec() was called.
- */
-static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk)
-{
-    if (task_ptrace(tsk) & PT_PTRACED)
-        return rcu_dereference(tsk->parent);
-    return NULL;
-}
-
-/**
- * tracehook_report_exec - a successful exec was completed
- * @fmt: &struct linux_binfmt that performed the exec
- * @bprm: &struct linux_binprm containing exec details
- * @regs: user-mode register state
- *
- * An exec just completed, we are shortly going to return to user mode.
- * The freshly initialized register state can be seen and changed in @regs.
- * The name, file and other pointers in @bprm are still on hand to be
- * inspected, but will be freed as soon as this returns.
- *
- * Called with no locks, but with some kernel resources held live
- * and a reference on @fmt->module.
- */
-static inline void tracehook_report_exec(struct linux_binfmt *fmt,
-                     struct linux_binprm *bprm,
-                     struct pt_regs *regs)
-{
-    if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
-        unlikely(task_ptrace(current) & PT_PTRACED))
-        send_sig(SIGTRAP, current, 0);
-}
-
-/**
- * tracehook_report_exit - task has begun to exit
- * @exit_code: pointer to value destined for @current->exit_code
- *
- * @exit_code points to the value passed to do_exit(), which tracing
- * might change here. This is almost the first thing in do_exit(),
- * before freeing any resources or setting the %PF_EXITING flag.
- *
- * Called with no locks held.
- */
-static inline void tracehook_report_exit(long *exit_code)
-{
-    ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
-}
-
-/**
- * tracehook_prepare_clone - prepare for new child to be cloned
- * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
- *
- * This is called before a new user task is to be cloned.
- * Its return value will be passed to tracehook_finish_clone().
- *
- * Called with no locks held.
- */
-static inline int tracehook_prepare_clone(unsigned clone_flags)
-{
-    if (clone_flags & CLONE_UNTRACED)
-        return 0;
-
-    if (clone_flags & CLONE_VFORK) {
-        if (current->ptrace & PT_TRACE_VFORK)
-            return PTRACE_EVENT_VFORK;
-    } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
-        if (current->ptrace & PT_TRACE_CLONE)
-            return PTRACE_EVENT_CLONE;
-    } else if (current->ptrace & PT_TRACE_FORK)
-        return PTRACE_EVENT_FORK;
-
-    return 0;
-}
-
-/**
- * tracehook_finish_clone - new child created and being attached
- * @child: new child task
- * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
- * @trace: return value from tracehook_prepare_clone()
- *
- * This is called immediately after adding @child to its parent's children list.
- * The @trace value is that returned by tracehook_prepare_clone().
- *
- * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
- */
-static inline void tracehook_finish_clone(struct task_struct *child,
-                      unsigned long clone_flags, int trace)
-{
-    ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace);
-}
-
-/**
- * tracehook_report_clone - in parent, new child is about to start running
- * @regs: parent's user register state
- * @clone_flags: flags from parent's system call
- * @pid: new child's PID in the parent's namespace
- * @child: new child task
- *
- * Called after a child is set up, but before it has been started running.
- * This is not a good place to block, because the child has not started
- * yet. Suspend the child here if desired, and then block in
- * tracehook_report_clone_complete(). This must prevent the child from
- * self-reaping if tracehook_report_clone_complete() uses the @child
- * pointer; otherwise it might have died and been released by the time
- * tracehook_report_clone_complete() is called.
- *
- * Called with no locks held, but the child cannot run until this returns.
- */
-static inline void tracehook_report_clone(struct pt_regs *regs,
-                      unsigned long clone_flags,
-                      pid_t pid, struct task_struct *child)
-{
-    if (unlikely(task_ptrace(child))) {
-        /*
-         * It doesn't matter who attached/attaching to this
-         * task, the pending SIGSTOP is right in any case.
-         */
-        sigaddset(&child->pending.signal, SIGSTOP);
-        set_tsk_thread_flag(child, TIF_SIGPENDING);
-    }
-}
-
-/**
- * tracehook_report_clone_complete - new child is running
- * @trace: return value from tracehook_prepare_clone()
- * @regs: parent's user register state
- * @clone_flags: flags from parent's system call
- * @pid: new child's PID in the parent's namespace
- * @child: child task, already running
- *
- * This is called just after the child has started running. This is
- * just before the clone/fork syscall returns, or blocks for vfork
- * child completion if @clone_flags has the %CLONE_VFORK bit set.
- * The @child pointer may be invalid if a self-reaping child died and
- * tracehook_report_clone() took no action to prevent it from self-reaping.
- *
- * Called with no locks held.
- */
-static inline void tracehook_report_clone_complete(int trace,
-                           struct pt_regs *regs,
-                           unsigned long clone_flags,
-                           pid_t pid,
-                           struct task_struct *child)
-{
-    if (unlikely(trace))
-        ptrace_event(0, trace, pid);
-}
-
-/**
- * tracehook_report_vfork_done - vfork parent's child has exited or exec'd
- * @child: child task, already running
- * @pid: new child's PID in the parent's namespace
- *
- * Called after a %CLONE_VFORK parent has waited for the child to complete.
- * The clone/vfork system call will return immediately after this.
- * The @child pointer may be invalid if a self-reaping child died and
- * tracehook_report_clone() took no action to prevent it from self-reaping.
- *
- * Called with no locks held.
- */
-static inline void tracehook_report_vfork_done(struct task_struct *child,
-                           pid_t pid)
-{
-    ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid);
-}
-
-/**
- * tracehook_prepare_release_task - task is being reaped, clean up tracing
- * @task: task in %EXIT_DEAD state
- *
- * This is called in release_task() just before @task gets finally reaped
- * and freed. This would be the ideal place to remove and clean up any
- * tracing-related state for @task.
- *
- * Called with no locks held.
- */
-static inline void tracehook_prepare_release_task(struct task_struct *task)
-{
-}
-
-/**
- * tracehook_finish_release_task - final tracing clean-up
- * @task: task in %EXIT_DEAD state
- *
- * This is called in release_task() when @task is being in the middle of
- * being reaped. After this, there must be no tracing entanglements.
- *
- * Called with write_lock_irq(&tasklist_lock) held.
- */
-static inline void tracehook_finish_release_task(struct task_struct *task)
-{
-    ptrace_release_task(task);
-}
-
-/**
  * tracehook_signal_handler - signal handler setup is complete
  * @sig: number of signal being delivered
  * @info: siginfo_t of signal being delivered
@@ -390,151 +152,6 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
     ptrace_notify(SIGTRAP);
 }
 
-/**
- * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal
- * @task: task receiving the signal
- * @sig: signal number being sent
- *
- * Return zero iff tracing doesn't care to examine this ignored signal,
- * so it can short-circuit normal delivery and never even get queued.
- *
- * Called with @task->sighand->siglock held.
- */
-static inline int tracehook_consider_ignored_signal(struct task_struct *task,
-                            int sig)
-{
-    return (task_ptrace(task) & PT_PTRACED) != 0;
-}
-
-/**
- * tracehook_consider_fatal_signal - suppress special handling of fatal signal
- * @task: task receiving the signal
- * @sig: signal number being sent
- *
- * Return nonzero to prevent special handling of this termination signal.
- * Normally handler for signal is %SIG_DFL. It can be %SIG_IGN if @sig is
- * ignored, in which case force_sig() is about to reset it to %SIG_DFL.
- * When this returns zero, this signal might cause a quick termination
- * that does not give the debugger a chance to intercept the signal.
- *
- * Called with or without @task->sighand->siglock held.
- */
-static inline int tracehook_consider_fatal_signal(struct task_struct *task,
-                          int sig)
-{
-    return (task_ptrace(task) & PT_PTRACED) != 0;
-}
-
-/**
- * tracehook_force_sigpending - let tracing force signal_pending(current) on
- *
- * Called when recomputing our signal_pending() flag. Return nonzero
- * to force the signal_pending() flag on, so that tracehook_get_signal()
- * will be called before the next return to user mode.
- *
- * Called with @current->sighand->siglock held.
- */
-static inline int tracehook_force_sigpending(void)
-{
-    return 0;
-}
-
-/**
- * tracehook_get_signal - deliver synthetic signal to traced task
- * @task: @current
- * @regs: task_pt_regs(@current)
- * @info: details of synthetic signal
- * @return_ka: sigaction for synthetic signal
- *
- * Return zero to check for a real pending signal normally.
- * Return -1 after releasing the siglock to repeat the check.
- * Return a signal number to induce an artificial signal delivery,
- * setting *@info and *@return_ka to specify its details and behavior.
- *
- * The @return_ka->sa_handler value controls the disposition of the
- * signal, no matter the signal number. For %SIG_DFL, the return value
- * is a representative signal to indicate the behavior (e.g. %SIGTERM
- * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop,
- * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number
- * reported will be @info->si_signo instead.
- *
- * Called with @task->sighand->siglock held, before dequeuing pending signals.
- */
-static inline int tracehook_get_signal(struct task_struct *task,
-                       struct pt_regs *regs,
-                       siginfo_t *info,
-                       struct k_sigaction *return_ka)
-{
-    return 0;
-}
-
-/**
- * tracehook_finish_jctl - report about return from job control stop
- *
- * This is called by do_signal_stop() after wakeup.
- */
-static inline void tracehook_finish_jctl(void)
-{
-}
-
-#define DEATH_REAP          -1
-#define DEATH_DELAYED_GROUP_LEADER  -2
-
-/**
- * tracehook_notify_death - task is dead, ready to notify parent
- * @task: @current task now exiting
- * @death_cookie: value to pass to tracehook_report_death()
- * @group_dead: nonzero if this was the last thread in the group to die
- *
- * A return value >= 0 means call do_notify_parent() with that signal
- * number. Negative return value can be %DEATH_REAP to self-reap right
- * now, or %DEATH_DELAYED_GROUP_LEADER to a zombie without notifying our
- * parent. Note that a return value of 0 means a do_notify_parent() call
- * that sends no signal, but still wakes up a parent blocked in wait*().
- *
- * Called with write_lock_irq(&tasklist_lock) held.
- */
-static inline int tracehook_notify_death(struct task_struct *task,
-                     void **death_cookie, int group_dead)
-{
-    if (task_detached(task))
-        return task->ptrace ? SIGCHLD : DEATH_REAP;
-
-    /*
-     * If something other than our normal parent is ptracing us, then
-     * send it a SIGCHLD instead of honoring exit_signal. exit_signal
-     * only has special meaning to our real parent.
-     */
-    if (thread_group_empty(task) && !ptrace_reparented(task))
-        return task->exit_signal;
-
-    return task->ptrace ? SIGCHLD : DEATH_DELAYED_GROUP_LEADER;
-}
-
-/**
- * tracehook_report_death - task is dead and ready to be reaped
- * @task: @current task now exiting
- * @signal: return value from tracehook_notify_death()
- * @death_cookie: value passed back from tracehook_notify_death()
- * @group_dead: nonzero if this was the last thread in the group to die
- *
- * Thread has just become a zombie or is about to self-reap. If positive,
- * @signal is the signal number just sent to the parent (usually %SIGCHLD).
- * If @signal is %DEATH_REAP, this thread will self-reap. If @signal is
- * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie.
- * The @death_cookie was passed back by tracehook_notify_death().
- *
- * If normal reaping is not inhibited, @task->exit_state might be changing
- * in parallel.
- *
- * Called without locks.
- */
-static inline void tracehook_report_death(struct task_struct *task,
-                      int signal, void *death_cookie,
-                      int group_dead)
-{
-}
-
 #ifdef TIF_NOTIFY_RESUME
 /**
  * set_notify_resume - cause tracehook_notify_resume() to be called
diff --git a/kernel/exit.c b/kernel/exit.c
index f2b321bae440..73bb192a3d32 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -169,7 +169,6 @@ void release_task(struct task_struct * p)
     struct task_struct *leader;
     int zap_leader;
 repeat:
-    tracehook_prepare_release_task(p);
     /* don't need to get the RCU readlock here - the process is dead and
      * can't be modifying its own credentials. But shut RCU-lockdep up */
     rcu_read_lock();
@@ -179,7 +178,7 @@ repeat:
     proc_flush_task(p);
 
     write_lock_irq(&tasklist_lock);
-    tracehook_finish_release_task(p);
+    ptrace_release_task(p);
     __exit_signal(p);
 
     /*
@@ -190,22 +189,12 @@ repeat:
     zap_leader = 0;
     leader = p->group_leader;
     if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
-        BUG_ON(task_detached(leader));
-        do_notify_parent(leader, leader->exit_signal);
         /*
          * If we were the last child thread and the leader has
          * exited already, and the leader's parent ignores SIGCHLD,
          * then we are the one who should release the leader.
-         *
-         * do_notify_parent() will have marked it self-reaping in
-         * that case.
-         */
-        zap_leader = task_detached(leader);
-
-        /*
-         * This maintains the invariant that release_task()
-         * only runs on a task in EXIT_DEAD, just for sanity.
          */
+        zap_leader = do_notify_parent(leader, leader->exit_signal);
         if (zap_leader)
             leader->exit_state = EXIT_DEAD;
     }
@@ -277,18 +266,16 @@ int is_current_pgrp_orphaned(void)
     return retval;
 }
 
-static int has_stopped_jobs(struct pid *pgrp)
+static bool has_stopped_jobs(struct pid *pgrp)
 {
-    int retval = 0;
     struct task_struct *p;
 
     do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-        if (!task_is_stopped(p))
-            continue;
-        retval = 1;
-        break;
+        if (p->signal->flags & SIGNAL_STOP_STOPPED)
+            return true;
     } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
-    return retval;
+
+    return false;
 }
 
 /*
@@ -751,7 +738,7 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
 {
     list_move_tail(&p->sibling, &p->real_parent->children);
 
-    if (task_detached(p))
+    if (p->exit_state == EXIT_DEAD)
         return;
     /*
      * If this is a threaded reparent there is no need to
@@ -764,10 +751,9 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
     p->exit_signal = SIGCHLD;
 
     /* If it has exited notify the new parent about this child's death. */
-    if (!task_ptrace(p) &&
+    if (!p->ptrace &&
         p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
-        do_notify_parent(p, p->exit_signal);
-        if (task_detached(p)) {
+        if (do_notify_parent(p, p->exit_signal)) {
             p->exit_state = EXIT_DEAD;
             list_move_tail(&p->sibling, dead);
         }
@@ -794,7 +780,7 @@ static void forget_original_parent(struct task_struct *father)
     do {
         t->real_parent = reaper;
         if (t->parent == father) {
-            BUG_ON(task_ptrace(t));
+            BUG_ON(t->ptrace);
             t->parent = t->real_parent;
         }
         if (t->pdeath_signal)
@@ -819,8 +805,7 @@ static void forget_original_parent(struct task_struct *father)
  */
 static void exit_notify(struct task_struct *tsk, int group_dead)
 {
-    int signal;
-    void *cookie;
+    bool autoreap;
 
     /*
      * This does two things:
@@ -851,26 +836,33 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
      * we have changed execution domain as these two values started
      * the same after a fork.
      */
-    if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
+    if (thread_group_leader(tsk) && tsk->exit_signal != SIGCHLD &&
         (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
          tsk->self_exec_id != tsk->parent_exec_id))
         tsk->exit_signal = SIGCHLD;
 
-    signal = tracehook_notify_death(tsk, &cookie, group_dead);
-    if (signal >= 0)
-        signal = do_notify_parent(tsk, signal);
+    if (unlikely(tsk->ptrace)) {
+        int sig = thread_group_leader(tsk) &&
+                thread_group_empty(tsk) &&
+                !ptrace_reparented(tsk) ?
+            tsk->exit_signal : SIGCHLD;
+        autoreap = do_notify_parent(tsk, sig);
+    } else if (thread_group_leader(tsk)) {
+        autoreap = thread_group_empty(tsk) &&
+               do_notify_parent(tsk, tsk->exit_signal);
+    } else {
+        autoreap = true;
+    }
 
-    tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
+    tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
 
     /* mt-exec, de_thread() is waiting for group leader */
     if (unlikely(tsk->signal->notify_count < 0))
         wake_up_process(tsk->signal->group_exit_task);
     write_unlock_irq(&tasklist_lock);
 
-    tracehook_report_death(tsk, signal, cookie, group_dead);
-
     /* If the process is dead, release it - nobody will wait for it */
-    if (signal == DEATH_REAP)
+    if (autoreap)
         release_task(tsk);
 }
 
@@ -923,7 +915,7 @@ NORET_TYPE void do_exit(long code)
      */
     set_fs(USER_DS);
 
-    tracehook_report_exit(&code);
+    ptrace_event(PTRACE_EVENT_EXIT, code);
 
     validate_creds_for_do_exit(tsk);
 
@@ -1235,9 +1227,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
     traced = ptrace_reparented(p);
     /*
      * It can be ptraced but not reparented, check
-     * !task_detached() to filter out sub-threads.
+     * thread_group_leader() to filter out sub-threads.
      */
-    if (likely(!traced) && likely(!task_detached(p))) {
+    if (likely(!traced) && thread_group_leader(p)) {
         struct signal_struct *psig;
         struct signal_struct *sig;
         unsigned long maxrss;
@@ -1345,16 +1337,13 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
         /* We dropped tasklist, ptracer could die and untrace */
         ptrace_unlink(p);
         /*
-         * If this is not a detached task, notify the parent.
-         * If it's still not detached after that, don't release
-         * it now.
+         * If this is not a sub-thread, notify the parent.
+         * If parent wants a zombie, don't release it now.
          */
-        if (!task_detached(p)) {
-            do_notify_parent(p, p->exit_signal);
-            if (!task_detached(p)) {
-                p->exit_state = EXIT_ZOMBIE;
-                p = NULL;
-            }
+        if (thread_group_leader(p) &&
+            !do_notify_parent(p, p->exit_signal)) {
+            p->exit_state = EXIT_ZOMBIE;
+            p = NULL;
         }
         write_unlock_irq(&tasklist_lock);
     }
@@ -1367,7 +1356,8 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 static int *task_stopped_code(struct task_struct *p, bool ptrace)
 {
     if (ptrace) {
-        if (task_is_stopped_or_traced(p))
+        if (task_is_stopped_or_traced(p) &&
+            !(p->jobctl & JOBCTL_LISTENING))
             return &p->exit_code;
     } else {
         if (p->signal->flags & SIGNAL_STOP_STOPPED)
@@ -1563,7 +1553,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
      * Notification and reaping will be cascaded to the real
      * parent when the ptracer detaches.
      */
-    if (likely(!ptrace) && unlikely(task_ptrace(p))) {
+    if (likely(!ptrace) && unlikely(p->ptrace)) {
         /* it will become visible, clear notask_error */
         wo->notask_error = 0;
         return 0;
@@ -1606,8 +1596,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
      * own children, it should create a separate process which
      * takes the role of real parent.
      */
-    if (likely(!ptrace) && task_ptrace(p) &&
-        same_thread_group(p->parent, p->real_parent))
+    if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
         return 0;
 
     /*
diff --git a/kernel/fork.c b/kernel/fork.c
index 0276c30401a0..4d4117e01504 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -37,7 +37,6 @@
 #include <linux/swap.h>
 #include <linux/syscalls.h>
 #include <linux/jiffies.h>
-#include <linux/tracehook.h>
 #include <linux/futex.h>
 #include <linux/compat.h>
 #include <linux/kthread.h>
@@ -1340,7 +1339,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
     }
 
     if (likely(p->pid)) {
-        tracehook_finish_clone(p, clone_flags, trace);
+        ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
 
         if (thread_group_leader(p)) {
             if (is_child_reaper(pid))
@@ -1481,10 +1480,22 @@ long do_fork(unsigned long clone_flags,
     }
 
     /*
-     * When called from kernel_thread, don't do user tracing stuff.
+     * Determine whether and which event to report to ptracer. When
+     * called from kernel_thread or CLONE_UNTRACED is explicitly
+     * requested, no event is reported; otherwise, report if the event
+     * for the type of forking is enabled.
      */
-    if (likely(user_mode(regs)))
-        trace = tracehook_prepare_clone(clone_flags);
+    if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) {
+        if (clone_flags & CLONE_VFORK)
+            trace = PTRACE_EVENT_VFORK;
+        else if ((clone_flags & CSIGNAL) != SIGCHLD)
+            trace = PTRACE_EVENT_CLONE;
+        else
+            trace = PTRACE_EVENT_FORK;
+
+        if (likely(!ptrace_event_enabled(current, trace)))
+            trace = 0;
+    }
 
     p = copy_process(clone_flags, stack_start, regs, stack_size,
              child_tidptr, NULL, trace);
@@ -1508,26 +1519,26 @@ long do_fork(unsigned long clone_flags,
     }
 
     audit_finish_fork(p);
-    tracehook_report_clone(regs, clone_flags, nr, p);
 
     /*
      * We set PF_STARTING at creation in case tracing wants to
      * use this to distinguish a fully live task from one that
-     * hasn't gotten to tracehook_report_clone() yet. Now we
-     * clear it and set the child going.
+     * hasn't finished SIGSTOP raising yet. Now we clear it
+     * and set the child going.
      */
     p->flags &= ~PF_STARTING;
 
     wake_up_new_task(p);
 
-    tracehook_report_clone_complete(trace, regs,
-                    clone_flags, nr, p);
+    /* forking complete and child started to run, tell ptracer */
+    if (unlikely(trace))
+        ptrace_event(trace, nr);
 
     if (clone_flags & CLONE_VFORK) {
         freezer_do_not_count();
         wait_for_completion(&vfork);
         freezer_count();
-        tracehook_report_vfork_done(p, nr);
+        ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
     }
 } else {
     nr = PTR_ERR(p);
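Note: the do_fork() hunk above is the kernel-side mirror of the per-event enable bits a tracer sets with PTRACE_SETOPTIONS. For reference, a tracer-side sketch using only pre-existing ptrace API (nothing here is new in this merge; the helper name is mine):

    /* Enable all three clone-type events and decode which one fired. */
    #include <stdio.h>
    #include <signal.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    int report_clone_event(pid_t pid)
    {
        int status;

        if (ptrace(PTRACE_SETOPTIONS, pid, NULL,
                   (void *)(PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK |
                            PTRACE_O_TRACECLONE)) < 0)
            return -1;
        if (ptrace(PTRACE_CONT, pid, NULL, NULL) < 0)
            return -1;
        if (waitpid(pid, &status, 0) < 0)
            return -1;

        if (WIFSTOPPED(status)) {
            switch (status >> 16) { /* PTRACE_EVENT_* code */
            case PTRACE_EVENT_FORK:  puts("fork");  break;
            case PTRACE_EVENT_VFORK: puts("vfork"); break;
            case PTRACE_EVENT_CLONE: puts("clone"); break;
            }
        }
        return 0;
    }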
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 2df115790cd9..9de3ecfd20f9 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -23,8 +23,15 @@
 #include <linux/uaccess.h>
 #include <linux/regset.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/cn_proc.h>
 
 
+static int ptrace_trapping_sleep_fn(void *flags)
+{
+    schedule();
+    return 0;
+}
+
 /*
  * ptrace a task: make the debugger its new parent and
  * move it to the ptrace list.
@@ -77,13 +84,20 @@ void __ptrace_unlink(struct task_struct *child)
77 spin_lock(&child->sighand->siglock); 84 spin_lock(&child->sighand->siglock);
78 85
79 /* 86 /*
80 * Reinstate GROUP_STOP_PENDING if group stop is in effect and 87 * Clear all pending traps and TRAPPING. TRAPPING should be
88 * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly.
89 */
90 task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
91 task_clear_jobctl_trapping(child);
92
93 /*
94 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
81 * @child isn't dead. 95 * @child isn't dead.
82 */ 96 */
83 if (!(child->flags & PF_EXITING) && 97 if (!(child->flags & PF_EXITING) &&
84 (child->signal->flags & SIGNAL_STOP_STOPPED || 98 (child->signal->flags & SIGNAL_STOP_STOPPED ||
85 child->signal->group_stop_count)) 99 child->signal->group_stop_count))
86 child->group_stop |= GROUP_STOP_PENDING; 100 child->jobctl |= JOBCTL_STOP_PENDING;
87 101
88 /* 102 /*
89 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick 103 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
@@ -91,16 +105,30 @@ void __ptrace_unlink(struct task_struct *child)
91 * is in TASK_TRACED; otherwise, we might unduly disrupt 105 * is in TASK_TRACED; otherwise, we might unduly disrupt
92 * TASK_KILLABLE sleeps. 106 * TASK_KILLABLE sleeps.
93 */ 107 */
94 if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child)) 108 if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
95 signal_wake_up(child, task_is_traced(child)); 109 signal_wake_up(child, task_is_traced(child));
96 110
97 spin_unlock(&child->sighand->siglock); 111 spin_unlock(&child->sighand->siglock);
98} 112}
99 113
100/* 114/**
101 * Check that we have indeed attached to the thing.. 115 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
116 * @child: ptracee to check for
117 * @ignore_state: don't check whether @child is currently %TASK_TRACED
118 *
119 * Check whether @child is being ptraced by %current and ready for further
120 * ptrace operations. If @ignore_state is %false, @child also should be in
121 * %TASK_TRACED state and on return the child is guaranteed to be traced
122 * and not executing. If @ignore_state is %true, @child can be in any
123 * state.
124 *
125 * CONTEXT:
126 * Grabs and releases tasklist_lock and @child->sighand->siglock.
127 *
128 * RETURNS:
 129 * 0 on success, -ESRCH if @child is not ready.
102 */ 130 */
103int ptrace_check_attach(struct task_struct *child, int kill) 131int ptrace_check_attach(struct task_struct *child, bool ignore_state)
104{ 132{
105 int ret = -ESRCH; 133 int ret = -ESRCH;
106 134
@@ -119,13 +147,14 @@ int ptrace_check_attach(struct task_struct *child, int kill)
119 */ 147 */
120 spin_lock_irq(&child->sighand->siglock); 148 spin_lock_irq(&child->sighand->siglock);
121 WARN_ON_ONCE(task_is_stopped(child)); 149 WARN_ON_ONCE(task_is_stopped(child));
122 if (task_is_traced(child) || kill) 150 if (ignore_state || (task_is_traced(child) &&
151 !(child->jobctl & JOBCTL_LISTENING)))
123 ret = 0; 152 ret = 0;
124 spin_unlock_irq(&child->sighand->siglock); 153 spin_unlock_irq(&child->sighand->siglock);
125 } 154 }
126 read_unlock(&tasklist_lock); 155 read_unlock(&tasklist_lock);
127 156
128 if (!ret && !kill) 157 if (!ret && !ignore_state)
129 ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH; 158 ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
130 159
131 /* All systems go.. */ 160 /* All systems go.. */
@@ -182,11 +211,28 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
182 return !err; 211 return !err;
183} 212}
184 213
185static int ptrace_attach(struct task_struct *task) 214static int ptrace_attach(struct task_struct *task, long request,
215 unsigned long flags)
186{ 216{
187 bool wait_trap = false; 217 bool seize = (request == PTRACE_SEIZE);
188 int retval; 218 int retval;
189 219
220 /*
221 * SEIZE will enable new ptrace behaviors which will be implemented
 222 * gradually. SEIZE_DEVEL is used to keep applications
 223 * expecting full SEIZE behaviors from tripping over kernel commits
 224 * which are still in the process of implementing them.
225 *
226 * Only test programs for new ptrace behaviors being implemented
227 * should set SEIZE_DEVEL. If unset, SEIZE will fail with -EIO.
228 *
229 * Once SEIZE behaviors are completely implemented, this flag and
230 * the following test will be removed.
231 */
232 retval = -EIO;
233 if (seize && !(flags & PTRACE_SEIZE_DEVEL))
234 goto out;
235
190 audit_ptrace(task); 236 audit_ptrace(task);
191 237
192 retval = -EPERM; 238 retval = -EPERM;
@@ -218,16 +264,21 @@ static int ptrace_attach(struct task_struct *task)
218 goto unlock_tasklist; 264 goto unlock_tasklist;
219 265
220 task->ptrace = PT_PTRACED; 266 task->ptrace = PT_PTRACED;
267 if (seize)
268 task->ptrace |= PT_SEIZED;
221 if (task_ns_capable(task, CAP_SYS_PTRACE)) 269 if (task_ns_capable(task, CAP_SYS_PTRACE))
222 task->ptrace |= PT_PTRACE_CAP; 270 task->ptrace |= PT_PTRACE_CAP;
223 271
224 __ptrace_link(task, current); 272 __ptrace_link(task, current);
225 send_sig_info(SIGSTOP, SEND_SIG_FORCED, task); 273
274 /* SEIZE doesn't trap tracee on attach */
275 if (!seize)
276 send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
226 277
227 spin_lock(&task->sighand->siglock); 278 spin_lock(&task->sighand->siglock);
228 279
229 /* 280 /*
230 * If the task is already STOPPED, set GROUP_STOP_PENDING and 281 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
231 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING 282 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
232 * will be cleared if the child completes the transition or any 283 * will be cleared if the child completes the transition or any
233 * event which clears the group stop states happens. We'll wait 284 * event which clears the group stop states happens. We'll wait
@@ -243,11 +294,9 @@ static int ptrace_attach(struct task_struct *task)
243 * The following task_is_stopped() test is safe as both transitions 294 * The following task_is_stopped() test is safe as both transitions
244 * in and out of STOPPED are protected by siglock. 295 * in and out of STOPPED are protected by siglock.
245 */ 296 */
246 if (task_is_stopped(task)) { 297 if (task_is_stopped(task) &&
247 task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING; 298 task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
248 signal_wake_up(task, 1); 299 signal_wake_up(task, 1);
249 wait_trap = true;
250 }
251 300
252 spin_unlock(&task->sighand->siglock); 301 spin_unlock(&task->sighand->siglock);
253 302
@@ -257,9 +306,12 @@ unlock_tasklist:
257unlock_creds: 306unlock_creds:
258 mutex_unlock(&task->signal->cred_guard_mutex); 307 mutex_unlock(&task->signal->cred_guard_mutex);
259out: 308out:
260 if (wait_trap) 309 if (!retval) {
261 wait_event(current->signal->wait_chldexit, 310 wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
262 !(task->group_stop & GROUP_STOP_TRAPPING)); 311 ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
312 proc_ptrace_connector(task, PTRACE_ATTACH);
313 }
314
263 return retval; 315 return retval;
264} 316}
265 317
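From userspace, attaching with the new request then looks as follows. A hedged sketch: PTRACE_SEIZE and PTRACE_SEIZE_DEVEL carry the values used by this series (0x4206 and 0x80000000) and are defined locally since libc headers of this era do not ship them; per the -EIO guard above, SEIZE_DEVEL is mandatory for now.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <stdio.h>

#ifndef PTRACE_SEIZE
#define PTRACE_SEIZE		0x4206
#define PTRACE_SEIZE_DEVEL	0x80000000
#endif

static int seize(pid_t pid)
{
	/* Unlike PTRACE_ATTACH, no SIGSTOP is sent: the tracee keeps
	 * running until PTRACE_INTERRUPT or another trap condition. */
	if (ptrace(PTRACE_SEIZE, pid, NULL,
		   (void *)(unsigned long)PTRACE_SEIZE_DEVEL) < 0) {
		perror("PTRACE_SEIZE");	/* EIO if SEIZE_DEVEL is omitted */
		return -1;
	}
	return 0;
}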
@@ -322,25 +374,27 @@ static int ignoring_children(struct sighand_struct *sigh)
322 */ 374 */
323static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) 375static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
324{ 376{
377 bool dead;
378
325 __ptrace_unlink(p); 379 __ptrace_unlink(p);
326 380
327 if (p->exit_state == EXIT_ZOMBIE) { 381 if (p->exit_state != EXIT_ZOMBIE)
328 if (!task_detached(p) && thread_group_empty(p)) { 382 return false;
329 if (!same_thread_group(p->real_parent, tracer)) 383
330 do_notify_parent(p, p->exit_signal); 384 dead = !thread_group_leader(p);
331 else if (ignoring_children(tracer->sighand)) { 385
332 __wake_up_parent(p, tracer); 386 if (!dead && thread_group_empty(p)) {
333 p->exit_signal = -1; 387 if (!same_thread_group(p->real_parent, tracer))
334 } 388 dead = do_notify_parent(p, p->exit_signal);
335 } 389 else if (ignoring_children(tracer->sighand)) {
336 if (task_detached(p)) { 390 __wake_up_parent(p, tracer);
337 /* Mark it as in the process of being reaped. */ 391 dead = true;
338 p->exit_state = EXIT_DEAD;
339 return true;
340 } 392 }
341 } 393 }
342 394 /* Mark it as in the process of being reaped. */
343 return false; 395 if (dead)
396 p->exit_state = EXIT_DEAD;
397 return dead;
344} 398}
345 399
346static int ptrace_detach(struct task_struct *child, unsigned int data) 400static int ptrace_detach(struct task_struct *child, unsigned int data)
@@ -365,6 +419,7 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
365 } 419 }
366 write_unlock_irq(&tasklist_lock); 420 write_unlock_irq(&tasklist_lock);
367 421
422 proc_ptrace_connector(child, PTRACE_DETACH);
368 if (unlikely(dead)) 423 if (unlikely(dead))
369 release_task(child); 424 release_task(child);
370 425
@@ -611,10 +666,12 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
611int ptrace_request(struct task_struct *child, long request, 666int ptrace_request(struct task_struct *child, long request,
612 unsigned long addr, unsigned long data) 667 unsigned long addr, unsigned long data)
613{ 668{
669 bool seized = child->ptrace & PT_SEIZED;
614 int ret = -EIO; 670 int ret = -EIO;
615 siginfo_t siginfo; 671 siginfo_t siginfo, *si;
616 void __user *datavp = (void __user *) data; 672 void __user *datavp = (void __user *) data;
617 unsigned long __user *datalp = datavp; 673 unsigned long __user *datalp = datavp;
674 unsigned long flags;
618 675
619 switch (request) { 676 switch (request) {
620 case PTRACE_PEEKTEXT: 677 case PTRACE_PEEKTEXT:
@@ -647,6 +704,62 @@ int ptrace_request(struct task_struct *child, long request,
647 ret = ptrace_setsiginfo(child, &siginfo); 704 ret = ptrace_setsiginfo(child, &siginfo);
648 break; 705 break;
649 706
707 case PTRACE_INTERRUPT:
708 /*
709 * Stop tracee without any side-effect on signal or job
710 * control. At least one trap is guaranteed to happen
711 * after this request. If @child is already trapped, the
712 * current trap is not disturbed and another trap will
713 * happen after the current trap is ended with PTRACE_CONT.
714 *
 715 * The actual trap might not be a PTRACE_EVENT_STOP trap but
716 * the pending condition is cleared regardless.
717 */
718 if (unlikely(!seized || !lock_task_sighand(child, &flags)))
719 break;
720
721 /*
 722 * INTERRUPT doesn't disturb an existing trap, with one
723 * exception. If ptracer issued LISTEN for the current
724 * STOP, this INTERRUPT should clear LISTEN and re-trap
725 * tracee into STOP.
726 */
727 if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
728 signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
729
730 unlock_task_sighand(child, &flags);
731 ret = 0;
732 break;
733
734 case PTRACE_LISTEN:
735 /*
736 * Listen for events. Tracee must be in STOP. It's not
 737 * resumed per se but is not considered to be in TRACED by
738 * wait(2) or ptrace(2). If an async event (e.g. group
739 * stop state change) happens, tracee will enter STOP trap
740 * again. Alternatively, ptracer can issue INTERRUPT to
741 * finish listening and re-trap tracee into STOP.
742 */
743 if (unlikely(!seized || !lock_task_sighand(child, &flags)))
744 break;
745
746 si = child->last_siginfo;
747 if (unlikely(!si || si->si_code >> 8 != PTRACE_EVENT_STOP))
748 break;
749
750 child->jobctl |= JOBCTL_LISTENING;
751
752 /*
 753 * If NOTIFY is set, it means an event happened between the start
754 * of this trap and now. Trigger re-trap immediately.
755 */
756 if (child->jobctl & JOBCTL_TRAP_NOTIFY)
757 signal_wake_up(child, true);
758
759 unlock_task_sighand(child, &flags);
760 ret = 0;
761 break;
762
650 case PTRACE_DETACH: /* detach a process that was attached. */ 763 case PTRACE_DETACH: /* detach a process that was attached. */
651 ret = ptrace_detach(child, data); 764 ret = ptrace_detach(child, data);
652 break; 765 break;
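The two new requests pair naturally: INTERRUPT pulls a running (or listening) tracee into a STOP trap; LISTEN parks it in STOP while leaving it re-trappable on asynchronous events. A hedged userspace sketch of that loop, with the request numbers from this series defined locally:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_INTERRUPT
#define PTRACE_INTERRUPT	0x4207
#define PTRACE_LISTEN		0x4208
#endif

static void park_in_stop(pid_t pid)
{
	int status;

	/* Pull the tracee into a trap without signal side effects. */
	ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);
	waitpid(pid, &status, 0);

	/* LISTEN is accepted only while trapped for PTRACE_EVENT_STOP;
	 * the tracee stays stopped, but an async event (e.g. SIGCONT)
	 * re-traps it and makes it waitable again. */
	if (ptrace(PTRACE_LISTEN, pid, NULL, NULL) == 0)
		waitpid(pid, &status, 0);	/* blocks until the re-trap */
}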
@@ -761,8 +874,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
761 goto out; 874 goto out;
762 } 875 }
763 876
764 if (request == PTRACE_ATTACH) { 877 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
765 ret = ptrace_attach(child); 878 ret = ptrace_attach(child, request, data);
766 /* 879 /*
767 * Some architectures need to do book-keeping after 880 * Some architectures need to do book-keeping after
768 * a ptrace attach. 881 * a ptrace attach.
@@ -772,7 +885,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
772 goto out_put_task_struct; 885 goto out_put_task_struct;
773 } 886 }
774 887
775 ret = ptrace_check_attach(child, request == PTRACE_KILL); 888 ret = ptrace_check_attach(child, request == PTRACE_KILL ||
889 request == PTRACE_INTERRUPT);
776 if (ret < 0) 890 if (ret < 0)
777 goto out_put_task_struct; 891 goto out_put_task_struct;
778 892
@@ -903,8 +1017,8 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
903 goto out; 1017 goto out;
904 } 1018 }
905 1019
906 if (request == PTRACE_ATTACH) { 1020 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
907 ret = ptrace_attach(child); 1021 ret = ptrace_attach(child, request, data);
908 /* 1022 /*
909 * Some architectures need to do book-keeping after 1023 * Some architectures need to do book-keeping after
910 * a ptrace attach. 1024 * a ptrace attach.
@@ -914,7 +1028,8 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
914 goto out_put_task_struct; 1028 goto out_put_task_struct;
915 } 1029 }
916 1030
917 ret = ptrace_check_attach(child, request == PTRACE_KILL); 1031 ret = ptrace_check_attach(child, request == PTRACE_KILL ||
1032 request == PTRACE_INTERRUPT);
918 if (!ret) 1033 if (!ret)
919 ret = compat_arch_ptrace(child, request, addr, data); 1034 ret = compat_arch_ptrace(child, request, addr, data);
920 1035
diff --git a/kernel/signal.c b/kernel/signal.c
index 415d85d6f6c6..d7f70aed1cc0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -87,7 +87,7 @@ static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
87 /* 87 /*
88 * Tracers may want to know about even ignored signals. 88 * Tracers may want to know about even ignored signals.
89 */ 89 */
90 return !tracehook_consider_ignored_signal(t, sig); 90 return !t->ptrace;
91} 91}
92 92
93/* 93/*
@@ -124,7 +124,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
124 124
125static int recalc_sigpending_tsk(struct task_struct *t) 125static int recalc_sigpending_tsk(struct task_struct *t)
126{ 126{
127 if ((t->group_stop & GROUP_STOP_PENDING) || 127 if ((t->jobctl & JOBCTL_PENDING_MASK) ||
128 PENDING(&t->pending, &t->blocked) || 128 PENDING(&t->pending, &t->blocked) ||
129 PENDING(&t->signal->shared_pending, &t->blocked)) { 129 PENDING(&t->signal->shared_pending, &t->blocked)) {
130 set_tsk_thread_flag(t, TIF_SIGPENDING); 130 set_tsk_thread_flag(t, TIF_SIGPENDING);
@@ -150,9 +150,7 @@ void recalc_sigpending_and_wake(struct task_struct *t)
150 150
151void recalc_sigpending(void) 151void recalc_sigpending(void)
152{ 152{
153 if (unlikely(tracehook_force_sigpending())) 153 if (!recalc_sigpending_tsk(current) && !freezing(current))
154 set_thread_flag(TIF_SIGPENDING);
155 else if (!recalc_sigpending_tsk(current) && !freezing(current))
156 clear_thread_flag(TIF_SIGPENDING); 154 clear_thread_flag(TIF_SIGPENDING);
157 155
158} 156}
@@ -224,47 +222,93 @@ static inline void print_dropped_signal(int sig)
224} 222}
225 223
226/** 224/**
227 * task_clear_group_stop_trapping - clear group stop trapping bit 225 * task_set_jobctl_pending - set jobctl pending bits
228 * @task: target task 226 * @task: target task
227 * @mask: pending bits to set
229 * 228 *
 230 * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us. Clear it 229 * Set @mask bits in @task->jobctl. @mask must be a subset of
231 * and wake up the ptracer. Note that we don't need any further locking. 230 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
232 * @task->siglock guarantees that @task->parent points to the ptracer. 231 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
232 * cleared. If @task is already being killed or exiting, this function
 233 * becomes a noop.
234 *
235 * CONTEXT:
236 * Must be called with @task->sighand->siglock held.
237 *
238 * RETURNS:
239 * %true if @mask is set, %false if made noop because @task was dying.
240 */
241bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
242{
243 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
244 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
245 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
246
247 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
248 return false;
249
250 if (mask & JOBCTL_STOP_SIGMASK)
251 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
252
253 task->jobctl |= mask;
254 return true;
255}
256
257/**
258 * task_clear_jobctl_trapping - clear jobctl trapping bit
259 * @task: target task
260 *
261 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
262 * Clear it and wake up the ptracer. Note that we don't need any further
263 * locking. @task->siglock guarantees that @task->parent points to the
264 * ptracer.
233 * 265 *
234 * CONTEXT: 266 * CONTEXT:
235 * Must be called with @task->sighand->siglock held. 267 * Must be called with @task->sighand->siglock held.
236 */ 268 */
237static void task_clear_group_stop_trapping(struct task_struct *task) 269void task_clear_jobctl_trapping(struct task_struct *task)
238{ 270{
239 if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) { 271 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
240 task->group_stop &= ~GROUP_STOP_TRAPPING; 272 task->jobctl &= ~JOBCTL_TRAPPING;
241 __wake_up_sync_key(&task->parent->signal->wait_chldexit, 273 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
242 TASK_UNINTERRUPTIBLE, 1, task);
243 } 274 }
244} 275}
245 276
246/** 277/**
247 * task_clear_group_stop_pending - clear pending group stop 278 * task_clear_jobctl_pending - clear jobctl pending bits
248 * @task: target task 279 * @task: target task
280 * @mask: pending bits to clear
281 *
282 * Clear @mask from @task->jobctl. @mask must be subset of
283 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
284 * STOP bits are cleared together.
249 * 285 *
250 * Clear group stop states for @task. 286 * If clearing of @mask leaves no stop or trap pending, this function calls
287 * task_clear_jobctl_trapping().
251 * 288 *
252 * CONTEXT: 289 * CONTEXT:
253 * Must be called with @task->sighand->siglock held. 290 * Must be called with @task->sighand->siglock held.
254 */ 291 */
255void task_clear_group_stop_pending(struct task_struct *task) 292void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
256{ 293{
257 task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME | 294 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
258 GROUP_STOP_DEQUEUED); 295
296 if (mask & JOBCTL_STOP_PENDING)
297 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
298
299 task->jobctl &= ~mask;
300
301 if (!(task->jobctl & JOBCTL_PENDING_MASK))
302 task_clear_jobctl_trapping(task);
259} 303}
260 304
261/** 305/**
262 * task_participate_group_stop - participate in a group stop 306 * task_participate_group_stop - participate in a group stop
263 * @task: task participating in a group stop 307 * @task: task participating in a group stop
264 * 308 *
265 * @task has GROUP_STOP_PENDING set and is participating in a group stop. 309 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
266 * Group stop states are cleared and the group stop count is consumed if 310 * Group stop states are cleared and the group stop count is consumed if
267 * %GROUP_STOP_CONSUME was set. If the consumption completes the group 311 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
268 * stop, the appropriate %SIGNAL_* flags are set. 312 * stop, the appropriate %SIGNAL_* flags are set.
269 * 313 *
270 * CONTEXT: 314 * CONTEXT:
@@ -277,11 +321,11 @@ void task_clear_group_stop_pending(struct task_struct *task)
277static bool task_participate_group_stop(struct task_struct *task) 321static bool task_participate_group_stop(struct task_struct *task)
278{ 322{
279 struct signal_struct *sig = task->signal; 323 struct signal_struct *sig = task->signal;
280 bool consume = task->group_stop & GROUP_STOP_CONSUME; 324 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
281 325
282 WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING)); 326 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
283 327
284 task_clear_group_stop_pending(task); 328 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
285 329
286 if (!consume) 330 if (!consume)
287 return false; 331 return false;
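For orientation while reading the masks used by these helpers, this is the ->jobctl bit layout the series introduces in include/linux/sched.h (reproduced here as a reading aid; the tree is authoritative):

#define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the pending group stop */

#define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
#define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
#define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
#define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
#define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
#define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
#define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */

#define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
#define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
#define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
#define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)

#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)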
@@ -449,7 +493,8 @@ int unhandled_signal(struct task_struct *tsk, int sig)
449 return 1; 493 return 1;
450 if (handler != SIG_IGN && handler != SIG_DFL) 494 if (handler != SIG_IGN && handler != SIG_DFL)
451 return 0; 495 return 0;
452 return !tracehook_consider_fatal_signal(tsk, sig); 496 /* if ptraced, let the tracer determine */
497 return !tsk->ptrace;
453} 498}
454 499
455/* 500/*
@@ -604,7 +649,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
604 * is to alert stop-signal processing code when another 649 * is to alert stop-signal processing code when another
605 * processor has come along and cleared the flag. 650 * processor has come along and cleared the flag.
606 */ 651 */
607 current->group_stop |= GROUP_STOP_DEQUEUED; 652 current->jobctl |= JOBCTL_STOP_DEQUEUED;
608 } 653 }
609 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { 654 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
610 /* 655 /*
@@ -773,6 +818,32 @@ static int check_kill_permission(int sig, struct siginfo *info,
773 return security_task_kill(t, info, sig, 0); 818 return security_task_kill(t, info, sig, 0);
774} 819}
775 820
821/**
822 * ptrace_trap_notify - schedule trap to notify ptracer
823 * @t: tracee wanting to notify tracer
824 *
 825 * This function schedules a sticky ptrace trap which is cleared on the next
826 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
827 * ptracer.
828 *
829 * If @t is running, STOP trap will be taken. If trapped for STOP and
830 * ptracer is listening for events, tracee is woken up so that it can
831 * re-trap for the new event. If trapped otherwise, STOP trap will be
832 * eventually taken without returning to userland after the existing traps
833 * are finished by PTRACE_CONT.
834 *
835 * CONTEXT:
836 * Must be called with @task->sighand->siglock held.
837 */
838static void ptrace_trap_notify(struct task_struct *t)
839{
840 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
841 assert_spin_locked(&t->sighand->siglock);
842
843 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
844 signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
845}
846
776/* 847/*
777 * Handle magic process-wide effects of stop/continue signals. Unlike 848 * Handle magic process-wide effects of stop/continue signals. Unlike
778 * the signal actions, these happen immediately at signal-generation 849 * the signal actions, these happen immediately at signal-generation
@@ -809,9 +880,12 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
809 rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); 880 rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
810 t = p; 881 t = p;
811 do { 882 do {
812 task_clear_group_stop_pending(t); 883 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
813 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); 884 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
814 wake_up_state(t, __TASK_STOPPED); 885 if (likely(!(t->ptrace & PT_SEIZED)))
886 wake_up_state(t, __TASK_STOPPED);
887 else
888 ptrace_trap_notify(t);
815 } while_each_thread(p, t); 889 } while_each_thread(p, t);
816 890
817 /* 891 /*
@@ -908,8 +982,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
908 if (sig_fatal(p, sig) && 982 if (sig_fatal(p, sig) &&
909 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && 983 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
910 !sigismember(&t->real_blocked, sig) && 984 !sigismember(&t->real_blocked, sig) &&
911 (sig == SIGKILL || 985 (sig == SIGKILL || !t->ptrace)) {
912 !tracehook_consider_fatal_signal(t, sig))) {
913 /* 986 /*
914 * This signal will be fatal to the whole group. 987 * This signal will be fatal to the whole group.
915 */ 988 */
@@ -925,7 +998,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
925 signal->group_stop_count = 0; 998 signal->group_stop_count = 0;
926 t = p; 999 t = p;
927 do { 1000 do {
928 task_clear_group_stop_pending(t); 1001 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
929 sigaddset(&t->pending.signal, SIGKILL); 1002 sigaddset(&t->pending.signal, SIGKILL);
930 signal_wake_up(t, 1); 1003 signal_wake_up(t, 1);
931 } while_each_thread(p, t); 1004 } while_each_thread(p, t);
@@ -1160,7 +1233,7 @@ int zap_other_threads(struct task_struct *p)
1160 p->signal->group_stop_count = 0; 1233 p->signal->group_stop_count = 0;
1161 1234
1162 while_each_thread(p, t) { 1235 while_each_thread(p, t) {
1163 task_clear_group_stop_pending(t); 1236 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1164 count++; 1237 count++;
1165 1238
1166 /* Don't bother with already dead threads */ 1239 /* Don't bother with already dead threads */
@@ -1511,22 +1584,22 @@ ret:
1511 * Let a parent know about the death of a child. 1584 * Let a parent know about the death of a child.
1512 * For a stopped/continued status change, use do_notify_parent_cldstop instead. 1585 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1513 * 1586 *
1514 * Returns -1 if our parent ignored us and so we've switched to 1587 * Returns true if our parent ignored us and so we've switched to
1515 * self-reaping, or else @sig. 1588 * self-reaping.
1516 */ 1589 */
1517int do_notify_parent(struct task_struct *tsk, int sig) 1590bool do_notify_parent(struct task_struct *tsk, int sig)
1518{ 1591{
1519 struct siginfo info; 1592 struct siginfo info;
1520 unsigned long flags; 1593 unsigned long flags;
1521 struct sighand_struct *psig; 1594 struct sighand_struct *psig;
1522 int ret = sig; 1595 bool autoreap = false;
1523 1596
1524 BUG_ON(sig == -1); 1597 BUG_ON(sig == -1);
1525 1598
1526 /* do_notify_parent_cldstop should have been called instead. */ 1599 /* do_notify_parent_cldstop should have been called instead. */
1527 BUG_ON(task_is_stopped_or_traced(tsk)); 1600 BUG_ON(task_is_stopped_or_traced(tsk));
1528 1601
1529 BUG_ON(!task_ptrace(tsk) && 1602 BUG_ON(!tsk->ptrace &&
1530 (tsk->group_leader != tsk || !thread_group_empty(tsk))); 1603 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1531 1604
1532 info.si_signo = sig; 1605 info.si_signo = sig;
@@ -1565,7 +1638,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)
1565 1638
1566 psig = tsk->parent->sighand; 1639 psig = tsk->parent->sighand;
1567 spin_lock_irqsave(&psig->siglock, flags); 1640 spin_lock_irqsave(&psig->siglock, flags);
1568 if (!task_ptrace(tsk) && sig == SIGCHLD && 1641 if (!tsk->ptrace && sig == SIGCHLD &&
1569 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || 1642 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1570 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { 1643 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1571 /* 1644 /*
@@ -1583,16 +1656,16 @@ int do_notify_parent(struct task_struct *tsk, int sig)
1583 * is implementation-defined: we do (if you don't want 1656 * is implementation-defined: we do (if you don't want
1584 * it, just use SIG_IGN instead). 1657 * it, just use SIG_IGN instead).
1585 */ 1658 */
1586 ret = tsk->exit_signal = -1; 1659 autoreap = true;
1587 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) 1660 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1588 sig = -1; 1661 sig = 0;
1589 } 1662 }
1590 if (valid_signal(sig) && sig > 0) 1663 if (valid_signal(sig) && sig)
1591 __group_send_sig_info(sig, &info, tsk->parent); 1664 __group_send_sig_info(sig, &info, tsk->parent);
1592 __wake_up_parent(tsk, tsk->parent); 1665 __wake_up_parent(tsk, tsk->parent);
1593 spin_unlock_irqrestore(&psig->siglock, flags); 1666 spin_unlock_irqrestore(&psig->siglock, flags);
1594 1667
1595 return ret; 1668 return autoreap;
1596} 1669}
1597 1670
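With the bool conversion, callers no longer peek at the -1 exit_signal sentinel; they act on the return value. A fragment sketching the resulting caller pattern, modeled on the kernel/exit.c usage this merge introduces (illustrative, not verbatim from the diff):

	bool autoreap = do_notify_parent(tsk, tsk->exit_signal);

	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
	if (autoreap)
		release_task(tsk);	/* nobody is going to wait for us */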
1598/** 1671/**
@@ -1665,7 +1738,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
1665 1738
1666static inline int may_ptrace_stop(void) 1739static inline int may_ptrace_stop(void)
1667{ 1740{
1668 if (!likely(task_ptrace(current))) 1741 if (!likely(current->ptrace))
1669 return 0; 1742 return 0;
1670 /* 1743 /*
1671 * Are we in the middle of do_coredump? 1744 * Are we in the middle of do_coredump?
@@ -1694,15 +1767,6 @@ static int sigkill_pending(struct task_struct *tsk)
1694} 1767}
1695 1768
1696/* 1769/*
1697 * Test whether the target task of the usual cldstop notification - the
1698 * real_parent of @child - is in the same group as the ptracer.
1699 */
1700static bool real_parent_is_ptracer(struct task_struct *child)
1701{
1702 return same_thread_group(child->parent, child->real_parent);
1703}
1704
1705/*
1706 * This must be called with current->sighand->siglock held. 1770 * This must be called with current->sighand->siglock held.
1707 * 1771 *
1708 * This should be the path for all ptrace stops. 1772 * This should be the path for all ptrace stops.
@@ -1739,31 +1803,34 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1739 } 1803 }
1740 1804
1741 /* 1805 /*
1742 * If @why is CLD_STOPPED, we're trapping to participate in a group 1806 * We're committing to trapping. TRACED should be visible before
 1743 * stop. Do the bookkeeping. Note that if SIGCONT was delivered 1807 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1744 * while siglock was released for the arch hook, PENDING could be 1808 * Also, transition to TRACED and updates to ->jobctl should be
1745 * clear now. We act as if SIGCONT is received after TASK_TRACED 1809 * atomic with respect to siglock and should be done after the arch
1746 * is entered - ignore it. 1810 * hook as siglock is released and regrabbed across it.
1747 */ 1811 */
1748 if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING)) 1812 set_current_state(TASK_TRACED);
1749 gstop_done = task_participate_group_stop(current);
1750 1813
1751 current->last_siginfo = info; 1814 current->last_siginfo = info;
1752 current->exit_code = exit_code; 1815 current->exit_code = exit_code;
1753 1816
1754 /* 1817 /*
1755 * TRACED should be visible before TRAPPING is cleared; otherwise, 1818 * If @why is CLD_STOPPED, we're trapping to participate in a group
 1756 * the tracer might fail do_wait(). 1819 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
1820 * across siglock relocks since INTERRUPT was scheduled, PENDING
1821 * could be clear now. We act as if SIGCONT is received after
1822 * TASK_TRACED is entered - ignore it.
1757 */ 1823 */
1758 set_current_state(TASK_TRACED); 1824 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1825 gstop_done = task_participate_group_stop(current);
1759 1826
1760 /* 1827 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1761 * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and 1828 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1762 * transition to TASK_TRACED should be atomic with respect to 1829 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
 1763 * siglock. This should be done after the arch hook as siglock is 1830 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1764 * released and regrabbed across it. 1831
1765 */ 1832 /* entering a trap, clear TRAPPING */
1766 task_clear_group_stop_trapping(current); 1833 task_clear_jobctl_trapping(current);
1767 1834
1768 spin_unlock_irq(&current->sighand->siglock); 1835 spin_unlock_irq(&current->sighand->siglock);
1769 read_lock(&tasklist_lock); 1836 read_lock(&tasklist_lock);
@@ -1779,7 +1846,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1779 * separately unless they're gonna be duplicates. 1846 * separately unless they're gonna be duplicates.
1780 */ 1847 */
1781 do_notify_parent_cldstop(current, true, why); 1848 do_notify_parent_cldstop(current, true, why);
1782 if (gstop_done && !real_parent_is_ptracer(current)) 1849 if (gstop_done && ptrace_reparented(current))
1783 do_notify_parent_cldstop(current, false, why); 1850 do_notify_parent_cldstop(current, false, why);
1784 1851
1785 /* 1852 /*
@@ -1799,9 +1866,9 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1799 * 1866 *
1800 * If @gstop_done, the ptracer went away between group stop 1867 * If @gstop_done, the ptracer went away between group stop
1801 * completion and here. During detach, it would have set 1868 * completion and here. During detach, it would have set
1802 * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED 1869 * JOBCTL_STOP_PENDING on us and we'll re-enter
1803 * in do_signal_stop() on return, so notifying the real 1870 * TASK_STOPPED in do_signal_stop() on return, so notifying
1804 * parent of the group stop completion is enough. 1871 * the real parent of the group stop completion is enough.
1805 */ 1872 */
1806 if (gstop_done) 1873 if (gstop_done)
1807 do_notify_parent_cldstop(current, false, why); 1874 do_notify_parent_cldstop(current, false, why);
@@ -1827,6 +1894,9 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1827 spin_lock_irq(&current->sighand->siglock); 1894 spin_lock_irq(&current->sighand->siglock);
1828 current->last_siginfo = NULL; 1895 current->last_siginfo = NULL;
1829 1896
1897 /* LISTENING can be set only during STOP traps, clear it */
1898 current->jobctl &= ~JOBCTL_LISTENING;
1899
1830 /* 1900 /*
1831 * Queued signals ignored us while we were stopped for tracing. 1901 * Queued signals ignored us while we were stopped for tracing.
1832 * So check for any that we should take before resuming user mode. 1902 * So check for any that we should take before resuming user mode.
@@ -1835,44 +1905,66 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1835 recalc_sigpending_tsk(current); 1905 recalc_sigpending_tsk(current);
1836} 1906}
1837 1907
1838void ptrace_notify(int exit_code) 1908static void ptrace_do_notify(int signr, int exit_code, int why)
1839{ 1909{
1840 siginfo_t info; 1910 siginfo_t info;
1841 1911
1842 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1843
1844 memset(&info, 0, sizeof info); 1912 memset(&info, 0, sizeof info);
1845 info.si_signo = SIGTRAP; 1913 info.si_signo = signr;
1846 info.si_code = exit_code; 1914 info.si_code = exit_code;
1847 info.si_pid = task_pid_vnr(current); 1915 info.si_pid = task_pid_vnr(current);
1848 info.si_uid = current_uid(); 1916 info.si_uid = current_uid();
1849 1917
1850 /* Let the debugger run. */ 1918 /* Let the debugger run. */
1919 ptrace_stop(exit_code, why, 1, &info);
1920}
1921
1922void ptrace_notify(int exit_code)
1923{
1924 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1925
1851 spin_lock_irq(&current->sighand->siglock); 1926 spin_lock_irq(&current->sighand->siglock);
1852 ptrace_stop(exit_code, CLD_TRAPPED, 1, &info); 1927 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1853 spin_unlock_irq(&current->sighand->siglock); 1928 spin_unlock_irq(&current->sighand->siglock);
1854} 1929}
1855 1930
1856/* 1931/**
1857 * This performs the stopping for SIGSTOP and other stop signals. 1932 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1858 * We have to stop all threads in the thread group. 1933 * @signr: signr causing group stop if initiating
1859 * Returns non-zero if we've actually stopped and released the siglock. 1934 *
1860 * Returns zero if we didn't stop and still hold the siglock. 1935 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1936 * and participate in it. If already set, participate in the existing
 1937 * group stop. If we participated in a group stop (and thus slept), %true is
1938 * returned with siglock released.
1939 *
1940 * If ptraced, this function doesn't handle stop itself. Instead,
1941 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1942 * untouched. The caller must ensure that INTERRUPT trap handling takes
 1943 * place afterwards.
1944 *
1945 * CONTEXT:
1946 * Must be called with @current->sighand->siglock held, which is released
1947 * on %true return.
1948 *
1949 * RETURNS:
1950 * %false if group stop is already cancelled or ptrace trap is scheduled.
1951 * %true if participated in group stop.
1861 */ 1952 */
1862static int do_signal_stop(int signr) 1953static bool do_signal_stop(int signr)
1954 __releases(&current->sighand->siglock)
1863{ 1955{
1864 struct signal_struct *sig = current->signal; 1956 struct signal_struct *sig = current->signal;
1865 1957
1866 if (!(current->group_stop & GROUP_STOP_PENDING)) { 1958 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
1867 unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME; 1959 unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
1868 struct task_struct *t; 1960 struct task_struct *t;
1869 1961
1870 /* signr will be recorded in task->group_stop for retries */ 1962 /* signr will be recorded in task->jobctl for retries */
1871 WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK); 1963 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
1872 1964
1873 if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) || 1965 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
1874 unlikely(signal_group_exit(sig))) 1966 unlikely(signal_group_exit(sig)))
1875 return 0; 1967 return false;
1876 /* 1968 /*
1877 * There is no group stop already in progress. We must 1969 * There is no group stop already in progress. We must
1878 * initiate one now. 1970 * initiate one now.
@@ -1895,28 +1987,32 @@ static int do_signal_stop(int signr)
1895 if (!(sig->flags & SIGNAL_STOP_STOPPED)) 1987 if (!(sig->flags & SIGNAL_STOP_STOPPED))
1896 sig->group_exit_code = signr; 1988 sig->group_exit_code = signr;
1897 else 1989 else
1898 WARN_ON_ONCE(!task_ptrace(current)); 1990 WARN_ON_ONCE(!current->ptrace);
1991
1992 sig->group_stop_count = 0;
1993
1994 if (task_set_jobctl_pending(current, signr | gstop))
1995 sig->group_stop_count++;
1899 1996
1900 current->group_stop &= ~GROUP_STOP_SIGMASK;
1901 current->group_stop |= signr | gstop;
1902 sig->group_stop_count = 1;
1903 for (t = next_thread(current); t != current; 1997 for (t = next_thread(current); t != current;
1904 t = next_thread(t)) { 1998 t = next_thread(t)) {
1905 t->group_stop &= ~GROUP_STOP_SIGMASK;
1906 /* 1999 /*
1907 * Setting state to TASK_STOPPED for a group 2000 * Setting state to TASK_STOPPED for a group
1908 * stop is always done with the siglock held, 2001 * stop is always done with the siglock held,
1909 * so this check has no races. 2002 * so this check has no races.
1910 */ 2003 */
1911 if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) { 2004 if (!task_is_stopped(t) &&
1912 t->group_stop |= signr | gstop; 2005 task_set_jobctl_pending(t, signr | gstop)) {
1913 sig->group_stop_count++; 2006 sig->group_stop_count++;
1914 signal_wake_up(t, 0); 2007 if (likely(!(t->ptrace & PT_SEIZED)))
2008 signal_wake_up(t, 0);
2009 else
2010 ptrace_trap_notify(t);
1915 } 2011 }
1916 } 2012 }
1917 } 2013 }
1918retry: 2014
1919 if (likely(!task_ptrace(current))) { 2015 if (likely(!current->ptrace)) {
1920 int notify = 0; 2016 int notify = 0;
1921 2017
1922 /* 2018 /*
@@ -1947,43 +2043,65 @@ retry:
1947 2043
1948 /* Now we don't run again until woken by SIGCONT or SIGKILL */ 2044 /* Now we don't run again until woken by SIGCONT or SIGKILL */
1949 schedule(); 2045 schedule();
1950 2046 return true;
1951 spin_lock_irq(&current->sighand->siglock);
1952 } else { 2047 } else {
1953 ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK, 2048 /*
1954 CLD_STOPPED, 0, NULL); 2049 * While ptraced, group stop is handled by STOP trap.
1955 current->exit_code = 0; 2050 * Schedule it and let the caller deal with it.
2051 */
2052 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2053 return false;
1956 } 2054 }
2055}
1957 2056
1958 /* 2057/**
1959 * GROUP_STOP_PENDING could be set if another group stop has 2058 * do_jobctl_trap - take care of ptrace jobctl traps
1960 * started since being woken up or ptrace wants us to transit 2059 *
1961 * between TASK_STOPPED and TRACED. Retry group stop. 2060 * When PT_SEIZED, it's used for both group stop and explicit
1962 */ 2061 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
1963 if (current->group_stop & GROUP_STOP_PENDING) { 2062 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
1964 WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK)); 2063 * the stop signal; otherwise, %SIGTRAP.
1965 goto retry; 2064 *
2065 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2066 * number as exit_code and no siginfo.
2067 *
2068 * CONTEXT:
2069 * Must be called with @current->sighand->siglock held, which may be
2070 * released and re-acquired before returning with intervening sleep.
2071 */
2072static void do_jobctl_trap(void)
2073{
2074 struct signal_struct *signal = current->signal;
2075 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2076
2077 if (current->ptrace & PT_SEIZED) {
2078 if (!signal->group_stop_count &&
2079 !(signal->flags & SIGNAL_STOP_STOPPED))
2080 signr = SIGTRAP;
2081 WARN_ON_ONCE(!signr);
2082 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2083 CLD_STOPPED);
2084 } else {
2085 WARN_ON_ONCE(!signr);
2086 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2087 current->exit_code = 0;
1966 } 2088 }
1967
1968 /* PTRACE_ATTACH might have raced with task killing, clear trapping */
1969 task_clear_group_stop_trapping(current);
1970
1971 spin_unlock_irq(&current->sighand->siglock);
1972
1973 tracehook_finish_jctl();
1974
1975 return 1;
1976} 2089}
1977 2090
1978static int ptrace_signal(int signr, siginfo_t *info, 2091static int ptrace_signal(int signr, siginfo_t *info,
1979 struct pt_regs *regs, void *cookie) 2092 struct pt_regs *regs, void *cookie)
1980{ 2093{
1981 if (!task_ptrace(current))
1982 return signr;
1983
1984 ptrace_signal_deliver(regs, cookie); 2094 ptrace_signal_deliver(regs, cookie);
1985 2095 /*
1986 /* Let the debugger run. */ 2096 * We do not check sig_kernel_stop(signr) but set this marker
2097 * unconditionally because we do not know whether debugger will
2098 * change signr. This flag has no meaning unless we are going
2099 * to stop after return from ptrace_stop(). In this case it will
2100 * be checked in do_signal_stop(), we should only stop if it was
2101 * not cleared by SIGCONT while we were sleeping. See also the
2102 * comment in dequeue_signal().
2103 */
2104 current->jobctl |= JOBCTL_STOP_DEQUEUED;
1987 ptrace_stop(signr, CLD_TRAPPED, 0, info); 2105 ptrace_stop(signr, CLD_TRAPPED, 0, info);
1988 2106
1989 /* We're back. Did the debugger cancel the sig? */ 2107 /* We're back. Did the debugger cancel the sig? */
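On the tracer side, the traps generated by do_jobctl_trap() above arrive with exit_code = signr | (PTRACE_EVENT_STOP << 8), so the event number can be recovered from the waitpid() status. A sketch of the decode (PTRACE_EVENT_STOP itself comes from this series' <linux/ptrace.h>):

#include <sys/wait.h>
#include <linux/ptrace.h>

static int ptrace_event_of(int status)
{
	/* waitpid() reports exit_code in bits 8..31 of the status word,
	 * so a ptrace event number lands in bits 16..31. */
	return WIFSTOPPED(status) ? (status >> 16) : 0;
}

/* usage: if (ptrace_event_of(status) == PTRACE_EVENT_STOP)
 *		signr = WSTOPSIG(status);	-- group stop or INTERRUPT */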
@@ -2039,7 +2157,6 @@ relock:
2039 * the CLD_ si_code into SIGNAL_CLD_MASK bits. 2157 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2040 */ 2158 */
2041 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { 2159 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2042 struct task_struct *leader;
2043 int why; 2160 int why;
2044 2161
2045 if (signal->flags & SIGNAL_CLD_CONTINUED) 2162 if (signal->flags & SIGNAL_CLD_CONTINUED)
@@ -2060,13 +2177,11 @@ relock:
2060 * a duplicate. 2177 * a duplicate.
2061 */ 2178 */
2062 read_lock(&tasklist_lock); 2179 read_lock(&tasklist_lock);
2063
2064 do_notify_parent_cldstop(current, false, why); 2180 do_notify_parent_cldstop(current, false, why);
2065 2181
2066 leader = current->group_leader; 2182 if (ptrace_reparented(current->group_leader))
2067 if (task_ptrace(leader) && !real_parent_is_ptracer(leader)) 2183 do_notify_parent_cldstop(current->group_leader,
2068 do_notify_parent_cldstop(leader, true, why); 2184 true, why);
2069
2070 read_unlock(&tasklist_lock); 2185 read_unlock(&tasklist_lock);
2071 2186
2072 goto relock; 2187 goto relock;
@@ -2074,37 +2189,31 @@ relock:
2074 2189
2075 for (;;) { 2190 for (;;) {
2076 struct k_sigaction *ka; 2191 struct k_sigaction *ka;
2077 /* 2192
2078 * Tracing can induce an artificial signal and choose sigaction. 2193 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2079 * The return value in @signr determines the default action, 2194 do_signal_stop(0))
2080 * but @info->si_signo is the signal number we will report.
2081 */
2082 signr = tracehook_get_signal(current, regs, info, return_ka);
2083 if (unlikely(signr < 0))
2084 goto relock; 2195 goto relock;
2085 if (unlikely(signr != 0))
2086 ka = return_ka;
2087 else {
2088 if (unlikely(current->group_stop &
2089 GROUP_STOP_PENDING) && do_signal_stop(0))
2090 goto relock;
2091 2196
2092 signr = dequeue_signal(current, &current->blocked, 2197 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2093 info); 2198 do_jobctl_trap();
2199 spin_unlock_irq(&sighand->siglock);
2200 goto relock;
2201 }
2094 2202
2095 if (!signr) 2203 signr = dequeue_signal(current, &current->blocked, info);
2096 break; /* will return 0 */
2097 2204
2098 if (signr != SIGKILL) { 2205 if (!signr)
2099 signr = ptrace_signal(signr, info, 2206 break; /* will return 0 */
2100 regs, cookie);
2101 if (!signr)
2102 continue;
2103 }
2104 2207
2105 ka = &sighand->action[signr-1]; 2208 if (unlikely(current->ptrace) && signr != SIGKILL) {
2209 signr = ptrace_signal(signr, info,
2210 regs, cookie);
2211 if (!signr)
2212 continue;
2106 } 2213 }
2107 2214
2215 ka = &sighand->action[signr-1];
2216
2108 /* Trace actually delivered signals. */ 2217 /* Trace actually delivered signals. */
2109 trace_signal_deliver(signr, info, ka); 2218 trace_signal_deliver(signr, info, ka);
2110 2219
@@ -2260,7 +2369,7 @@ void exit_signals(struct task_struct *tsk)
2260 signotset(&unblocked); 2369 signotset(&unblocked);
2261 retarget_shared_pending(tsk, &unblocked); 2370 retarget_shared_pending(tsk, &unblocked);
2262 2371
2263 if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) && 2372 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2264 task_participate_group_stop(tsk)) 2373 task_participate_group_stop(tsk))
2265 group_stop = CLD_STOPPED; 2374 group_stop = CLD_STOPPED;
2266out: 2375out:
diff --git a/mm/nommu.c b/mm/nommu.c
index 9edc897a3970..5c5c2d4b1807 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -22,7 +22,6 @@
22#include <linux/pagemap.h> 22#include <linux/pagemap.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/vmalloc.h> 24#include <linux/vmalloc.h>
25#include <linux/tracehook.h>
26#include <linux/blkdev.h> 25#include <linux/blkdev.h>
27#include <linux/backing-dev.h> 26#include <linux/backing-dev.h>
28#include <linux/mount.h> 27#include <linux/mount.h>
@@ -1087,7 +1086,7 @@ static unsigned long determine_vm_flags(struct file *file,
1087 * it's being traced - otherwise breakpoints set in it may interfere 1086 * it's being traced - otherwise breakpoints set in it may interfere
1088 * with another untraced process 1087 * with another untraced process
1089 */ 1088 */
1090 if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current)) 1089 if ((flags & MAP_PRIVATE) && current->ptrace)
1091 vm_flags &= ~VM_MAYSHARE; 1090 vm_flags &= ~VM_MAYSHARE;
1092 1091
1093 return vm_flags; 1092 return vm_flags;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index e4b0991ca351..b0be989d4365 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -339,8 +339,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
339 * then wait for it to finish before killing 339 * then wait for it to finish before killing
340 * some other task unnecessarily. 340 * some other task unnecessarily.
341 */ 341 */
342 if (!(task_ptrace(p->group_leader) & 342 if (!(p->group_leader->ptrace & PT_TRACE_EXIT))
343 PT_TRACE_EXIT))
344 return ERR_PTR(-1UL); 343 return ERR_PTR(-1UL);
345 } 344 }
346 } 345 }
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index c825c6e0b636..7312bf9f7afc 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -67,7 +67,7 @@ static int may_change_ptraced_domain(struct task_struct *task,
67 int error = 0; 67 int error = 0;
68 68
69 rcu_read_lock(); 69 rcu_read_lock();
70 tracer = tracehook_tracer_task(task); 70 tracer = ptrace_parent(task);
71 if (tracer) { 71 if (tracer) {
72 /* released below */ 72 /* released below */
73 cred = get_task_cred(tracer); 73 cred = get_task_cred(tracer);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 20219ef5439a..422515509f3d 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2053,7 +2053,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
2053 u32 ptsid = 0; 2053 u32 ptsid = 0;
2054 2054
2055 rcu_read_lock(); 2055 rcu_read_lock();
2056 tracer = tracehook_tracer_task(current); 2056 tracer = ptrace_parent(current);
2057 if (likely(tracer != NULL)) { 2057 if (likely(tracer != NULL)) {
2058 sec = __task_cred(tracer)->security; 2058 sec = __task_cred(tracer)->security;
2059 ptsid = sec->sid; 2059 ptsid = sec->sid;
@@ -5319,7 +5319,7 @@ static int selinux_setprocattr(struct task_struct *p,
5319 Otherwise, leave SID unchanged and fail. */ 5319 Otherwise, leave SID unchanged and fail. */
5320 ptsid = 0; 5320 ptsid = 0;
5321 task_lock(p); 5321 task_lock(p);
5322 tracer = tracehook_tracer_task(p); 5322 tracer = ptrace_parent(p);
5323 if (tracer) 5323 if (tracer)
5324 ptsid = task_sid(tracer); 5324 ptsid = task_sid(tracer);
5325 task_unlock(p); 5325 task_unlock(p);