aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2011-07-22 18:06:50 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-07-22 18:06:50 -0400
commit8209f53d79444747782a28520187abaf689761f2 (patch)
tree726270ea29e037f026d77a99787b9d844531ac42 /include
parent22a3b9771117d566def0150ea787fcc95f16e724 (diff)
parenteac1b5e57d7abc836e78fd3fbcf77dbeed01edc9 (diff)
Merge branch 'ptrace' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc
* 'ptrace' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc: (39 commits) ptrace: do_wait(traced_leader_killed_by_mt_exec) can block forever ptrace: fix ptrace_signal() && STOP_DEQUEUED interaction connector: add an event for monitoring process tracers ptrace: dont send SIGSTOP on auto-attach if PT_SEIZED ptrace: mv send-SIGSTOP from do_fork() to ptrace_init_task() ptrace_init_task: initialize child->jobctl explicitly has_stopped_jobs: s/task_is_stopped/SIGNAL_STOP_STOPPED/ ptrace: make former thread ID available via PTRACE_GETEVENTMSG after PTRACE_EVENT_EXEC stop ptrace: wait_consider_task: s/same_thread_group/ptrace_reparented/ ptrace: kill real_parent_is_ptracer() in in favor of ptrace_reparented() ptrace: ptrace_reparented() should check same_thread_group() redefine thread_group_leader() as exit_signal >= 0 do not change dead_task->exit_signal kill task_detached() reparent_leader: check EXIT_DEAD instead of task_detached() make do_notify_parent() __must_check, update the callers __ptrace_detach: avoid task_detached(), check do_notify_parent() kill tracehook_notify_death() make do_notify_parent() return bool ptrace: s/tracehook_tracer_task()/ptrace_parent()/ ...
Diffstat (limited to 'include')
-rw-r--r--include/linux/cn_proc.h13
-rw-r--r--include/linux/ptrace.h104
-rw-r--r--include/linux/sched.h52
-rw-r--r--include/linux/tracehook.h385
4 files changed, 121 insertions, 433 deletions
diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h
index 47dac5ea8d3a..12c517b51ca2 100644
--- a/include/linux/cn_proc.h
+++ b/include/linux/cn_proc.h
@@ -53,6 +53,7 @@ struct proc_event {
53 PROC_EVENT_UID = 0x00000004, 53 PROC_EVENT_UID = 0x00000004,
54 PROC_EVENT_GID = 0x00000040, 54 PROC_EVENT_GID = 0x00000040,
55 PROC_EVENT_SID = 0x00000080, 55 PROC_EVENT_SID = 0x00000080,
56 PROC_EVENT_PTRACE = 0x00000100,
56 /* "next" should be 0x00000400 */ 57 /* "next" should be 0x00000400 */
57 /* "last" is the last process event: exit */ 58 /* "last" is the last process event: exit */
58 PROC_EVENT_EXIT = 0x80000000 59 PROC_EVENT_EXIT = 0x80000000
@@ -95,6 +96,13 @@ struct proc_event {
95 __kernel_pid_t process_tgid; 96 __kernel_pid_t process_tgid;
96 } sid; 97 } sid;
97 98
99 struct ptrace_proc_event {
100 __kernel_pid_t process_pid;
101 __kernel_pid_t process_tgid;
102 __kernel_pid_t tracer_pid;
103 __kernel_pid_t tracer_tgid;
104 } ptrace;
105
98 struct exit_proc_event { 106 struct exit_proc_event {
99 __kernel_pid_t process_pid; 107 __kernel_pid_t process_pid;
100 __kernel_pid_t process_tgid; 108 __kernel_pid_t process_tgid;
@@ -109,6 +117,7 @@ void proc_fork_connector(struct task_struct *task);
109void proc_exec_connector(struct task_struct *task); 117void proc_exec_connector(struct task_struct *task);
110void proc_id_connector(struct task_struct *task, int which_id); 118void proc_id_connector(struct task_struct *task, int which_id);
111void proc_sid_connector(struct task_struct *task); 119void proc_sid_connector(struct task_struct *task);
120void proc_ptrace_connector(struct task_struct *task, int which_id);
112void proc_exit_connector(struct task_struct *task); 121void proc_exit_connector(struct task_struct *task);
113#else 122#else
114static inline void proc_fork_connector(struct task_struct *task) 123static inline void proc_fork_connector(struct task_struct *task)
@@ -124,6 +133,10 @@ static inline void proc_id_connector(struct task_struct *task,
124static inline void proc_sid_connector(struct task_struct *task) 133static inline void proc_sid_connector(struct task_struct *task)
125{} 134{}
126 135
136static inline void proc_ptrace_connector(struct task_struct *task,
137 int ptrace_id)
138{}
139
127static inline void proc_exit_connector(struct task_struct *task) 140static inline void proc_exit_connector(struct task_struct *task)
128{} 141{}
129#endif /* CONFIG_PROC_EVENTS */ 142#endif /* CONFIG_PROC_EVENTS */
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 9178d5cc0b01..800f113bea66 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -47,6 +47,13 @@
47#define PTRACE_GETREGSET 0x4204 47#define PTRACE_GETREGSET 0x4204
48#define PTRACE_SETREGSET 0x4205 48#define PTRACE_SETREGSET 0x4205
49 49
50#define PTRACE_SEIZE 0x4206
51#define PTRACE_INTERRUPT 0x4207
52#define PTRACE_LISTEN 0x4208
53
54/* flags in @data for PTRACE_SEIZE */
55#define PTRACE_SEIZE_DEVEL 0x80000000 /* temp flag for development */
56
50/* options set using PTRACE_SETOPTIONS */ 57/* options set using PTRACE_SETOPTIONS */
51#define PTRACE_O_TRACESYSGOOD 0x00000001 58#define PTRACE_O_TRACESYSGOOD 0x00000001
52#define PTRACE_O_TRACEFORK 0x00000002 59#define PTRACE_O_TRACEFORK 0x00000002
@@ -65,6 +72,7 @@
65#define PTRACE_EVENT_EXEC 4 72#define PTRACE_EVENT_EXEC 4
66#define PTRACE_EVENT_VFORK_DONE 5 73#define PTRACE_EVENT_VFORK_DONE 5
67#define PTRACE_EVENT_EXIT 6 74#define PTRACE_EVENT_EXIT 6
75#define PTRACE_EVENT_STOP 7
68 76
69#include <asm/ptrace.h> 77#include <asm/ptrace.h>
70 78
@@ -77,16 +85,22 @@
77 * flags. When a task is stopped the ptracer owns task->ptrace. 85 * flags. When a task is stopped the ptracer owns task->ptrace.
78 */ 86 */
79 87
88#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
80#define PT_PTRACED 0x00000001 89#define PT_PTRACED 0x00000001
81#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */ 90#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
82#define PT_TRACESYSGOOD 0x00000004 91#define PT_TRACESYSGOOD 0x00000004
83#define PT_PTRACE_CAP 0x00000008 /* ptracer can follow suid-exec */ 92#define PT_PTRACE_CAP 0x00000008 /* ptracer can follow suid-exec */
84#define PT_TRACE_FORK 0x00000010 93
85#define PT_TRACE_VFORK 0x00000020 94/* PT_TRACE_* event enable flags */
86#define PT_TRACE_CLONE 0x00000040 95#define PT_EVENT_FLAG_SHIFT 4
87#define PT_TRACE_EXEC 0x00000080 96#define PT_EVENT_FLAG(event) (1 << (PT_EVENT_FLAG_SHIFT + (event) - 1))
88#define PT_TRACE_VFORK_DONE 0x00000100 97
89#define PT_TRACE_EXIT 0x00000200 98#define PT_TRACE_FORK PT_EVENT_FLAG(PTRACE_EVENT_FORK)
99#define PT_TRACE_VFORK PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
100#define PT_TRACE_CLONE PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
101#define PT_TRACE_EXEC PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
102#define PT_TRACE_VFORK_DONE PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
103#define PT_TRACE_EXIT PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
90 104
91#define PT_TRACE_MASK 0x000003f4 105#define PT_TRACE_MASK 0x000003f4
92 106
@@ -105,7 +119,7 @@ extern long arch_ptrace(struct task_struct *child, long request,
105extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); 119extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
106extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); 120extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
107extern void ptrace_disable(struct task_struct *); 121extern void ptrace_disable(struct task_struct *);
108extern int ptrace_check_attach(struct task_struct *task, int kill); 122extern int ptrace_check_attach(struct task_struct *task, bool ignore_state);
109extern int ptrace_request(struct task_struct *child, long request, 123extern int ptrace_request(struct task_struct *child, long request,
110 unsigned long addr, unsigned long data); 124 unsigned long addr, unsigned long data);
111extern void ptrace_notify(int exit_code); 125extern void ptrace_notify(int exit_code);
@@ -122,7 +136,7 @@ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
122 136
123static inline int ptrace_reparented(struct task_struct *child) 137static inline int ptrace_reparented(struct task_struct *child)
124{ 138{
125 return child->real_parent != child->parent; 139 return !same_thread_group(child->real_parent, child->parent);
126} 140}
127 141
128static inline void ptrace_unlink(struct task_struct *child) 142static inline void ptrace_unlink(struct task_struct *child)
@@ -137,36 +151,56 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
137 unsigned long data); 151 unsigned long data);
138 152
139/** 153/**
140 * task_ptrace - return %PT_* flags that apply to a task 154 * ptrace_parent - return the task that is tracing the given task
141 * @task: pointer to &task_struct in question 155 * @task: task to consider
142 * 156 *
143 * Returns the %PT_* flags that apply to @task. 157 * Returns %NULL if no one is tracing @task, or the &struct task_struct
158 * pointer to its tracer.
159 *
160 * Must be called under rcu_read_lock(). The pointer returned might be kept
161 * live only by RCU. During exec, this may be called with task_lock() held
162 * on @task, still held from when check_unsafe_exec() was called.
144 */ 163 */
145static inline int task_ptrace(struct task_struct *task) 164static inline struct task_struct *ptrace_parent(struct task_struct *task)
146{ 165{
147 return task->ptrace; 166 if (unlikely(task->ptrace))
167 return rcu_dereference(task->parent);
168 return NULL;
169}
170
171/**
172 * ptrace_event_enabled - test whether a ptrace event is enabled
173 * @task: ptracee of interest
174 * @event: %PTRACE_EVENT_* to test
175 *
176 * Test whether @event is enabled for ptracee @task.
177 *
178 * Returns %true if @event is enabled, %false otherwise.
179 */
180static inline bool ptrace_event_enabled(struct task_struct *task, int event)
181{
182 return task->ptrace & PT_EVENT_FLAG(event);
148} 183}
149 184
150/** 185/**
151 * ptrace_event - possibly stop for a ptrace event notification 186 * ptrace_event - possibly stop for a ptrace event notification
152 * @mask: %PT_* bit to check in @current->ptrace 187 * @event: %PTRACE_EVENT_* value to report
153 * @event: %PTRACE_EVENT_* value to report if @mask is set
154 * @message: value for %PTRACE_GETEVENTMSG to return 188 * @message: value for %PTRACE_GETEVENTMSG to return
155 * 189 *
156 * This checks the @mask bit to see if ptrace wants stops for this event. 190 * Check whether @event is enabled and, if so, report @event and @message
157 * If so we stop, reporting @event and @message to the ptrace parent. 191 * to the ptrace parent.
158 *
159 * Returns nonzero if we did a ptrace notification, zero if not.
160 * 192 *
161 * Called without locks. 193 * Called without locks.
162 */ 194 */
163static inline int ptrace_event(int mask, int event, unsigned long message) 195static inline void ptrace_event(int event, unsigned long message)
164{ 196{
165 if (mask && likely(!(current->ptrace & mask))) 197 if (unlikely(ptrace_event_enabled(current, event))) {
166 return 0; 198 current->ptrace_message = message;
167 current->ptrace_message = message; 199 ptrace_notify((event << 8) | SIGTRAP);
168 ptrace_notify((event << 8) | SIGTRAP); 200 } else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
169 return 1; 201 /* legacy EXEC report via SIGTRAP */
202 send_sig(SIGTRAP, current, 0);
203 }
170} 204}
171 205
172/** 206/**
@@ -183,16 +217,24 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
183{ 217{
184 INIT_LIST_HEAD(&child->ptrace_entry); 218 INIT_LIST_HEAD(&child->ptrace_entry);
185 INIT_LIST_HEAD(&child->ptraced); 219 INIT_LIST_HEAD(&child->ptraced);
186 child->parent = child->real_parent; 220#ifdef CONFIG_HAVE_HW_BREAKPOINT
221 atomic_set(&child->ptrace_bp_refcnt, 1);
222#endif
223 child->jobctl = 0;
187 child->ptrace = 0; 224 child->ptrace = 0;
188 if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) { 225 child->parent = child->real_parent;
226
227 if (unlikely(ptrace) && current->ptrace) {
189 child->ptrace = current->ptrace; 228 child->ptrace = current->ptrace;
190 __ptrace_link(child, current->parent); 229 __ptrace_link(child, current->parent);
191 }
192 230
193#ifdef CONFIG_HAVE_HW_BREAKPOINT 231 if (child->ptrace & PT_SEIZED)
194 atomic_set(&child->ptrace_bp_refcnt, 1); 232 task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
195#endif 233 else
234 sigaddset(&child->pending.signal, SIGSTOP);
235
236 set_tsk_thread_flag(child, TIF_SIGPENDING);
237 }
196} 238}
197 239
198/** 240/**
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 14a6c7b545de..f6ef727ee4fc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1292,7 +1292,7 @@ struct task_struct {
1292 int exit_state; 1292 int exit_state;
1293 int exit_code, exit_signal; 1293 int exit_code, exit_signal;
1294 int pdeath_signal; /* The signal sent when the parent dies */ 1294 int pdeath_signal; /* The signal sent when the parent dies */
1295 unsigned int group_stop; /* GROUP_STOP_*, siglock protected */ 1295 unsigned int jobctl; /* JOBCTL_*, siglock protected */
1296 /* ??? */ 1296 /* ??? */
1297 unsigned int personality; 1297 unsigned int personality;
1298 unsigned did_exec:1; 1298 unsigned did_exec:1;
@@ -1813,15 +1813,34 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
1813#define used_math() tsk_used_math(current) 1813#define used_math() tsk_used_math(current)
1814 1814
1815/* 1815/*
1816 * task->group_stop flags 1816 * task->jobctl flags
1817 */ 1817 */
1818#define GROUP_STOP_SIGMASK 0xffff /* signr of the last group stop */ 1818#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */
1819#define GROUP_STOP_PENDING (1 << 16) /* task should stop for group stop */ 1819
1820#define GROUP_STOP_CONSUME (1 << 17) /* consume group stop count */ 1820#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */
1821#define GROUP_STOP_TRAPPING (1 << 18) /* switching from STOPPED to TRACED */ 1821#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
1822#define GROUP_STOP_DEQUEUED (1 << 19) /* stop signal dequeued */ 1822#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */
1823 1823#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */
1824extern void task_clear_group_stop_pending(struct task_struct *task); 1824#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */
1825#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
1826#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
1827
1828#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
1829#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
1830#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
1831#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
1832#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
1833#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
1834#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
1835
1836#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
1837#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
1838
1839extern bool task_set_jobctl_pending(struct task_struct *task,
1840 unsigned int mask);
1841extern void task_clear_jobctl_trapping(struct task_struct *task);
1842extern void task_clear_jobctl_pending(struct task_struct *task,
1843 unsigned int mask);
1825 1844
1826#ifdef CONFIG_PREEMPT_RCU 1845#ifdef CONFIG_PREEMPT_RCU
1827 1846
@@ -2136,7 +2155,7 @@ static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, s
2136 spin_unlock_irqrestore(&tsk->sighand->siglock, flags); 2155 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2137 2156
2138 return ret; 2157 return ret;
2139} 2158}
2140 2159
2141extern void block_all_signals(int (*notifier)(void *priv), void *priv, 2160extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2142 sigset_t *mask); 2161 sigset_t *mask);
@@ -2151,7 +2170,7 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_
2151extern int kill_pgrp(struct pid *pid, int sig, int priv); 2170extern int kill_pgrp(struct pid *pid, int sig, int priv);
2152extern int kill_pid(struct pid *pid, int sig, int priv); 2171extern int kill_pid(struct pid *pid, int sig, int priv);
2153extern int kill_proc_info(int, struct siginfo *, pid_t); 2172extern int kill_proc_info(int, struct siginfo *, pid_t);
2154extern int do_notify_parent(struct task_struct *, int); 2173extern __must_check bool do_notify_parent(struct task_struct *, int);
2155extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); 2174extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2156extern void force_sig(int, struct task_struct *); 2175extern void force_sig(int, struct task_struct *);
2157extern int send_sig(int, struct task_struct *, int); 2176extern int send_sig(int, struct task_struct *, int);
@@ -2275,8 +2294,10 @@ static inline int get_nr_threads(struct task_struct *tsk)
2275 return tsk->signal->nr_threads; 2294 return tsk->signal->nr_threads;
2276} 2295}
2277 2296
2278/* de_thread depends on thread_group_leader not being a pid based check */ 2297static inline bool thread_group_leader(struct task_struct *p)
2279#define thread_group_leader(p) (p == p->group_leader) 2298{
2299 return p->exit_signal >= 0;
2300}
2280 2301
2281 /* Due to the insanities of de_thread it is possible for a process 2302 /* Due to the insanities of de_thread it is possible for a process
2282 * to have the pid of the thread group leader without actually being 2303 * to have the pid of the thread group leader without actually being
@@ -2309,11 +2330,6 @@ static inline int thread_group_empty(struct task_struct *p)
2309#define delay_group_leader(p) \ 2330#define delay_group_leader(p) \
2310 (thread_group_leader(p) && !thread_group_empty(p)) 2331 (thread_group_leader(p) && !thread_group_empty(p))
2311 2332
2312static inline int task_detached(struct task_struct *p)
2313{
2314 return p->exit_signal == -1;
2315}
2316
2317/* 2333/*
2318 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring 2334 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2319 * subscriptions and synchronises with wait4(). Also used in procfs. Also 2335 * subscriptions and synchronises with wait4(). Also used in procfs. Also
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index e95f5236611f..a71a2927a6a0 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -51,27 +51,12 @@
51#include <linux/security.h> 51#include <linux/security.h>
52struct linux_binprm; 52struct linux_binprm;
53 53
54/**
55 * tracehook_expect_breakpoints - guess if task memory might be touched
56 * @task: current task, making a new mapping
57 *
58 * Return nonzero if @task is expected to want breakpoint insertion in
59 * its memory at some point. A zero return is no guarantee it won't
60 * be done, but this is a hint that it's known to be likely.
61 *
62 * May be called with @task->mm->mmap_sem held for writing.
63 */
64static inline int tracehook_expect_breakpoints(struct task_struct *task)
65{
66 return (task_ptrace(task) & PT_PTRACED) != 0;
67}
68
69/* 54/*
70 * ptrace report for syscall entry and exit looks identical. 55 * ptrace report for syscall entry and exit looks identical.
71 */ 56 */
72static inline void ptrace_report_syscall(struct pt_regs *regs) 57static inline void ptrace_report_syscall(struct pt_regs *regs)
73{ 58{
74 int ptrace = task_ptrace(current); 59 int ptrace = current->ptrace;
75 60
76 if (!(ptrace & PT_PTRACED)) 61 if (!(ptrace & PT_PTRACED))
77 return; 62 return;
@@ -145,229 +130,6 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
145} 130}
146 131
147/** 132/**
148 * tracehook_unsafe_exec - check for exec declared unsafe due to tracing
149 * @task: current task doing exec
150 *
151 * Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
152 *
153 * @task->signal->cred_guard_mutex is held by the caller through the do_execve().
154 */
155static inline int tracehook_unsafe_exec(struct task_struct *task)
156{
157 int unsafe = 0;
158 int ptrace = task_ptrace(task);
159 if (ptrace & PT_PTRACED) {
160 if (ptrace & PT_PTRACE_CAP)
161 unsafe |= LSM_UNSAFE_PTRACE_CAP;
162 else
163 unsafe |= LSM_UNSAFE_PTRACE;
164 }
165 return unsafe;
166}
167
168/**
169 * tracehook_tracer_task - return the task that is tracing the given task
170 * @tsk: task to consider
171 *
172 * Returns NULL if no one is tracing @task, or the &struct task_struct
173 * pointer to its tracer.
174 *
175 * Must be called under rcu_read_lock(). The pointer returned might be kept
176 * live only by RCU. During exec, this may be called with task_lock()
177 * held on @task, still held from when tracehook_unsafe_exec() was called.
178 */
179static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk)
180{
181 if (task_ptrace(tsk) & PT_PTRACED)
182 return rcu_dereference(tsk->parent);
183 return NULL;
184}
185
186/**
187 * tracehook_report_exec - a successful exec was completed
188 * @fmt: &struct linux_binfmt that performed the exec
189 * @bprm: &struct linux_binprm containing exec details
190 * @regs: user-mode register state
191 *
192 * An exec just completed, we are shortly going to return to user mode.
193 * The freshly initialized register state can be seen and changed in @regs.
194 * The name, file and other pointers in @bprm are still on hand to be
195 * inspected, but will be freed as soon as this returns.
196 *
197 * Called with no locks, but with some kernel resources held live
198 * and a reference on @fmt->module.
199 */
200static inline void tracehook_report_exec(struct linux_binfmt *fmt,
201 struct linux_binprm *bprm,
202 struct pt_regs *regs)
203{
204 if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
205 unlikely(task_ptrace(current) & PT_PTRACED))
206 send_sig(SIGTRAP, current, 0);
207}
208
209/**
210 * tracehook_report_exit - task has begun to exit
211 * @exit_code: pointer to value destined for @current->exit_code
212 *
213 * @exit_code points to the value passed to do_exit(), which tracing
214 * might change here. This is almost the first thing in do_exit(),
215 * before freeing any resources or setting the %PF_EXITING flag.
216 *
217 * Called with no locks held.
218 */
219static inline void tracehook_report_exit(long *exit_code)
220{
221 ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
222}
223
224/**
225 * tracehook_prepare_clone - prepare for new child to be cloned
226 * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
227 *
228 * This is called before a new user task is to be cloned.
229 * Its return value will be passed to tracehook_finish_clone().
230 *
231 * Called with no locks held.
232 */
233static inline int tracehook_prepare_clone(unsigned clone_flags)
234{
235 if (clone_flags & CLONE_UNTRACED)
236 return 0;
237
238 if (clone_flags & CLONE_VFORK) {
239 if (current->ptrace & PT_TRACE_VFORK)
240 return PTRACE_EVENT_VFORK;
241 } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
242 if (current->ptrace & PT_TRACE_CLONE)
243 return PTRACE_EVENT_CLONE;
244 } else if (current->ptrace & PT_TRACE_FORK)
245 return PTRACE_EVENT_FORK;
246
247 return 0;
248}
249
250/**
251 * tracehook_finish_clone - new child created and being attached
252 * @child: new child task
253 * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
254 * @trace: return value from tracehook_prepare_clone()
255 *
256 * This is called immediately after adding @child to its parent's children list.
257 * The @trace value is that returned by tracehook_prepare_clone().
258 *
259 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
260 */
261static inline void tracehook_finish_clone(struct task_struct *child,
262 unsigned long clone_flags, int trace)
263{
264 ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace);
265}
266
267/**
268 * tracehook_report_clone - in parent, new child is about to start running
269 * @regs: parent's user register state
270 * @clone_flags: flags from parent's system call
271 * @pid: new child's PID in the parent's namespace
272 * @child: new child task
273 *
274 * Called after a child is set up, but before it has been started running.
275 * This is not a good place to block, because the child has not started
276 * yet. Suspend the child here if desired, and then block in
277 * tracehook_report_clone_complete(). This must prevent the child from
278 * self-reaping if tracehook_report_clone_complete() uses the @child
279 * pointer; otherwise it might have died and been released by the time
280 * tracehook_report_clone_complete() is called.
281 *
282 * Called with no locks held, but the child cannot run until this returns.
283 */
284static inline void tracehook_report_clone(struct pt_regs *regs,
285 unsigned long clone_flags,
286 pid_t pid, struct task_struct *child)
287{
288 if (unlikely(task_ptrace(child))) {
289 /*
290 * It doesn't matter who attached/attaching to this
291 * task, the pending SIGSTOP is right in any case.
292 */
293 sigaddset(&child->pending.signal, SIGSTOP);
294 set_tsk_thread_flag(child, TIF_SIGPENDING);
295 }
296}
297
298/**
299 * tracehook_report_clone_complete - new child is running
300 * @trace: return value from tracehook_prepare_clone()
301 * @regs: parent's user register state
302 * @clone_flags: flags from parent's system call
303 * @pid: new child's PID in the parent's namespace
304 * @child: child task, already running
305 *
306 * This is called just after the child has started running. This is
307 * just before the clone/fork syscall returns, or blocks for vfork
308 * child completion if @clone_flags has the %CLONE_VFORK bit set.
309 * The @child pointer may be invalid if a self-reaping child died and
310 * tracehook_report_clone() took no action to prevent it from self-reaping.
311 *
312 * Called with no locks held.
313 */
314static inline void tracehook_report_clone_complete(int trace,
315 struct pt_regs *regs,
316 unsigned long clone_flags,
317 pid_t pid,
318 struct task_struct *child)
319{
320 if (unlikely(trace))
321 ptrace_event(0, trace, pid);
322}
323
324/**
325 * tracehook_report_vfork_done - vfork parent's child has exited or exec'd
326 * @child: child task, already running
327 * @pid: new child's PID in the parent's namespace
328 *
329 * Called after a %CLONE_VFORK parent has waited for the child to complete.
330 * The clone/vfork system call will return immediately after this.
331 * The @child pointer may be invalid if a self-reaping child died and
332 * tracehook_report_clone() took no action to prevent it from self-reaping.
333 *
334 * Called with no locks held.
335 */
336static inline void tracehook_report_vfork_done(struct task_struct *child,
337 pid_t pid)
338{
339 ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid);
340}
341
342/**
343 * tracehook_prepare_release_task - task is being reaped, clean up tracing
344 * @task: task in %EXIT_DEAD state
345 *
346 * This is called in release_task() just before @task gets finally reaped
347 * and freed. This would be the ideal place to remove and clean up any
348 * tracing-related state for @task.
349 *
350 * Called with no locks held.
351 */
352static inline void tracehook_prepare_release_task(struct task_struct *task)
353{
354}
355
356/**
357 * tracehook_finish_release_task - final tracing clean-up
358 * @task: task in %EXIT_DEAD state
359 *
360 * This is called in release_task() when @task is being in the middle of
361 * being reaped. After this, there must be no tracing entanglements.
362 *
363 * Called with write_lock_irq(&tasklist_lock) held.
364 */
365static inline void tracehook_finish_release_task(struct task_struct *task)
366{
367 ptrace_release_task(task);
368}
369
370/**
371 * tracehook_signal_handler - signal handler setup is complete 133 * tracehook_signal_handler - signal handler setup is complete
372 * @sig: number of signal being delivered 134 * @sig: number of signal being delivered
373 * @info: siginfo_t of signal being delivered 135 * @info: siginfo_t of signal being delivered
@@ -390,151 +152,6 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
390 ptrace_notify(SIGTRAP); 152 ptrace_notify(SIGTRAP);
391} 153}
392 154
393/**
394 * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal
395 * @task: task receiving the signal
396 * @sig: signal number being sent
397 *
398 * Return zero iff tracing doesn't care to examine this ignored signal,
399 * so it can short-circuit normal delivery and never even get queued.
400 *
401 * Called with @task->sighand->siglock held.
402 */
403static inline int tracehook_consider_ignored_signal(struct task_struct *task,
404 int sig)
405{
406 return (task_ptrace(task) & PT_PTRACED) != 0;
407}
408
409/**
410 * tracehook_consider_fatal_signal - suppress special handling of fatal signal
411 * @task: task receiving the signal
412 * @sig: signal number being sent
413 *
414 * Return nonzero to prevent special handling of this termination signal.
415 * Normally handler for signal is %SIG_DFL. It can be %SIG_IGN if @sig is
416 * ignored, in which case force_sig() is about to reset it to %SIG_DFL.
417 * When this returns zero, this signal might cause a quick termination
418 * that does not give the debugger a chance to intercept the signal.
419 *
420 * Called with or without @task->sighand->siglock held.
421 */
422static inline int tracehook_consider_fatal_signal(struct task_struct *task,
423 int sig)
424{
425 return (task_ptrace(task) & PT_PTRACED) != 0;
426}
427
428/**
429 * tracehook_force_sigpending - let tracing force signal_pending(current) on
430 *
431 * Called when recomputing our signal_pending() flag. Return nonzero
432 * to force the signal_pending() flag on, so that tracehook_get_signal()
433 * will be called before the next return to user mode.
434 *
435 * Called with @current->sighand->siglock held.
436 */
437static inline int tracehook_force_sigpending(void)
438{
439 return 0;
440}
441
442/**
443 * tracehook_get_signal - deliver synthetic signal to traced task
444 * @task: @current
445 * @regs: task_pt_regs(@current)
446 * @info: details of synthetic signal
447 * @return_ka: sigaction for synthetic signal
448 *
449 * Return zero to check for a real pending signal normally.
450 * Return -1 after releasing the siglock to repeat the check.
451 * Return a signal number to induce an artificial signal delivery,
452 * setting *@info and *@return_ka to specify its details and behavior.
453 *
454 * The @return_ka->sa_handler value controls the disposition of the
455 * signal, no matter the signal number. For %SIG_DFL, the return value
456 * is a representative signal to indicate the behavior (e.g. %SIGTERM
457 * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop,
458 * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number
459 * reported will be @info->si_signo instead.
460 *
461 * Called with @task->sighand->siglock held, before dequeuing pending signals.
462 */
463static inline int tracehook_get_signal(struct task_struct *task,
464 struct pt_regs *regs,
465 siginfo_t *info,
466 struct k_sigaction *return_ka)
467{
468 return 0;
469}
470
471/**
472 * tracehook_finish_jctl - report about return from job control stop
473 *
474 * This is called by do_signal_stop() after wakeup.
475 */
476static inline void tracehook_finish_jctl(void)
477{
478}
479
480#define DEATH_REAP -1
481#define DEATH_DELAYED_GROUP_LEADER -2
482
483/**
484 * tracehook_notify_death - task is dead, ready to notify parent
485 * @task: @current task now exiting
486 * @death_cookie: value to pass to tracehook_report_death()
487 * @group_dead: nonzero if this was the last thread in the group to die
488 *
489 * A return value >= 0 means call do_notify_parent() with that signal
490 * number. Negative return value can be %DEATH_REAP to self-reap right
491 * now, or %DEATH_DELAYED_GROUP_LEADER to a zombie without notifying our
492 * parent. Note that a return value of 0 means a do_notify_parent() call
493 * that sends no signal, but still wakes up a parent blocked in wait*().
494 *
495 * Called with write_lock_irq(&tasklist_lock) held.
496 */
497static inline int tracehook_notify_death(struct task_struct *task,
498 void **death_cookie, int group_dead)
499{
500 if (task_detached(task))
501 return task->ptrace ? SIGCHLD : DEATH_REAP;
502
503 /*
504 * If something other than our normal parent is ptracing us, then
505 * send it a SIGCHLD instead of honoring exit_signal. exit_signal
506 * only has special meaning to our real parent.
507 */
508 if (thread_group_empty(task) && !ptrace_reparented(task))
509 return task->exit_signal;
510
511 return task->ptrace ? SIGCHLD : DEATH_DELAYED_GROUP_LEADER;
512}
513
514/**
515 * tracehook_report_death - task is dead and ready to be reaped
516 * @task: @current task now exiting
517 * @signal: return value from tracehook_notify_death()
518 * @death_cookie: value passed back from tracehook_notify_death()
519 * @group_dead: nonzero if this was the last thread in the group to die
520 *
521 * Thread has just become a zombie or is about to self-reap. If positive,
522 * @signal is the signal number just sent to the parent (usually %SIGCHLD).
523 * If @signal is %DEATH_REAP, this thread will self-reap. If @signal is
524 * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie.
525 * The @death_cookie was passed back by tracehook_notify_death().
526 *
527 * If normal reaping is not inhibited, @task->exit_state might be changing
528 * in parallel.
529 *
530 * Called without locks.
531 */
532static inline void tracehook_report_death(struct task_struct *task,
533 int signal, void *death_cookie,
534 int group_dead)
535{
536}
537
538#ifdef TIF_NOTIFY_RESUME 155#ifdef TIF_NOTIFY_RESUME
539/** 156/**
540 * set_notify_resume - cause tracehook_notify_resume() to be called 157 * set_notify_resume - cause tracehook_notify_resume() to be called