Diffstat (limited to 'kernel/ptrace.c')

-rw-r--r--	kernel/ptrace.c	303
1 file changed, 163 insertions, 140 deletions

diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index c9cf48b21f05..23bd09cd042e 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -21,22 +21,10 @@
 #include <linux/audit.h>
 #include <linux/pid_namespace.h>
 #include <linux/syscalls.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 
 /*
- * Initialize a new task whose father had been ptraced.
- *
- * Called from copy_process().
- */
-void ptrace_fork(struct task_struct *child, unsigned long clone_flags)
-{
-	arch_ptrace_fork(child, clone_flags);
-}
-
-/*
  * ptrace a task: make the debugger its new parent and
  * move it to the ptrace list.
  *
@@ -48,7 +36,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 	list_add(&child->ptrace_entry, &new_parent->ptraced);
 	child->parent = new_parent;
 }
-
+
 /*
  * Turn a tracing stop into a normal stop now, since with no tracer there
  * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
@@ -60,11 +48,15 @@ static void ptrace_untrace(struct task_struct *child)
 {
 	spin_lock(&child->sighand->siglock);
 	if (task_is_traced(child)) {
-		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
+		/*
+		 * If the group stop is completed or in progress,
+		 * this thread was already counted as stopped.
+		 */
+		if (child->signal->flags & SIGNAL_STOP_STOPPED ||
+		    child->signal->group_stop_count)
 			__set_task_state(child, TASK_STOPPED);
-		} else {
+		else
 			signal_wake_up(child, 1);
-		}
 	}
 	spin_unlock(&child->sighand->siglock);
 }
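
The rewritten check compresses to a single predicate: a task being released from a tracing stop may be left in TASK_STOPPED only if its thread group has a stop completed (SIGNAL_STOP_STOPPED) or still in progress (non-zero group_stop_count), because in both cases the thread was already counted as stopped; otherwise it must be woken, since no tracer remains to resume it. A minimal restatement of that predicate, for illustration only (this helper is not part of the patch; it assumes ->siglock is held, as in ptrace_untrace()):

	/* Illustrative helper, not in the patch: may a thread of @sig
	 * stay in TASK_STOPPED once its tracer goes away?  Caller must
	 * hold ->siglock. */
	static bool group_stop_in_effect(struct signal_struct *sig)
	{
		return (sig->flags & SIGNAL_STOP_STOPPED) ||
		       sig->group_stop_count != 0;
	}
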
@@ -160,7 +152,7 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 	if (!dumpable && !capable(CAP_SYS_PTRACE))
 		return -EPERM;
 
-	return security_ptrace_may_access(task, mode);
+	return security_ptrace_access_check(task, mode);
 }
 
 bool ptrace_may_access(struct task_struct *task, unsigned int mode)
@@ -169,84 +161,143 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 	task_lock(task);
 	err = __ptrace_may_access(task, mode);
 	task_unlock(task);
-	return (!err ? true : false);
+	return !err;
 }
 
 int ptrace_attach(struct task_struct *task)
 {
 	int retval;
-	unsigned long flags;
 
 	audit_ptrace(task);
 
 	retval = -EPERM;
+	if (unlikely(task->flags & PF_KTHREAD))
+		goto out;
 	if (same_thread_group(task, current))
 		goto out;
 
-	/* Protect exec's credential calculations against our interference;
-	 * SUID, SGID and LSM creds get determined differently under ptrace.
+	/*
+	 * Protect exec's credential calculations against our
+	 * interference; SUID, SGID and LSM creds get determined differently
+	 * under ptrace.
 	 */
-	retval = mutex_lock_interruptible(&current->cred_exec_mutex);
-	if (retval < 0)
+	retval = -ERESTARTNOINTR;
+	if (mutex_lock_interruptible(&task->cred_guard_mutex))
 		goto out;
 
-	retval = -EPERM;
-repeat:
-	/*
-	 * Nasty, nasty.
-	 *
-	 * We want to hold both the task-lock and the
-	 * tasklist_lock for writing at the same time.
-	 * But that's against the rules (tasklist_lock
-	 * is taken for reading by interrupts on other
-	 * cpu's that may have task_lock).
-	 */
 	task_lock(task);
-	if (!write_trylock_irqsave(&tasklist_lock, flags)) {
-		task_unlock(task);
-		do {
-			cpu_relax();
-		} while (!write_can_lock(&tasklist_lock));
-		goto repeat;
-	}
-
-	if (!task->mm)
-		goto bad;
-	/* the same process cannot be attached many times */
-	if (task->ptrace & PT_PTRACED)
-		goto bad;
 	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
+	task_unlock(task);
 	if (retval)
-		goto bad;
+		goto unlock_creds;
 
-	/* Go */
-	task->ptrace |= PT_PTRACED;
+	write_lock_irq(&tasklist_lock);
+	retval = -EPERM;
+	if (unlikely(task->exit_state))
+		goto unlock_tasklist;
+	if (task->ptrace)
+		goto unlock_tasklist;
+
+	task->ptrace = PT_PTRACED;
 	if (capable(CAP_SYS_PTRACE))
 		task->ptrace |= PT_PTRACE_CAP;
 
 	__ptrace_link(task, current);
-
 	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
-bad:
-	write_unlock_irqrestore(&tasklist_lock, flags);
-	task_unlock(task);
-	mutex_unlock(&current->cred_exec_mutex);
+
+	retval = 0;
+unlock_tasklist:
+	write_unlock_irq(&tasklist_lock);
+unlock_creds:
+	mutex_unlock(&task->cred_guard_mutex);
 out:
 	return retval;
 }
 
-static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
+/**
+ * ptrace_traceme -- helper for PTRACE_TRACEME
+ *
+ * Performs checks and sets PT_PTRACED.
+ * Should be used by all ptrace implementations for PTRACE_TRACEME.
+ */
+int ptrace_traceme(void)
 {
-	child->exit_code = data;
-	/* .. re-parent .. */
-	__ptrace_unlink(child);
-	/* .. and wake it up. */
-	if (child->exit_state != EXIT_ZOMBIE)
-		wake_up_process(child);
+	int ret = -EPERM;
+
+	write_lock_irq(&tasklist_lock);
+	/* Are we already being traced? */
+	if (!current->ptrace) {
+		ret = security_ptrace_traceme(current->parent);
+		/*
+		 * Check PF_EXITING to ensure ->real_parent has not passed
+		 * exit_ptrace(). Otherwise we don't report the error but
+		 * pretend ->real_parent untraces us right after return.
+		 */
+		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
+			current->ptrace = PT_PTRACED;
+			__ptrace_link(current, current->real_parent);
+		}
+	}
+	write_unlock_irq(&tasklist_lock);
+
+	return ret;
+}
+
+/*
+ * Called with irqs disabled, returns true if childs should reap themselves.
+ */
+static int ignoring_children(struct sighand_struct *sigh)
+{
+	int ret;
+	spin_lock(&sigh->siglock);
+	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
+	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
+	spin_unlock(&sigh->siglock);
+	return ret;
+}
+
+/*
+ * Called with tasklist_lock held for writing.
+ * Unlink a traced task, and clean it up if it was a traced zombie.
+ * Return true if it needs to be reaped with release_task().
+ * (We can't call release_task() here because we already hold tasklist_lock.)
+ *
+ * If it's a zombie, our attachedness prevented normal parent notification
+ * or self-reaping. Do notification now if it would have happened earlier.
+ * If it should reap itself, return true.
+ *
+ * If it's our own child, there is no notification to do. But if our normal
+ * children self-reap, then this child was prevented by ptrace and we must
+ * reap it now, in that case we must also wake up sub-threads sleeping in
+ * do_wait().
+ */
+static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
+{
+	__ptrace_unlink(p);
+
+	if (p->exit_state == EXIT_ZOMBIE) {
+		if (!task_detached(p) && thread_group_empty(p)) {
+			if (!same_thread_group(p->real_parent, tracer))
+				do_notify_parent(p, p->exit_signal);
+			else if (ignoring_children(tracer->sighand)) {
+				__wake_up_parent(p, tracer);
+				p->exit_signal = -1;
+			}
+		}
+		if (task_detached(p)) {
+			/* Mark it as in the process of being reaped. */
+			p->exit_state = EXIT_DEAD;
+			return true;
+		}
+	}
+
+	return false;
 }
 
 int ptrace_detach(struct task_struct *child, unsigned int data)
 {
+	bool dead = false;
+
 	if (!valid_signal(data))
 		return -EIO;
 
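
The ptrace_attach() rewrite drops the old trylock-and-spin dance (task_lock() nested inside a write_trylock_irqsave() retry loop) in favor of three locks taken one after another, never nested: cred_guard_mutex to keep exec's credential calculations stable, task_lock() only around __ptrace_may_access(), and tasklist_lock only around the actual state change. The new PF_KTHREAD test also takes over from the old "!task->mm" check as the way to refuse attaching to kernel threads. The user-space handshake is unchanged: attach queues a SIGSTOP and the tracer waits for the stop. A minimal sketch of that handshake (illustrative only; error handling abbreviated):

	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	/* Attach to @pid, wait for the SIGSTOP-induced stop, detach. */
	static int attach_probe(pid_t pid)
	{
		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
			perror("PTRACE_ATTACH"); /* e.g. EPERM for a kernel thread */
			return -1;
		}
		if (waitpid(pid, NULL, 0) == -1) /* wait for the stop */
			return -1;
		puts("tracee stopped");
		return (int)ptrace(PTRACE_DETACH, pid, NULL, NULL);
	}
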
@@ -255,14 +306,47 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
 	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 
 	write_lock_irq(&tasklist_lock);
-	/* protect against de_thread()->release_task() */
-	if (child->ptrace)
-		__ptrace_detach(child, data);
+	/*
+	 * This child can be already killed. Make sure de_thread() or
+	 * our sub-thread doing do_wait() didn't do release_task() yet.
+	 */
+	if (child->ptrace) {
+		child->exit_code = data;
+		dead = __ptrace_detach(current, child);
+		if (!child->exit_state)
+			wake_up_process(child);
+	}
 	write_unlock_irq(&tasklist_lock);
 
+	if (unlikely(dead))
+		release_task(child);
+
 	return 0;
 }
 
+/*
+ * Detach all tasks we were using ptrace on.
+ */
+void exit_ptrace(struct task_struct *tracer)
+{
+	struct task_struct *p, *n;
+	LIST_HEAD(ptrace_dead);
+
+	write_lock_irq(&tasklist_lock);
+	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
+		if (__ptrace_detach(tracer, p))
+			list_add(&p->ptrace_entry, &ptrace_dead);
+	}
+	write_unlock_irq(&tasklist_lock);
+
+	BUG_ON(!list_empty(&tracer->ptraced));
+
+	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
+		list_del_init(&p->ptrace_entry);
+		release_task(p);
+	}
+}
+
 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
 {
 	int copied = 0;
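
The new exit_ptrace() shows why __ptrace_detach() only reports dead tracees instead of reaping them: release_task() acquires tasklist_lock itself, so it must not be called while the caller already holds that lock for writing. Dead tracees are therefore parked on a local list and released after the lock is dropped. The pattern, reduced to a sketch (illustrative only; @tracer, @p, @n and @ptrace_dead as declared in exit_ptrace() above):

	/* Collect under the lock, act after dropping it. */
	write_lock_irq(&tasklist_lock);
	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry)
		if (__ptrace_detach(tracer, p))	/* unlinked, marked EXIT_DEAD */
			list_add(&p->ptrace_entry, &ptrace_dead);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);		/* takes tasklist_lock itself */
	}
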
@@ -283,7 +367,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
 		copied += retval;
 		src += retval;
 		dst += retval;
-		len -= retval;
+		len -= retval;
 	}
 	return copied;
 }
@@ -308,7 +392,7 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
 		copied += retval;
 		src += retval;
 		dst += retval;
-		len -= retval;
+		len -= retval;
 	}
 	return copied;
 }
@@ -343,37 +427,33 @@ static int ptrace_setoptions(struct task_struct *child, long data)
 
 static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
 {
+	unsigned long flags;
 	int error = -ESRCH;
 
-	read_lock(&tasklist_lock);
-	if (likely(child->sighand != NULL)) {
+	if (lock_task_sighand(child, &flags)) {
 		error = -EINVAL;
-		spin_lock_irq(&child->sighand->siglock);
 		if (likely(child->last_siginfo != NULL)) {
 			*info = *child->last_siginfo;
 			error = 0;
 		}
-		spin_unlock_irq(&child->sighand->siglock);
+		unlock_task_sighand(child, &flags);
 	}
-	read_unlock(&tasklist_lock);
 	return error;
 }
 
 static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
 {
+	unsigned long flags;
 	int error = -ESRCH;
 
-	read_lock(&tasklist_lock);
-	if (likely(child->sighand != NULL)) {
+	if (lock_task_sighand(child, &flags)) {
 		error = -EINVAL;
-		spin_lock_irq(&child->sighand->siglock);
 		if (likely(child->last_siginfo != NULL)) {
 			*child->last_siginfo = *info;
 			error = 0;
 		}
-		spin_unlock_irq(&child->sighand->siglock);
+		unlock_task_sighand(child, &flags);
 	}
-	read_unlock(&tasklist_lock);
 	return error;
 }
 
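
Both siginfo helpers drop the tasklist_lock/->sighand checks in favor of lock_task_sighand(), which validates and locks ->sighand under RCU. Its shape is roughly the following (a simplified sketch with an invented name, not the real helper; the actual implementation in kernel/signal.c also retries if ->sighand changes before the lock is acquired):

	static struct sighand_struct *sighand_lock_sketch(struct task_struct *tsk,
							  unsigned long *flags)
	{
		struct sighand_struct *sighand;

		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand); /* NULL once the task is gone */
		if (sighand)
			spin_lock_irqsave(&sighand->siglock, *flags);
		rcu_read_unlock();

		return sighand;
	}
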
@@ -421,9 +501,9 @@ static int ptrace_resume(struct task_struct *child, long request, long data)
 		if (unlikely(!arch_has_single_step()))
 			return -EIO;
 		user_enable_single_step(child);
-	}
-	else
+	} else {
 		user_disable_single_step(child);
+	}
 
 	child->exit_code = data;
 	wake_up_process(child);
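
In ptrace_resume(), single-stepping goes through user_enable_single_step() and user_disable_single_step(), and architectures without hardware single-step fail the request with -EIO via arch_has_single_step(). Seen from user space (illustrative sketch, assuming @pid is an already-stopped tracee):

	#include <errno.h>
	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	/* Execute one instruction in the stopped tracee @pid and wait
	 * for the resulting trap. */
	static int step_once(pid_t pid)
	{
		if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) == -1) {
			if (errno == EIO)
				fputs("no hardware single-step here\n", stderr);
			return -1;
		}
		return waitpid(pid, NULL, 0) == -1 ? -1 : 0;
	}
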
@@ -500,71 +580,16 @@ int ptrace_request(struct task_struct *child, long request,
 	return ret;
 }
 
-/**
- * ptrace_traceme -- helper for PTRACE_TRACEME
- *
- * Performs checks and sets PT_PTRACED.
- * Should be used by all ptrace implementations for PTRACE_TRACEME.
- */
-int ptrace_traceme(void)
-{
-	int ret = -EPERM;
-
-	/*
-	 * Are we already being traced?
-	 */
-repeat:
-	task_lock(current);
-	if (!(current->ptrace & PT_PTRACED)) {
-		/*
-		 * See ptrace_attach() comments about the locking here.
-		 */
-		unsigned long flags;
-		if (!write_trylock_irqsave(&tasklist_lock, flags)) {
-			task_unlock(current);
-			do {
-				cpu_relax();
-			} while (!write_can_lock(&tasklist_lock));
-			goto repeat;
-		}
-
-		ret = security_ptrace_traceme(current->parent);
-
-		/*
-		 * Set the ptrace bit in the process ptrace flags.
-		 * Then link us on our parent's ptraced list.
-		 */
-		if (!ret) {
-			current->ptrace |= PT_PTRACED;
-			__ptrace_link(current, current->real_parent);
-		}
-
-		write_unlock_irqrestore(&tasklist_lock, flags);
-	}
-	task_unlock(current);
-	return ret;
-}
-
-/**
- * ptrace_get_task_struct -- grab a task struct reference for ptrace
- * @pid: process id to grab a task_struct reference of
- *
- * This function is a helper for ptrace implementations. It checks
- * permissions and then grabs a task struct for use of the actual
- * ptrace implementation.
- *
- * Returns the task_struct for @pid or an ERR_PTR() on failure.
- */
-struct task_struct *ptrace_get_task_struct(pid_t pid)
+static struct task_struct *ptrace_get_task_struct(pid_t pid)
 {
 	struct task_struct *child;
 
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	child = find_task_by_vpid(pid);
 	if (child)
 		get_task_struct(child);
+	rcu_read_unlock();
 
-	read_unlock(&tasklist_lock);
 	if (!child)
 		return ERR_PTR(-ESRCH);
 	return child;
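
ptrace_get_task_struct() now performs the PID lookup under RCU instead of the tasklist_lock; find_task_by_vpid() must run inside rcu_read_lock(), and the get_task_struct() taken inside the read-side section is what keeps the task_struct valid after rcu_read_unlock(). Callers pair the lookup with put_task_struct(), along these lines (illustrative):

	struct task_struct *child = ptrace_get_task_struct(pid);

	if (IS_ERR(child))
		return PTR_ERR(child);
	/* ... use child; the reference pins it even if it exits ... */
	put_task_struct(child);
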
@@ -612,8 +637,6 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
 		goto out_put_task_struct;
 
 	ret = arch_ptrace(child, request, addr, data);
-	if (ret < 0)
-		goto out_put_task_struct;
 
  out_put_task_struct:
 	put_task_struct(child);