Diffstat (limited to 'kernel/ptrace.c')
 kernel/ptrace.c | 103 ++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 88 insertions(+), 15 deletions(-)
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index c9cf48b21f05..aaad0ec34194 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -60,11 +60,15 @@ static void ptrace_untrace(struct task_struct *child)
 {
 	spin_lock(&child->sighand->siglock);
 	if (task_is_traced(child)) {
-		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
+		/*
+		 * If the group stop is completed or in progress,
+		 * this thread was already counted as stopped.
+		 */
+		if (child->signal->flags & SIGNAL_STOP_STOPPED ||
+		    child->signal->group_stop_count)
 			__set_task_state(child, TASK_STOPPED);
-		} else {
+		else
 			signal_wake_up(child, 1);
-		}
 	}
 	spin_unlock(&child->sighand->siglock);
 }
@@ -235,18 +239,58 @@ out:
 	return retval;
 }
 
-static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
+/*
+ * Called with irqs disabled, returns true if childs should reap themselves.
+ */
+static int ignoring_children(struct sighand_struct *sigh)
 {
-	child->exit_code = data;
-	/* .. re-parent .. */
-	__ptrace_unlink(child);
-	/* .. and wake it up. */
-	if (child->exit_state != EXIT_ZOMBIE)
-		wake_up_process(child);
+	int ret;
+	spin_lock(&sigh->siglock);
+	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
+	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
+	spin_unlock(&sigh->siglock);
+	return ret;
+}
+
+/*
+ * Called with tasklist_lock held for writing.
+ * Unlink a traced task, and clean it up if it was a traced zombie.
+ * Return true if it needs to be reaped with release_task().
+ * (We can't call release_task() here because we already hold tasklist_lock.)
+ *
+ * If it's a zombie, our attachedness prevented normal parent notification
+ * or self-reaping.  Do notification now if it would have happened earlier.
+ * If it should reap itself, return true.
+ *
+ * If it's our own child, there is no notification to do.
+ * But if our normal children self-reap, then this child
+ * was prevented by ptrace and we must reap it now.
+ */
+static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
+{
+	__ptrace_unlink(p);
+
+	if (p->exit_state == EXIT_ZOMBIE) {
+		if (!task_detached(p) && thread_group_empty(p)) {
+			if (!same_thread_group(p->real_parent, tracer))
+				do_notify_parent(p, p->exit_signal);
+			else if (ignoring_children(tracer->sighand))
+				p->exit_signal = -1;
+		}
+		if (task_detached(p)) {
+			/* Mark it as in the process of being reaped. */
+			p->exit_state = EXIT_DEAD;
+			return true;
+		}
+	}
+
+	return false;
 }
 
 int ptrace_detach(struct task_struct *child, unsigned int data)
 {
+	bool dead = false;
+
 	if (!valid_signal(data))
 		return -EIO;
 
@@ -255,14 +299,45 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
 	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 
 	write_lock_irq(&tasklist_lock);
-	/* protect against de_thread()->release_task() */
-	if (child->ptrace)
-		__ptrace_detach(child, data);
+	/*
+	 * This child can be already killed. Make sure de_thread() or
+	 * our sub-thread doing do_wait() didn't do release_task() yet.
+	 */
+	if (child->ptrace) {
+		child->exit_code = data;
+		dead = __ptrace_detach(current, child);
+	}
 	write_unlock_irq(&tasklist_lock);
 
+	if (unlikely(dead))
+		release_task(child);
+
 	return 0;
 }
 
+/*
+ * Detach all tasks we were using ptrace on.
+ */
+void exit_ptrace(struct task_struct *tracer)
+{
+	struct task_struct *p, *n;
+	LIST_HEAD(ptrace_dead);
+
+	write_lock_irq(&tasklist_lock);
+	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
+		if (__ptrace_detach(tracer, p))
+			list_add(&p->ptrace_entry, &ptrace_dead);
+	}
+	write_unlock_irq(&tasklist_lock);
+
+	BUG_ON(!list_empty(&tracer->ptraced));
+
+	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
+		list_del_init(&p->ptrace_entry);
+		release_task(p);
+	}
+}
+
 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
 {
 	int copied = 0;
@@ -612,8 +687,6 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
 		goto out_put_task_struct;
 
 	ret = arch_ptrace(child, request, addr, data);
-	if (ret < 0)
-		goto out_put_task_struct;
 
  out_put_task_struct:
 	put_task_struct(child);
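
For context, a minimal userspace sketch of the detach path changed above (illustrative only, not part of this commit): the fourth argument to ptrace(PTRACE_DETACH, ...) is the "data" value that ptrace_detach() checks with valid_signal() and stores in child->exit_code before unlinking the tracee.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(int argc, char *argv[])
{
	pid_t pid;
	int status;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	pid = (pid_t)atol(argv[1]);

	/* Attach: the target is stopped and becomes our tracee. */
	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
		perror("PTRACE_ATTACH");
		return 1;
	}
	/* Wait until the tracee has actually stopped. */
	if (waitpid(pid, &status, 0) == -1) {
		perror("waitpid");
		return 1;
	}

	/*
	 * Detach, asking the kernel to deliver SIGCONT; the signal number
	 * travels through the "data" argument validated by valid_signal().
	 */
	if (ptrace(PTRACE_DETACH, pid, NULL, (void *)(long)SIGCONT) == -1) {
		perror("PTRACE_DETACH");
		return 1;
	}
	return 0;
}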
