about summary refs log tree commit diff stats
path: root/kernel/exit.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/exit.c')
-rw-r--r--  kernel/exit.c  342
1 files changed, 133 insertions, 209 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index eb9934a82fc1..3b893e78ce61 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -293,26 +293,27 @@ static void reparent_to_kthreadd(void)
293 switch_uid(INIT_USER); 293 switch_uid(INIT_USER);
294} 294}
295 295
296void __set_special_pids(pid_t session, pid_t pgrp) 296void __set_special_pids(struct pid *pid)
297{ 297{
298 struct task_struct *curr = current->group_leader; 298 struct task_struct *curr = current->group_leader;
299 pid_t nr = pid_nr(pid);
299 300
300 if (task_session_nr(curr) != session) { 301 if (task_session(curr) != pid) {
301 detach_pid(curr, PIDTYPE_SID); 302 detach_pid(curr, PIDTYPE_SID);
302 set_task_session(curr, session); 303 attach_pid(curr, PIDTYPE_SID, pid);
303 attach_pid(curr, PIDTYPE_SID, find_pid(session)); 304 set_task_session(curr, nr);
304 } 305 }
305 if (task_pgrp_nr(curr) != pgrp) { 306 if (task_pgrp(curr) != pid) {
306 detach_pid(curr, PIDTYPE_PGID); 307 detach_pid(curr, PIDTYPE_PGID);
307 set_task_pgrp(curr, pgrp); 308 attach_pid(curr, PIDTYPE_PGID, pid);
308 attach_pid(curr, PIDTYPE_PGID, find_pid(pgrp)); 309 set_task_pgrp(curr, nr);
309 } 310 }
310} 311}
311 312
312static void set_special_pids(pid_t session, pid_t pgrp) 313static void set_special_pids(struct pid *pid)
313{ 314{
314 write_lock_irq(&tasklist_lock); 315 write_lock_irq(&tasklist_lock);
315 __set_special_pids(session, pgrp); 316 __set_special_pids(pid);
316 write_unlock_irq(&tasklist_lock); 317 write_unlock_irq(&tasklist_lock);
317} 318}
318 319
@@ -383,7 +384,11 @@ void daemonize(const char *name, ...)
383 */ 384 */
384 current->flags |= PF_NOFREEZE; 385 current->flags |= PF_NOFREEZE;
385 386
386 set_special_pids(1, 1); 387 if (current->nsproxy != &init_nsproxy) {
388 get_nsproxy(&init_nsproxy);
389 switch_task_namespaces(current, &init_nsproxy);
390 }
391 set_special_pids(&init_struct_pid);
387 proc_clear_tty(current); 392 proc_clear_tty(current);
388 393
389 /* Block and flush all signals */ 394 /* Block and flush all signals */
@@ -398,11 +403,6 @@ void daemonize(const char *name, ...)
398 current->fs = fs; 403 current->fs = fs;
399 atomic_inc(&fs->count); 404 atomic_inc(&fs->count);
400 405
401 if (current->nsproxy != init_task.nsproxy) {
402 get_nsproxy(init_task.nsproxy);
403 switch_task_namespaces(current, init_task.nsproxy);
404 }
405
406 exit_files(current); 406 exit_files(current);
407 current->files = init_task.files; 407 current->files = init_task.files;
408 atomic_inc(&current->files->count); 408 atomic_inc(&current->files->count);
@@ -458,7 +458,7 @@ struct files_struct *get_files_struct(struct task_struct *task)
458 return files; 458 return files;
459} 459}
460 460
461void fastcall put_files_struct(struct files_struct *files) 461void put_files_struct(struct files_struct *files)
462{ 462{
463 struct fdtable *fdt; 463 struct fdtable *fdt;
464 464
@@ -745,24 +745,6 @@ static void exit_notify(struct task_struct *tsk)
745 struct task_struct *t; 745 struct task_struct *t;
746 struct pid *pgrp; 746 struct pid *pgrp;
747 747
748 if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
749 && !thread_group_empty(tsk)) {
750 /*
751 * This occurs when there was a race between our exit
752 * syscall and a group signal choosing us as the one to
753 * wake up. It could be that we are the only thread
754 * alerted to check for pending signals, but another thread
755 * should be woken now to take the signal since we will not.
756 * Now we'll wake all the threads in the group just to make
757 * sure someone gets all the pending signals.
758 */
759 spin_lock_irq(&tsk->sighand->siglock);
760 for (t = next_thread(tsk); t != tsk; t = next_thread(t))
761 if (!signal_pending(t) && !(t->flags & PF_EXITING))
762 recalc_sigpending_and_wake(t);
763 spin_unlock_irq(&tsk->sighand->siglock);
764 }
765
766 /* 748 /*
767 * This does two things: 749 * This does two things:
768 * 750 *
@@ -905,7 +887,7 @@ static inline void exit_child_reaper(struct task_struct *tsk)
905 zap_pid_ns_processes(tsk->nsproxy->pid_ns); 887 zap_pid_ns_processes(tsk->nsproxy->pid_ns);
906} 888}
907 889
908fastcall NORET_TYPE void do_exit(long code) 890NORET_TYPE void do_exit(long code)
909{ 891{
910 struct task_struct *tsk = current; 892 struct task_struct *tsk = current;
911 int group_dead; 893 int group_dead;
@@ -947,7 +929,7 @@ fastcall NORET_TYPE void do_exit(long code)
947 schedule(); 929 schedule();
948 } 930 }
949 931
950 tsk->flags |= PF_EXITING; 932 exit_signals(tsk); /* sets PF_EXITING */
951 /* 933 /*
952 * tsk->flags are checked in the futex code to protect against 934 * tsk->flags are checked in the futex code to protect against
953 * an exiting task cleaning up the robust pi futexes. 935 * an exiting task cleaning up the robust pi futexes.
@@ -1108,20 +1090,23 @@ asmlinkage void sys_exit_group(int error_code)
1108 do_group_exit((error_code & 0xff) << 8); 1090 do_group_exit((error_code & 0xff) << 8);
1109} 1091}
1110 1092
1111static int eligible_child(pid_t pid, int options, struct task_struct *p) 1093static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
1094{
1095 struct pid *pid = NULL;
1096 if (type == PIDTYPE_PID)
1097 pid = task->pids[type].pid;
1098 else if (type < PIDTYPE_MAX)
1099 pid = task->group_leader->pids[type].pid;
1100 return pid;
1101}
1102
1103static int eligible_child(enum pid_type type, struct pid *pid, int options,
1104 struct task_struct *p)
1112{ 1105{
1113 int err; 1106 int err;
1114 struct pid_namespace *ns;
1115 1107
1116 ns = current->nsproxy->pid_ns; 1108 if (type < PIDTYPE_MAX) {
1117 if (pid > 0) { 1109 if (task_pid_type(p, type) != pid)
1118 if (task_pid_nr_ns(p, ns) != pid)
1119 return 0;
1120 } else if (!pid) {
1121 if (task_pgrp_nr_ns(p, ns) != task_pgrp_vnr(current))
1122 return 0;
1123 } else if (pid != -1) {
1124 if (task_pgrp_nr_ns(p, ns) != -pid)
1125 return 0; 1110 return 0;
1126 } 1111 }
1127 1112
@@ -1140,18 +1125,16 @@ static int eligible_child(pid_t pid, int options, struct task_struct *p)
1140 if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0)) 1125 if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
1141 && !(options & __WALL)) 1126 && !(options & __WALL))
1142 return 0; 1127 return 0;
1143 /*
1144 * Do not consider thread group leaders that are
1145 * in a non-empty thread group:
1146 */
1147 if (delay_group_leader(p))
1148 return 2;
1149 1128
1150 err = security_task_wait(p); 1129 err = security_task_wait(p);
1151 if (err) 1130 if (likely(!err))
1152 return err; 1131 return 1;
1153 1132
1154 return 1; 1133 if (type != PIDTYPE_PID)
1134 return 0;
1135 /* This child was explicitly requested, abort */
1136 read_unlock(&tasklist_lock);
1137 return err;
1155} 1138}
1156 1139
1157static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid, 1140static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
@@ -1191,20 +1174,13 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
1191{ 1174{
1192 unsigned long state; 1175 unsigned long state;
1193 int retval, status, traced; 1176 int retval, status, traced;
1194 struct pid_namespace *ns; 1177 pid_t pid = task_pid_vnr(p);
1195
1196 ns = current->nsproxy->pid_ns;
1197 1178
1198 if (unlikely(noreap)) { 1179 if (unlikely(noreap)) {
1199 pid_t pid = task_pid_nr_ns(p, ns);
1200 uid_t uid = p->uid; 1180 uid_t uid = p->uid;
1201 int exit_code = p->exit_code; 1181 int exit_code = p->exit_code;
1202 int why, status; 1182 int why, status;
1203 1183
1204 if (unlikely(p->exit_state != EXIT_ZOMBIE))
1205 return 0;
1206 if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
1207 return 0;
1208 get_task_struct(p); 1184 get_task_struct(p);
1209 read_unlock(&tasklist_lock); 1185 read_unlock(&tasklist_lock);
1210 if ((exit_code & 0x7f) == 0) { 1186 if ((exit_code & 0x7f) == 0) {
@@ -1315,11 +1291,11 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
1315 retval = put_user(status, &infop->si_status); 1291 retval = put_user(status, &infop->si_status);
1316 } 1292 }
1317 if (!retval && infop) 1293 if (!retval && infop)
1318 retval = put_user(task_pid_nr_ns(p, ns), &infop->si_pid); 1294 retval = put_user(pid, &infop->si_pid);
1319 if (!retval && infop) 1295 if (!retval && infop)
1320 retval = put_user(p->uid, &infop->si_uid); 1296 retval = put_user(p->uid, &infop->si_uid);
1321 if (!retval) 1297 if (!retval)
1322 retval = task_pid_nr_ns(p, ns); 1298 retval = pid;
1323 1299
1324 if (traced) { 1300 if (traced) {
1325 write_lock_irq(&tasklist_lock); 1301 write_lock_irq(&tasklist_lock);
@@ -1351,21 +1327,38 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
1351 * the lock and this task is uninteresting. If we return nonzero, we have 1327 * the lock and this task is uninteresting. If we return nonzero, we have
1352 * released the lock and the system call should return. 1328 * released the lock and the system call should return.
1353 */ 1329 */
1354static int wait_task_stopped(struct task_struct *p, int delayed_group_leader, 1330static int wait_task_stopped(struct task_struct *p,
1355 int noreap, struct siginfo __user *infop, 1331 int noreap, struct siginfo __user *infop,
1356 int __user *stat_addr, struct rusage __user *ru) 1332 int __user *stat_addr, struct rusage __user *ru)
1357{ 1333{
1358 int retval, exit_code; 1334 int retval, exit_code, why;
1335 uid_t uid = 0; /* unneeded, required by compiler */
1359 pid_t pid; 1336 pid_t pid;
1360 1337
1361 if (!p->exit_code) 1338 exit_code = 0;
1362 return 0; 1339 spin_lock_irq(&p->sighand->siglock);
1363 if (delayed_group_leader && !(p->ptrace & PT_PTRACED) && 1340
1364 p->signal->group_stop_count > 0) 1341 if (unlikely(!task_is_stopped_or_traced(p)))
1342 goto unlock_sig;
1343
1344 if (!(p->ptrace & PT_PTRACED) && p->signal->group_stop_count > 0)
1365 /* 1345 /*
1366 * A group stop is in progress and this is the group leader. 1346 * A group stop is in progress and this is the group leader.
1367 * We won't report until all threads have stopped. 1347 * We won't report until all threads have stopped.
1368 */ 1348 */
1349 goto unlock_sig;
1350
1351 exit_code = p->exit_code;
1352 if (!exit_code)
1353 goto unlock_sig;
1354
1355 if (!noreap)
1356 p->exit_code = 0;
1357
1358 uid = p->uid;
1359unlock_sig:
1360 spin_unlock_irq(&p->sighand->siglock);
1361 if (!exit_code)
1369 return 0; 1362 return 0;
1370 1363
1371 /* 1364 /*
@@ -1375,65 +1368,15 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
1375 * keep holding onto the tasklist_lock while we call getrusage and 1368 * keep holding onto the tasklist_lock while we call getrusage and
1376 * possibly take page faults for user memory. 1369 * possibly take page faults for user memory.
1377 */ 1370 */
1378 pid = task_pid_nr_ns(p, current->nsproxy->pid_ns);
1379 get_task_struct(p); 1371 get_task_struct(p);
1372 pid = task_pid_vnr(p);
1373 why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
1380 read_unlock(&tasklist_lock); 1374 read_unlock(&tasklist_lock);
1381 1375
1382 if (unlikely(noreap)) { 1376 if (unlikely(noreap))
1383 uid_t uid = p->uid;
1384 int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
1385
1386 exit_code = p->exit_code;
1387 if (unlikely(!exit_code) || unlikely(p->exit_state))
1388 goto bail_ref;
1389 return wait_noreap_copyout(p, pid, uid, 1377 return wait_noreap_copyout(p, pid, uid,
1390 why, exit_code, 1378 why, exit_code,
1391 infop, ru); 1379 infop, ru);
1392 }
1393
1394 write_lock_irq(&tasklist_lock);
1395
1396 /*
1397 * This uses xchg to be atomic with the thread resuming and setting
1398 * it. It must also be done with the write lock held to prevent a
1399 * race with the EXIT_ZOMBIE case.
1400 */
1401 exit_code = xchg(&p->exit_code, 0);
1402 if (unlikely(p->exit_state)) {
1403 /*
1404 * The task resumed and then died. Let the next iteration
1405 * catch it in EXIT_ZOMBIE. Note that exit_code might
1406 * already be zero here if it resumed and did _exit(0).
1407 * The task itself is dead and won't touch exit_code again;
1408 * other processors in this function are locked out.
1409 */
1410 p->exit_code = exit_code;
1411 exit_code = 0;
1412 }
1413 if (unlikely(exit_code == 0)) {
1414 /*
1415 * Another thread in this function got to it first, or it
1416 * resumed, or it resumed and then died.
1417 */
1418 write_unlock_irq(&tasklist_lock);
1419bail_ref:
1420 put_task_struct(p);
1421 /*
1422 * We are returning to the wait loop without having successfully
1423 * removed the process and having released the lock. We cannot
1424 * continue, since the "p" task pointer is potentially stale.
1425 *
1426 * Return -EAGAIN, and do_wait() will restart the loop from the
1427 * beginning. Do _not_ re-acquire the lock.
1428 */
1429 return -EAGAIN;
1430 }
1431
1432 /* move to end of parent's list to avoid starvation */
1433 remove_parent(p);
1434 add_parent(p);
1435
1436 write_unlock_irq(&tasklist_lock);
1437 1380
1438 retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; 1381 retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
1439 if (!retval && stat_addr) 1382 if (!retval && stat_addr)
@@ -1443,15 +1386,13 @@ bail_ref:
1443 if (!retval && infop) 1386 if (!retval && infop)
1444 retval = put_user(0, &infop->si_errno); 1387 retval = put_user(0, &infop->si_errno);
1445 if (!retval && infop) 1388 if (!retval && infop)
1446 retval = put_user((short)((p->ptrace & PT_PTRACED) 1389 retval = put_user(why, &infop->si_code);
1447 ? CLD_TRAPPED : CLD_STOPPED),
1448 &infop->si_code);
1449 if (!retval && infop) 1390 if (!retval && infop)
1450 retval = put_user(exit_code, &infop->si_status); 1391 retval = put_user(exit_code, &infop->si_status);
1451 if (!retval && infop) 1392 if (!retval && infop)
1452 retval = put_user(pid, &infop->si_pid); 1393 retval = put_user(pid, &infop->si_pid);
1453 if (!retval && infop) 1394 if (!retval && infop)
1454 retval = put_user(p->uid, &infop->si_uid); 1395 retval = put_user(uid, &infop->si_uid);
1455 if (!retval) 1396 if (!retval)
1456 retval = pid; 1397 retval = pid;
1457 put_task_struct(p); 1398 put_task_struct(p);
@@ -1473,7 +1414,6 @@ static int wait_task_continued(struct task_struct *p, int noreap,
1473 int retval; 1414 int retval;
1474 pid_t pid; 1415 pid_t pid;
1475 uid_t uid; 1416 uid_t uid;
1476 struct pid_namespace *ns;
1477 1417
1478 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) 1418 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
1479 return 0; 1419 return 0;
@@ -1488,8 +1428,7 @@ static int wait_task_continued(struct task_struct *p, int noreap,
1488 p->signal->flags &= ~SIGNAL_STOP_CONTINUED; 1428 p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
1489 spin_unlock_irq(&p->sighand->siglock); 1429 spin_unlock_irq(&p->sighand->siglock);
1490 1430
1491 ns = current->nsproxy->pid_ns; 1431 pid = task_pid_vnr(p);
1492 pid = task_pid_nr_ns(p, ns);
1493 uid = p->uid; 1432 uid = p->uid;
1494 get_task_struct(p); 1433 get_task_struct(p);
1495 read_unlock(&tasklist_lock); 1434 read_unlock(&tasklist_lock);
@@ -1500,7 +1439,7 @@ static int wait_task_continued(struct task_struct *p, int noreap,
1500 if (!retval && stat_addr) 1439 if (!retval && stat_addr)
1501 retval = put_user(0xffff, stat_addr); 1440 retval = put_user(0xffff, stat_addr);
1502 if (!retval) 1441 if (!retval)
1503 retval = task_pid_nr_ns(p, ns); 1442 retval = pid;
1504 } else { 1443 } else {
1505 retval = wait_noreap_copyout(p, pid, uid, 1444 retval = wait_noreap_copyout(p, pid, uid,
1506 CLD_CONTINUED, SIGCONT, 1445 CLD_CONTINUED, SIGCONT,
@@ -1511,101 +1450,63 @@ static int wait_task_continued(struct task_struct *p, int noreap,
1511 return retval; 1450 return retval;
1512} 1451}
1513 1452
1514 1453static long do_wait(enum pid_type type, struct pid *pid, int options,
1515static inline int my_ptrace_child(struct task_struct *p) 1454 struct siginfo __user *infop, int __user *stat_addr,
1516{ 1455 struct rusage __user *ru)
1517 if (!(p->ptrace & PT_PTRACED))
1518 return 0;
1519 if (!(p->ptrace & PT_ATTACHED))
1520 return 1;
1521 /*
1522 * This child was PTRACE_ATTACH'd. We should be seeing it only if
1523 * we are the attacher. If we are the real parent, this is a race
1524 * inside ptrace_attach. It is waiting for the tasklist_lock,
1525 * which we have to switch the parent links, but has already set
1526 * the flags in p->ptrace.
1527 */
1528 return (p->parent != p->real_parent);
1529}
1530
1531static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
1532 int __user *stat_addr, struct rusage __user *ru)
1533{ 1456{
1534 DECLARE_WAITQUEUE(wait, current); 1457 DECLARE_WAITQUEUE(wait, current);
1535 struct task_struct *tsk; 1458 struct task_struct *tsk;
1536 int flag, retval; 1459 int flag, retval;
1537 int allowed, denied;
1538 1460
1539 add_wait_queue(&current->signal->wait_chldexit,&wait); 1461 add_wait_queue(&current->signal->wait_chldexit,&wait);
1540repeat: 1462repeat:
1463 /* If there is nothing that can match our criteria just get out */
1464 retval = -ECHILD;
1465 if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type])))
1466 goto end;
1467
1541 /* 1468 /*
1542 * We will set this flag if we see any child that might later 1469 * We will set this flag if we see any child that might later
1543 * match our criteria, even if we are not able to reap it yet. 1470 * match our criteria, even if we are not able to reap it yet.
1544 */ 1471 */
1545 flag = 0; 1472 flag = retval = 0;
1546 allowed = denied = 0;
1547 current->state = TASK_INTERRUPTIBLE; 1473 current->state = TASK_INTERRUPTIBLE;
1548 read_lock(&tasklist_lock); 1474 read_lock(&tasklist_lock);
1549 tsk = current; 1475 tsk = current;
1550 do { 1476 do {
1551 struct task_struct *p; 1477 struct task_struct *p;
1552 int ret;
1553 1478
1554 list_for_each_entry(p, &tsk->children, sibling) { 1479 list_for_each_entry(p, &tsk->children, sibling) {
1555 ret = eligible_child(pid, options, p); 1480 int ret = eligible_child(type, pid, options, p);
1556 if (!ret) 1481 if (!ret)
1557 continue; 1482 continue;
1558 1483
1559 if (unlikely(ret < 0)) { 1484 if (unlikely(ret < 0)) {
1560 denied = ret; 1485 retval = ret;
1561 continue; 1486 } else if (task_is_stopped_or_traced(p)) {
1562 }
1563 allowed = 1;
1564
1565 if (task_is_stopped_or_traced(p)) {
1566 /* 1487 /*
1567 * It's stopped now, so it might later 1488 * It's stopped now, so it might later
1568 * continue, exit, or stop again. 1489 * continue, exit, or stop again.
1569 *
1570 * When we hit the race with PTRACE_ATTACH, we
1571 * will not report this child. But the race
1572 * means it has not yet been moved to our
1573 * ptrace_children list, so we need to set the
1574 * flag here to avoid a spurious ECHILD when
1575 * the race happens with the only child.
1576 */ 1490 */
1577 flag = 1; 1491 flag = 1;
1492 if (!(p->ptrace & PT_PTRACED) &&
1493 !(options & WUNTRACED))
1494 continue;
1578 1495
1579 if (!my_ptrace_child(p)) { 1496 retval = wait_task_stopped(p,
1580 if (task_is_traced(p))
1581 continue;
1582 if (!(options & WUNTRACED))
1583 continue;
1584 }
1585
1586 retval = wait_task_stopped(p, ret == 2,
1587 (options & WNOWAIT), infop, 1497 (options & WNOWAIT), infop,
1588 stat_addr, ru); 1498 stat_addr, ru);
1589 if (retval == -EAGAIN) 1499 } else if (p->exit_state == EXIT_ZOMBIE &&
1590 goto repeat; 1500 !delay_group_leader(p)) {
1591 if (retval != 0) /* He released the lock. */
1592 goto end;
1593 } else if (p->exit_state == EXIT_ZOMBIE) {
1594 /* 1501 /*
1595 * Eligible but we cannot release it yet: 1502 * We don't reap group leaders with subthreads.
1596 */ 1503 */
1597 if (ret == 2)
1598 goto check_continued;
1599 if (!likely(options & WEXITED)) 1504 if (!likely(options & WEXITED))
1600 continue; 1505 continue;
1601 retval = wait_task_zombie(p, 1506 retval = wait_task_zombie(p,
1602 (options & WNOWAIT), infop, 1507 (options & WNOWAIT), infop,
1603 stat_addr, ru); 1508 stat_addr, ru);
1604 /* He released the lock. */
1605 if (retval != 0)
1606 goto end;
1607 } else if (p->exit_state != EXIT_DEAD) { 1509 } else if (p->exit_state != EXIT_DEAD) {
1608check_continued:
1609 /* 1510 /*
1610 * It's running now, so it might later 1511 * It's running now, so it might later
1611 * exit, stop, or stop and then continue. 1512 * exit, stop, or stop and then continue.
@@ -1616,17 +1517,20 @@ check_continued:
1616 retval = wait_task_continued(p, 1517 retval = wait_task_continued(p,
1617 (options & WNOWAIT), infop, 1518 (options & WNOWAIT), infop,
1618 stat_addr, ru); 1519 stat_addr, ru);
1619 if (retval != 0) /* He released the lock. */
1620 goto end;
1621 } 1520 }
1521 if (retval != 0) /* tasklist_lock released */
1522 goto end;
1622 } 1523 }
1623 if (!flag) { 1524 if (!flag) {
1624 list_for_each_entry(p, &tsk->ptrace_children, 1525 list_for_each_entry(p, &tsk->ptrace_children,
1625 ptrace_list) { 1526 ptrace_list) {
1626 if (!eligible_child(pid, options, p)) 1527 flag = eligible_child(type, pid, options, p);
1528 if (!flag)
1627 continue; 1529 continue;
1628 flag = 1; 1530 if (likely(flag > 0))
1629 break; 1531 break;
1532 retval = flag;
1533 goto end;
1630 } 1534 }
1631 } 1535 }
1632 if (options & __WNOTHREAD) 1536 if (options & __WNOTHREAD)
@@ -1634,10 +1538,9 @@ check_continued:
1634 tsk = next_thread(tsk); 1538 tsk = next_thread(tsk);
1635 BUG_ON(tsk->signal != current->signal); 1539 BUG_ON(tsk->signal != current->signal);
1636 } while (tsk != current); 1540 } while (tsk != current);
1637
1638 read_unlock(&tasklist_lock); 1541 read_unlock(&tasklist_lock);
1542
1639 if (flag) { 1543 if (flag) {
1640 retval = 0;
1641 if (options & WNOHANG) 1544 if (options & WNOHANG)
1642 goto end; 1545 goto end;
1643 retval = -ERESTARTSYS; 1546 retval = -ERESTARTSYS;
@@ -1647,14 +1550,12 @@ check_continued:
1647 goto repeat; 1550 goto repeat;
1648 } 1551 }
1649 retval = -ECHILD; 1552 retval = -ECHILD;
1650 if (unlikely(denied) && !allowed)
1651 retval = denied;
1652end: 1553end:
1653 current->state = TASK_RUNNING; 1554 current->state = TASK_RUNNING;
1654 remove_wait_queue(&current->signal->wait_chldexit,&wait); 1555 remove_wait_queue(&current->signal->wait_chldexit,&wait);
1655 if (infop) { 1556 if (infop) {
1656 if (retval > 0) 1557 if (retval > 0)
1657 retval = 0; 1558 retval = 0;
1658 else { 1559 else {
1659 /* 1560 /*
1660 * For a WNOHANG return, clear out all the fields 1561 * For a WNOHANG return, clear out all the fields
@@ -1678,10 +1579,12 @@ end:
1678 return retval; 1579 return retval;
1679} 1580}
1680 1581
1681asmlinkage long sys_waitid(int which, pid_t pid, 1582asmlinkage long sys_waitid(int which, pid_t upid,
1682 struct siginfo __user *infop, int options, 1583 struct siginfo __user *infop, int options,
1683 struct rusage __user *ru) 1584 struct rusage __user *ru)
1684{ 1585{
1586 struct pid *pid = NULL;
1587 enum pid_type type;
1685 long ret; 1588 long ret;
1686 1589
1687 if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED)) 1590 if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
@@ -1691,37 +1594,58 @@ asmlinkage long sys_waitid(int which, pid_t pid,
1691 1594
1692 switch (which) { 1595 switch (which) {
1693 case P_ALL: 1596 case P_ALL:
1694 pid = -1; 1597 type = PIDTYPE_MAX;
1695 break; 1598 break;
1696 case P_PID: 1599 case P_PID:
1697 if (pid <= 0) 1600 type = PIDTYPE_PID;
1601 if (upid <= 0)
1698 return -EINVAL; 1602 return -EINVAL;
1699 break; 1603 break;
1700 case P_PGID: 1604 case P_PGID:
1701 if (pid <= 0) 1605 type = PIDTYPE_PGID;
1606 if (upid <= 0)
1702 return -EINVAL; 1607 return -EINVAL;
1703 pid = -pid;
1704 break; 1608 break;
1705 default: 1609 default:
1706 return -EINVAL; 1610 return -EINVAL;
1707 } 1611 }
1708 1612
1709 ret = do_wait(pid, options, infop, NULL, ru); 1613 if (type < PIDTYPE_MAX)
1614 pid = find_get_pid(upid);
1615 ret = do_wait(type, pid, options, infop, NULL, ru);
1616 put_pid(pid);
1710 1617
1711 /* avoid REGPARM breakage on x86: */ 1618 /* avoid REGPARM breakage on x86: */
1712 prevent_tail_call(ret); 1619 prevent_tail_call(ret);
1713 return ret; 1620 return ret;
1714} 1621}
1715 1622
1716asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr, 1623asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
1717 int options, struct rusage __user *ru) 1624 int options, struct rusage __user *ru)
1718{ 1625{
1626 struct pid *pid = NULL;
1627 enum pid_type type;
1719 long ret; 1628 long ret;
1720 1629
1721 if (options & ~(WNOHANG|WUNTRACED|WCONTINUED| 1630 if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
1722 __WNOTHREAD|__WCLONE|__WALL)) 1631 __WNOTHREAD|__WCLONE|__WALL))
1723 return -EINVAL; 1632 return -EINVAL;
1724 ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru); 1633
1634 if (upid == -1)
1635 type = PIDTYPE_MAX;
1636 else if (upid < 0) {
1637 type = PIDTYPE_PGID;
1638 pid = find_get_pid(-upid);
1639 } else if (upid == 0) {
1640 type = PIDTYPE_PGID;
1641 pid = get_pid(task_pgrp(current));
1642 } else /* upid > 0 */ {
1643 type = PIDTYPE_PID;
1644 pid = find_get_pid(upid);
1645 }
1646
1647 ret = do_wait(type, pid, options | WEXITED, NULL, stat_addr, ru);
1648 put_pid(pid);
1725 1649
1726 /* avoid REGPARM breakage on x86: */ 1650 /* avoid REGPARM breakage on x86: */
1727 prevent_tail_call(ret); 1651 prevent_tail_call(ret);