author	Ingo Molnar <mingo@elte.hu>	2008-10-15 07:46:29 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-15 07:46:29 -0400
commit	b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40 (patch)
tree	53ccb1c2c14751fe69cf93102e76e97021f6df07 /kernel/signal.c
parent	4f962d4d65923d7b722192e729840cfb79af0a5a (diff)
parent	278429cff8809958d25415ba0ed32b59866ab1a8 (diff)
Merge branch 'linus' into stackprotector
Conflicts:
	arch/x86/kernel/Makefile
	include/asm-x86/pda.h
Diffstat (limited to 'kernel/signal.c')
-rw-r--r--	kernel/signal.c	183
1 file changed, 93 insertions(+), 90 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 6c0958e52ea7..e661b01d340f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,6 +22,7 @@
 #include <linux/ptrace.h>
 #include <linux/signal.h>
 #include <linux/signalfd.h>
+#include <linux/tracehook.h>
 #include <linux/capability.h>
 #include <linux/freezer.h>
 #include <linux/pid_namespace.h>
@@ -39,24 +40,21 @@
 
 static struct kmem_cache *sigqueue_cachep;
 
-static int __sig_ignored(struct task_struct *t, int sig)
+static void __user *sig_handler(struct task_struct *t, int sig)
 {
-	void __user *handler;
+	return t->sighand->action[sig - 1].sa.sa_handler;
+}
 
+static int sig_handler_ignored(void __user *handler, int sig)
+{
 	/* Is it explicitly or implicitly ignored? */
-
-	handler = t->sighand->action[sig - 1].sa.sa_handler;
 	return handler == SIG_IGN ||
 		(handler == SIG_DFL && sig_kernel_ignore(sig));
 }
 
 static int sig_ignored(struct task_struct *t, int sig)
 {
-	/*
-	 * Tracers always want to know about signals..
-	 */
-	if (t->ptrace & PT_PTRACED)
-		return 0;
+	void __user *handler;
 
 	/*
 	 * Blocked signals are never ignored, since the
@@ -66,7 +64,14 @@ static int sig_ignored(struct task_struct *t, int sig)
 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
 		return 0;
 
-	return __sig_ignored(t, sig);
+	handler = sig_handler(t, sig);
+	if (!sig_handler_ignored(handler, sig))
+		return 0;
+
+	/*
+	 * Tracers may want to know about even ignored signals.
+	 */
+	return !tracehook_consider_ignored_signal(t, sig, handler);
 }
 
 /*
@@ -129,7 +134,9 @@ void recalc_sigpending_and_wake(struct task_struct *t)
 
 void recalc_sigpending(void)
 {
-	if (!recalc_sigpending_tsk(current) && !freezing(current))
+	if (unlikely(tracehook_force_sigpending()))
+		set_thread_flag(TIF_SIGPENDING);
+	else if (!recalc_sigpending_tsk(current) && !freezing(current))
 		clear_thread_flag(TIF_SIGPENDING);
 
 }
@@ -295,12 +302,12 @@ flush_signal_handlers(struct task_struct *t, int force_default)
 
 int unhandled_signal(struct task_struct *tsk, int sig)
 {
+	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
 	if (is_global_init(tsk))
 		return 1;
-	if (tsk->ptrace & PT_PTRACED)
+	if (handler != SIG_IGN && handler != SIG_DFL)
 		return 0;
-	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
-		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
+	return !tracehook_consider_fatal_signal(tsk, sig, handler);
 }
 
 
@@ -338,13 +345,9 @@ unblock_all_signals(void)
 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
 }
 
-static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 {
 	struct sigqueue *q, *first = NULL;
-	int still_pending = 0;
-
-	if (unlikely(!sigismember(&list->signal, sig)))
-		return 0;
 
 	/*
 	 * Collect the siginfo appropriate to this signal. Check if
@@ -352,33 +355,30 @@ static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 	 */
 	list_for_each_entry(q, &list->list, list) {
 		if (q->info.si_signo == sig) {
-			if (first) {
-				still_pending = 1;
-				break;
-			}
+			if (first)
+				goto still_pending;
 			first = q;
 		}
 	}
+
+	sigdelset(&list->signal, sig);
+
 	if (first) {
+still_pending:
 		list_del_init(&first->list);
 		copy_siginfo(info, &first->info);
 		__sigqueue_free(first);
-		if (!still_pending)
-			sigdelset(&list->signal, sig);
 	} else {
-
 		/* Ok, it wasn't in the queue.  This must be
 		   a fast-pathed signal or we must have been
 		   out of queue space.  So zero out the info.
 		 */
-		sigdelset(&list->signal, sig);
 		info->si_signo = sig;
 		info->si_errno = 0;
 		info->si_code = 0;
 		info->si_pid = 0;
 		info->si_uid = 0;
 	}
-	return 1;
 }
 
 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
@@ -396,8 +396,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 			}
 		}
 
-		if (!collect_signal(sig, pending, info))
-			sig = 0;
+		collect_signal(sig, pending, info);
 	}
 
 	return sig;
@@ -462,8 +461,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 		 * is to alert stop-signal processing code when another
 		 * processor has come along and cleared the flag.
 		 */
-		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
-			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
 	}
 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
 		/*
@@ -600,9 +598,6 @@ static int check_kill_permission(int sig, struct siginfo *info,
 	return security_task_kill(t, info, sig, 0);
 }
 
-/* forward decl */
-static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
-
 /*
  * Handle magic process-wide effects of stop/continue signals. Unlike
  * the signal actions, these happen immediately at signal-generation
@@ -765,7 +760,8 @@ static void complete_signal(int sig, struct task_struct *p, int group)
 	if (sig_fatal(p, sig) &&
 	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
 	    !sigismember(&t->real_blocked, sig) &&
-	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
+	    (sig == SIGKILL ||
+	     !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
 		/*
 		 * This signal will be fatal to the whole group.
 		 */
@@ -1125,7 +1121,7 @@ EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
  * is probably wrong.  Should make it like BSD or SYSV.
  */
 
-static int kill_something_info(int sig, struct siginfo *info, int pid)
+static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
 {
 	int ret;
 
@@ -1237,17 +1233,6 @@ int kill_pid(struct pid *pid, int sig, int priv)
 }
 EXPORT_SYMBOL(kill_pid);
 
-int
-kill_proc(pid_t pid, int sig, int priv)
-{
-	int ret;
-
-	rcu_read_lock();
-	ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
-	rcu_read_unlock();
-	return ret;
-}
-
 /*
  * These functions support sending signals using preallocated sigqueue
  * structures.  This is needed "because realtime applications cannot
@@ -1319,6 +1304,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
 		q->info.si_overrun++;
 		goto out;
 	}
+	q->info.si_overrun = 0;
 
 	signalfd_notify(t, sig);
 	pending = group ? &t->signal->shared_pending : &t->pending;
@@ -1343,13 +1329,16 @@ static inline void __wake_up_parent(struct task_struct *p,
 /*
  * Let a parent know about the death of a child.
  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
+ *
+ * Returns -1 if our parent ignored us and so we've switched to
+ * self-reaping, or else @sig.
  */
-
-void do_notify_parent(struct task_struct *tsk, int sig)
+int do_notify_parent(struct task_struct *tsk, int sig)
 {
 	struct siginfo info;
 	unsigned long flags;
 	struct sighand_struct *psig;
+	int ret = sig;
 
 	BUG_ON(sig == -1);
 
@@ -1379,10 +1368,9 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 
 	info.si_uid = tsk->uid;
 
-	/* FIXME: find out whether or not this is supposed to be c*time. */
-	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
+	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
 						       tsk->signal->utime));
-	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
+	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
 						       tsk->signal->stime));
 
 	info.si_status = tsk->exit_code & 0x7f;
@@ -1415,14 +1403,16 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 		 * is implementation-defined: we do (if you don't want
 		 * it, just use SIG_IGN instead).
 		 */
-		tsk->exit_signal = -1;
+		ret = tsk->exit_signal = -1;
 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
-			sig = 0;
+			sig = -1;
 	}
 	if (valid_signal(sig) && sig > 0)
 		__group_send_sig_info(sig, &info, tsk->parent);
 	__wake_up_parent(tsk, tsk->parent);
 	spin_unlock_irqrestore(&psig->siglock, flags);
+
+	return ret;
 }
 
 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
@@ -1450,9 +1440,8 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 
 	info.si_uid = tsk->uid;
 
-	/* FIXME: find out whether or not this is supposed to be c*time. */
-	info.si_utime = cputime_to_jiffies(tsk->utime);
-	info.si_stime = cputime_to_jiffies(tsk->stime);
+	info.si_utime = cputime_to_clock_t(tsk->utime);
+	info.si_stime = cputime_to_clock_t(tsk->stime);
 
 	info.si_code = why;
 	switch (why) {
@@ -1491,10 +1480,10 @@ static inline int may_ptrace_stop(void)
 	 * is a deadlock situation, and pointless because our tracer
 	 * is dead so don't allow us to stop.
 	 * If SIGKILL was already sent before the caller unlocked
-	 * ->siglock we must see ->core_waiters != 0. Otherwise it
+	 * ->siglock we must see ->core_state != NULL. Otherwise it
 	 * is safe to enter schedule().
 	 */
-	if (unlikely(current->mm->core_waiters) &&
+	if (unlikely(current->mm->core_state) &&
 	    unlikely(current->mm == current->parent->mm))
 		return 0;
 
@@ -1507,9 +1496,8 @@ static inline int may_ptrace_stop(void)
  */
 static int sigkill_pending(struct task_struct *tsk)
 {
-	return ((sigismember(&tsk->pending.signal, SIGKILL) ||
-		 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
-		!unlikely(sigismember(&tsk->blocked, SIGKILL)));
+	return sigismember(&tsk->pending.signal, SIGKILL) ||
+		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
 }
 
 /*
@@ -1525,8 +1513,6 @@ static int sigkill_pending(struct task_struct *tsk)
  */
 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
 {
-	int killed = 0;
-
 	if (arch_ptrace_stop_needed(exit_code, info)) {
 		/*
 		 * The arch code has something special to do before a
@@ -1542,7 +1528,8 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
 		spin_unlock_irq(&current->sighand->siglock);
 		arch_ptrace_stop(exit_code, info);
 		spin_lock_irq(&current->sighand->siglock);
-		killed = sigkill_pending(current);
+		if (sigkill_pending(current))
+			return;
 	}
 
 	/*
@@ -1559,7 +1546,7 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
 	__set_current_state(TASK_TRACED);
 	spin_unlock_irq(&current->sighand->siglock);
 	read_lock(&tasklist_lock);
-	if (!unlikely(killed) && may_ptrace_stop()) {
+	if (may_ptrace_stop()) {
 		do_notify_parent_cldstop(current, CLD_TRAPPED);
 		read_unlock(&tasklist_lock);
 		schedule();
@@ -1623,7 +1610,7 @@ finish_stop(int stop_count)
 	 * a group stop in progress and we are the last to stop,
 	 * report to the parent.  When ptraced, every thread reports itself.
 	 */
-	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
+	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
 		read_lock(&tasklist_lock);
 		do_notify_parent_cldstop(current, CLD_STOPPED);
 		read_unlock(&tasklist_lock);
@@ -1658,8 +1645,7 @@ static int do_signal_stop(int signr)
 	} else {
 		struct task_struct *t;
 
-		if (unlikely((sig->flags & (SIGNAL_STOP_DEQUEUED | SIGNAL_UNKILLABLE))
-			     != SIGNAL_STOP_DEQUEUED) ||
+		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
 		    unlikely(signal_group_exit(sig)))
 			return 0;
 		/*
@@ -1760,6 +1746,9 @@ relock:
 		signal->flags &= ~SIGNAL_CLD_MASK;
 		spin_unlock_irq(&sighand->siglock);
 
+		if (unlikely(!tracehook_notify_jctl(1, why)))
+			goto relock;
+
 		read_lock(&tasklist_lock);
 		do_notify_parent_cldstop(current->group_leader, why);
 		read_unlock(&tasklist_lock);
@@ -1773,17 +1762,33 @@ relock:
 		    do_signal_stop(0))
 			goto relock;
 
-		signr = dequeue_signal(current, &current->blocked, info);
-		if (!signr)
-			break; /* will return 0 */
+		/*
+		 * Tracing can induce an artifical signal and choose sigaction.
+		 * The return value in @signr determines the default action,
+		 * but @info->si_signo is the signal number we will report.
+		 */
+		signr = tracehook_get_signal(current, regs, info, return_ka);
+		if (unlikely(signr < 0))
+			goto relock;
+		if (unlikely(signr != 0))
+			ka = return_ka;
+		else {
+			signr = dequeue_signal(current, &current->blocked,
+					       info);
 
-		if (signr != SIGKILL) {
-			signr = ptrace_signal(signr, info, regs, cookie);
 			if (!signr)
-				continue;
+				break; /* will return 0 */
+
+			if (signr != SIGKILL) {
+				signr = ptrace_signal(signr, info,
+						      regs, cookie);
+				if (!signr)
+					continue;
+			}
+
+			ka = &sighand->action[signr-1];
 		}
 
-		ka = &sighand->action[signr-1];
 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
 			continue;
 		if (ka->sa.sa_handler != SIG_DFL) {
@@ -1831,7 +1836,7 @@ relock:
 				spin_lock_irq(&sighand->siglock);
 			}
 
-			if (likely(do_signal_stop(signr))) {
+			if (likely(do_signal_stop(info->si_signo))) {
 				/* It released the siglock.  */
 				goto relock;
 			}
@@ -1852,7 +1857,7 @@ relock:
 
 		if (sig_kernel_coredump(signr)) {
 			if (print_fatal_signals)
-				print_fatal_signal(regs, signr);
+				print_fatal_signal(regs, info->si_signo);
 			/*
 			 * If it was able to dump core, this kills all
 			 * other threads in the group and synchronizes with
@@ -1861,13 +1866,13 @@ relock:
 			 * first and our do_group_exit call below will use
 			 * that value and ignore the one we pass it.
 			 */
-			do_coredump((long)signr, signr, regs);
+			do_coredump(info->si_signo, info->si_signo, regs);
 		}
 
 		/*
 		 * Death signals, no core dump.
 		 */
-		do_group_exit(signr);
+		do_group_exit(info->si_signo);
 		/* NOTREACHED */
 	}
 	spin_unlock_irq(&sighand->siglock);
@@ -1909,7 +1914,7 @@ void exit_signals(struct task_struct *tsk)
 out:
 	spin_unlock_irq(&tsk->sighand->siglock);
 
-	if (unlikely(group_stop)) {
+	if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
 		read_lock(&tasklist_lock);
 		do_notify_parent_cldstop(tsk, CLD_STOPPED);
 		read_unlock(&tasklist_lock);
@@ -1920,8 +1925,6 @@ EXPORT_SYMBOL(recalc_sigpending);
 EXPORT_SYMBOL_GPL(dequeue_signal);
 EXPORT_SYMBOL(flush_signals);
 EXPORT_SYMBOL(force_sig);
-EXPORT_SYMBOL(kill_proc);
-EXPORT_SYMBOL(ptrace_notify);
 EXPORT_SYMBOL(send_sig);
 EXPORT_SYMBOL(send_sig_info);
 EXPORT_SYMBOL(sigprocmask);
@@ -2196,7 +2199,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
 }
 
 asmlinkage long
-sys_kill(int pid, int sig)
+sys_kill(pid_t pid, int sig)
 {
 	struct siginfo info;
 
@@ -2209,7 +2212,7 @@ sys_kill(int pid, int sig)
 	return kill_something_info(sig, &info, pid);
 }
 
-static int do_tkill(int tgid, int pid, int sig)
+static int do_tkill(pid_t tgid, pid_t pid, int sig)
 {
 	int error;
 	struct siginfo info;
@@ -2255,7 +2258,7 @@ static int do_tkill(int tgid, int pid, int sig)
  *  exists but it's not belonging to the target process anymore. This
  *  method solves the problem of threads exiting and PIDs getting reused.
  */
-asmlinkage long sys_tgkill(int tgid, int pid, int sig)
+asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
 {
 	/* This is only valid for single tasks */
 	if (pid <= 0 || tgid <= 0)
@@ -2268,7 +2271,7 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
  */
 asmlinkage long
-sys_tkill(int pid, int sig)
+sys_tkill(pid_t pid, int sig)
 {
 	/* This is only valid for single tasks */
 	if (pid <= 0)
@@ -2278,7 +2281,7 @@ sys_tkill(int pid, int sig)
 }
 
 asmlinkage long
-sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
+sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
 {
 	siginfo_t info;
 
@@ -2325,7 +2328,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 		 *   (for example, SIGCHLD), shall cause the pending signal to
 		 *   be discarded, whether or not it is blocked"
 		 */
-		if (__sig_ignored(t, sig)) {
+		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
 			sigemptyset(&mask);
 			sigaddset(&mask, sig);
 			rm_from_queue_full(&mask, &t->signal->shared_pending);