Diffstat (limited to 'kernel/signal.c')
-rw-r--r--  kernel/signal.c | 179 +++++++++++++++++++++++----------------------
1 file changed, 90 insertions(+), 89 deletions(-)

diff --git a/kernel/signal.c b/kernel/signal.c
index c5bf0c0df658..c539f60c6f41 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,6 +22,7 @@
 #include <linux/ptrace.h>
 #include <linux/signal.h>
 #include <linux/signalfd.h>
+#include <linux/tracehook.h>
 #include <linux/capability.h>
 #include <linux/freezer.h>
 #include <linux/pid_namespace.h>
@@ -39,24 +40,21 @@
 
 static struct kmem_cache *sigqueue_cachep;
 
-static int __sig_ignored(struct task_struct *t, int sig)
+static void __user *sig_handler(struct task_struct *t, int sig)
 {
-        void __user *handler;
+        return t->sighand->action[sig - 1].sa.sa_handler;
+}
 
+static int sig_handler_ignored(void __user *handler, int sig)
+{
         /* Is it explicitly or implicitly ignored? */
-
-        handler = t->sighand->action[sig - 1].sa.sa_handler;
         return handler == SIG_IGN ||
                 (handler == SIG_DFL && sig_kernel_ignore(sig));
 }
 
 static int sig_ignored(struct task_struct *t, int sig)
 {
-        /*
-         * Tracers always want to know about signals..
-         */
-        if (t->ptrace & PT_PTRACED)
-                return 0;
+        void __user *handler;
 
         /*
          * Blocked signals are never ignored, since the
@@ -66,7 +64,14 @@ static int sig_ignored(struct task_struct *t, int sig)
         if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                 return 0;
 
-        return __sig_ignored(t, sig);
+        handler = sig_handler(t, sig);
+        if (!sig_handler_ignored(handler, sig))
+                return 0;
+
+        /*
+         * Tracers may want to know about even ignored signals.
+         */
+        return !tracehook_consider_ignored_signal(t, sig, handler);
 }
 
 /*
@@ -129,7 +134,9 @@ void recalc_sigpending_and_wake(struct task_struct *t)
 
 void recalc_sigpending(void)
 {
-        if (!recalc_sigpending_tsk(current) && !freezing(current))
+        if (unlikely(tracehook_force_sigpending()))
+                set_thread_flag(TIF_SIGPENDING);
+        else if (!recalc_sigpending_tsk(current) && !freezing(current))
                 clear_thread_flag(TIF_SIGPENDING);
 
 }
@@ -295,12 +302,12 @@ flush_signal_handlers(struct task_struct *t, int force_default)
 
 int unhandled_signal(struct task_struct *tsk, int sig)
 {
+        void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
         if (is_global_init(tsk))
                 return 1;
-        if (tsk->ptrace & PT_PTRACED)
+        if (handler != SIG_IGN && handler != SIG_DFL)
                 return 0;
-        return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
-                (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
+        return !tracehook_consider_fatal_signal(tsk, sig, handler);
 }
 
 
@@ -338,13 +345,9 @@ unblock_all_signals(void)
         spin_unlock_irqrestore(&current->sighand->siglock, flags);
 }
 
-static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 {
         struct sigqueue *q, *first = NULL;
-        int still_pending = 0;
-
-        if (unlikely(!sigismember(&list->signal, sig)))
-                return 0;
 
         /*
          * Collect the siginfo appropriate to this signal. Check if
@@ -352,33 +355,30 @@ static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
          */
         list_for_each_entry(q, &list->list, list) {
                 if (q->info.si_signo == sig) {
-                        if (first) {
-                                still_pending = 1;
-                                break;
-                        }
+                        if (first)
+                                goto still_pending;
                         first = q;
                 }
         }
+
+        sigdelset(&list->signal, sig);
+
         if (first) {
+still_pending:
                 list_del_init(&first->list);
                 copy_siginfo(info, &first->info);
                 __sigqueue_free(first);
-                if (!still_pending)
-                        sigdelset(&list->signal, sig);
         } else {
-
                 /* Ok, it wasn't in the queue.  This must be
                    a fast-pathed signal or we must have been
                    out of queue space.  So zero out the info.
                  */
-                sigdelset(&list->signal, sig);
                 info->si_signo = sig;
                 info->si_errno = 0;
                 info->si_code = 0;
                 info->si_pid = 0;
                 info->si_uid = 0;
         }
-        return 1;
 }
 
 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
@@ -396,8 +396,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                         }
                 }
 
-                if (!collect_signal(sig, pending, info))
-                        sig = 0;
+                collect_signal(sig, pending, info);
         }
 
         return sig;
@@ -462,8 +461,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
                  * is to alert stop-signal processing code when another
                  * processor has come along and cleared the flag.
                  */
-                if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
-                        tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+                tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
         }
         if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
                 /*
@@ -600,9 +598,6 @@ static int check_kill_permission(int sig, struct siginfo *info,
         return security_task_kill(t, info, sig, 0);
 }
 
-/* forward decl */
-static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
-
 /*
  * Handle magic process-wide effects of stop/continue signals. Unlike
  * the signal actions, these happen immediately at signal-generation
@@ -765,7 +760,8 @@ static void complete_signal(int sig, struct task_struct *p, int group)
         if (sig_fatal(p, sig) &&
             !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
             !sigismember(&t->real_blocked, sig) &&
-            (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
+            (sig == SIGKILL ||
+             !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
                 /*
                  * This signal will be fatal to the whole group.
                  */
@@ -1125,7 +1121,7 @@ EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
  * is probably wrong.  Should make it like BSD or SYSV.
  */
 
-static int kill_something_info(int sig, struct siginfo *info, int pid)
+static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
 {
         int ret;
 
@@ -1237,17 +1233,6 @@ int kill_pid(struct pid *pid, int sig, int priv)
 }
 EXPORT_SYMBOL(kill_pid);
 
-int
-kill_proc(pid_t pid, int sig, int priv)
-{
-        int ret;
-
-        rcu_read_lock();
-        ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
-        rcu_read_unlock();
-        return ret;
-}
-
 /*
  * These functions support sending signals using preallocated sigqueue
  * structures.  This is needed "because realtime applications cannot
@@ -1344,9 +1329,11 @@ static inline void __wake_up_parent(struct task_struct *p,
 /*
  * Let a parent know about the death of a child.
  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
+ *
+ * Returns -1 if our parent ignored us and so we've switched to
+ * self-reaping, or else @sig.
  */
-
-void do_notify_parent(struct task_struct *tsk, int sig)
+int do_notify_parent(struct task_struct *tsk, int sig)
 {
         struct siginfo info;
         unsigned long flags;
@@ -1380,10 +1367,9 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 
         info.si_uid = tsk->uid;
 
-        /* FIXME: find out whether or not this is supposed to be c*time. */
-        info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
+        info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
                                                        tsk->signal->utime));
-        info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
+        info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
                                                        tsk->signal->stime));
 
         info.si_status = tsk->exit_code & 0x7f;
@@ -1418,12 +1404,14 @@ void do_notify_parent(struct task_struct *tsk, int sig)
                  */
                 tsk->exit_signal = -1;
                 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
-                        sig = 0;
+                        sig = -1;
         }
         if (valid_signal(sig) && sig > 0)
                 __group_send_sig_info(sig, &info, tsk->parent);
         __wake_up_parent(tsk, tsk->parent);
         spin_unlock_irqrestore(&psig->siglock, flags);
+
+        return sig;
 }
 
 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
@@ -1451,9 +1439,8 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 
         info.si_uid = tsk->uid;
 
-        /* FIXME: find out whether or not this is supposed to be c*time. */
-        info.si_utime = cputime_to_jiffies(tsk->utime);
-        info.si_stime = cputime_to_jiffies(tsk->stime);
+        info.si_utime = cputime_to_clock_t(tsk->utime);
+        info.si_stime = cputime_to_clock_t(tsk->stime);
 
         info.si_code = why;
         switch (why) {
@@ -1492,10 +1479,10 @@ static inline int may_ptrace_stop(void)
          * is a deadlock situation, and pointless because our tracer
          * is dead so don't allow us to stop.
          * If SIGKILL was already sent before the caller unlocked
-         * ->siglock we must see ->core_waiters != 0. Otherwise it
+         * ->siglock we must see ->core_state != NULL. Otherwise it
          * is safe to enter schedule().
          */
-        if (unlikely(current->mm->core_waiters) &&
+        if (unlikely(current->mm->core_state) &&
             unlikely(current->mm == current->parent->mm))
                 return 0;
 
@@ -1508,9 +1495,8 @@ static inline int may_ptrace_stop(void)
  */
 static int sigkill_pending(struct task_struct *tsk)
 {
-        return ((sigismember(&tsk->pending.signal, SIGKILL) ||
-                 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
-                !unlikely(sigismember(&tsk->blocked, SIGKILL)));
+        return  sigismember(&tsk->pending.signal, SIGKILL) ||
+                sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
 }
 
 /*
@@ -1526,8 +1512,6 @@ static int sigkill_pending(struct task_struct *tsk)
  */
 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
 {
-        int killed = 0;
-
         if (arch_ptrace_stop_needed(exit_code, info)) {
                 /*
                  * The arch code has something special to do before a
@@ -1543,7 +1527,8 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
                 spin_unlock_irq(&current->sighand->siglock);
                 arch_ptrace_stop(exit_code, info);
                 spin_lock_irq(&current->sighand->siglock);
-                killed = sigkill_pending(current);
+                if (sigkill_pending(current))
+                        return;
         }
 
         /*
@@ -1560,7 +1545,7 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
         __set_current_state(TASK_TRACED);
         spin_unlock_irq(&current->sighand->siglock);
         read_lock(&tasklist_lock);
-        if (!unlikely(killed) && may_ptrace_stop()) {
+        if (may_ptrace_stop()) {
                 do_notify_parent_cldstop(current, CLD_TRAPPED);
                 read_unlock(&tasklist_lock);
                 schedule();
@@ -1624,7 +1609,7 @@ finish_stop(int stop_count)
          * a group stop in progress and we are the last to stop,
          * report to the parent.  When ptraced, every thread reports itself.
          */
-        if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
+        if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
                 read_lock(&tasklist_lock);
                 do_notify_parent_cldstop(current, CLD_STOPPED);
                 read_unlock(&tasklist_lock);
@@ -1659,8 +1644,7 @@ static int do_signal_stop(int signr)
         } else {
                 struct task_struct *t;
 
-                if (unlikely((sig->flags & (SIGNAL_STOP_DEQUEUED | SIGNAL_UNKILLABLE))
-                                         != SIGNAL_STOP_DEQUEUED) ||
+                if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
                     unlikely(signal_group_exit(sig)))
                         return 0;
                 /*
@@ -1761,6 +1745,9 @@ relock:
                 signal->flags &= ~SIGNAL_CLD_MASK;
                 spin_unlock_irq(&sighand->siglock);
 
+                if (unlikely(!tracehook_notify_jctl(1, why)))
+                        goto relock;
+
                 read_lock(&tasklist_lock);
                 do_notify_parent_cldstop(current->group_leader, why);
                 read_unlock(&tasklist_lock);
@@ -1774,17 +1761,33 @@ relock:
                     do_signal_stop(0))
                         goto relock;
 
-                signr = dequeue_signal(current, &current->blocked, info);
-                if (!signr)
-                        break; /* will return 0 */
+                /*
+                 * Tracing can induce an artifical signal and choose sigaction.
+                 * The return value in @signr determines the default action,
+                 * but @info->si_signo is the signal number we will report.
+                 */
+                signr = tracehook_get_signal(current, regs, info, return_ka);
+                if (unlikely(signr < 0))
+                        goto relock;
+                if (unlikely(signr != 0))
+                        ka = return_ka;
+                else {
+                        signr = dequeue_signal(current, &current->blocked,
+                                               info);
 
-                if (signr != SIGKILL) {
-                        signr = ptrace_signal(signr, info, regs, cookie);
-                        if (!signr)
-                                continue;
+                        if (!signr)
+                                break; /* will return 0 */
+
+                        if (signr != SIGKILL) {
+                                signr = ptrace_signal(signr, info,
+                                                      regs, cookie);
+                                if (!signr)
+                                        continue;
+                        }
+
+                        ka = &sighand->action[signr-1];
                 }
 
-                ka = &sighand->action[signr-1];
 sigaction:
                 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
                         continue;
                 if (ka->sa.sa_handler != SIG_DFL) {
@@ -1832,7 +1835,7 @@ relock:
                                 spin_lock_irq(&sighand->siglock);
                         }
 
-                        if (likely(do_signal_stop(signr))) {
+                        if (likely(do_signal_stop(info->si_signo))) {
                                 /* It released the siglock. */
                                 goto relock;
                         }
@@ -1853,7 +1856,7 @@ relock:
 
                 if (sig_kernel_coredump(signr)) {
                         if (print_fatal_signals)
-                                print_fatal_signal(regs, signr);
+                                print_fatal_signal(regs, info->si_signo);
                         /*
                          * If it was able to dump core, this kills all
                          * other threads in the group and synchronizes with
@@ -1862,13 +1865,13 @@ relock:
                          * first and our do_group_exit call below will use
                          * that value and ignore the one we pass it.
                          */
-                        do_coredump((long)signr, signr, regs);
+                        do_coredump(info->si_signo, info->si_signo, regs);
                 }
 
                 /*
                  * Death signals, no core dump.
                  */
-                do_group_exit(signr);
+                do_group_exit(info->si_signo);
                 /* NOTREACHED */
         }
         spin_unlock_irq(&sighand->siglock);
@@ -1910,7 +1913,7 @@ void exit_signals(struct task_struct *tsk)
 out:
         spin_unlock_irq(&tsk->sighand->siglock);
 
-        if (unlikely(group_stop)) {
+        if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
                 read_lock(&tasklist_lock);
                 do_notify_parent_cldstop(tsk, CLD_STOPPED);
                 read_unlock(&tasklist_lock);
@@ -1921,8 +1924,6 @@ EXPORT_SYMBOL(recalc_sigpending);
 EXPORT_SYMBOL_GPL(dequeue_signal);
 EXPORT_SYMBOL(flush_signals);
 EXPORT_SYMBOL(force_sig);
-EXPORT_SYMBOL(kill_proc);
-EXPORT_SYMBOL(ptrace_notify);
 EXPORT_SYMBOL(send_sig);
 EXPORT_SYMBOL(send_sig_info);
 EXPORT_SYMBOL(sigprocmask);
@@ -2197,7 +2198,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
 }
 
 asmlinkage long
-sys_kill(int pid, int sig)
+sys_kill(pid_t pid, int sig)
 {
         struct siginfo info;
 
@@ -2210,7 +2211,7 @@ sys_kill(int pid, int sig)
         return kill_something_info(sig, &info, pid);
 }
 
-static int do_tkill(int tgid, int pid, int sig)
+static int do_tkill(pid_t tgid, pid_t pid, int sig)
 {
         int error;
         struct siginfo info;
@@ -2256,7 +2257,7 @@ static int do_tkill(int tgid, int pid, int sig)
  *  exists but it's not belonging to the target process anymore. This
  *  method solves the problem of threads exiting and PIDs getting reused.
  */
-asmlinkage long sys_tgkill(int tgid, int pid, int sig)
+asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
 {
         /* This is only valid for single tasks */
         if (pid <= 0 || tgid <= 0)
@@ -2269,7 +2270,7 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
  */
 asmlinkage long
-sys_tkill(int pid, int sig)
+sys_tkill(pid_t pid, int sig)
 {
         /* This is only valid for single tasks */
         if (pid <= 0)
@@ -2279,7 +2280,7 @@ sys_tkill(int pid, int sig)
 }
 
 asmlinkage long
-sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
+sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
 {
         siginfo_t info;
 
@@ -2326,7 +2327,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
                  *   (for example, SIGCHLD), shall cause the pending signal to
                  *   be discarded, whether or not it is blocked"
                  */
-                if (__sig_ignored(t, sig)) {
+                if (sig_handler_ignored(sig_handler(t, sig), sig)) {
                         sigemptyset(&mask);
                         sigaddset(&mask, sig);
                         rm_from_queue_full(&mask, &t->signal->shared_pending);