Diffstat (limited to 'kernel/signal.c')
-rw-r--r--  kernel/signal.c  344
1 file changed, 70 insertions(+), 274 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 75f7341b0c39..4922928d91f6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,7 +22,6 @@
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/ptrace.h>
-#include <linux/posix-timers.h>
 #include <linux/signal.h>
 #include <linux/audit.h>
 #include <linux/capability.h>
@@ -147,6 +146,8 @@ static kmem_cache_t *sigqueue_cachep;
 #define sig_kernel_stop(sig) \
 	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
 
+#define sig_needs_tasklist(sig)	((sig) == SIGCONT)
+
 #define sig_user_defined(t, signr) \
 	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
 	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
@@ -292,7 +293,7 @@ static void __sigqueue_free(struct sigqueue *q)
 	kmem_cache_free(sigqueue_cachep, q);
 }
 
-static void flush_sigqueue(struct sigpending *queue)
+void flush_sigqueue(struct sigpending *queue)
 {
 	struct sigqueue *q;
 
@@ -307,9 +308,7 @@ static void flush_sigqueue(struct sigpending *queue)
 /*
  * Flush all pending signals for a task.
  */
-
-void
-flush_signals(struct task_struct *t)
+void flush_signals(struct task_struct *t)
 {
 	unsigned long flags;
 
@@ -321,109 +320,6 @@ flush_signals(struct task_struct *t)
 }
 
 /*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_sighand(struct task_struct *tsk)
-{
-	struct sighand_struct * sighand = tsk->sighand;
-
-	/* Ok, we're done with the signal handlers */
-	tsk->sighand = NULL;
-	if (atomic_dec_and_test(&sighand->count))
-		sighand_free(sighand);
-}
-
-void exit_sighand(struct task_struct *tsk)
-{
-	write_lock_irq(&tasklist_lock);
-	rcu_read_lock();
-	if (tsk->sighand != NULL) {
-		struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
-		spin_lock(&sighand->siglock);
-		__exit_sighand(tsk);
-		spin_unlock(&sighand->siglock);
-	}
-	rcu_read_unlock();
-	write_unlock_irq(&tasklist_lock);
-}
-
-/*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_signal(struct task_struct *tsk)
-{
-	struct signal_struct * sig = tsk->signal;
-	struct sighand_struct * sighand;
-
-	if (!sig)
-		BUG();
-	if (!atomic_read(&sig->count))
-		BUG();
-	rcu_read_lock();
-	sighand = rcu_dereference(tsk->sighand);
-	spin_lock(&sighand->siglock);
-	posix_cpu_timers_exit(tsk);
-	if (atomic_dec_and_test(&sig->count)) {
-		posix_cpu_timers_exit_group(tsk);
-		tsk->signal = NULL;
-		__exit_sighand(tsk);
-		spin_unlock(&sighand->siglock);
-		flush_sigqueue(&sig->shared_pending);
-	} else {
-		/*
-		 * If there is any task waiting for the group exit
-		 * then notify it:
-		 */
-		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
-			wake_up_process(sig->group_exit_task);
-			sig->group_exit_task = NULL;
-		}
-		if (tsk == sig->curr_target)
-			sig->curr_target = next_thread(tsk);
-		tsk->signal = NULL;
-		/*
-		 * Accumulate here the counters for all threads but the
-		 * group leader as they die, so they can be added into
-		 * the process-wide totals when those are taken.
-		 * The group leader stays around as a zombie as long
-		 * as there are other threads.  When it gets reaped,
-		 * the exit.c code will add its counts into these totals.
-		 * We won't ever get here for the group leader, since it
-		 * will have been the last reference on the signal_struct.
-		 */
-		sig->utime = cputime_add(sig->utime, tsk->utime);
-		sig->stime = cputime_add(sig->stime, tsk->stime);
-		sig->min_flt += tsk->min_flt;
-		sig->maj_flt += tsk->maj_flt;
-		sig->nvcsw += tsk->nvcsw;
-		sig->nivcsw += tsk->nivcsw;
-		sig->sched_time += tsk->sched_time;
-		__exit_sighand(tsk);
-		spin_unlock(&sighand->siglock);
-		sig = NULL;	/* Marker for below. */
-	}
-	rcu_read_unlock();
-	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-	flush_sigqueue(&tsk->pending);
-	if (sig) {
-		/*
-		 * We are cleaning up the signal_struct here.
-		 */
-		exit_thread_group_keys(sig);
-		kmem_cache_free(signal_cachep, sig);
-	}
-}
-
-void exit_signal(struct task_struct *tsk)
-{
-	atomic_dec(&tsk->signal->live);
-
-	write_lock_irq(&tasklist_lock);
-	__exit_signal(tsk);
-	write_unlock_irq(&tasklist_lock);
-}
-
-/*
  * Flush all handlers for a task.
  */
 
@@ -695,9 +591,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
 }
 
 /* forward decl */
-static void do_notify_parent_cldstop(struct task_struct *tsk,
-				     int to_self,
-				     int why);
+static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
 
 /*
  * Handle magic process-wide effects of stop/continue signals.
@@ -747,7 +641,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			p->signal->group_stop_count = 0;
 			p->signal->flags = SIGNAL_STOP_CONTINUED;
 			spin_unlock(&p->sighand->siglock);
-			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
+			do_notify_parent_cldstop(p, CLD_STOPPED);
 			spin_lock(&p->sighand->siglock);
 		}
 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
@@ -788,7 +682,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			p->signal->flags = SIGNAL_STOP_CONTINUED;
 			p->signal->group_exit_code = 0;
 			spin_unlock(&p->sighand->siglock);
-			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
+			do_notify_parent_cldstop(p, CLD_CONTINUED);
 			spin_lock(&p->sighand->siglock);
 		} else {
 			/*
@@ -1120,27 +1014,37 @@ void zap_other_threads(struct task_struct *p)
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
+struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
+{
+	struct sighand_struct *sighand;
+
+	for (;;) {
+		sighand = rcu_dereference(tsk->sighand);
+		if (unlikely(sighand == NULL))
+			break;
+
+		spin_lock_irqsave(&sighand->siglock, *flags);
+		if (likely(sighand == tsk->sighand))
+			break;
+		spin_unlock_irqrestore(&sighand->siglock, *flags);
+	}
+
+	return sighand;
+}
+
 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
 	unsigned long flags;
-	struct sighand_struct *sp;
 	int ret;
 
-retry:
 	ret = check_kill_permission(sig, info, p);
-	if (!ret && sig && (sp = rcu_dereference(p->sighand))) {
-		spin_lock_irqsave(&sp->siglock, flags);
-		if (p->sighand != sp) {
-			spin_unlock_irqrestore(&sp->siglock, flags);
-			goto retry;
-		}
-		if ((atomic_read(&sp->count) == 0) ||
-		    (atomic_read(&p->usage) == 0)) {
-			spin_unlock_irqrestore(&sp->siglock, flags);
-			return -ESRCH;
+
+	if (!ret && sig) {
+		ret = -ESRCH;
+		if (lock_task_sighand(p, &flags)) {
+			ret = __group_send_sig_info(sig, info, p);
+			unlock_task_sighand(p, &flags);
 		}
-		ret = __group_send_sig_info(sig, info, p);
-		spin_unlock_irqrestore(&sp->siglock, flags);
 	}
 
 	return ret;
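
Note: the new lock_task_sighand() pairs with unlock_task_sighand(), which does not
appear in this diff; the sketch below assumes it is the obvious wrapper that calls
spin_unlock_irqrestore(&tsk->sighand->siglock, *flags). A minimal caller, mirroring
the rewritten group_send_sig_info() above:

	unsigned long flags;
	int ret = -ESRCH;

	rcu_read_lock();
	if (lock_task_sighand(p, &flags)) {
		/* p->sighand is pinned and ->siglock is held here */
		ret = __group_send_sig_info(sig, info, p);
		unlock_task_sighand(p, &flags);	/* assumed wrapper, see above */
	}
	rcu_read_unlock();
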
@@ -1189,7 +1093,7 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 	struct task_struct *p;
 
 	rcu_read_lock();
-	if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) {
+	if (unlikely(sig_needs_tasklist(sig))) {
 		read_lock(&tasklist_lock);
 		acquired_tasklist_lock = 1;
 	}
@@ -1405,12 +1309,10 @@ void sigqueue_free(struct sigqueue *q)
 	__sigqueue_free(q);
 }
 
-int
-send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
+int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 {
 	unsigned long flags;
 	int ret = 0;
-	struct sighand_struct *sh;
 
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 
@@ -1424,48 +1326,17 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
  */
 	rcu_read_lock();
 
-	if (unlikely(p->flags & PF_EXITING)) {
+	if (!likely(lock_task_sighand(p, &flags))) {
 		ret = -1;
 		goto out_err;
 	}
 
-retry:
-	sh = rcu_dereference(p->sighand);
-
-	spin_lock_irqsave(&sh->siglock, flags);
-	if (p->sighand != sh) {
-		/* We raced with exec() in a multithreaded process... */
-		spin_unlock_irqrestore(&sh->siglock, flags);
-		goto retry;
-	}
-
-	/*
-	 * We do the check here again to handle the following scenario:
-	 *
-	 * CPU 0		CPU 1
-	 * send_sigqueue
-	 * check PF_EXITING
-	 * interrupt exit code running
-	 *			__exit_signal
-	 *			lock sighand->siglock
-	 *			unlock sighand->siglock
-	 * lock sh->siglock
-	 * add(tsk->pending)	flush_sigqueue(tsk->pending)
-	 *
-	 */
-
-	if (unlikely(p->flags & PF_EXITING)) {
-		ret = -1;
-		goto out;
-	}
-
 	if (unlikely(!list_empty(&q->list))) {
 		/*
 		 * If an SI_TIMER entry is already queue just increment
 		 * the overrun count.
 		 */
-		if (q->info.si_code != SI_TIMER)
-			BUG();
+		BUG_ON(q->info.si_code != SI_TIMER);
 		q->info.si_overrun++;
 		goto out;
 	}
@@ -1481,7 +1352,7 @@ retry:
 	signal_wake_up(p, sig == SIGKILL);
 
 out:
-	spin_unlock_irqrestore(&sh->siglock, flags);
+	unlock_task_sighand(p, &flags);
 out_err:
 	rcu_read_unlock();
 
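
Note: the retry loop and the second PF_EXITING test deleted above are subsumed by
lock_task_sighand(); assuming the exit path clears ->sighand while holding ->siglock
(as the removed __exit_signal() did), a concurrent sender either locks a still-valid
sighand or sees NULL and fails cleanly:

	/*
	 * exit path				send_sigqueue()
	 * ---------				---------------
	 * spin_lock(&sighand->siglock);
	 * tsk->sighand = NULL;
	 * spin_unlock(&sighand->siglock);
	 *					lock_task_sighand(p, &flags)
	 *					  rcu_dereference(p->sighand) == NULL
	 *					  -> returns NULL
	 *					ret = -1; goto out_err;
	 */
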
@@ -1613,14 +1484,14 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 	spin_unlock_irqrestore(&psig->siglock, flags);
 }
 
-static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
+static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 {
 	struct siginfo info;
 	unsigned long flags;
 	struct task_struct *parent;
 	struct sighand_struct *sighand;
 
-	if (to_self)
+	if (tsk->ptrace & PT_PTRACED)
 		parent = tsk->parent;
 	else {
 		tsk = tsk->group_leader;
@@ -1695,7 +1566,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 		   !(current->ptrace & PT_ATTACHED)) &&
 	    (likely(current->parent->signal != current->signal) ||
 	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
-		do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
+		do_notify_parent_cldstop(current, CLD_TRAPPED);
 		read_unlock(&tasklist_lock);
 		schedule();
 	} else {
@@ -1744,25 +1615,17 @@ void ptrace_notify(int exit_code)
 static void
 finish_stop(int stop_count)
 {
-	int to_self;
-
 	/*
 	 * If there are no other threads in the group, or if there is
 	 * a group stop in progress and we are the last to stop,
 	 * report to the parent.  When ptraced, every thread reports itself.
 	 */
-	if (stop_count < 0 || (current->ptrace & PT_PTRACED))
-		to_self = 1;
-	else if (stop_count == 0)
-		to_self = 0;
-	else
-		goto out;
-
-	read_lock(&tasklist_lock);
-	do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
-	read_unlock(&tasklist_lock);
+	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
+		read_lock(&tasklist_lock);
+		do_notify_parent_cldstop(current, CLD_STOPPED);
+		read_unlock(&tasklist_lock);
+	}
 
-out:
 	schedule();
 	/*
 	 * Now we don't run again until continued.
@@ -1776,12 +1639,10 @@ out:
  * Returns nonzero if we've actually stopped and released the siglock.
  * Returns zero if we didn't stop and still hold the siglock.
  */
-static int
-do_signal_stop(int signr)
+static int do_signal_stop(int signr)
 {
 	struct signal_struct *sig = current->signal;
-	struct sighand_struct *sighand = current->sighand;
-	int stop_count = -1;
+	int stop_count;
 
 	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
 		return 0;
@@ -1791,86 +1652,37 @@ do_signal_stop(int signr)
 		 * There is a group stop in progress.  We don't need to
 		 * start another one.
 		 */
-		signr = sig->group_exit_code;
 		stop_count = --sig->group_stop_count;
-		current->exit_code = signr;
-		set_current_state(TASK_STOPPED);
-		if (stop_count == 0)
-			sig->flags = SIGNAL_STOP_STOPPED;
-		spin_unlock_irq(&sighand->siglock);
-	}
-	else if (thread_group_empty(current)) {
-		/*
-		 * Lock must be held through transition to stopped state.
-		 */
-		current->exit_code = current->signal->group_exit_code = signr;
-		set_current_state(TASK_STOPPED);
-		sig->flags = SIGNAL_STOP_STOPPED;
-		spin_unlock_irq(&sighand->siglock);
-	}
-	else {
+	} else {
 		/*
 		 * There is no group stop already in progress.
-		 * We must initiate one now, but that requires
-		 * dropping siglock to get both the tasklist lock
-		 * and siglock again in the proper order.  Note that
-		 * this allows an intervening SIGCONT to be posted.
-		 * We need to check for that and bail out if necessary.
+		 * We must initiate one now.
 		 */
 		struct task_struct *t;
 
-		spin_unlock_irq(&sighand->siglock);
-
-		/* signals can be posted during this window */
+		sig->group_exit_code = signr;
 
-		read_lock(&tasklist_lock);
-		spin_lock_irq(&sighand->siglock);
-
-		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
+		stop_count = 0;
+		for (t = next_thread(current); t != current; t = next_thread(t))
 			/*
-			 * Another stop or continue happened while we
-			 * didn't have the lock.  We can just swallow this
-			 * signal now.  If we raced with a SIGCONT, that
-			 * should have just cleared it now.  If we raced
-			 * with another processor delivering a stop signal,
-			 * then the SIGCONT that wakes us up should clear it.
+			 * Setting state to TASK_STOPPED for a group
+			 * stop is always done with the siglock held,
+			 * so this check has no races.
 			 */
-			read_unlock(&tasklist_lock);
-			return 0;
-		}
-
-		if (sig->group_stop_count == 0) {
-			sig->group_exit_code = signr;
-			stop_count = 0;
-			for (t = next_thread(current); t != current;
-			     t = next_thread(t))
-				/*
-				 * Setting state to TASK_STOPPED for a group
-				 * stop is always done with the siglock held,
-				 * so this check has no races.
-				 */
-				if (!t->exit_state &&
-				    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
-					stop_count++;
-					signal_wake_up(t, 0);
-				}
-			sig->group_stop_count = stop_count;
-		}
-		else {
-			/* A race with another thread while unlocked. */
-			signr = sig->group_exit_code;
-			stop_count = --sig->group_stop_count;
-		}
-
-		current->exit_code = signr;
-		set_current_state(TASK_STOPPED);
-		if (stop_count == 0)
-			sig->flags = SIGNAL_STOP_STOPPED;
-
-		spin_unlock_irq(&sighand->siglock);
-		read_unlock(&tasklist_lock);
+			if (!t->exit_state &&
+			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+				stop_count++;
+				signal_wake_up(t, 0);
+			}
+		sig->group_stop_count = stop_count;
 	}
 
+	if (stop_count == 0)
+		sig->flags = SIGNAL_STOP_STOPPED;
+	current->exit_code = sig->group_exit_code;
+	__set_current_state(TASK_STOPPED);
+
+	spin_unlock_irq(&current->sighand->siglock);
 	finish_stop(stop_count);
 	return 1;
 }
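
Note: with the thread_group_empty() special case and the stop_count = -1 sentinel
gone, a lone thread simply takes the initiation path above. A worked trace for a
single-threaded task dequeuing SIGSTOP (assuming no group stop was in progress):

	/*
	 * do_signal_stop(SIGSTOP), ->siglock held on entry:
	 *   sig->group_exit_code = SIGSTOP;
	 *   stop_count = 0;			(no other threads to wake)
	 *   sig->group_stop_count = 0;
	 *   sig->flags = SIGNAL_STOP_STOPPED;	(stop_count == 0)
	 *   current->exit_code = SIGSTOP;
	 *   __set_current_state(TASK_STOPPED);
	 *   spin_unlock_irq(&current->sighand->siglock);
	 *   finish_stop(0);			reports CLD_STOPPED, then schedule()
	 */
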
@@ -1990,7 +1802,7 @@ relock:
 			continue;
 
 		/* Init gets no signals it doesn't want.  */
-		if (current->pid == 1)
+		if (current == child_reaper)
 			continue;
 
 		if (sig_kernel_stop(signr)) {
@@ -2430,8 +2242,7 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
 	return kill_proc_info(sig, &info, pid);
 }
 
-int
-do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
+int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 {
 	struct k_sigaction *k;
 	sigset_t mask;
@@ -2457,6 +2268,7 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 	if (act) {
 		sigdelsetmask(&act->sa.sa_mask,
 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
+		*k = *act;
 		/*
 		 * POSIX 3.3.1.3:
 		 *  "Setting a signal action to SIG_IGN for a signal that is
@@ -2469,19 +2281,8 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 		 *   be discarded, whether or not it is blocked"
 		 */
 		if (act->sa.sa_handler == SIG_IGN ||
-		    (act->sa.sa_handler == SIG_DFL &&
-		     sig_kernel_ignore(sig))) {
-			/*
-			 * This is a fairly rare case, so we only take the
-			 * tasklist_lock once we're sure we'll need it.
-			 * Now we must do this little unlock and relock
-			 * dance to maintain the lock hierarchy.
-			 */
+		    (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
 			struct task_struct *t = current;
-			spin_unlock_irq(&t->sighand->siglock);
-			read_lock(&tasklist_lock);
-			spin_lock_irq(&t->sighand->siglock);
-			*k = *act;
 			sigemptyset(&mask);
 			sigaddset(&mask, sig);
 			rm_from_queue_full(&mask, &t->signal->shared_pending);
@@ -2490,12 +2291,7 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 				recalc_sigpending_tsk(t);
 				t = next_thread(t);
 			} while (t != current);
-			spin_unlock_irq(&current->sighand->siglock);
-			read_unlock(&tasklist_lock);
-			return 0;
 		}
-
-		*k = *act;
 	}
 
 	spin_unlock_irq(&current->sighand->siglock);