Diffstat (limited to 'kernel/signal.c')
 -rw-r--r--   kernel/signal.c   150
 1 file changed, 53 insertions, 97 deletions
diff --git a/kernel/signal.c b/kernel/signal.c
index f2b96b08fb44..1bf3c39d6109 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -277,7 +277,6 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
         } else {
                 INIT_LIST_HEAD(&q->list);
                 q->flags = 0;
-                q->lock = NULL;
                 q->user = get_uid(t->user);
         }
         return(q);
@@ -406,6 +405,8 @@ void __exit_signal(struct task_struct *tsk)
 
 void exit_signal(struct task_struct *tsk)
 {
+        atomic_dec(&tsk->signal->live);
+
         write_lock_irq(&tasklist_lock);
         __exit_signal(tsk);
         write_unlock_irq(&tasklist_lock);
@@ -650,8 +651,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
         if (!valid_signal(sig))
                 return error;
         error = -EPERM;
-        if ((!info || ((unsigned long)info != 1 &&
-                        (unsigned long)info != 2 && SI_FROMUSER(info)))
+        if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
             && ((sig != SIGCONT) ||
                 (current->signal->session != t->signal->session))
             && (current->euid ^ t->suid) && (current->euid ^ t->uid)
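Note: SEND_SIG_NOINFO, SEND_SIG_PRIV, SEND_SIG_FORCED and is_si_special() are not defined in kernel/signal.c; they come from a companion change to include/linux/sched.h that is outside this diff. A minimal sketch of what the new check assumes, for readers following only this file (illustrative, not the authoritative header change):

/* Assumed companion definitions (include/linux/sched.h), replacing the old
 * (unsigned long)info == 0/1/2 magic casts used by kernel-internal senders. */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)  /* plain signal, as if from user space */
#define SEND_SIG_PRIV   ((struct siginfo *) 1)  /* sent by the kernel itself */
#define SEND_SIG_FORCED ((struct siginfo *) 2)  /* forced: skips queue allocation */

/* True for any of the three special values above, i.e. whenever the info
 * pointer must not be dereferenced. */
#define is_si_special(info) \
        ((info) <= SEND_SIG_FORCED)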
@@ -788,7 +788,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
          * fast-pathed signals for kernel-internal things like SIGSTOP
          * or SIGKILL.
          */
-        if ((unsigned long)info == 2)
+        if (info == SEND_SIG_FORCED)
                 goto out_set;
 
         /* Real-time signals must be queued if sent by sigqueue, or
@@ -800,19 +800,19 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
            pass on the info struct. */
 
         q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
-                                             ((unsigned long) info < 2 ||
+                                             (is_si_special(info) ||
                                               info->si_code >= 0)));
         if (q) {
                 list_add_tail(&q->list, &signals->list);
                 switch ((unsigned long) info) {
-                case 0:
+                case (unsigned long) SEND_SIG_NOINFO:
                         q->info.si_signo = sig;
                         q->info.si_errno = 0;
                         q->info.si_code = SI_USER;
                         q->info.si_pid = current->pid;
                         q->info.si_uid = current->uid;
                         break;
-                case 1:
+                case (unsigned long) SEND_SIG_PRIV:
                         q->info.si_signo = sig;
                         q->info.si_errno = 0;
                         q->info.si_code = SI_KERNEL;
@@ -823,20 +823,13 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                         copy_siginfo(&q->info, info);
                         break;
                 }
-        } else {
-                if (sig >= SIGRTMIN && info && (unsigned long)info != 1
-                   && info->si_code != SI_USER)
+        } else if (!is_si_special(info)) {
+                if (sig >= SIGRTMIN && info->si_code != SI_USER)
                 /*
                  * Queue overflow, abort. We may abort if the signal was rt
                  * and sent by user using something other than kill().
                  */
                         return -EAGAIN;
-                if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
-                        /*
-                         * Set up a return to indicate that we dropped
-                         * the signal.
-                         */
-                        ret = info->si_sys_private;
         }
 
 out_set:
@@ -857,12 +850,6 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
                 BUG();
         assert_spin_locked(&t->sighand->siglock);
 
-        if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
-                /*
-                 * Set up a return to indicate that we dropped the signal.
-                 */
-                ret = info->si_sys_private;
-
         /* Short-circuit ignored signals. */
         if (sig_ignored(t, sig))
                 goto out;
@@ -892,11 +879,13 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
         int ret;
 
         spin_lock_irqsave(&t->sighand->siglock, flags);
-        if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
+        if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
                 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
+        }
+        if (sigismember(&t->blocked, sig)) {
                 sigdelset(&t->blocked, sig);
-                recalc_sigpending_tsk(t);
         }
+        recalc_sigpending_tsk(t);
         ret = specific_send_sig_info(sig, info, t);
         spin_unlock_irqrestore(&t->sighand->siglock, flags);
 
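For readability, this is how the blocking/ignore handling in force_sig_info() reads once the hunk above is applied (reassembled from the diff; the surrounding declarations are unchanged). The behavioural points: a SIG_IGN handler is reset to SIG_DFL whether or not the signal is also blocked, the signal is unblocked if needed, and recalc_sigpending_tsk() now runs unconditionally rather than only on the unblock path.

        spin_lock_irqsave(&t->sighand->siglock, flags);
        if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
                /* a forced signal must not stay ignored */
                t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
        }
        if (sigismember(&t->blocked, sig)) {
                /* unblock it so it can actually be delivered */
                sigdelset(&t->blocked, sig);
        }
        recalc_sigpending_tsk(t);       /* always recompute the pending state */
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);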
@@ -906,15 +895,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 void
 force_sig_specific(int sig, struct task_struct *t)
 {
-        unsigned long int flags;
-
-        spin_lock_irqsave(&t->sighand->siglock, flags);
-        if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
-                t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
-        sigdelset(&t->blocked, sig);
-        recalc_sigpending_tsk(t);
-        specific_send_sig_info(sig, (void *)2, t);
-        spin_unlock_irqrestore(&t->sighand->siglock, flags);
+        force_sig_info(sig, SEND_SIG_FORCED, t);
 }
 
 /*
@@ -1049,12 +1030,6 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
         assert_spin_locked(&p->sighand->siglock);
         handle_stop_signal(sig, p);
 
-        if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
-                /*
-                 * Set up a return to indicate that we dropped the signal.
-                 */
-                ret = info->si_sys_private;
-
         /* Short-circuit ignored signals. */
         if (sig_ignored(p, sig))
                 return ret;
@@ -1107,8 +1082,8 @@ void zap_other_threads(struct task_struct *p)
                 if (t != p->group_leader)
                         t->exit_signal = -1;
 
+                /* SIGKILL will be handled before any pending SIGSTOP */
                 sigaddset(&t->pending.signal, SIGKILL);
-                rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
                 signal_wake_up(t, 1);
         }
 }
@@ -1284,10 +1259,13 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
         return ret;
 }
 
+#define __si_special(priv) \
+        ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
+
 int
 send_sig(int sig, struct task_struct *p, int priv)
 {
-        return send_sig_info(sig, (void*)(long)(priv != 0), p);
+        return send_sig_info(sig, __si_special(priv), p);
 }
 
 /*
@@ -1307,7 +1285,7 @@ send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 void
 force_sig(int sig, struct task_struct *p)
 {
-        force_sig_info(sig, (void*)1L, p);
+        force_sig_info(sig, SEND_SIG_PRIV, p);
 }
 
 /*
@@ -1332,13 +1310,13 @@ force_sigsegv(int sig, struct task_struct *p)
 int
 kill_pg(pid_t pgrp, int sig, int priv)
 {
-        return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
+        return kill_pg_info(sig, __si_special(priv), pgrp);
 }
 
 int
 kill_proc(pid_t pid, int sig, int priv)
 {
-        return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
+        return kill_proc_info(sig, __si_special(priv), pid);
 }
 
 /*
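Together with the __si_special() helper added earlier in this diff, the wrapper conversions above amount to a fixed mapping from the old pointer-cast magic values to the new tokens. Illustrative call sites (the calls themselves are examples, not taken from this patch):

        send_sig(SIGHUP, p, 0);         /* was (void *)0  -> send_sig_info(SIGHUP, SEND_SIG_NOINFO, p) */
        send_sig(SIGKILL, p, 1);        /* was (void *)1  -> send_sig_info(SIGKILL, SEND_SIG_PRIV, p) */
        force_sig(SIGSEGV, p);          /* was (void *)1L -> force_sig_info(SIGSEGV, SEND_SIG_PRIV, p) */
        force_sig_specific(SIGSTOP, t); /* was (void *)2  -> force_sig_info(SIGSTOP, SEND_SIG_FORCED, t) */
        kill_proc(pid, SIGTERM, 1);     /*                -> kill_proc_info(SIGTERM, SEND_SIG_PRIV, pid) */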
@@ -1369,11 +1347,12 @@ void sigqueue_free(struct sigqueue *q)
          * pending queue.
          */
         if (unlikely(!list_empty(&q->list))) {
-                read_lock(&tasklist_lock);
-                spin_lock_irqsave(q->lock, flags);
+                spinlock_t *lock = &current->sighand->siglock;
+                read_lock(&tasklist_lock);
+                spin_lock_irqsave(lock, flags);
                 if (!list_empty(&q->list))
                         list_del_init(&q->list);
-                spin_unlock_irqrestore(q->lock, flags);
+                spin_unlock_irqrestore(lock, flags);
                 read_unlock(&tasklist_lock);
         }
         q->flags &= ~SIGQUEUE_PREALLOC;
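The q->lock removals in this hunk and in the two send_sigqueue()/send_group_sigqueue() hunks below rely on a companion change to struct sigqueue in include/linux/signal.h that is not shown in this diff. A sketch of the assumed shape of that change (field list recalled from the pre-patch kernel, so treat it as illustrative):

struct sigqueue {
        struct list_head list;
        /* spinlock_t *lock;  -- dropped: the queue no longer caches a pointer to
         * its owner's sighand->siglock; sigqueue_free() now takes
         * current->sighand->siglock directly, as the hunk above shows. */
        int flags;
        siginfo_t info;
        struct user_struct *user;
};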
@@ -1412,7 +1391,6 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
                 goto out;
         }
 
-        q->lock = &p->sighand->siglock;
         list_add_tail(&q->list, &p->pending.list);
         sigaddset(&p->pending.signal, sig);
         if (!sigismember(&p->blocked, sig))
@@ -1460,7 +1438,6 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
          * We always use the shared queue for process-wide signals,
          * to avoid several races.
          */
-        q->lock = &p->sighand->siglock;
         list_add_tail(&q->list, &p->signal->shared_pending.list);
         sigaddset(&p->signal->shared_pending.signal, sig);
 
@@ -1879,9 +1856,9 @@ relock:
                 /* Let the debugger run. */
                 ptrace_stop(signr, signr, info);
 
-                /* We're back. Did the debugger cancel the sig? */
+                /* We're back. Did the debugger cancel the sig or group_exit? */
                 signr = current->exit_code;
-                if (signr == 0)
+                if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
                         continue;
 
                 current->exit_code = 0;
@@ -2283,26 +2260,13 @@ sys_kill(int pid, int sig)
         return kill_something_info(sig, &info, pid);
 }
 
-/**
- * sys_tgkill - send signal to one specific thread
- * @tgid: the thread group ID of the thread
- * @pid: the PID of the thread
- * @sig: signal to be sent
- *
- * This syscall also checks the tgid and returns -ESRCH even if the PID
- * exists but it's not belonging to the target process anymore. This
- * method solves the problem of threads exiting and PIDs getting reused.
- */
-asmlinkage long sys_tgkill(int tgid, int pid, int sig)
+static int do_tkill(int tgid, int pid, int sig)
 {
-        struct siginfo info;
         int error;
+        struct siginfo info;
         struct task_struct *p;
 
-        /* This is only valid for single tasks */
-        if (pid <= 0 || tgid <= 0)
-                return -EINVAL;
-
+        error = -ESRCH;
         info.si_signo = sig;
         info.si_errno = 0;
         info.si_code = SI_TKILL;
@@ -2311,8 +2275,7 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
 
         read_lock(&tasklist_lock);
         p = find_task_by_pid(pid);
-        error = -ESRCH;
-        if (p && (p->tgid == tgid)) {
+        if (p && (tgid <= 0 || p->tgid == tgid)) {
                 error = check_kill_permission(sig, &info, p);
                 /*
                  * The null signal is a permissions and process existence
@@ -2326,47 +2289,40 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
                 }
         }
         read_unlock(&tasklist_lock);
+
         return error;
 }
 
+/**
+ * sys_tgkill - send signal to one specific thread
+ * @tgid: the thread group ID of the thread
+ * @pid: the PID of the thread
+ * @sig: signal to be sent
+ *
+ * This syscall also checks the tgid and returns -ESRCH even if the PID
+ * exists but it's not belonging to the target process anymore. This
+ * method solves the problem of threads exiting and PIDs getting reused.
+ */
+asmlinkage long sys_tgkill(int tgid, int pid, int sig)
+{
+        /* This is only valid for single tasks */
+        if (pid <= 0 || tgid <= 0)
+                return -EINVAL;
+
+        return do_tkill(tgid, pid, sig);
+}
+
 /*
  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
  */
 asmlinkage long
 sys_tkill(int pid, int sig)
 {
-        struct siginfo info;
-        int error;
-        struct task_struct *p;
-
         /* This is only valid for single tasks */
         if (pid <= 0)
                 return -EINVAL;
 
-        info.si_signo = sig;
-        info.si_errno = 0;
-        info.si_code = SI_TKILL;
-        info.si_pid = current->tgid;
-        info.si_uid = current->uid;
-
-        read_lock(&tasklist_lock);
-        p = find_task_by_pid(pid);
-        error = -ESRCH;
-        if (p) {
-                error = check_kill_permission(sig, &info, p);
-                /*
-                 * The null signal is a permissions and process existence
-                 * probe. No signal is actually delivered.
-                 */
-                if (!error && sig && p->sighand) {
-                        spin_lock_irq(&p->sighand->siglock);
-                        handle_stop_signal(sig, p);
-                        error = specific_send_sig_info(sig, &info, p);
-                        spin_unlock_irq(&p->sighand->siglock);
-                }
-        }
-        read_unlock(&tasklist_lock);
-        return error;
+        return do_tkill(0, pid, sig);
 }
 
 asmlinkage long
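From user space the consolidation above is invisible: tgkill() keeps its documented -ESRCH behaviour for recycled thread ids, while tkill() maps to do_tkill(0, pid, sig), so the thread-group check is skipped. A hypothetical user-space illustration (not part of the patch):

#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Prefer tgkill(): do_tkill() rejects a tid that no longer belongs to the
 * expected thread group, guarding against id reuse. */
static int signal_thread(pid_t tgid, pid_t tid)
{
        if (syscall(SYS_tgkill, tgid, tid, SIGUSR1) == 0)
                return 0;
        /* tkill() matches any task with that id; no thread-group check. */
        return syscall(SYS_tkill, tid, SIGUSR1);
}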