author		Pavel Emelyanov <xemul@openvz.org>		2008-04-30 03:52:55 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-30 11:29:36 -0400
commit		4cd4b6d4e0372075f846feb85aea016cbdbfec4c (patch)
tree		219dd1d4de6313bcc9dd232fe3186423875bea5e /kernel
parent		5fcd835bf8c2cde06404559b1904e2f1dfcb4567 (diff)
signals: fold complete_signal() into send_signal/do_send_sigqueue
Factor out complete_signal() callsites. This change completely unifies the
helpers sending the specific/group signals.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
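
For orientation, here is a minimal sketch of the call structure the patch leaves behind, reconstructed from the hunks below; the body of send_signal() is elided, so this is not a verbatim copy of kernel/signal.c.

/*
 * Sketch: send_signal() now completes delivery itself, so both
 * wrappers reduce to a single call (see the diff below).
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	struct sigpending *pending;

	/* ... signal queued on the chosen pending set ... */
out_set:
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);	/* folded in by this patch */
	return 0;
}

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);	/* shared (group) queue */
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);	/* per-thread queue */
}

The same pattern applies to do_send_sigqueue(): it now calls complete_signal() right after queueing, so send_sigqueue() and send_group_sigqueue() no longer need to.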
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/signal.c	46
1 file changed, 11 insertions, 35 deletions
diff --git a/kernel/signal.c b/kernel/signal.c
index fc1cb03c241c..87424f7a4f3d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -826,7 +826,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 
 out_set:
 	sigaddset(&pending->signal, sig);
-	return 1;
+	complete_signal(sig, t, group);
+	return 0;
 }
 
 int print_fatal_signals;
@@ -861,17 +862,16 @@ static int __init setup_print_fatal_signals(char *str)
 
 __setup("print-fatal-signals=", setup_print_fatal_signals);
 
+int
+__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+{
+	return send_signal(sig, info, p, 1);
+}
+
 static int
 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
-	int ret;
-
-	ret = send_signal(sig, info, t, 0);
-	if (ret <= 0)
-		return ret;
-
-	complete_signal(sig, t, 0);
-	return 0;
+	return send_signal(sig, info, t, 0);
 }
 
 /*
@@ -914,24 +914,6 @@ force_sig_specific(int sig, struct task_struct *t)
 	force_sig_info(sig, SEND_SIG_FORCED, t);
 }
 
-int
-__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
-{
-	int ret;
-
-	/*
-	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
-	 * We always use the shared queue for process-wide signals,
-	 * to avoid several races.
-	 */
-	ret = send_signal(sig, info, p, 1);
-	if (ret <= 0)
-		return ret;
-
-	complete_signal(sig, p, 1);
-	return 0;
-}
-
 /*
  * Nuke all other threads in the group.
  */
@@ -1263,6 +1245,7 @@ static int do_send_sigqueue(int sig, struct sigqueue *q, struct task_struct *t,
 {
 	struct sigpending *pending;
 
+	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 	handle_stop_signal(sig, t);
 
 	if (unlikely(!list_empty(&q->list))) {
@@ -1283,6 +1266,7 @@ static int do_send_sigqueue(int sig, struct sigqueue *q, struct task_struct *t,
 	pending = group ? &t->signal->shared_pending : &t->pending;
 	list_add_tail(&q->list, &pending->list);
 	sigaddset(&pending->signal, sig);
+	complete_signal(sig, t, group);
 
 	return 0;
 }
@@ -1292,8 +1276,6 @@ int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 	unsigned long flags;
 	int ret = -1;
 
-	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-
 	/*
 	 * The rcu based delayed sighand destroy makes it possible to
 	 * run this without tasklist lock held. The task struct itself
@@ -1307,8 +1289,6 @@ int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 
 	ret = do_send_sigqueue(sig, q, p, 0);
 
-	complete_signal(sig, p, 0);
-
 	unlock_task_sighand(p, &flags);
 out_err:
 	return ret;
@@ -1320,15 +1300,11 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 	unsigned long flags;
 	int ret;
 
-	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-
 	/* Since it_lock is held, p->sighand cannot be NULL. */
 	spin_lock_irqsave(&p->sighand->siglock, flags);
 
 	ret = do_send_sigqueue(sig, q, p, 1);
 
-	complete_signal(sig, p, 1);
-
 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
 
 	return ret;