diff options
author | Pavel Emelyanov <xemul@openvz.org> | 2008-04-30 03:52:41 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-30 11:29:34 -0400 |
commit | 9e3bd6c3fb2334be171e69b432039cd18bce4458 (patch) | |
tree | 199963534d6379457d84e6f2ead2b104088182dd /kernel | |
parent | c5363d03637885310f1101b95cbbd26d067b4c8d (diff) |
signals: consolidate send_sigqueue and send_group_sigqueue
Both functions do the same thing after proper locking, but with
different sigpending structs, so move the common code into a helper.
After this we have 4 places that look very similar:
- send_sigqueue: calls do_send_sigqueue and signal_wakeup
- send_group_sigqueue: calls do_send_sigqueue and __group_complete_signal
- __group_send_sig_info: calls send_signal and __group_complete_signal
- specific_send_sig_info: calls send_signal and signal_wakeup
Besides, send_signal performs actions similar to do_send_sigqueue's
and __group_complete_signal - to signal_wakeup.
It looks like they can be consolidated gracefully.
Oleg said:
Personally, I think this change is very good. But send_sigqueue() and
send_group_sigqueue() have a very subtle difference which I was never able
to understand.
Let's suppose that sigqueue is already queued, and the signal is ignored
(the latter means we should re-schedule cpu timer or handle overruns). In
that case send_sigqueue() returns 0, but send_group_sigqueue() returns 1.
I think this is not the problem (in fact, I think this patch makes the
behaviour more correct), but I hope Thomas can take a look and confirm.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/signal.c | 86 |
1 files changed, 29 insertions, 57 deletions
diff --git a/kernel/signal.c b/kernel/signal.c index 6610a95506b3..f9a52c721274 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -1290,10 +1290,33 @@ void sigqueue_free(struct sigqueue *q) | |||
1290 | __sigqueue_free(q); | 1290 | __sigqueue_free(q); |
1291 | } | 1291 | } |
1292 | 1292 | ||
1293 | static int do_send_sigqueue(int sig, struct sigqueue *q, struct task_struct *t, | ||
1294 | struct sigpending *pending) | ||
1295 | { | ||
1296 | if (unlikely(!list_empty(&q->list))) { | ||
1297 | /* | ||
1298 | * If an SI_TIMER entry is already queue just increment | ||
1299 | * the overrun count. | ||
1300 | */ | ||
1301 | |||
1302 | BUG_ON(q->info.si_code != SI_TIMER); | ||
1303 | q->info.si_overrun++; | ||
1304 | return 0; | ||
1305 | } | ||
1306 | |||
1307 | if (sig_ignored(t, sig)) | ||
1308 | return 1; | ||
1309 | |||
1310 | signalfd_notify(t, sig); | ||
1311 | list_add_tail(&q->list, &pending->list); | ||
1312 | sigaddset(&pending->signal, sig); | ||
1313 | return 0; | ||
1314 | } | ||
1315 | |||
1293 | int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) | 1316 | int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) |
1294 | { | 1317 | { |
1295 | unsigned long flags; | 1318 | unsigned long flags; |
1296 | int ret = 0; | 1319 | int ret = -1; |
1297 | 1320 | ||
1298 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); | 1321 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
1299 | 1322 | ||
@@ -1307,37 +1330,14 @@ int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) | |||
1307 | */ | 1330 | */ |
1308 | rcu_read_lock(); | 1331 | rcu_read_lock(); |
1309 | 1332 | ||
1310 | if (!likely(lock_task_sighand(p, &flags))) { | 1333 | if (!likely(lock_task_sighand(p, &flags))) |
1311 | ret = -1; | ||
1312 | goto out_err; | 1334 | goto out_err; |
1313 | } | ||
1314 | 1335 | ||
1315 | if (unlikely(!list_empty(&q->list))) { | 1336 | ret = do_send_sigqueue(sig, q, p, &p->pending); |
1316 | /* | ||
1317 | * If an SI_TIMER entry is already queue just increment | ||
1318 | * the overrun count. | ||
1319 | */ | ||
1320 | BUG_ON(q->info.si_code != SI_TIMER); | ||
1321 | q->info.si_overrun++; | ||
1322 | goto out; | ||
1323 | } | ||
1324 | /* Short-circuit ignored signals. */ | ||
1325 | if (sig_ignored(p, sig)) { | ||
1326 | ret = 1; | ||
1327 | goto out; | ||
1328 | } | ||
1329 | /* | ||
1330 | * Deliver the signal to listening signalfds. This must be called | ||
1331 | * with the sighand lock held. | ||
1332 | */ | ||
1333 | signalfd_notify(p, sig); | ||
1334 | 1337 | ||
1335 | list_add_tail(&q->list, &p->pending.list); | ||
1336 | sigaddset(&p->pending.signal, sig); | ||
1337 | if (!sigismember(&p->blocked, sig)) | 1338 | if (!sigismember(&p->blocked, sig)) |
1338 | signal_wake_up(p, sig == SIGKILL); | 1339 | signal_wake_up(p, sig == SIGKILL); |
1339 | 1340 | ||
1340 | out: | ||
1341 | unlock_task_sighand(p, &flags); | 1341 | unlock_task_sighand(p, &flags); |
1342 | out_err: | 1342 | out_err: |
1343 | rcu_read_unlock(); | 1343 | rcu_read_unlock(); |
@@ -1349,7 +1349,7 @@ int | |||
1349 | send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) | 1349 | send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) |
1350 | { | 1350 | { |
1351 | unsigned long flags; | 1351 | unsigned long flags; |
1352 | int ret = 0; | 1352 | int ret; |
1353 | 1353 | ||
1354 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); | 1354 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
1355 | 1355 | ||
@@ -1358,38 +1358,10 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) | |||
1358 | spin_lock_irqsave(&p->sighand->siglock, flags); | 1358 | spin_lock_irqsave(&p->sighand->siglock, flags); |
1359 | handle_stop_signal(sig, p); | 1359 | handle_stop_signal(sig, p); |
1360 | 1360 | ||
1361 | /* Short-circuit ignored signals. */ | 1361 | ret = do_send_sigqueue(sig, q, p, &p->signal->shared_pending); |
1362 | if (sig_ignored(p, sig)) { | ||
1363 | ret = 1; | ||
1364 | goto out; | ||
1365 | } | ||
1366 | |||
1367 | if (unlikely(!list_empty(&q->list))) { | ||
1368 | /* | ||
1369 | * If an SI_TIMER entry is already queue just increment | ||
1370 | * the overrun count. Other uses should not try to | ||
1371 | * send the signal multiple times. | ||
1372 | */ | ||
1373 | BUG_ON(q->info.si_code != SI_TIMER); | ||
1374 | q->info.si_overrun++; | ||
1375 | goto out; | ||
1376 | } | ||
1377 | /* | ||
1378 | * Deliver the signal to listening signalfds. This must be called | ||
1379 | * with the sighand lock held. | ||
1380 | */ | ||
1381 | signalfd_notify(p, sig); | ||
1382 | |||
1383 | /* | ||
1384 | * Put this signal on the shared-pending queue. | ||
1385 | * We always use the shared queue for process-wide signals, | ||
1386 | * to avoid several races. | ||
1387 | */ | ||
1388 | list_add_tail(&q->list, &p->signal->shared_pending.list); | ||
1389 | sigaddset(&p->signal->shared_pending.signal, sig); | ||
1390 | 1362 | ||
1391 | __group_complete_signal(sig, p); | 1363 | __group_complete_signal(sig, p); |
1392 | out: | 1364 | |
1393 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | 1365 | spin_unlock_irqrestore(&p->sighand->siglock, flags); |
1394 | read_unlock(&tasklist_lock); | 1366 | read_unlock(&tasklist_lock); |
1395 | return ret; | 1367 | return ret; |