author	Oleg Nesterov <oleg@tv-sign.ru>	2006-03-28 19:11:30 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-28 21:36:44 -0500
commit	547679087bc60277d11b11631d826895762c4505 (patch)
tree	91759a5584b9d42302d4b54ecdde644bc47e781d	/kernel/signal.c
parent	a1d5e21e3e388fb2c16463d007e788b1e41b74f1 (diff)
[PATCH] send_sigqueue: simplify and fix the race
send_sigqueue() checks PF_EXITING, then locks p->sighand->siglock. This is unsafe: 'p' can exit in between and set ->sighand = NULL. The race is theoretical, the window is tiny and irqs are disabled by the caller, so I don't think we need the fix for -stable tree.

Convert send_sigqueue() to use the lock_task_sighand() helper.

Also, delete the 'p->flags & PF_EXITING' re-check, it is unneeded and the comment is wrong.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
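For context, lock_task_sighand()/unlock_task_sighand() wrap the RCU-based "dereference ->sighand, take the lock, recheck" loop that the old send_sigqueue() open-coded, and they return NULL once the task has cleared ->sighand in __exit_signal(). The following is a rough sketch of that pattern under those assumptions, for illustration only, not the verbatim kernel source:

/*
 * Sketch of the lock_task_sighand() pattern this patch relies on.
 * Illustrative only; the real helpers live in the signal code.
 */
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
					 unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/* ->sighand is freed via RCU, so dereferencing it under
		 * rcu_read_lock() is safe even if tsk is exiting. */
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;	/* task already passed __exit_signal() */

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;	/* still this task's sighand: locked */
		/* raced with exec() in a multithreaded process, retry */
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
				       unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}

With that contract, a NULL return covers both the exec() retry and the exit race, so send_sigqueue() below can map a failed lock straight onto the old ret = -1 "task is exiting" path and drop the duplicated PF_EXITING check.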
Diffstat (limited to 'kernel/signal.c')
-rw-r--r--	kernel/signal.c	41
1 file changed, 4 insertions(+), 37 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 0528a959daa9..4922928d91f6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1309,12 +1309,10 @@ void sigqueue_free(struct sigqueue *q)
 	__sigqueue_free(q);
 }
 
-int
-send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
+int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 {
 	unsigned long flags;
 	int ret = 0;
-	struct sighand_struct *sh;
 
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 
@@ -1328,48 +1326,17 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 	 */
 	rcu_read_lock();
 
-	if (unlikely(p->flags & PF_EXITING)) {
+	if (!likely(lock_task_sighand(p, &flags))) {
 		ret = -1;
 		goto out_err;
 	}
 
-retry:
-	sh = rcu_dereference(p->sighand);
-
-	spin_lock_irqsave(&sh->siglock, flags);
-	if (p->sighand != sh) {
-		/* We raced with exec() in a multithreaded process... */
-		spin_unlock_irqrestore(&sh->siglock, flags);
-		goto retry;
-	}
-
-	/*
-	 * We do the check here again to handle the following scenario:
-	 *
-	 * CPU 0		CPU 1
-	 * send_sigqueue
-	 * check PF_EXITING
-	 *			interrupt exit code running
-	 *			__exit_signal
-	 *			lock sighand->siglock
-	 *			unlock sighand->siglock
-	 * lock sh->siglock
-	 * add(tsk->pending)	flush_sigqueue(tsk->pending)
-	 *
-	 */
-
-	if (unlikely(p->flags & PF_EXITING)) {
-		ret = -1;
-		goto out;
-	}
-
 	if (unlikely(!list_empty(&q->list))) {
 		/*
 		 * If an SI_TIMER entry is already queue just increment
 		 * the overrun count.
 		 */
-		if (q->info.si_code != SI_TIMER)
-			BUG();
+		BUG_ON(q->info.si_code != SI_TIMER);
 		q->info.si_overrun++;
 		goto out;
 	}
@@ -1385,7 +1352,7 @@ retry:
 		signal_wake_up(p, sig == SIGKILL);
 
 out:
-	spin_unlock_irqrestore(&sh->siglock, flags);
+	unlock_task_sighand(p, &flags);
 out_err:
 	rcu_read_unlock();
 