Diffstat (limited to 'kernel')
-rw-r--r--  kernel/signal.c  86
1 file changed, 29 insertions, 57 deletions
diff --git a/kernel/signal.c b/kernel/signal.c
index 6610a95506b3..f9a52c721274 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1290,10 +1290,33 @@ void sigqueue_free(struct sigqueue *q)
 	__sigqueue_free(q);
 }
 
+static int do_send_sigqueue(int sig, struct sigqueue *q, struct task_struct *t,
+				struct sigpending *pending)
+{
+	if (unlikely(!list_empty(&q->list))) {
+		/*
+		 * If an SI_TIMER entry is already queue just increment
+		 * the overrun count.
+		 */
+
+		BUG_ON(q->info.si_code != SI_TIMER);
+		q->info.si_overrun++;
+		return 0;
+	}
+
+	if (sig_ignored(t, sig))
+		return 1;
+
+	signalfd_notify(t, sig);
+	list_add_tail(&q->list, &pending->list);
+	sigaddset(&pending->signal, sig);
+	return 0;
+}
+
 int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret = -1;
 
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 
@@ -1307,37 +1330,14 @@ int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 	 */
 	rcu_read_lock();
 
-	if (!likely(lock_task_sighand(p, &flags))) {
-		ret = -1;
+	if (!likely(lock_task_sighand(p, &flags)))
 		goto out_err;
-	}
 
-	if (unlikely(!list_empty(&q->list))) {
-		/*
-		 * If an SI_TIMER entry is already queue just increment
-		 * the overrun count.
-		 */
-		BUG_ON(q->info.si_code != SI_TIMER);
-		q->info.si_overrun++;
-		goto out;
-	}
-	/* Short-circuit ignored signals. */
-	if (sig_ignored(p, sig)) {
-		ret = 1;
-		goto out;
-	}
-	/*
-	 * Deliver the signal to listening signalfds. This must be called
-	 * with the sighand lock held.
-	 */
-	signalfd_notify(p, sig);
+	ret = do_send_sigqueue(sig, q, p, &p->pending);
 
-	list_add_tail(&q->list, &p->pending.list);
-	sigaddset(&p->pending.signal, sig);
 	if (!sigismember(&p->blocked, sig))
 		signal_wake_up(p, sig == SIGKILL);
 
-out:
 	unlock_task_sighand(p, &flags);
 out_err:
 	rcu_read_unlock();
@@ -1349,7 +1349,7 @@ int
 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 
@@ -1358,38 +1358,10 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 	spin_lock_irqsave(&p->sighand->siglock, flags);
 	handle_stop_signal(sig, p);
 
-	/* Short-circuit ignored signals. */
-	if (sig_ignored(p, sig)) {
-		ret = 1;
-		goto out;
-	}
-
-	if (unlikely(!list_empty(&q->list))) {
-		/*
-		 * If an SI_TIMER entry is already queue just increment
-		 * the overrun count.  Other uses should not try to
-		 * send the signal multiple times.
-		 */
-		BUG_ON(q->info.si_code != SI_TIMER);
-		q->info.si_overrun++;
-		goto out;
-	}
-	/*
-	 * Deliver the signal to listening signalfds. This must be called
-	 * with the sighand lock held.
-	 */
-	signalfd_notify(p, sig);
-
-	/*
-	 * Put this signal on the shared-pending queue.
-	 * We always use the shared queue for process-wide signals,
-	 * to avoid several races.
-	 */
-	list_add_tail(&q->list, &p->signal->shared_pending.list);
-	sigaddset(&p->signal->shared_pending.signal, sig);
+	ret = do_send_sigqueue(sig, q, p, &p->signal->shared_pending);
 
 	__group_complete_signal(sig, p);
-out:
+
 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
 	read_unlock(&tasklist_lock);
 	return ret;