| author | Oleg Nesterov <oleg@tv-sign.ru> | 2008-04-30 03:52:56 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-30 11:29:36 -0400 |
| commit | e62e6650e99a3dffcd0bf0d063cd818fbc13fa95 | |
| tree | 75f79d45eb89fb75014117b6fdc67bae1608f826 | |
| parent | 4cd4b6d4e0372075f846feb85aea016cbdbfec4c | |
signals: unify send_sigqueue/send_group_sigqueue completely
Suggested by Pavel Emelyanov.
send_sigqueue() and send_group_sigqueue() differ only in how they lock ->siglock.
Unify them. send_group_sigqueue() uses spin_lock() because it knows the task
can't exit, but in that case lock_task_sighand() can't fail either and doesn't
hurt (see the sketch after the sign-off tags below).
Note that the "sig" argument is ignored; it is always equal to ->si_signo.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
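For readers unfamiliar with the helper this change leans on: lock_task_sighand() takes the target's ->siglock and fails only when the task's signal handling is already being torn down, i.e. the task is exiting. A simplified sketch of that behavior follows; it is not the kernel's exact implementation, just the shape of the RCU-protected retry loop:

```c
/*
 * Simplified sketch of lock_task_sighand()'s behavior -- not the
 * kernel's exact implementation.  ->sighand is freed via RCU, so it
 * may be dereferenced under rcu_read_lock() and must be re-checked
 * after taking the lock.  A NULL return means the task is already
 * exiting; if the caller knows the task cannot exit (as in
 * send_group_sigqueue()'s case), this path simply never fails.
 */
static struct sighand_struct *lock_task_sighand_sketch(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;		/* task is exiting: report failure */

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;		/* ->sighand is stable while the lock is held */
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
```

This is why the unification is safe: in the "task can't exit" case the NULL branch is unreachable, so lock_task_sighand() degenerates to the spin_lock_irqsave() that send_group_sigqueue() used to take by hand.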
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/signal.c | 58 |
1 file changed, 21 insertions(+), 37 deletions(-)
```diff
diff --git a/kernel/signal.c b/kernel/signal.c
index 87424f7a4f3d..367c6662b12f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1240,14 +1240,27 @@ void sigqueue_free(struct sigqueue *q)
 	__sigqueue_free(q);
 }
 
-static int do_send_sigqueue(int sig, struct sigqueue *q, struct task_struct *t,
+static int do_send_sigqueue(struct sigqueue *q, struct task_struct *t,
 				int group)
 {
+	int sig = q->info.si_signo;
 	struct sigpending *pending;
+	unsigned long flags;
+	int ret;
 
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+
+	ret = -1;
+	if (!likely(lock_task_sighand(t, &flags)))
+		goto ret;
+
 	handle_stop_signal(sig, t);
 
+	ret = 1;
+	if (sig_ignored(t, sig))
+		goto out;
+
+	ret = 0;
 	if (unlikely(!list_empty(&q->list))) {
 		/*
 		 * If an SI_TIMER entry is already queue just increment
@@ -1256,58 +1269,29 @@ static int do_send_sigqueue(int sig, struct sigqueue *q, struct task_struct *t,
 
 		BUG_ON(q->info.si_code != SI_TIMER);
 		q->info.si_overrun++;
-		return 0;
+		goto out;
 	}
 
-	if (sig_ignored(t, sig))
-		return 1;
-
 	signalfd_notify(t, sig);
 	pending = group ? &t->signal->shared_pending : &t->pending;
 	list_add_tail(&q->list, &pending->list);
 	sigaddset(&pending->signal, sig);
 	complete_signal(sig, t, group);
-
-	return 0;
+out:
+	unlock_task_sighand(t, &flags);
+ret:
+	return ret;
 }
 
 int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 {
-	unsigned long flags;
-	int ret = -1;
-
-	/*
-	 * The rcu based delayed sighand destroy makes it possible to
-	 * run this without tasklist lock held. The task struct itself
-	 * cannot go away as create_timer did get_task_struct().
-	 *
-	 * We return -1, when the task is marked exiting, so
-	 * posix_timer_event can redirect it to the group leader
-	 */
-	if (!likely(lock_task_sighand(p, &flags)))
-		goto out_err;
-
-	ret = do_send_sigqueue(sig, q, p, 0);
-
-	unlock_task_sighand(p, &flags);
-out_err:
-	return ret;
+	return do_send_sigqueue(q, p, 0);
 }
 
 int
 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 {
-	unsigned long flags;
-	int ret;
-
-	/* Since it_lock is held, p->sighand cannot be NULL. */
-	spin_lock_irqsave(&p->sighand->siglock, flags);
-
-	ret = do_send_sigqueue(sig, q, p, 1);
-
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
-
-	return ret;
+	return do_send_sigqueue(q, p, 1);
 }
 
 /*
```
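The unified helper preserves the return-code contract the deleted comment describes: -1 when the target is exiting (so posix_timer_event() can redirect the signal to the group leader), 1 when the signal is ignored, 0 when the sigqueue was queued. A hypothetical caller sketch along those lines; the function name and fallback policy are illustrative, not the actual posix-timers code:

```c
/*
 * Hypothetical caller, loosely modelled on posix_timer_event();
 * the function name and fallback policy here are illustrative.
 *
 * do_send_sigqueue() return codes, as established by this patch:
 *   -1  lock_task_sighand() failed -- the target thread is exiting
 *    1  the signal is currently ignored; nothing was queued
 *    0  the preallocated sigqueue was queued
 */
static int queue_timer_signal(struct sigqueue *q, struct task_struct *t)
{
	/* Note: the sig argument is ignored; q->info.si_signo is what counts. */
	int ret = send_sigqueue(q->info.si_signo, q, t);

	if (ret < 0)
		/* target thread is exiting: redirect to the whole thread group */
		ret = send_group_sigqueue(q->info.si_signo, q,
					  t->group_leader);
	return ret;
}
```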
