Diffstat (limited to 'kernel/signal.c')
-rw-r--r--	kernel/signal.c	98
1 file changed, 86 insertions(+), 12 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 934ae5e687b9..9dda83b9db01 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -206,13 +206,47 @@ static inline void print_dropped_signal(int sig)
 		       current->comm, current->pid, sig);
 }
 
+#ifdef __HAVE_ARCH_CMPXCHG
+static inline struct sigqueue *get_task_cache(struct task_struct *t)
+{
+	struct sigqueue *q = t->sigqueue_cache;
+
+	if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
+		return NULL;
+
+	return q;
+}
+
+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
+{
+	if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
+		return 0;
+
+	return 1;
+}
+
+#else
+
+static inline struct sigqueue *get_task_cache(struct task_struct *t)
+{
+	return NULL;
+}
+
+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
+{
+	return 1;
+}
+
+#endif
+
 /*
  * allocate a new signal queue record
  * - this may be called without locks if and only if t == current, otherwise an
  *   appopriate lock must be held to stop the target task from exiting
  */
 static struct sigqueue *
-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
+__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
+		    int override_rlimit, int fromslab)
 {
 	struct sigqueue *q = NULL;
 	struct user_struct *user;
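The #ifdef __HAVE_ARCH_CMPXCHG block above is a lock-free, single-slot per-task cache: get_task_cache() atomically takes whatever sits in t->sigqueue_cache, and put_task_cache() fills the slot only when it is empty. Below is a minimal userspace sketch of the same pattern using C11 atomics; the names (struct sigqueue_demo, cache_slot, cache_get, cache_put) are illustrative only and not part of the patch.

/* Illustrative single-slot cache, modelled on get_task_cache()/put_task_cache(). */
#include <stdatomic.h>
#include <stdlib.h>

struct sigqueue_demo {
	int sig;
};

static _Atomic(struct sigqueue_demo *) cache_slot;

/* Take the cached entry, if any; the CAS guards against concurrent takers. */
static struct sigqueue_demo *cache_get(void)
{
	struct sigqueue_demo *q = atomic_load(&cache_slot);

	if (!q || !atomic_compare_exchange_strong(&cache_slot, &q, NULL))
		return NULL;

	return q;
}

/* Park an entry in the slot; 0 on success, 1 if the slot was already full. */
static int cache_put(struct sigqueue_demo *q)
{
	struct sigqueue_demo *expected = NULL;

	return atomic_compare_exchange_strong(&cache_slot, &expected, q) ? 0 : 1;
}

int main(void)
{
	struct sigqueue_demo *q = malloc(sizeof(*q));

	if (q && cache_put(q) == 0)	/* slot was empty, entry parked */
		q = NULL;
	free(q);			/* only frees if the slot was already full */

	q = cache_get();		/* slot is emptied atomically */
	free(q);
	return 0;
}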
@@ -229,7 +263,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
 	if (override_rlimit ||
 	    atomic_read(&user->sigpending) <=
 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
-		q = kmem_cache_alloc(sigqueue_cachep, flags);
+		if (!fromslab)
+			q = get_task_cache(t);
+		if (!q)
+			q = kmem_cache_alloc(sigqueue_cachep, flags);
 	} else {
 		print_dropped_signal(sig);
 	}
@@ -246,6 +283,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
 	return q;
 }
 
+static struct sigqueue *
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
+		 int override_rlimit)
+{
+	return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
+}
+
 static void __sigqueue_free(struct sigqueue *q)
 {
 	if (q->flags & SIGQUEUE_PREALLOC)
@@ -255,6 +299,21 @@ static void __sigqueue_free(struct sigqueue *q)
 	kmem_cache_free(sigqueue_cachep, q);
 }
 
+static void sigqueue_free_current(struct sigqueue *q)
+{
+	struct user_struct *up;
+
+	if (q->flags & SIGQUEUE_PREALLOC)
+		return;
+
+	up = q->user;
+	if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
+		atomic_dec(&up->sigpending);
+		free_uid(up);
+	} else
+		__sigqueue_free(q);
+}
+
 void flush_sigqueue(struct sigpending *queue)
 {
 	struct sigqueue *q;
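Taken together, the two hunks above form the fast path for RT tasks: __sigqueue_do_alloc() tries the per-task cache before falling back to the slab, and sigqueue_free_current() parks the entry back in the cache instead of freeing it when the current task runs at RT priority. A rough userspace sketch of that alloc/free policy follows, again using C11 atomics; every name here (struct entry, slot, is_rt_task, entry_alloc, entry_free) is illustrative, with is_rt_task() standing in for rt_prio(current->normal_prio).

/* Rough sketch of the RT fast path: allocate from a one-entry cache when
 * possible, refill the cache on free, fall back to the ordinary allocator
 * otherwise. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct entry { int sig; };

static _Atomic(struct entry *) slot;		/* models t->sigqueue_cache */

static bool is_rt_task(void) { return true; }	/* placeholder policy check */

static struct entry *entry_alloc(void)
{
	struct entry *e = atomic_load(&slot);

	/* Try the cache exactly once; otherwise hit the allocator. */
	if (e && atomic_compare_exchange_strong(&slot, &e, NULL))
		return e;
	return malloc(sizeof(struct entry));
}

static void entry_free(struct entry *e)
{
	struct entry *expected = NULL;

	/* An RT task keeps one entry parked for the next signal; everyone
	 * else (or a full slot) goes back to the allocator. */
	if (is_rt_task() && atomic_compare_exchange_strong(&slot, &expected, e))
		return;
	free(e);
}

int main(void)
{
	struct entry *e = entry_alloc();

	entry_free(e);		/* parked in the slot */
	e = entry_alloc();	/* served from the slot, no allocator call */
	free(e);
	return 0;
}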
@@ -268,6 +327,21 @@ void flush_sigqueue(struct sigpending *queue)
 }
 
 /*
+ * Called from __exit_signal. Flush tsk->pending and
+ * tsk->sigqueue_cache
+ */
+void flush_task_sigqueue(struct task_struct *tsk)
+{
+	struct sigqueue *q;
+
+	flush_sigqueue(&tsk->pending);
+
+	q = get_task_cache(tsk);
+	if (q)
+		kmem_cache_free(sigqueue_cachep, q);
+}
+
+/*
  * Flush all pending signals for a task.
  */
 void __flush_signals(struct task_struct *t)
@@ -415,7 +489,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 still_pending:
 		list_del_init(&first->list);
 		copy_siginfo(info, &first->info);
-		__sigqueue_free(first);
+		sigqueue_free_current(first);
 	} else {
 		/* Ok, it wasn't in the queue. This must be
 		   a fast-pathed signal or we must have been
@@ -460,6 +534,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 {
 	int signr;
 
+	WARN_ON_ONCE(tsk != current);
+
 	/* We only dequeue private signals from ourselves, we don't let
 	 * signalfd steal them
 	 */
@@ -542,6 +618,9 @@ void signal_wake_up(struct task_struct *t, int resume)
 
 	set_tsk_thread_flag(t, TIF_SIGPENDING);
 
+	if (unlikely(t == current))
+		return;
+
 	/*
 	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
 	 * case. We don't check t->state here because there is a race with it
@@ -870,7 +949,9 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
 
 	trace_signal_generate(sig, info, t);
 
+#ifdef CONFIG_SMP
 	assert_spin_locked(&t->sighand->siglock);
+#endif
 
 	if (!prepare_signal(sig, t, from_ancestor_ns))
 		return 0;
@@ -1337,7 +1418,8 @@ EXPORT_SYMBOL(kill_pid);
  */
 struct sigqueue *sigqueue_alloc(void)
 {
-	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
+	/* Preallocated sigqueue objects always from the slabcache ! */
+	struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
 
 	if (q)
 		q->flags |= SIGQUEUE_PREALLOC;
@@ -1630,15 +1712,7 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
 	read_lock(&tasklist_lock);
 	if (may_ptrace_stop()) {
 		do_notify_parent_cldstop(current, CLD_TRAPPED);
-		/*
-		 * Don't want to allow preemption here, because
-		 * sys_ptrace() needs this task to be inactive.
-		 *
-		 * XXX: implement read_unlock_no_resched().
-		 */
-		preempt_disable();
 		read_unlock(&tasklist_lock);
-		preempt_enable_no_resched();
 		schedule();
 	} else {
 		/*