author		Davide Libenzi <davidel@xmailserver.org>	2007-09-20 15:40:16 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-09-20 16:19:59 -0400
commit		b8fceee17a310f189188599a8fa5e9beaff57eb0 (patch)
tree		21308319be2579059a4d4d7db680a73334659f82
parent		9db619e66503494e41159de3c76fafabe80d016b (diff)
signalfd simplification
This simplifies the signalfd code by no longer keeping the signalfd attached to the sighand for its whole lifetime. The signalfd now attaches to the sighand only during poll(2) (and select and epoll) and read(2). This also allows removing all the custom "tsk == current" checks in kernel/signal.c, since dequeue_signal() will only be called by "current". I think this is also what Ben was suggesting some time ago.

The external effect of this is that a thread can extract only its own private signals and the group ones. I think this is acceptable behaviour, in that those are the signals the thread would be able to fetch without signalfd.

Signed-off-by: Davide Libenzi <davidel@xmailserver.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
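As a rough userspace sketch of the behaviour described above (not part of this patch): a thread that blocks a set of signals, creates a signalfd for them and read(2)s from it will only ever dequeue its own private signals plus the process-wide (group) ones, i.e. exactly what it could have fetched without signalfd. This assumes a libc that provides the signalfd(2) wrapper and <sys/signalfd.h>; error handling is kept minimal.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/signalfd.h>

int main(void)
{
	sigset_t mask;
	struct signalfd_siginfo ssi;
	int sfd;
	ssize_t n;

	/* Block SIGINT/SIGQUIT so they stay pending and reach the signalfd. */
	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	sigaddset(&mask, SIGQUIT);
	if (sigprocmask(SIG_BLOCK, &mask, NULL) < 0) {
		perror("sigprocmask");
		exit(1);
	}

	sfd = signalfd(-1, &mask, 0);
	if (sfd < 0) {
		perror("signalfd");
		exit(1);
	}

	/*
	 * The fd can also be handed to poll/select/epoll; with this change
	 * the kernel attaches to the caller's sighand only for the duration
	 * of the poll or read, not for the lifetime of the fd.
	 */
	for (;;) {
		n = read(sfd, &ssi, sizeof(ssi));
		if (n != sizeof(ssi)) {
			perror("read");
			exit(1);
		}
		printf("got signal %u from pid %u\n", ssi.ssi_signo, ssi.ssi_pid);
	}
}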
-rw-r--r--	fs/exec.c	3
-rw-r--r--	fs/signalfd.c	190
-rw-r--r--	include/linux/init_task.h	2
-rw-r--r--	include/linux/sched.h	2
-rw-r--r--	include/linux/signalfd.h	40
-rw-r--r--	kernel/exit.c	9
-rw-r--r--	kernel/fork.c	2
-rw-r--r--	kernel/signal.c	8
8 files changed, 39 insertions, 217 deletions
diff --git a/fs/exec.c b/fs/exec.c
index c21a8cc06277..073b0b8c6d05 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -50,7 +50,6 @@
 #include <linux/tsacct_kern.h>
 #include <linux/cn_proc.h>
 #include <linux/audit.h>
-#include <linux/signalfd.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -784,7 +783,6 @@ static int de_thread(struct task_struct *tsk)
 	 * and we can just re-use it all.
 	 */
 	if (atomic_read(&oldsighand->count) <= 1) {
-		signalfd_detach(tsk);
 		exit_itimers(sig);
 		return 0;
 	}
@@ -923,7 +921,6 @@ static int de_thread(struct task_struct *tsk)
 	sig->flags = 0;
 
 no_thread_group:
-	signalfd_detach(tsk);
 	exit_itimers(sig);
 	if (leader)
 		release_task(leader);
diff --git a/fs/signalfd.c b/fs/signalfd.c
index a8e293d30034..aefb0be07942 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -11,8 +11,10 @@
  *      Now using anonymous inode source.
  *      Thanks to Oleg Nesterov for useful code review and suggestions.
  *      More comments and suggestions from Arnd Bergmann.
  *  Sat May 19, 2007: Davi E. M. Arnaut <davi@haxent.com.br>
  *      Retrieve multiple signals with one read() call
+ *  Sun Jul 15, 2007: Davide Libenzi <davidel@xmailserver.org>
+ *      Attach to the sighand only during read() and poll().
  */
 
 #include <linux/file.h>
@@ -27,102 +29,12 @@
 #include <linux/signalfd.h>
 
 struct signalfd_ctx {
-	struct list_head lnk;
-	wait_queue_head_t wqh;
 	sigset_t sigmask;
-	struct task_struct *tsk;
 };
 
-struct signalfd_lockctx {
-	struct task_struct *tsk;
-	unsigned long flags;
-};
-
-/*
- * Tries to acquire the sighand lock. We do not increment the sighand
- * use count, and we do not even pin the task struct, so we need to
- * do it inside an RCU read lock, and we must be prepared for the
- * ctx->tsk going to NULL (in signalfd_deliver()), and for the sighand
- * being detached. We return 0 if the sighand has been detached, or
- * 1 if we were able to pin the sighand lock.
- */
-static int signalfd_lock(struct signalfd_ctx *ctx, struct signalfd_lockctx *lk)
-{
-	struct sighand_struct *sighand = NULL;
-
-	rcu_read_lock();
-	lk->tsk = rcu_dereference(ctx->tsk);
-	if (likely(lk->tsk != NULL))
-		sighand = lock_task_sighand(lk->tsk, &lk->flags);
-	rcu_read_unlock();
-
-	if (!sighand)
-		return 0;
-
-	if (!ctx->tsk) {
-		unlock_task_sighand(lk->tsk, &lk->flags);
-		return 0;
-	}
-
-	if (lk->tsk->tgid == current->tgid)
-		lk->tsk = current;
-
-	return 1;
-}
-
-static void signalfd_unlock(struct signalfd_lockctx *lk)
-{
-	unlock_task_sighand(lk->tsk, &lk->flags);
-}
-
-/*
- * This must be called with the sighand lock held.
- */
-void signalfd_deliver(struct task_struct *tsk, int sig)
-{
-	struct sighand_struct *sighand = tsk->sighand;
-	struct signalfd_ctx *ctx, *tmp;
-
-	BUG_ON(!sig);
-	list_for_each_entry_safe(ctx, tmp, &sighand->signalfd_list, lnk) {
-		/*
-		 * We use a negative signal value as a way to broadcast that the
-		 * sighand has been orphaned, so that we can notify all the
-		 * listeners about this. Remember the ctx->sigmask is inverted,
-		 * so if the user is interested in a signal, that corresponding
-		 * bit will be zero.
-		 */
-		if (sig < 0) {
-			if (ctx->tsk == tsk) {
-				ctx->tsk = NULL;
-				list_del_init(&ctx->lnk);
-				wake_up(&ctx->wqh);
-			}
-		} else {
-			if (!sigismember(&ctx->sigmask, sig))
-				wake_up(&ctx->wqh);
-		}
-	}
-}
-
-static void signalfd_cleanup(struct signalfd_ctx *ctx)
-{
-	struct signalfd_lockctx lk;
-
-	/*
-	 * This is tricky. If the sighand is gone, we do not need to remove
-	 * context from the list, the list itself won't be there anymore.
-	 */
-	if (signalfd_lock(ctx, &lk)) {
-		list_del(&ctx->lnk);
-		signalfd_unlock(&lk);
-	}
-	kfree(ctx);
-}
-
 static int signalfd_release(struct inode *inode, struct file *file)
 {
-	signalfd_cleanup(file->private_data);
+	kfree(file->private_data);
 	return 0;
 }
 
@@ -130,23 +42,15 @@ static unsigned int signalfd_poll(struct file *file, poll_table *wait)
 {
 	struct signalfd_ctx *ctx = file->private_data;
 	unsigned int events = 0;
-	struct signalfd_lockctx lk;
 
-	poll_wait(file, &ctx->wqh, wait);
+	poll_wait(file, &current->sighand->signalfd_wqh, wait);
 
-	/*
-	 * Let the caller get a POLLIN in this case, ala socket recv() when
-	 * the peer disconnects.
-	 */
-	if (signalfd_lock(ctx, &lk)) {
-		if ((lk.tsk == current &&
-		     next_signal(&lk.tsk->pending, &ctx->sigmask) > 0) ||
-		    next_signal(&lk.tsk->signal->shared_pending,
-				&ctx->sigmask) > 0)
-			events |= POLLIN;
-		signalfd_unlock(&lk);
-	} else
+	spin_lock_irq(&current->sighand->siglock);
+	if (next_signal(&current->pending, &ctx->sigmask) ||
+	    next_signal(&current->signal->shared_pending,
+			&ctx->sigmask))
 		events |= POLLIN;
+	spin_unlock_irq(&current->sighand->siglock);
 
 	return events;
 }
@@ -219,59 +123,46 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, siginfo_t *info,
 			     int nonblock)
 {
 	ssize_t ret;
-	struct signalfd_lockctx lk;
 	DECLARE_WAITQUEUE(wait, current);
 
-	if (!signalfd_lock(ctx, &lk))
-		return 0;
-
-	ret = dequeue_signal(lk.tsk, &ctx->sigmask, info);
+	spin_lock_irq(&current->sighand->siglock);
+	ret = dequeue_signal(current, &ctx->sigmask, info);
 	switch (ret) {
 	case 0:
 		if (!nonblock)
 			break;
 		ret = -EAGAIN;
 	default:
-		signalfd_unlock(&lk);
+		spin_unlock_irq(&current->sighand->siglock);
 		return ret;
 	}
 
-	add_wait_queue(&ctx->wqh, &wait);
+	add_wait_queue(&current->sighand->signalfd_wqh, &wait);
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
-		ret = dequeue_signal(lk.tsk, &ctx->sigmask, info);
-		signalfd_unlock(&lk);
+		ret = dequeue_signal(current, &ctx->sigmask, info);
 		if (ret != 0)
 			break;
 		if (signal_pending(current)) {
 			ret = -ERESTARTSYS;
 			break;
 		}
+		spin_unlock_irq(&current->sighand->siglock);
 		schedule();
-		ret = signalfd_lock(ctx, &lk);
-		if (unlikely(!ret)) {
-			/*
-			 * Let the caller read zero byte, ala socket
-			 * recv() when the peer disconnect. This test
-			 * must be done before doing a dequeue_signal(),
-			 * because if the sighand has been orphaned,
-			 * the dequeue_signal() call is going to crash
-			 * because ->sighand will be long gone.
-			 */
-			break;
-		}
+		spin_lock_irq(&current->sighand->siglock);
 	}
+	spin_unlock_irq(&current->sighand->siglock);
 
-	remove_wait_queue(&ctx->wqh, &wait);
+	remove_wait_queue(&current->sighand->signalfd_wqh, &wait);
 	__set_current_state(TASK_RUNNING);
 
 	return ret;
 }
 
 /*
- * Returns either the size of a "struct signalfd_siginfo", or zero if the
- * sighand we are attached to, has been orphaned. The "count" parameter
- * must be at least the size of a "struct signalfd_siginfo".
+ * Returns a multiple of the size of a "struct signalfd_siginfo", or a negative
+ * error code. The "count" parameter must be at least the size of a
+ * "struct signalfd_siginfo".
  */
 static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
 			     loff_t *ppos)
@@ -287,7 +178,6 @@ static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
 		return -EINVAL;
 
 	siginfo = (struct signalfd_siginfo __user *) buf;
-
 	do {
 		ret = signalfd_dequeue(ctx, &info, nonblock);
 		if (unlikely(ret <= 0))
@@ -300,7 +190,7 @@ static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
 		nonblock = 1;
 	} while (--count);
 
-	return total ? total : ret;
+	return total ? total: ret;
 }
 
 static const struct file_operations signalfd_fops = {
@@ -309,20 +199,13 @@ static const struct file_operations signalfd_fops = {
 	.read		= signalfd_read,
 };
 
-/*
- * Create a file descriptor that is associated with our signal
- * state. We can pass it around to others if we want to, but
- * it will always be _our_ signal state.
- */
 asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask)
 {
 	int error;
 	sigset_t sigmask;
 	struct signalfd_ctx *ctx;
-	struct sighand_struct *sighand;
 	struct file *file;
 	struct inode *inode;
-	struct signalfd_lockctx lk;
 
 	if (sizemask != sizeof(sigset_t) ||
 	    copy_from_user(&sigmask, user_mask, sizeof(sigmask)))
@@ -335,17 +218,7 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas
 	if (!ctx)
 		return -ENOMEM;
 
-	init_waitqueue_head(&ctx->wqh);
 	ctx->sigmask = sigmask;
-	ctx->tsk = current->group_leader;
-
-	sighand = current->sighand;
-	/*
-	 * Add this fd to the list of signal listeners.
-	 */
-	spin_lock_irq(&sighand->siglock);
-	list_add_tail(&ctx->lnk, &sighand->signalfd_list);
-	spin_unlock_irq(&sighand->siglock);
 
 	/*
 	 * When we call this, the initialization must be complete, since
@@ -364,23 +237,18 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas
 			fput(file);
 			return -EINVAL;
 		}
-		/*
-		 * We need to be prepared of the fact that the sighand this fd
-		 * is attached to, has been detched. In that case signalfd_lock()
-		 * will return 0, and we'll just skip setting the new mask.
-		 */
-		if (signalfd_lock(ctx, &lk)) {
-			ctx->sigmask = sigmask;
-			signalfd_unlock(&lk);
-		}
-		wake_up(&ctx->wqh);
+		spin_lock_irq(&current->sighand->siglock);
+		ctx->sigmask = sigmask;
+		spin_unlock_irq(&current->sighand->siglock);
+
+		wake_up(&current->sighand->signalfd_wqh);
 		fput(file);
 	}
 
 	return ufd;
 
 err_fdalloc:
-	signalfd_cleanup(ctx);
+	kfree(ctx);
 	return error;
 }
 
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index cab741c2d603..f8abfa349ef9 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -86,7 +86,7 @@ extern struct nsproxy init_nsproxy;
 	.count		= ATOMIC_INIT(1),					\
 	.action		= { { { .sa_handler = NULL, } }, },			\
 	.siglock	= __SPIN_LOCK_UNLOCKED(sighand.siglock),		\
-	.signalfd_list	= LIST_HEAD_INIT(sighand.signalfd_list),		\
+	.signalfd_wqh	= __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh),	\
 }
 
 extern struct group_info init_groups;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3de79016f2a6..a01ac6dd5f5e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -438,7 +438,7 @@ struct sighand_struct {
 	atomic_t		count;
 	struct k_sigaction	action[_NSIG];
 	spinlock_t		siglock;
-	struct list_head	signalfd_list;
+	wait_queue_head_t	signalfd_wqh;
 };
 
 struct pacct_struct {
diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h
index 510429495690..4c9ff0910ae0 100644
--- a/include/linux/signalfd.h
+++ b/include/linux/signalfd.h
@@ -45,49 +45,17 @@ struct signalfd_siginfo {
 #ifdef CONFIG_SIGNALFD
 
 /*
- * Deliver the signal to listening signalfd. This must be called
- * with the sighand lock held. Same are the following that end up
- * calling signalfd_deliver().
- */
-void signalfd_deliver(struct task_struct *tsk, int sig);
-
-/*
- * No need to fall inside signalfd_deliver() if no signal listeners
- * are available.
+ * Deliver the signal to listening signalfd.
  */
 static inline void signalfd_notify(struct task_struct *tsk, int sig)
 {
-	if (unlikely(!list_empty(&tsk->sighand->signalfd_list)))
-		signalfd_deliver(tsk, sig);
-}
-
-/*
- * The signal -1 is used to notify the signalfd that the sighand
- * is on its way to be detached.
- */
-static inline void signalfd_detach_locked(struct task_struct *tsk)
-{
-	if (unlikely(!list_empty(&tsk->sighand->signalfd_list)))
-		signalfd_deliver(tsk, -1);
-}
-
-static inline void signalfd_detach(struct task_struct *tsk)
-{
-	struct sighand_struct *sighand = tsk->sighand;
-
-	if (unlikely(!list_empty(&sighand->signalfd_list))) {
-		spin_lock_irq(&sighand->siglock);
-		signalfd_deliver(tsk, -1);
-		spin_unlock_irq(&sighand->siglock);
-	}
+	if (unlikely(waitqueue_active(&tsk->sighand->signalfd_wqh)))
+		wake_up(&tsk->sighand->signalfd_wqh);
 }
 
 #else /* CONFIG_SIGNALFD */
 
-#define signalfd_deliver(t, s) do { } while (0)
-#define signalfd_notify(t, s) do { } while (0)
-#define signalfd_detach_locked(t) do { } while (0)
-#define signalfd_detach(t) do { } while (0)
+static inline void signalfd_notify(struct task_struct *tsk, int sig) { }
 
 #endif /* CONFIG_SIGNALFD */
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 06b24b3aa370..993369ee94d1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -24,7 +24,6 @@
 #include <linux/pid_namespace.h>
 #include <linux/ptrace.h>
 #include <linux/profile.h>
-#include <linux/signalfd.h>
 #include <linux/mount.h>
 #include <linux/proc_fs.h>
 #include <linux/kthread.h>
@@ -86,14 +85,6 @@ static void __exit_signal(struct task_struct *tsk)
 	sighand = rcu_dereference(tsk->sighand);
 	spin_lock(&sighand->siglock);
 
-	/*
-	 * Notify that this sighand has been detached. This must
-	 * be called with the tsk->sighand lock held. Also, this
-	 * access tsk->sighand internally, so it must be called
-	 * before tsk->sighand is reset.
-	 */
-	signalfd_detach_locked(tsk);
-
 	posix_cpu_timers_exit(tsk);
 	if (atomic_dec_and_test(&sig->count))
 		posix_cpu_timers_exit_group(tsk);
diff --git a/kernel/fork.c b/kernel/fork.c
index 7332e236d367..33f12f48684a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1438,7 +1438,7 @@ static void sighand_ctor(void *data, struct kmem_cache *cachep,
 	struct sighand_struct *sighand = data;
 
 	spin_lock_init(&sighand->siglock);
-	INIT_LIST_HEAD(&sighand->signalfd_list);
+	init_waitqueue_head(&sighand->signalfd_wqh);
 }
 
 void __init proc_caches_init(void)
diff --git a/kernel/signal.c b/kernel/signal.c
index 3169bed0b4d0..9fb91a32edda 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -378,8 +378,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 	/* We only dequeue private signals from ourselves, we don't let
 	 * signalfd steal them
 	 */
-	if (likely(tsk == current))
-		signr = __dequeue_signal(&tsk->pending, mask, info);
+	signr = __dequeue_signal(&tsk->pending, mask, info);
 	if (!signr) {
 		signr = __dequeue_signal(&tsk->signal->shared_pending,
 					 mask, info);
@@ -407,8 +406,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 			}
 		}
 	}
-	if (likely(tsk == current))
-		recalc_sigpending();
+	recalc_sigpending();
 	if (signr && unlikely(sig_kernel_stop(signr))) {
 		/*
 		 * Set a marker that we have dequeued a stop signal. Our
@@ -425,7 +423,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
 			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
 	}
-	if (signr && likely(tsk == current) &&
+	if (signr &&
 	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
 	    info->si_sys_private){
 		/*