author:    Tony Battersby <tonyb@cybernetics.com>  2009-03-31 18:24:13 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2009-04-01 11:59:19 -0400
commit:    abff55cee1039b5a3b96f7a5eb6e65b9f247a274
tree:      dfbcc71256c2129d2e0ee3e077461103338b9591 /fs/eventpoll.c
parent:    bb57c3edcd2fc51d95914c39448f36e43af9d6af
epoll: don't use current in irq context
ep_call_nested() (formerly ep_poll_safewake()) uses "current" (without
dereferencing it) to detect callback recursion, but it may be called from
irq context, where the use of current is generally discouraged.  It would
be better to use get_cpu() and put_cpu() to detect the callback recursion.

Signed-off-by: Tony Battersby <tonyb@cybernetics.com>
Acked-by: Davide Libenzi <davidel@xmailserver.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
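For illustration, here is a minimal, self-contained sketch of the per-CPU
recursion detection the patch adopts.  It mirrors the shape of the patched
ep_call_nested() below, but run_nested(), nest_node, nest_list, nest_lock
and the nproc callback are hypothetical names used for the sketch, not
kernel API:

/*
 * Illustrative sketch only -- not the patch itself.  Detect nested
 * invocations on the same CPU with get_cpu()/put_cpu(), which is safe
 * in irq context, unlike identifying the caller via "current".
 */
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

struct nest_node {
	struct list_head llink;	/* chains the currently active calls */
	void *cookie;		/* identifies this particular call */
	int cpu;		/* CPU the call is running on */
};

static LIST_HEAD(nest_list);
static DEFINE_SPINLOCK(nest_lock);

static int run_nested(int (*nproc)(void *), void *cookie, int max_nests)
{
	struct nest_node *tncur, tnode;
	unsigned long flags;
	int error, call_nests = 0;
	int this_cpu = get_cpu();	/* also disables preemption */

	spin_lock_irqsave(&nest_lock, flags);
	list_for_each_entry(tncur, &nest_list, llink) {
		/* Same CPU + same cookie => loop; too many levels => abort. */
		if (tncur->cpu == this_cpu &&
		    (tncur->cookie == cookie || ++call_nests > max_nests)) {
			error = -1;
			goto out_unlock;
		}
	}

	/* Publish this call, then run the possibly re-entrant callback. */
	tnode.cpu = this_cpu;
	tnode.cookie = cookie;
	list_add(&tnode.llink, &nest_list);
	spin_unlock_irqrestore(&nest_lock, flags);

	error = nproc(cookie);

	spin_lock_irqsave(&nest_lock, flags);
	list_del(&tnode.llink);
out_unlock:
	spin_unlock_irqrestore(&nest_lock, flags);
	put_cpu();			/* re-enables preemption */
	return error;
}

The design point is that get_cpu() both returns the id of the executing CPU
and disables preemption until the matching put_cpu(), so the id cannot
change under the caller; unlike "current", it is meaningful in irq context,
where the interrupted task is arbitrary.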
Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r--  fs/eventpoll.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 205a1e1c77c..db4365f8a75 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -97,8 +97,8 @@ struct epoll_filefd {
  */
 struct nested_call_node {
 	struct list_head llink;
-	struct task_struct *task;
 	void *cookie;
+	int cpu;
 };
 
 /*
@@ -327,7 +327,7 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
 {
 	int error, call_nests = 0;
 	unsigned long flags;
-	struct task_struct *this_task = current;
+	int this_cpu = get_cpu();
 	struct list_head *lsthead = &ncalls->tasks_call_list;
 	struct nested_call_node *tncur;
 	struct nested_call_node tnode;
@@ -340,20 +340,19 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
 	 * very much limited.
 	 */
 	list_for_each_entry(tncur, lsthead, llink) {
-		if (tncur->task == this_task &&
+		if (tncur->cpu == this_cpu &&
 		    (tncur->cookie == cookie || ++call_nests > max_nests)) {
 			/*
 			 * Ops ... loop detected or maximum nest level reached.
 			 * We abort this wake by breaking the cycle itself.
 			 */
-			spin_unlock_irqrestore(&ncalls->lock, flags);
-
-			return -1;
+			error = -1;
+			goto out_unlock;
 		}
 	}
 
 	/* Add the current task and cookie to the list */
-	tnode.task = this_task;
+	tnode.cpu = this_cpu;
 	tnode.cookie = cookie;
 	list_add(&tnode.llink, lsthead);
 
@@ -365,8 +364,10 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
 	/* Remove the current task from the list */
 	spin_lock_irqsave(&ncalls->lock, flags);
 	list_del(&tnode.llink);
+out_unlock:
 	spin_unlock_irqrestore(&ncalls->lock, flags);
 
+	put_cpu();
 	return error;
 }
 