aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2008-02-05 01:27:20 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2008-02-05 12:44:07 -0500
commit0ccf831cbee94df9c5006dd46248c0f07847dd7c (patch)
tree4de8d53c51dc4aff80f35a95cdd185229f0df79e
parent96cf49a2c13e8dcf442abaadf6645f6a1fb3ae92 (diff)
lockdep: annotate epoll
On Sat, 2008-01-05 at 13:35 -0800, Davide Libenzi wrote:

> I remember I talked with Arjan about this time ago. Basically, since 1)
> you can drop an epoll fd inside another epoll fd 2) callback-based wakeups
> are used, you can see a wake_up() from inside another wake_up(), but they
> will never refer to the same lock instance.
> Think about:
>
> 	dfd = socket(...);
> 	efd1 = epoll_create();
> 	efd2 = epoll_create();
> 	epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
> 	epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
>
> When a packet arrives to the device underneath "dfd", the net code will
> issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
> callback wakeup entry on that queue, and the wake_up() performed by the
> "dfd" net code will end up in ep_poll_callback(). At this point epoll
> (efd1) notices that it may have some event ready, so it needs to wake up
> the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
> that ends up in another wake_up(), after having checked about the
> recursion constraints. That are, no more than EP_MAX_POLLWAKE_NESTS, to
> avoid stack blasting. Never hit the same queue, to avoid loops like:
>
> 	epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
> 	epoll_ctl(efd3, EPOLL_CTL_ADD, efd2, ...);
> 	epoll_ctl(efd4, EPOLL_CTL_ADD, efd3, ...);
> 	epoll_ctl(efd1, EPOLL_CTL_ADD, efd4, ...);
>
> The code "if (tncur->wq == wq || ..." prevents re-entering the same
> queue/lock.

Since the epoll code is very careful to not nest same-instance locks,
allow the recursion.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Acked-by: Davide Libenzi <davidel@xmailserver.org>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--fs/eventpoll.c2
-rw-r--r--include/linux/wait.h16
2 files changed, 17 insertions, 1 deletion
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 81c04abfb1aa..a415f42d32cf 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -353,7 +353,7 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
 	spin_unlock_irqrestore(&psw->lock, flags);
 
 	/* Do really wake up now */
-	wake_up(wq);
+	wake_up_nested(wq, 1 + wake_nests);
 
 	/* Remove the current task from the list */
 	spin_lock_irqsave(&psw->lock, flags);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 1f4fb0a81ecd..33a2aa9e02f2 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -162,6 +162,22 @@ wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
 #define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * macro to avoid include hell
+ */
+#define wake_up_nested(x, s)						\
+do {									\
+	unsigned long flags;						\
+									\
+	spin_lock_irqsave_nested(&(x)->lock, flags, (s));		\
+	wake_up_locked(x);						\
+	spin_unlock_irqrestore(&(x)->lock, flags);			\
+} while (0)
+#else
+#define wake_up_nested(x, s)	wake_up(x)
+#endif
+
 #define __wait_event(wq, condition)					\
 do {									\
 	DEFINE_WAIT(__wait);						\