author		Tony Battersby <tonyb@cybernetics.com>	2009-03-31 18:24:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-01 11:59:19 -0400
commit		e057e15ff66a620eda4c407486cbb8f8fbb7d878
tree		aa9e15c13ce5882ca0f0b442eec71d99fec8a4ff	/fs/eventpoll.c
parent		d1bc90dd5d037079f96b3327f943eb6ae8ef7491
epoll: clean up ep_modify
ep_modify() doesn't need to set event.data from within the ep->lock
spinlock as the comment suggests. The only place event.data is used is
ep_send_events_proc(), and this is protected by ep->mtx instead of
ep->lock. Also update the comment for mutex_lock() at the top of
ep_scan_ready_list(), which mentions epoll_ctl(EPOLL_CTL_DEL) but not
epoll_ctl(EPOLL_CTL_MOD).
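To make the locking argument concrete, here is a minimal sketch of the rule the patch relies on; the struct and function names (example_*) are illustrative stand-ins, not the kernel's actual types. When every reader and writer of a field holds the same mutex, a plain store under that mutex is enough, and no additional spinlock is needed for that field.

#include <linux/mutex.h>

/* Illustrative stand-in for struct eventpoll; all 'example_*' names
 * are assumptions for this sketch, not code from the patch. */
struct example_ep {
	struct mutex mtx;		/* serializes modify and send paths */
	unsigned long long data;	/* analogue of epi->event.data */
};

/* Writer, analogue of ep_modify(): the store needs no spinlock
 * because the only reader below also holds ep->mtx. */
static void example_modify(struct example_ep *ep, unsigned long long data)
{
	mutex_lock(&ep->mtx);
	ep->data = data;
	mutex_unlock(&ep->mtx);
}

/* Reader, analogue of ep_send_events_proc(), which is reached via
 * ep_scan_ready_list() and therefore runs under ep->mtx. */
static unsigned long long example_send(struct example_ep *ep)
{
	unsigned long long data;

	mutex_lock(&ep->mtx);
	data = ep->data;
	mutex_unlock(&ep->mtx);
	return data;
}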
ep_modify() can also use spin_lock_irq() instead of spin_lock_irqsave().
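The irqsave/irq distinction is sketched below under the assumption of process context; the example_* names are illustrative, not ep->lock itself. spin_lock_irqsave() saves the caller's interrupt state and restores exactly that state on unlock, which matters only if the caller might already have interrupts disabled. ep_modify() is reached only from epoll_ctl(), i.e. process context with interrupts enabled, so the unconditional spin_lock_irq()/spin_unlock_irq() pair suffices and avoids carrying the flags variable.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* illustrative, not ep->lock */

/* Safe from any context: saves the current interrupt state in 'flags'
 * and restores exactly that state on unlock. */
static void example_touch_irqsave(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... access data shared with interrupt context ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

/* Variant for callers known to run with interrupts enabled (process
 * context, as ep_modify() is): disables interrupts on lock and
 * unconditionally re-enables them on unlock. */
static void example_touch_irq(void)
{
	spin_lock_irq(&example_lock);
	/* ... access data shared with interrupt context ... */
	spin_unlock_irq(&example_lock);
}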
Signed-off-by: Tony Battersby <tonyb@cybernetics.com>
Acked-by: Davide Libenzi <davidel@xmailserver.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/eventpoll.c')
 fs/eventpoll.c | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 64c55037c13b..744377c01869 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -435,7 +435,7 @@ static int ep_scan_ready_list(struct eventpoll *ep,
 
 	/*
 	 * We need to lock this because we could be hit by
-	 * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
+	 * eventpoll_release_file() and epoll_ctl().
 	 */
 	mutex_lock(&ep->mtx);
 
@@ -972,15 +972,14 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 {
 	int pwake = 0;
 	unsigned int revents;
-	unsigned long flags;
 
 	/*
-	 * Set the new event interest mask before calling f_op->poll(), otherwise
-	 * a potential race might occur. In fact if we do this operation inside
-	 * the lock, an event might happen between the f_op->poll() call and the
-	 * new event set registering.
+	 * Set the new event interest mask before calling f_op->poll();
+	 * otherwise we might miss an event that happens between the
+	 * f_op->poll() call and the new event set registering.
 	 */
 	epi->event.events = event->events;
+	epi->event.data = event->data; /* protected by mtx */
 
 	/*
 	 * Get current event bits. We can safely use the file* here because
@@ -988,16 +987,12 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 	 */
 	revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
 
-	spin_lock_irqsave(&ep->lock, flags);
-
-	/* Copy the data member from inside the lock */
-	epi->event.data = event->data;
-
 	/*
 	 * If the item is "hot" and it is not registered inside the ready
 	 * list, push it inside.
 	 */
 	if (revents & event->events) {
+		spin_lock_irq(&ep->lock);
 		if (!ep_is_linked(&epi->rdllink)) {
 			list_add_tail(&epi->rdllink, &ep->rdllist);
 
@@ -1007,8 +1002,8 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 			if (waitqueue_active(&ep->poll_wait))
 				pwake++;
 		}
+		spin_unlock_irq(&ep->lock);
 	}
-	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/* We have to call this outside the lock */
 	if (pwake)