Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r--	fs/eventpoll.c	19
1 file changed, 7 insertions, 12 deletions
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 64c55037c13b..744377c01869 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -435,7 +435,7 @@ static int ep_scan_ready_list(struct eventpoll *ep,
 
 	/*
 	 * We need to lock this because we could be hit by
-	 * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
+	 * eventpoll_release_file() and epoll_ctl().
 	 */
 	mutex_lock(&ep->mtx);
 
@@ -972,15 +972,14 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 {
 	int pwake = 0;
 	unsigned int revents;
-	unsigned long flags;
 
 	/*
-	 * Set the new event interest mask before calling f_op->poll(), otherwise
-	 * a potential race might occur. In fact if we do this operation inside
-	 * the lock, an event might happen between the f_op->poll() call and the
-	 * new event set registering.
+	 * Set the new event interest mask before calling f_op->poll();
+	 * otherwise we might miss an event that happens between the
+	 * f_op->poll() call and the new event set registering.
 	 */
 	epi->event.events = event->events;
+	epi->event.data = event->data; /* protected by mtx */
 
 	/*
 	 * Get current event bits. We can safely use the file* here because
@@ -988,16 +987,12 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 	 */
 	revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
 
-	spin_lock_irqsave(&ep->lock, flags);
-
-	/* Copy the data member from inside the lock */
-	epi->event.data = event->data;
-
 	/*
 	 * If the item is "hot" and it is not registered inside the ready
 	 * list, push it inside.
 	 */
 	if (revents & event->events) {
+		spin_lock_irq(&ep->lock);
 		if (!ep_is_linked(&epi->rdllink)) {
 			list_add_tail(&epi->rdllink, &ep->rdllist);
 
@@ -1007,8 +1002,8 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 			if (waitqueue_active(&ep->poll_wait))
 				pwake++;
 		}
+		spin_unlock_irq(&ep->lock);
 	}
-	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/* We have to call this outside the lock */
 	if (pwake)
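
For context, ep_modify() is the kernel-side handler reached through epoll_ctl(EPOLL_CTL_MOD). The sketch below is a minimal user-space illustration of that call path, not part of the patch; the pipe setup, the EPOLLET flag, and the data value are illustrative choices only.

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/epoll.h>

/*
 * Minimal sketch: modify an existing epoll registration with
 * EPOLL_CTL_MOD, which invokes ep_modify() in fs/eventpoll.c.
 * Error handling is trimmed to keep the example short.
 */
int main(void)
{
	int pipefd[2];
	struct epoll_event ev;

	if (pipe(pipefd) < 0)
		return EXIT_FAILURE;

	int epfd = epoll_create1(0);
	if (epfd < 0)
		return EXIT_FAILURE;

	/* Initial registration: wait for the read end to become readable. */
	memset(&ev, 0, sizeof(ev));
	ev.events = EPOLLIN;
	ev.data.fd = pipefd[0];
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev) < 0)
		return EXIT_FAILURE;

	/*
	 * Change both the interest mask and the user data in one call.
	 * After this patch, ep_modify() copies both fields under ep->mtx
	 * and sets the events mask before it re-polls the file.
	 */
	ev.events = EPOLLIN | EPOLLET;
	ev.data.u64 = 0xdeadbeef;	/* illustrative value */
	if (epoll_ctl(epfd, EPOLL_CTL_MOD, pipefd[0], &ev) < 0)
		return EXIT_FAILURE;

	close(epfd);
	close(pipefd[0]);
	close(pipefd[1]);
	return EXIT_SUCCESS;
}

The move from spin_lock_irqsave()/spin_unlock_irqrestore() to spin_lock_irq()/spin_unlock_irq() works here because ep_modify() runs in process context via the epoll_ctl() syscall with interrupts enabled, and the lock is now taken only when the item actually has to be pushed onto the ready list.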