about summary refs log tree commit diff stats
path: root/fs/eventpoll.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r-- fs/eventpoll.c | 18
1 files changed, 15 insertions, 3 deletions
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 4d9d3a45e356..ca300071e79c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -699,9 +699,12 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
699 void *priv) 699 void *priv)
700{ 700{
701 struct epitem *epi, *tmp; 701 struct epitem *epi, *tmp;
702 poll_table pt;
702 703
704 init_poll_funcptr(&pt, NULL);
703 list_for_each_entry_safe(epi, tmp, head, rdllink) { 705 list_for_each_entry_safe(epi, tmp, head, rdllink) {
704 if (epi->ffd.file->f_op->poll(epi->ffd.file, NULL) & 706 pt._key = epi->event.events;
707 if (epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
705 epi->event.events) 708 epi->event.events)
706 return POLLIN | POLLRDNORM; 709 return POLLIN | POLLRDNORM;
707 else { 710 else {
@@ -1097,6 +1100,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
1097 /* Initialize the poll table using the queue callback */ 1100 /* Initialize the poll table using the queue callback */
1098 epq.epi = epi; 1101 epq.epi = epi;
1099 init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); 1102 init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
1103 epq.pt._key = event->events;
1100 1104
1101 /* 1105 /*
1102 * Attach the item to the poll hooks and get current event bits. 1106 * Attach the item to the poll hooks and get current event bits.
@@ -1191,6 +1195,9 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
1191{ 1195{
1192 int pwake = 0; 1196 int pwake = 0;
1193 unsigned int revents; 1197 unsigned int revents;
1198 poll_table pt;
1199
1200 init_poll_funcptr(&pt, NULL);
1194 1201
1195 /* 1202 /*
1196 * Set the new event interest mask before calling f_op->poll(); 1203 * Set the new event interest mask before calling f_op->poll();
@@ -1198,13 +1205,14 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
1198 * f_op->poll() call and the new event set registering. 1205 * f_op->poll() call and the new event set registering.
1199 */ 1206 */
1200 epi->event.events = event->events; 1207 epi->event.events = event->events;
1208 pt._key = event->events;
1201 epi->event.data = event->data; /* protected by mtx */ 1209 epi->event.data = event->data; /* protected by mtx */
1202 1210
1203 /* 1211 /*
1204 * Get current event bits. We can safely use the file* here because 1212 * Get current event bits. We can safely use the file* here because
1205 * its usage count has been increased by the caller of this function. 1213 * its usage count has been increased by the caller of this function.
1206 */ 1214 */
1207 revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL); 1215 revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt);
1208 1216
1209 /* 1217 /*
1210 * If the item is "hot" and it is not registered inside the ready 1218 * If the item is "hot" and it is not registered inside the ready
@@ -1239,6 +1247,9 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
1239 unsigned int revents; 1247 unsigned int revents;
1240 struct epitem *epi; 1248 struct epitem *epi;
1241 struct epoll_event __user *uevent; 1249 struct epoll_event __user *uevent;
1250 poll_table pt;
1251
1252 init_poll_funcptr(&pt, NULL);
1242 1253
1243 /* 1254 /*
1244 * We can loop without lock because we are passed a task private list. 1255 * We can loop without lock because we are passed a task private list.
@@ -1251,7 +1262,8 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
1251 1262
1252 list_del_init(&epi->rdllink); 1263 list_del_init(&epi->rdllink);
1253 1264
1254 revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL) & 1265 pt._key = epi->event.events;
1266 revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
1255 epi->event.events; 1267 epi->event.events;
1256 1268
1257 /* 1269 /*