Diffstat (limited to 'fs/eventpoll.c')
 fs/eventpoll.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 50 insertions(+), 14 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 4a09af9e9a63..ed38801b57a7 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -62,7 +62,7 @@
  * This mutex is acquired by ep_free() during the epoll file
  * cleanup path and it is also acquired by eventpoll_release_file()
  * if a file has been pushed inside an epoll set and it is then
- * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL).
+ * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
  * It is also acquired when inserting an epoll fd onto another epoll
  * fd. We do this so that we walk the epoll tree and ensure that this
  * insertion does not create a cycle of epoll file descriptors, which
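For context, the nesting that comment describes is set up from userspace by adding one epoll fd to another with a plain epoll_ctl(); the tree walk it mentions is what makes the second add below fail with ELOOP. A minimal illustration (error handling trimmed):

    #include <sys/epoll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int outer = epoll_create1(0);
            int inner = epoll_create1(0);
            struct epoll_event ev = { .events = EPOLLIN };

            /* Legal: one epoll fd pushed inside another epoll set. */
            ev.data.fd = inner;
            if (epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev))
                    perror("nested add");

            /* Adding outer back into inner would close a cycle, so
             * the kernel's tree walk rejects it with ELOOP. */
            ev.data.fd = outer;
            if (epoll_ctl(inner, EPOLL_CTL_ADD, outer, &ev))
                    perror("cycle add");

            close(inner);
            close(outer);
            return 0;
    }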
@@ -152,11 +152,11 @@ struct epitem {
 
 /*
  * This structure is stored inside the "private_data" member of the file
- * structure and rapresent the main data sructure for the eventpoll
+ * structure and represents the main data structure for the eventpoll
  * interface.
  */
 struct eventpoll {
-	/* Protect the this structure access */
+	/* Protect the access to this structure */
 	spinlock_t lock;
 
 	/*
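The "private_data" linkage works the way it does everywhere else in the VFS: each epoll file operation recovers its context from the struct file it is handed. A sketch of the pattern (simplified helper, not the file's exact code):

    static struct eventpoll *ep_from_file(struct file *file)
    {
            /* Stored at epoll fd creation time, as the comment above says. */
            return file->private_data;
    }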
@@ -316,6 +316,19 @@ static void ep_nested_calls_init(struct nested_calls *ncalls)
 }
 
 /**
+ * ep_events_available - Checks if ready events might be available.
+ *
+ * @ep: Pointer to the eventpoll context.
+ *
+ * Returns: Returns a value different than zero if ready events are available,
+ *          or zero otherwise.
+ */
+static inline int ep_events_available(struct eventpoll *ep)
+{
+	return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
+}
+
+/**
  * ep_call_nested - Perform a bound (possibly) nested call, by checking
  *                  that the recursion limit is not exceeded, and that
  *                  the same nested call (by the meaning of same cookie) is
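Note that the new helper answers "might be available", not "are available": ep->ovflist doubles as a state flag, where the EP_UNACTIVE_PTR sentinel means no event transfer is in flight, and any other value (even NULL, an active but empty list) means events may be parked there. A standalone model of that sentinel idiom, using hypothetical names:

    #include <stdio.h>

    struct ev { struct ev *next; };

    /* Sentinel: "overflow collection inactive". Any other value,
     * including NULL (active but empty), means events may be parked. */
    #define UNACTIVE_PTR ((struct ev *)1UL)

    struct ctx {
            struct ev *ready;       /* models ep->rdllist; NULL == empty */
            struct ev *ovf;         /* models ep->ovflist */
    };

    static int events_available(const struct ctx *c)
    {
            return c->ready != NULL || c->ovf != UNACTIVE_PTR;
    }

    int main(void)
    {
            struct ctx c = { NULL, UNACTIVE_PTR };
            printf("idle: %d\n", events_available(&c)); /* 0 */
            c.ovf = NULL;   /* a transfer begins collecting overflow */
            printf("xfer: %d\n", events_available(&c)); /* 1: recheck later */
            return 0;
    }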
@@ -793,7 +806,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
 
 /*
  * This is the callback that is passed to the wait queue wakeup
- * machanism. It is called by the stored file descriptors when they
+ * mechanism. It is called by the stored file descriptors when they
  * have events to report.
  */
 static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
@@ -824,9 +837,9 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
 		goto out_unlock;
 
 	/*
-	 * If we are trasfering events to userspace, we can hold no locks
+	 * If we are transferring events to userspace, we can hold no locks
 	 * (because we're accessing user memory, and because of linux f_op->poll()
-	 * semantics). All the events that happens during that period of time are
+	 * semantics). All the events that happen during that period of time are
 	 * chained in ep->ovflist and requeued later on.
 	 */
 	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
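The pattern behind that comment: while one CPU drains the ready list without holding the spinlock, the wakeup callback parks newly arriving events on the overflow list, and they are spliced back onto the ready list when the transfer ends. Roughly, the producer side looks like this (hypothetical names, a single-threaded model rather than the locked kernel code, reusing the sentinel from the sketch above):

    struct ev { struct ev *next; };

    #define UNACTIVE_PTR ((struct ev *)1UL)

    struct ctx {
            struct ev *ready;       /* normally consumed under the lock */
            struct ev *ovf;         /* side list used while transferring */
    };

    /* Models the decision ep_poll_callback() makes on each wakeup. */
    static void report_event(struct ctx *c, struct ev *e)
    {
            if (c->ovf != UNACTIVE_PTR) {
                    /* Transfer in flight: chain onto the overflow list,
                     * to be requeued once the transfer completes. */
                    e->next = c->ovf;
                    c->ovf = e;
            } else {
                    /* Normal path: link straight onto the ready list. */
                    e->next = c->ready;
                    c->ready = e;
            }
    }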
@@ -1135,12 +1148,29 @@ static inline struct timespec ep_set_mstimeout(long ms)
 	return timespec_add_safe(now, ts);
 }
 
+/**
+ * ep_poll - Retrieves ready events, and delivers them to the caller supplied
+ *           event buffer.
+ *
+ * @ep: Pointer to the eventpoll context.
+ * @events: Pointer to the userspace buffer where the ready events should be
+ *          stored.
+ * @maxevents: Size (in terms of number of events) of the caller event buffer.
+ * @timeout: Maximum timeout for the ready events fetch operation, in
+ *           milliseconds. If the @timeout is zero, the function will not block,
+ *           while if the @timeout is less than zero, the function will block
+ *           until at least one event has been retrieved (or an error
+ *           occurred).
+ *
+ * Returns: Returns the number of ready events which have been fetched, or an
+ *          error code, in case of error.
+ */
 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		   int maxevents, long timeout)
 {
-	int res, eavail, timed_out = 0;
+	int res = 0, eavail, timed_out = 0;
 	unsigned long flags;
-	long slack;
+	long slack = 0;
 	wait_queue_t wait;
 	ktime_t expires, *to = NULL;
 
@@ -1151,14 +1181,19 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		to = &expires;
 		*to = timespec_to_ktime(end_time);
 	} else if (timeout == 0) {
+		/*
+		 * Avoid the unnecessary trip to the wait queue loop, if the
+		 * caller specified a non blocking operation.
+		 */
 		timed_out = 1;
+		spin_lock_irqsave(&ep->lock, flags);
+		goto check_events;
 	}
 
-retry:
+fetch_events:
 	spin_lock_irqsave(&ep->lock, flags);
 
-	res = 0;
-	if (list_empty(&ep->rdllist)) {
+	if (!ep_events_available(ep)) {
 		/*
 		 * We don't have any available event to return to the caller.
 		 * We need to sleep here, and we will be wake up by
@@ -1174,7 +1209,7 @@ retry:
 			 * to TASK_INTERRUPTIBLE before doing the checks.
 			 */
 			set_current_state(TASK_INTERRUPTIBLE);
-			if (!list_empty(&ep->rdllist) || timed_out)
+			if (ep_events_available(ep) || timed_out)
 				break;
 			if (signal_pending(current)) {
 				res = -EINTR;
@@ -1191,8 +1226,9 @@ retry:
 
 		set_current_state(TASK_RUNNING);
 	}
+check_events:
 	/* Is it worth to try to dig for events ? */
-	eavail = !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
+	eavail = ep_events_available(ep);
 
 	spin_unlock_irqrestore(&ep->lock, flags);
 
@@ -1203,7 +1239,7 @@ retry:
 	 */
 	if (!res && eavail &&
 	    !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
-		goto retry;
+		goto fetch_events;
 
 	return res;
 }
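The timeout contract spelled out in the new kernel-doc is exactly what userspace sees through epoll_wait(2), including the zero-timeout fast path this patch adds. A short demonstration:

    #include <sys/epoll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int epfd = epoll_create1(0);
            struct epoll_event evs[8];

            /* timeout == 0: never blocks; takes the new check_events
             * shortcut and returns 0 here, since nothing is registered. */
            int n = epoll_wait(epfd, evs, 8, 0);
            printf("timeout 0:   %d ready\n", n);

            /* timeout > 0: blocks for at most that many milliseconds;
             * timeout < 0 would block until an event or signal arrives. */
            n = epoll_wait(epfd, evs, 8, 100);
            printf("timeout 100: %d ready\n", n);

            close(epfd);
            return 0;
    }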