Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r-- | fs/eventpoll.c | 52
1 file changed, 44 insertions(+), 8 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ff12f7ac73ef..ed38801b57a7 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -316,6 +316,19 @@ static void ep_nested_calls_init(struct nested_calls *ncalls)
 }
 
 /**
+ * ep_events_available - Checks if ready events might be available.
+ *
+ * @ep: Pointer to the eventpoll context.
+ *
+ * Returns: Returns a value different than zero if ready events are available,
+ *          or zero otherwise.
+ */
+static inline int ep_events_available(struct eventpoll *ep)
+{
+	return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
+}
+
+/**
  * ep_call_nested - Perform a bound (possibly) nested call, by checking
  *                  that the recursion limit is not exceeded, and that
  *                  the same nested call (by the meaning of same cookie) is
@@ -1135,12 +1148,29 @@ static inline struct timespec ep_set_mstimeout(long ms)
 	return timespec_add_safe(now, ts);
 }
 
+/**
+ * ep_poll - Retrieves ready events, and delivers them to the caller supplied
+ *           event buffer.
+ *
+ * @ep: Pointer to the eventpoll context.
+ * @events: Pointer to the userspace buffer where the ready events should be
+ *          stored.
+ * @maxevents: Size (in terms of number of events) of the caller event buffer.
+ * @timeout: Maximum timeout for the ready events fetch operation, in
+ *           milliseconds. If the @timeout is zero, the function will not block,
+ *           while if the @timeout is less than zero, the function will block
+ *           until at least one event has been retrieved (or an error
+ *           occurred).
+ *
+ * Returns: Returns the number of ready events which have been fetched, or an
+ *          error code, in case of error.
+ */
 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		   int maxevents, long timeout)
 {
-	int res, eavail, timed_out = 0;
+	int res = 0, eavail, timed_out = 0;
 	unsigned long flags;
-	long slack;
+	long slack = 0;
 	wait_queue_t wait;
 	ktime_t expires, *to = NULL;
 
@@ -1151,14 +1181,19 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		to = &expires;
 		*to = timespec_to_ktime(end_time);
 	} else if (timeout == 0) {
+		/*
+		 * Avoid the unnecessary trip to the wait queue loop, if the
+		 * caller specified a non blocking operation.
+		 */
 		timed_out = 1;
+		spin_lock_irqsave(&ep->lock, flags);
+		goto check_events;
 	}
 
-retry:
+fetch_events:
 	spin_lock_irqsave(&ep->lock, flags);
 
-	res = 0;
-	if (list_empty(&ep->rdllist)) {
+	if (!ep_events_available(ep)) {
 		/*
 		 * We don't have any available event to return to the caller.
 		 * We need to sleep here, and we will be wake up by
@@ -1174,7 +1209,7 @@ retry:
 		 * to TASK_INTERRUPTIBLE before doing the checks.
 		 */
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (!list_empty(&ep->rdllist) || timed_out)
+		if (ep_events_available(ep) || timed_out)
 			break;
 		if (signal_pending(current)) {
 			res = -EINTR;
@@ -1191,8 +1226,9 @@ retry:
 
 		set_current_state(TASK_RUNNING);
 	}
+check_events:
 	/* Is it worth to try to dig for events ? */
-	eavail = !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
+	eavail = ep_events_available(ep);
 
 	spin_unlock_irqrestore(&ep->lock, flags);
 
@@ -1203,7 +1239,7 @@ retry:
 	 */
 	if (!res && eavail &&
 	    !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
-		goto retry;
+		goto fetch_events;
 
 	return res;
 }
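
The timeout contract spelled out in the new ep_poll() kernel-doc is what userspace observes through epoll_wait(2). Below is a minimal, self-contained illustration (a sketch outside this patch; the pipe, the one-slot event buffer, and the variable names are only example choices): a zero timeout takes the non-blocking check_events path and returns at once, while a negative timeout blocks until at least one event is ready.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
	int epfd, nready, pipefd[2];
	struct epoll_event ev, out;

	if (pipe(pipefd) < 0 || (epfd = epoll_create1(0)) < 0) {
		perror("setup");
		return 1;
	}

	/* Watch the read end of the pipe for input. */
	memset(&ev, 0, sizeof(ev));
	ev.events = EPOLLIN;
	ev.data.fd = pipefd[0];
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev) < 0) {
		perror("epoll_ctl");
		return 1;
	}

	/*
	 * timeout == 0: non-blocking check. Nothing is ready yet, so this
	 * returns 0 immediately (the check_events fast path in the patch).
	 */
	nready = epoll_wait(epfd, &out, 1, 0);
	printf("timeout  0 -> %d ready\n", nready);

	/*
	 * Make the pipe readable, then call with timeout == -1: the call
	 * blocks until at least one event is available and then returns it.
	 */
	if (write(pipefd[1], "x", 1) != 1) {
		perror("write");
		return 1;
	}
	nready = epoll_wait(epfd, &out, 1, -1);
	printf("timeout -1 -> %d ready\n", nready);

	close(pipefd[0]);
	close(pipefd[1]);
	close(epfd);
	return 0;
}

With a positive timeout the same wait loop is used, but the sleep is bounded by the deadline prepared from ep_set_mstimeout(), so epoll_wait() returns on the first event, a signal, or when the (slack-adjusted) deadline passes.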