Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r--  fs/eventpoll.c | 561
1 file changed, 243 insertions, 318 deletions
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 1aad34ea61a4..0b73cd45a06d 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * fs/eventpoll.c ( Efficient event polling implementation ) 2 * fs/eventpoll.c (Efficient event polling implementation)
3 * Copyright (C) 2001,...,2006 Davide Libenzi 3 * Copyright (C) 2001,...,2007 Davide Libenzi
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -26,7 +26,6 @@
26#include <linux/hash.h> 26#include <linux/hash.h>
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/syscalls.h> 28#include <linux/syscalls.h>
29#include <linux/rwsem.h>
30#include <linux/rbtree.h> 29#include <linux/rbtree.h>
31#include <linux/wait.h> 30#include <linux/wait.h>
32#include <linux/eventpoll.h> 31#include <linux/eventpoll.h>
@@ -39,15 +38,14 @@
39#include <asm/io.h> 38#include <asm/io.h>
40#include <asm/mman.h> 39#include <asm/mman.h>
41#include <asm/atomic.h> 40#include <asm/atomic.h>
42#include <asm/semaphore.h>
43 41
44/* 42/*
45 * LOCKING: 43 * LOCKING:
46 * There are three levels of locking required by epoll: 44 * There are three levels of locking required by epoll:
47 * 45 *
48 * 1) epmutex (mutex) 46 * 1) epmutex (mutex)
49 * 2) ep->sem (rw_semaphore) 47 * 2) ep->mtx (mutex)
50 * 3) ep->lock (rw_lock) 48 * 3) ep->lock (spinlock)
51 * 49 *
52 * The acquire order is the one listed above, from 1 to 3. 50 * The acquire order is the one listed above, from 1 to 3.
53 * We need a spinlock (ep->lock) because we manipulate objects 51 * We need a spinlock (ep->lock) because we manipulate objects
@@ -57,20 +55,20 @@
57 * a spinlock. During the event transfer loop (from kernel to 55 * a spinlock. During the event transfer loop (from kernel to
58 * user space) we could end up sleeping due to a copy_to_user(), so 56 * user space) we could end up sleeping due to a copy_to_user(), so
59 * we need a lock that will allow us to sleep. This lock is a 57 * we need a lock that will allow us to sleep. This lock is a
60 * read-write semaphore (ep->sem). It is acquired on read during 58 * mutex (ep->mtx). It is acquired during the event transfer loop,
61 * the event transfer loop and in write during epoll_ctl(EPOLL_CTL_DEL) 59 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
62 * and during eventpoll_release_file(). Then we also need a global 60 * Then we also need a global mutex to serialize eventpoll_release_file()
63 * semaphore to serialize eventpoll_release_file() and ep_free(). 61 * and ep_free().
64 * This semaphore is acquired by ep_free() during the epoll file 62 * This mutex is acquired by ep_free() during the epoll file
65 * cleanup path and it is also acquired by eventpoll_release_file() 63 * cleanup path and it is also acquired by eventpoll_release_file()
66 * if a file has been pushed inside an epoll set and it is then 64 * if a file has been pushed inside an epoll set and it is then
67 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL). 65 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
68 * It is possible to drop the "ep->sem" and to use the global 66 * It is possible to drop the "ep->mtx" and to use the global
69 * semaphore "epmutex" (together with "ep->lock") to have it working, 67 * mutex "epmutex" (together with "ep->lock") to have it working,
70 * but having "ep->sem" will make the interface more scalable. 68 * but having "ep->mtx" will make the interface more scalable.
71 * Events that require holding "epmutex" are very rare, while for 69 * Events that require holding "epmutex" are very rare, while for
72 * normal operations the epoll private "ep->sem" will guarantee 70 * normal operations the epoll private "ep->mtx" will guarantee
73 * greater scalability. 71 * better scalability.
74 */ 72 */
75 73
76#define DEBUG_EPOLL 0 74#define DEBUG_EPOLL 0
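To make the ordering concrete, here is a minimal user-space sketch of the same three-level discipline, with pthread mutexes standing in for epmutex, ep->mtx and ep->lock (which in the kernel is a spinlock taken with IRQs disabled). All names and functions here are illustrative, not kernel APIs; the point is only that every path acquires in ascending level order, which is what rules out deadlock.

        #include <pthread.h>

        /* Illustrative stand-ins, not kernel objects: level 1 = epmutex,
         * level 2 = ep->mtx, level 3 = ep->lock. */
        static pthread_mutex_t epmutex_model = PTHREAD_MUTEX_INITIALIZER; /* 1 */
        static pthread_mutex_t mtx_model     = PTHREAD_MUTEX_INITIALIZER; /* 2 */
        static pthread_mutex_t lock_model    = PTHREAD_MUTEX_INITIALIZER; /* 3 */

        /* eventpoll_release_file() analogue: needs all three levels. */
        static void release_file_path(void)
        {
                pthread_mutex_lock(&epmutex_model);     /* 1 */
                pthread_mutex_lock(&mtx_model);         /* 2 */
                pthread_mutex_lock(&lock_model);        /* 3 */
                /* ... unlink items here ... */
                pthread_mutex_unlock(&lock_model);
                pthread_mutex_unlock(&mtx_model);
                pthread_mutex_unlock(&epmutex_model);
        }

        /* epoll_ctl() analogue: may skip level 1, but never reorders 2 and 3. */
        static void ctl_path(void)
        {
                pthread_mutex_lock(&mtx_model);         /* 2 */
                pthread_mutex_lock(&lock_model);        /* 3 */
                /* ... touch the ready list here ... */
                pthread_mutex_unlock(&lock_model);
                pthread_mutex_unlock(&mtx_model);
        }

        int main(void)
        {
                release_file_path();
                ctl_path();
                return 0;
        }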
@@ -102,6 +100,8 @@
102 100
103#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event)) 101#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
104 102
103#define EP_UNACTIVE_PTR ((void *) -1L)
104
105struct epoll_filefd { 105struct epoll_filefd {
106 struct file *file; 106 struct file *file;
107 int fd; 107 int fd;
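The EP_UNACTIVE_PTR sentinel introduced here matters because a "next" pointer needs three distinguishable states: not queued at all, queued with a successor, and queued at the tail (NULL). A standalone sketch of the idea, using an illustrative "item" type rather than the kernel's struct epitem:

        #include <assert.h>
        #include <stddef.h>

        #define UNACTIVE_PTR ((void *) -1L)

        struct item {
                struct item *next; /* UNACTIVE_PTR = not queued; NULL = queued,
                                      tail; anything else = queued, more follow */
        };

        static int on_overflow_list(const struct item *it)
        {
                return it->next != (struct item *) UNACTIVE_PTR;
        }

        int main(void)
        {
                struct item tail = { .next = NULL };          /* queued, last */
                struct item head = { .next = &tail };         /* queued       */
                struct item idle = { .next = UNACTIVE_PTR };  /* not queued   */

                assert(on_overflow_list(&head) && on_overflow_list(&tail));
                assert(!on_overflow_list(&idle));
                return 0;
        }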
@@ -111,7 +111,7 @@ struct epoll_filefd {
111 * Node that is linked into the "wake_task_list" member of the "struct poll_safewake". 111 * Node that is linked into the "wake_task_list" member of the "struct poll_safewake".
112 * It is used to keep track of all tasks that are currently inside the wake_up() code 112 * It is used to keep track of all tasks that are currently inside the wake_up() code
113 * to 1) short-circuit the one coming from the same task and same wait queue head 113 * to 1) short-circuit the one coming from the same task and same wait queue head
114 * ( loop ) 2) allow a maximum number of epoll descriptors inclusion nesting 114 * (loop) 2) allow a maximum number of epoll descriptors inclusion nesting
115 * 3) let go the ones coming from other tasks. 115 * 3) let go the ones coming from other tasks.
116 */ 116 */
117struct wake_task_node { 117struct wake_task_node {
@@ -130,21 +130,57 @@ struct poll_safewake {
130}; 130};
131 131
132/* 132/*
133 * Each file descriptor added to the eventpoll interface will
134 * have an entry of this type linked to the "rbr" RB tree.
135 */
136struct epitem {
137 /* RB tree node used to link this structure to the eventpoll RB tree */
138 struct rb_node rbn;
139
140 /* List header used to link this structure to the eventpoll ready list */
141 struct list_head rdllink;
142
143 /*
144 * Works together with "struct eventpoll"->ovflist in keeping the
145 * singly linked chain of items.
146 */
147 struct epitem *next;
148
149 /* The file descriptor information this item refers to */
150 struct epoll_filefd ffd;
151
152 /* Number of active wait queues attached to poll operations */
153 int nwait;
154
155 /* List containing poll wait queues */
156 struct list_head pwqlist;
157
158 /* The "container" of this item */
159 struct eventpoll *ep;
160
161 /* List header used to link this item to the "struct file" items list */
162 struct list_head fllink;
163
164 /* The structure that describes the events of interest and the source fd */
165 struct epoll_event event;
166};
167
168/*
133 * This structure is stored inside the "private_data" member of the file 169 * This structure is stored inside the "private_data" member of the file
134 * structure and represents the main data structure for the eventpoll 170 * structure and represents the main data structure for the eventpoll
135 * interface. 171 * interface.
136 */ 172 */
137struct eventpoll { 173struct eventpoll {
138 /* Protect access to this structure */ 174 /* Protect access to this structure */
139 rwlock_t lock; 175 spinlock_t lock;
140 176
141 /* 177 /*
142 * This semaphore is used to ensure that files are not removed 178 * This mutex is used to ensure that files are not removed
143 * while epoll is using them. This is read-held during the event 179 * while epoll is using them. This is held during the event
144 * collection loop and it is write-held during the file cleanup 180 * collection loop, the file cleanup path, the epoll file exit
145 * path, the epoll file exit code and the ctl operations. 181 * code and the ctl operations.
146 */ 182 */
147 struct rw_semaphore sem; 183 struct mutex mtx;
148 184
149 /* Wait queue used by sys_epoll_wait() */ 185 /* Wait queue used by sys_epoll_wait() */
150 wait_queue_head_t wq; 186 wait_queue_head_t wq;
@@ -155,8 +191,15 @@ struct eventpoll {
155 /* List of ready file descriptors */ 191 /* List of ready file descriptors */
156 struct list_head rdllist; 192 struct list_head rdllist;
157 193
158 /* RB-Tree root used to store monitored fd structs */ 194 /* RB tree root used to store monitored fd structs */
159 struct rb_root rbr; 195 struct rb_root rbr;
196
197 /*
198 * This is a singly linked list that chains all the "struct epitem" that
199 * got events while we were transferring ready events to user space w/out
200 * holding ->lock.
201 */
202 struct epitem *ovflist;
160}; 203};
161 204
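Note how epitem embeds its own link nodes (rbn, rdllink, fllink, plus the new "next" pointer): one allocation can sit in the RB tree, the ready list and the per-file list at the same time, and each container recovers the enclosing object with the usual container_of() arithmetic. A minimal user-space model of that intrusive-linkage pattern, with made-up names:

        #include <stddef.h>
        #include <stdio.h>

        struct list_node { struct list_node *next, *prev; };

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct item {
                int fd;
                struct list_node rdllink;  /* would hang off the ready list   */
                struct list_node fllink;   /* would hang off the file's list  */
        };

        int main(void)
        {
                struct item it = { .fd = 42 };
                struct list_node *pos = &it.rdllink; /* as a list walker sees it */

                /* The same object is reached through either embedded node. */
                printf("%d\n", container_of(pos, struct item, rdllink)->fd);
                printf("%d\n", container_of(&it.fllink, struct item, fllink)->fd);
                return 0;
        }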
162/* Wait structure used by the poll hooks */ 205/* Wait structure used by the poll hooks */
@@ -177,42 +220,6 @@ struct eppoll_entry {
177 wait_queue_head_t *whead; 220 wait_queue_head_t *whead;
178}; 221};
179 222
180/*
181 * Each file descriptor added to the eventpoll interface will
182 * have an entry of this type linked to the "rbr" RB tree.
183 */
184struct epitem {
185 /* RB-Tree node used to link this structure to the eventpoll rb-tree */
186 struct rb_node rbn;
187
188 /* List header used to link this structure to the eventpoll ready list */
189 struct list_head rdllink;
190
191 /* The file descriptor information this item refers to */
192 struct epoll_filefd ffd;
193
194 /* Number of active wait queue attached to poll operations */
195 int nwait;
196
197 /* List containing poll wait queues */
198 struct list_head pwqlist;
199
200 /* The "container" of this item */
201 struct eventpoll *ep;
202
203 /* The structure that describe the interested events and the source fd */
204 struct epoll_event event;
205
206 /*
207 * Used to keep track of the usage count of the structure. This avoids
208 * that the structure will desappear from underneath our processing.
209 */
210 atomic_t usecnt;
211
212 /* List header used to link this item to the "struct file" items list */
213 struct list_head fllink;
214};
215
216/* Wrapper struct used by poll queueing */ 223/* Wrapper struct used by poll queueing */
217struct ep_pqueue { 224struct ep_pqueue {
218 poll_table pt; 225 poll_table pt;
@@ -220,7 +227,7 @@ struct ep_pqueue {
220}; 227};
221 228
222/* 229/*
223 * This semaphore is used to serialize ep_free() and eventpoll_release_file(). 230 * This mutex is used to serialize ep_free() and eventpoll_release_file().
224 */ 231 */
225static struct mutex epmutex; 232static struct mutex epmutex;
226 233
@@ -234,7 +241,7 @@ static struct kmem_cache *epi_cache __read_mostly;
234static struct kmem_cache *pwq_cache __read_mostly; 241static struct kmem_cache *pwq_cache __read_mostly;
235 242
236 243
237/* Setup the structure that is used as key for the rb-tree */ 244/* Setup the structure that is used as key for the RB tree */
238static inline void ep_set_ffd(struct epoll_filefd *ffd, 245static inline void ep_set_ffd(struct epoll_filefd *ffd,
239 struct file *file, int fd) 246 struct file *file, int fd)
240{ 247{
@@ -242,7 +249,7 @@ static inline void ep_set_ffd(struct epoll_filefd *ffd,
242 ffd->fd = fd; 249 ffd->fd = fd;
243} 250}
244 251
245/* Compare rb-tree keys */ 252/* Compare RB tree keys */
246static inline int ep_cmp_ffd(struct epoll_filefd *p1, 253static inline int ep_cmp_ffd(struct epoll_filefd *p1,
247 struct epoll_filefd *p2) 254 struct epoll_filefd *p2)
248{ 255{
@@ -250,20 +257,20 @@ static inline int ep_cmp_ffd(struct epoll_filefd *p1,
250 (p1->file < p2->file ? -1 : p1->fd - p2->fd)); 257 (p1->file < p2->file ? -1 : p1->fd - p2->fd));
251} 258}
252 259
253/* Special initialization for the rb-tree node to detect linkage */ 260/* Special initialization for the RB tree node to detect linkage */
254static inline void ep_rb_initnode(struct rb_node *n) 261static inline void ep_rb_initnode(struct rb_node *n)
255{ 262{
256 rb_set_parent(n, n); 263 rb_set_parent(n, n);
257} 264}
258 265
259/* Removes a node from the rb-tree and marks it for a fast is-linked check */ 266/* Removes a node from the RB tree and marks it for a fast is-linked check */
260static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r) 267static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r)
261{ 268{
262 rb_erase(n, r); 269 rb_erase(n, r);
263 rb_set_parent(n, n); 270 rb_set_parent(n, n);
264} 271}
265 272
266/* Fast check to verify that the item is linked to the main rb-tree */ 273/* Fast check to verify that the item is linked to the main RB tree */
267static inline int ep_rb_linked(struct rb_node *n) 274static inline int ep_rb_linked(struct rb_node *n)
268{ 275{
269 return rb_parent(n) != n; 276 return rb_parent(n) != n;
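ep_cmp_ffd() above orders keys by file pointer first and fd second, which gives the RB tree the strict total order it needs while letting the same file appear under several fds. A standalone restatement with a quick sanity check; "filefd" is our stand-in for struct epoll_filefd, and, like the kernel, the sketch relies on relational pointer comparison:

        #include <assert.h>

        struct filefd { const void *file; int fd; };

        static int cmp_ffd(const struct filefd *p1, const struct filefd *p2)
        {
                return (p1->file > p2->file ? +1 :
                        (p1->file < p2->file ? -1 : p1->fd - p2->fd));
        }

        int main(void)
        {
                int fa, fb;                    /* two distinct "files" */
                struct filefd a = { &fa, 3 }, b = { &fa, 7 }, c = { &fb, 3 };

                assert(cmp_ffd(&a, &a) == 0);  /* reflexive equality        */
                assert(cmp_ffd(&a, &b) < 0);   /* same file: fd breaks tie  */
                assert(cmp_ffd(&a, &c) != 0);  /* different files differ    */
                return 0;
        }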
@@ -381,78 +388,11 @@ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
381} 388}
382 389
383/* 390/*
384 * Unlink the "struct epitem" from all places it might have been hooked up.
385 * This function must be called with write IRQ lock on "ep->lock".
386 */
387static int ep_unlink(struct eventpoll *ep, struct epitem *epi)
388{
389 int error;
390
391 /*
392 * It can happen that this one is called for an item already unlinked.
393 * The check protect us from doing a double unlink ( crash ).
394 */
395 error = -ENOENT;
396 if (!ep_rb_linked(&epi->rbn))
397 goto error_return;
398
399 /*
400 * Clear the event mask for the unlinked item. This will avoid item
401 * notifications to be sent after the unlink operation from inside
402 * the kernel->userspace event transfer loop.
403 */
404 epi->event.events = 0;
405
406 /*
407 * At this point is safe to do the job, unlink the item from our rb-tree.
408 * This operation togheter with the above check closes the door to
409 * double unlinks.
410 */
411 ep_rb_erase(&epi->rbn, &ep->rbr);
412
413 /*
414 * If the item we are going to remove is inside the ready file descriptors
415 * we want to remove it from this list to avoid stale events.
416 */
417 if (ep_is_linked(&epi->rdllink))
418 list_del_init(&epi->rdllink);
419
420 error = 0;
421error_return:
422
423 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n",
424 current, ep, epi->ffd.file, error));
425
426 return error;
427}
428
429/*
430 * Increment the usage count of the "struct epitem" making it sure
431 * that the user will have a valid pointer to reference.
432 */
433static void ep_use_epitem(struct epitem *epi)
434{
435 atomic_inc(&epi->usecnt);
436}
437
438/*
439 * Decrement ( release ) the usage count by signaling that the user
440 * has finished using the structure. It might lead to freeing the
441 * structure itself if the count goes to zero.
442 */
443static void ep_release_epitem(struct epitem *epi)
444{
445 if (atomic_dec_and_test(&epi->usecnt))
446 kmem_cache_free(epi_cache, epi);
447}
448
449/*
450 * Removes a "struct epitem" from the eventpoll RB tree and deallocates 391 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
451 * all the associated resources. 392 * all the associated resources. Must be called with "mtx" held.
452 */ 393 */
453static int ep_remove(struct eventpoll *ep, struct epitem *epi) 394static int ep_remove(struct eventpoll *ep, struct epitem *epi)
454{ 395{
455 int error;
456 unsigned long flags; 396 unsigned long flags;
457 struct file *file = epi->ffd.file; 397 struct file *file = epi->ffd.file;
458 398
@@ -472,26 +412,21 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
472 list_del_init(&epi->fllink); 412 list_del_init(&epi->fllink);
473 spin_unlock(&file->f_ep_lock); 413 spin_unlock(&file->f_ep_lock);
474 414
475 /* We need to acquire the write IRQ lock before calling ep_unlink() */ 415 if (ep_rb_linked(&epi->rbn))
476 write_lock_irqsave(&ep->lock, flags); 416 ep_rb_erase(&epi->rbn, &ep->rbr);
477
478 /* Really unlink the item from the RB tree */
479 error = ep_unlink(ep, epi);
480
481 write_unlock_irqrestore(&ep->lock, flags);
482 417
483 if (error) 418 spin_lock_irqsave(&ep->lock, flags);
484 goto error_return; 419 if (ep_is_linked(&epi->rdllink))
420 list_del_init(&epi->rdllink);
421 spin_unlock_irqrestore(&ep->lock, flags);
485 422
486 /* At this point it is safe to free the eventpoll item */ 423 /* At this point it is safe to free the eventpoll item */
487 ep_release_epitem(epi); 424 kmem_cache_free(epi_cache, epi);
488 425
489 error = 0; 426 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p)\n",
490error_return: 427 current, ep, file));
491 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p) = %d\n",
492 current, ep, file, error));
493 428
494 return error; 429 return 0;
495} 430}
496 431
497static void ep_free(struct eventpoll *ep) 432static void ep_free(struct eventpoll *ep)
@@ -506,7 +441,7 @@ static void ep_free(struct eventpoll *ep)
506 /* 441 /*
507 * We need to lock this because we could be hit by 442 * We need to lock this because we could be hit by
508 * eventpoll_release_file() while we're freeing the "struct eventpoll". 443 * eventpoll_release_file() while we're freeing the "struct eventpoll".
509 * We do not need to hold "ep->sem" here because the epoll file 444 * We do not need to hold "ep->mtx" here because the epoll file
510 * is on the way to be removed and no one has references to it 445 * is on the way to be removed and no one has references to it
511 * anymore. The only hit might come from eventpoll_release_file() but 446 * anymore. The only hit might come from eventpoll_release_file() but
512 * holding "epmutex" is sufficient here. 447 * holding "epmutex" is sufficient here.
@@ -525,7 +460,7 @@ static void ep_free(struct eventpoll *ep)
525 /* 460 /*
526 * Walks through the whole tree by freeing each "struct epitem". At this 461 * Walks through the whole tree by freeing each "struct epitem". At this
527 * point we are sure no poll callbacks will be lingering around, and also by 462 * point we are sure no poll callbacks will be lingering around, and also by
528 * write-holding "sem" we can be sure that no file cleanup code will hit 463 * holding "epmutex" we can be sure that no file cleanup code will hit
529 * us during this operation. So we can avoid the lock on "ep->lock". 464 * us during this operation. So we can avoid the lock on "ep->lock".
530 */ 465 */
531 while ((rbp = rb_first(&ep->rbr)) != 0) { 466 while ((rbp = rb_first(&ep->rbr)) != 0) {
@@ -534,16 +469,16 @@ static void ep_free(struct eventpoll *ep)
534 } 469 }
535 470
536 mutex_unlock(&epmutex); 471 mutex_unlock(&epmutex);
472 mutex_destroy(&ep->mtx);
473 kfree(ep);
537} 474}
538 475
539static int ep_eventpoll_release(struct inode *inode, struct file *file) 476static int ep_eventpoll_release(struct inode *inode, struct file *file)
540{ 477{
541 struct eventpoll *ep = file->private_data; 478 struct eventpoll *ep = file->private_data;
542 479
543 if (ep) { 480 if (ep)
544 ep_free(ep); 481 ep_free(ep);
545 kfree(ep);
546 }
547 482
548 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep)); 483 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep));
549 return 0; 484 return 0;
@@ -559,10 +494,10 @@ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
559 poll_wait(file, &ep->poll_wait, wait); 494 poll_wait(file, &ep->poll_wait, wait);
560 495
561 /* Check our condition */ 496 /* Check our condition */
562 read_lock_irqsave(&ep->lock, flags); 497 spin_lock_irqsave(&ep->lock, flags);
563 if (!list_empty(&ep->rdllist)) 498 if (!list_empty(&ep->rdllist))
564 pollflags = POLLIN | POLLRDNORM; 499 pollflags = POLLIN | POLLRDNORM;
565 read_unlock_irqrestore(&ep->lock, flags); 500 spin_unlock_irqrestore(&ep->lock, flags);
566 501
567 return pollflags; 502 return pollflags;
568} 503}
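Because ep_eventpoll_poll() reports POLLIN whenever the ready list is non-empty, an epoll file descriptor is itself pollable, so one epoll set can watch another. A user-space sketch of that nesting, with error handling trimmed:

        #include <sys/epoll.h>
        #include <unistd.h>

        int main(void)
        {
                int inner = epoll_create(1);
                int outer = epoll_create(1);

                /* Watch the inner epoll fd from the outer one. */
                struct epoll_event ev = { .events = EPOLLIN, .data.fd = inner };
                epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev);

                /* A wait on "outer" now wakes when "inner" has ready events. */
                struct epoll_event out;
                epoll_wait(outer, &out, 1, 0);  /* 0 ms: non-blocking probe */

                close(inner);
                close(outer);
                return 0;
        }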
@@ -594,9 +529,11 @@ void eventpoll_release_file(struct file *file)
594 * We don't want to get "file->f_ep_lock" because it is not 529 * We don't want to get "file->f_ep_lock" because it is not
595 * necessary. It is not necessary because we're in the "struct file" 530 * necessary. It is not necessary because we're in the "struct file"
596 * cleanup path, and this means that no one is using this file anymore. 531 * cleanup path, and this means that no one is using this file anymore.
597 * The only hit might come from ep_free() but by holding the semaphore 532 * So, for example, epoll_ctl() cannot hit here since if we reach this
533 * point, the file counter already went to zero and fget() would fail.
534 * The only hit might come from ep_free() but holding the mutex
598 * will correctly serialize the operation. We do need to acquire 535 * will correctly serialize the operation. We do need to acquire
599 * "ep->sem" after "epmutex" because ep_remove() requires it when called 536 * "ep->mtx" after "epmutex" because ep_remove() requires it when called
600 * from anywhere but ep_free(). 537 * from anywhere but ep_free().
601 */ 538 */
602 mutex_lock(&epmutex); 539 mutex_lock(&epmutex);
@@ -606,9 +543,9 @@ void eventpoll_release_file(struct file *file)
606 543
607 ep = epi->ep; 544 ep = epi->ep;
608 list_del_init(&epi->fllink); 545 list_del_init(&epi->fllink);
609 down_write(&ep->sem); 546 mutex_lock(&ep->mtx);
610 ep_remove(ep, epi); 547 ep_remove(ep, epi);
611 up_write(&ep->sem); 548 mutex_unlock(&ep->mtx);
612 } 549 }
613 550
614 mutex_unlock(&epmutex); 551 mutex_unlock(&epmutex);
@@ -621,12 +558,13 @@ static int ep_alloc(struct eventpoll **pep)
621 if (!ep) 558 if (!ep)
622 return -ENOMEM; 559 return -ENOMEM;
623 560
624 rwlock_init(&ep->lock); 561 spin_lock_init(&ep->lock);
625 init_rwsem(&ep->sem); 562 mutex_init(&ep->mtx);
626 init_waitqueue_head(&ep->wq); 563 init_waitqueue_head(&ep->wq);
627 init_waitqueue_head(&ep->poll_wait); 564 init_waitqueue_head(&ep->poll_wait);
628 INIT_LIST_HEAD(&ep->rdllist); 565 INIT_LIST_HEAD(&ep->rdllist);
629 ep->rbr = RB_ROOT; 566 ep->rbr = RB_ROOT;
567 ep->ovflist = EP_UNACTIVE_PTR;
630 568
631 *pep = ep; 569 *pep = ep;
632 570
@@ -636,20 +574,18 @@ static int ep_alloc(struct eventpoll **pep)
636} 574}
637 575
638/* 576/*
639 * Search the file inside the eventpoll tree. It add usage count to 577 * Search the file inside the eventpoll tree. The RB tree operations
640 * the returned item, so the caller must call ep_release_epitem() 578 * are protected by the "mtx" mutex, and ep_find() must be called with
641 * after finished using the "struct epitem". 579 * "mtx" held.
642 */ 580 */
643static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd) 581static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
644{ 582{
645 int kcmp; 583 int kcmp;
646 unsigned long flags;
647 struct rb_node *rbp; 584 struct rb_node *rbp;
648 struct epitem *epi, *epir = NULL; 585 struct epitem *epi, *epir = NULL;
649 struct epoll_filefd ffd; 586 struct epoll_filefd ffd;
650 587
651 ep_set_ffd(&ffd, file, fd); 588 ep_set_ffd(&ffd, file, fd);
652 read_lock_irqsave(&ep->lock, flags);
653 for (rbp = ep->rbr.rb_node; rbp; ) { 589 for (rbp = ep->rbr.rb_node; rbp; ) {
654 epi = rb_entry(rbp, struct epitem, rbn); 590 epi = rb_entry(rbp, struct epitem, rbn);
655 kcmp = ep_cmp_ffd(&ffd, &epi->ffd); 591 kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
@@ -658,12 +594,10 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
658 else if (kcmp < 0) 594 else if (kcmp < 0)
659 rbp = rbp->rb_left; 595 rbp = rbp->rb_left;
660 else { 596 else {
661 ep_use_epitem(epi);
662 epir = epi; 597 epir = epi;
663 break; 598 break;
664 } 599 }
665 } 600 }
666 read_unlock_irqrestore(&ep->lock, flags);
667 601
668 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n", 602 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n",
669 current, file, epir)); 603 current, file, epir));
@@ -686,7 +620,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
686 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n", 620 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
687 current, epi->ffd.file, epi, ep)); 621 current, epi->ffd.file, epi, ep));
688 622
689 write_lock_irqsave(&ep->lock, flags); 623 spin_lock_irqsave(&ep->lock, flags);
690 624
691 /* 625 /*
692 * If the event mask does not contain any poll(2) event, we consider the 626 * If the event mask does not contain any poll(2) event, we consider the
@@ -695,7 +629,21 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
695 * until the next EPOLL_CTL_MOD will be issued. 629 * until the next EPOLL_CTL_MOD will be issued.
696 */ 630 */
697 if (!(epi->event.events & ~EP_PRIVATE_BITS)) 631 if (!(epi->event.events & ~EP_PRIVATE_BITS))
698 goto is_disabled; 632 goto out_unlock;
633
634 /*
635 * If we are transferring events to user space, we can hold no locks
636 * (because we're accessing user memory, and because of Linux f_op->poll()
637 * semantics). All the events that happen during that period of time are
638 * chained in ep->ovflist and requeued later on.
639 */
640 if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
641 if (epi->next == EP_UNACTIVE_PTR) {
642 epi->next = ep->ovflist;
643 ep->ovflist = epi;
644 }
645 goto out_unlock;
646 }
699 647
700 /* If this file is already in the ready list we exit soon */ 648 /* If this file is already in the ready list we exit soon */
701 if (ep_is_linked(&epi->rdllink)) 649 if (ep_is_linked(&epi->rdllink))
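This is the producer-side half of the ovflist scheme: while ep_send_events() owns the lists, ep_poll_callback() pushes a newly ready item LIFO onto the overflow chain instead of the ready list, and only if it is not chained already. A condensed standalone model of just that decision, with illustrative names and locking elided:

        #include <stddef.h>

        #define UNACTIVE ((void *) -1L)

        struct item { struct item *next; int ready; };

        struct set { struct item *ovflist; }; /* UNACTIVE unless a transfer runs */

        static void mark_ready(struct set *s, struct item *it)
        {
                if (s->ovflist != (struct item *) UNACTIVE) {
                        /* Transfer in progress: divert, unless chained already. */
                        if (it->next == (struct item *) UNACTIVE) {
                                it->next = s->ovflist;  /* LIFO push */
                                s->ovflist = it;
                        }
                        return;
                }
                it->ready = 1;  /* normal path: would queue on the ready list */
        }

        int main(void)
        {
                struct set s = { .ovflist = UNACTIVE };
                struct item it = { .next = UNACTIVE, .ready = 0 };

                s.ovflist = NULL;     /* the transfer loop armed the overflow list */
                mark_ready(&s, &it);  /* diverted onto the chain */
                mark_ready(&s, &it);  /* second wakeup: already chained, no-op */
                return (s.ovflist == &it && it.next == NULL && !it.ready) ? 0 : 1;
        }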
@@ -714,8 +662,8 @@ is_linked:
714 if (waitqueue_active(&ep->poll_wait)) 662 if (waitqueue_active(&ep->poll_wait))
715 pwake++; 663 pwake++;
716 664
717is_disabled: 665out_unlock:
718 write_unlock_irqrestore(&ep->lock, flags); 666 spin_unlock_irqrestore(&ep->lock, flags);
719 667
720 /* We have to call this outside the lock */ 668 /* We have to call this outside the lock */
721 if (pwake) 669 if (pwake)
@@ -766,6 +714,9 @@ static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
766 rb_insert_color(&epi->rbn, &ep->rbr); 714 rb_insert_color(&epi->rbn, &ep->rbr);
767} 715}
768 716
717/*
718 * Must be called with "mtx" held.
719 */
769static int ep_insert(struct eventpoll *ep, struct epoll_event *event, 720static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
770 struct file *tfile, int fd) 721 struct file *tfile, int fd)
771{ 722{
@@ -786,8 +737,8 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
786 epi->ep = ep; 737 epi->ep = ep;
787 ep_set_ffd(&epi->ffd, tfile, fd); 738 ep_set_ffd(&epi->ffd, tfile, fd);
788 epi->event = *event; 739 epi->event = *event;
789 atomic_set(&epi->usecnt, 1);
790 epi->nwait = 0; 740 epi->nwait = 0;
741 epi->next = EP_UNACTIVE_PTR;
791 742
792 /* Initialize the poll table using the queue callback */ 743 /* Initialize the poll table using the queue callback */
793 epq.epi = epi; 744 epq.epi = epi;
@@ -796,7 +747,9 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
796 /* 747 /*
797 * Attach the item to the poll hooks and get current event bits. 748 * Attach the item to the poll hooks and get current event bits.
798 * We can safely use the file* here because its usage count has 749 * We can safely use the file* here because its usage count has
799 * been increased by the caller of this function. 750 * been increased by the caller of this function. Note that after
751 * this operation completes, the poll callback can start hitting
752 * the new item.
800 */ 753 */
801 revents = tfile->f_op->poll(tfile, &epq.pt); 754 revents = tfile->f_op->poll(tfile, &epq.pt);
802 755
@@ -813,12 +766,15 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
813 list_add_tail(&epi->fllink, &tfile->f_ep_links); 766 list_add_tail(&epi->fllink, &tfile->f_ep_links);
814 spin_unlock(&tfile->f_ep_lock); 767 spin_unlock(&tfile->f_ep_lock);
815 768
816 /* We have to drop the new item inside our item list to keep track of it */ 769 /*
817 write_lock_irqsave(&ep->lock, flags); 770 * Add the current item to the RB tree. All RB tree operations are
818 771 * protected by "mtx", and ep_insert() is called with "mtx" held.
819 /* Add the current item to the rb-tree */ 772 */
820 ep_rbtree_insert(ep, epi); 773 ep_rbtree_insert(ep, epi);
821 774
775 /* We have to drop the new item inside our item list to keep track of it */
776 spin_lock_irqsave(&ep->lock, flags);
777
822 /* If the file is already "ready" we drop it inside the ready list */ 778 /* If the file is already "ready" we drop it inside the ready list */
823 if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) { 779 if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
824 list_add_tail(&epi->rdllink, &ep->rdllist); 780 list_add_tail(&epi->rdllink, &ep->rdllist);
@@ -830,7 +786,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
830 pwake++; 786 pwake++;
831 } 787 }
832 788
833 write_unlock_irqrestore(&ep->lock, flags); 789 spin_unlock_irqrestore(&ep->lock, flags);
834 790
835 /* We have to call this outside the lock */ 791 /* We have to call this outside the lock */
836 if (pwake) 792 if (pwake)
@@ -846,12 +802,14 @@ error_unregister:
846 802
847 /* 803 /*
848 * We need to do this because an event could have arrived on some 804 * We need to do this because an event could have arrived on some
849 * allocated wait queue. 805 * allocated wait queue. Note that we don't care about the ep->ovflist
806 * list, since that is used/cleaned only inside a section bound by "mtx".
807 * And ep_insert() is called with "mtx" held.
850 */ 808 */
851 write_lock_irqsave(&ep->lock, flags); 809 spin_lock_irqsave(&ep->lock, flags);
852 if (ep_is_linked(&epi->rdllink)) 810 if (ep_is_linked(&epi->rdllink))
853 list_del_init(&epi->rdllink); 811 list_del_init(&epi->rdllink);
854 write_unlock_irqrestore(&ep->lock, flags); 812 spin_unlock_irqrestore(&ep->lock, flags);
855 813
856 kmem_cache_free(epi_cache, epi); 814 kmem_cache_free(epi_cache, epi);
857error_return: 815error_return:
@@ -860,7 +818,7 @@ error_return:
860 818
861/* 819/*
862 * Modify the interest event mask by dropping an event if the new mask 820 * Modify the interest event mask by dropping an event if the new mask
863 * has a match in the current file status. 821 * has a match in the current file status. Must be called with "mtx" held.
864 */ 822 */
865static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event) 823static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
866{ 824{
@@ -882,36 +840,28 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
882 */ 840 */
883 revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL); 841 revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
884 842
885 write_lock_irqsave(&ep->lock, flags); 843 spin_lock_irqsave(&ep->lock, flags);
886 844
887 /* Copy the data member from inside the lock */ 845 /* Copy the data member from inside the lock */
888 epi->event.data = event->data; 846 epi->event.data = event->data;
889 847
890 /* 848 /*
891 * If the item is not linked to the RB tree it means that it's on its 849 * If the item is "hot" and it is not registered inside the ready
892 * way toward the removal. Do nothing in this case. 850 * list, push it inside.
893 */ 851 */
894 if (ep_rb_linked(&epi->rbn)) { 852 if (revents & event->events) {
895 /* 853 if (!ep_is_linked(&epi->rdllink)) {
896 * If the item is "hot" and it is not registered inside the ready 854 list_add_tail(&epi->rdllink, &ep->rdllist);
897 * list, push it inside. If the item is not "hot" and it is currently 855
898 * registered inside the ready list, unlink it. 856 /* Notify waiting tasks that events are available */
899 */ 857 if (waitqueue_active(&ep->wq))
900 if (revents & event->events) { 858 __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
901 if (!ep_is_linked(&epi->rdllink)) { 859 TASK_INTERRUPTIBLE);
902 list_add_tail(&epi->rdllink, &ep->rdllist); 860 if (waitqueue_active(&ep->poll_wait))
903 861 pwake++;
904 /* Notify waiting tasks that events are available */
905 if (waitqueue_active(&ep->wq))
906 __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
907 TASK_INTERRUPTIBLE);
908 if (waitqueue_active(&ep->poll_wait))
909 pwake++;
910 }
911 } 862 }
912 } 863 }
913 864 spin_unlock_irqrestore(&ep->lock, flags);
914 write_unlock_irqrestore(&ep->lock, flags);
915 865
916 /* We have to call this outside the lock */ 866 /* We have to call this outside the lock */
917 if (pwake) 867 if (pwake)
@@ -920,36 +870,50 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
920 return 0; 870 return 0;
921} 871}
922 872
923/* 873static int ep_send_events(struct eventpoll *ep, struct epoll_event __user *events,
924 * This function is called without holding the "ep->lock" since the call to 874 int maxevents)
925 * __copy_to_user() might sleep, and also f_op->poll() might reenable the IRQ
926 * because of the way poll() is traditionally implemented in Linux.
927 */
928static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
929 struct epoll_event __user *events, int maxevents)
930{ 875{
931 int eventcnt, error = -EFAULT, pwake = 0; 876 int eventcnt, error = -EFAULT, pwake = 0;
932 unsigned int revents; 877 unsigned int revents;
933 unsigned long flags; 878 unsigned long flags;
934 struct epitem *epi; 879 struct epitem *epi, *nepi;
935 struct list_head injlist; 880 struct list_head txlist;
881
882 INIT_LIST_HEAD(&txlist);
883
884 /*
885 * We need to lock this because we could be hit by
886 * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
887 */
888 mutex_lock(&ep->mtx);
936 889
937 INIT_LIST_HEAD(&injlist); 890 /*
891 * Steal the ready list, and re-init the original one to the
892 * empty list. Also, set ep->ovflist to NULL so that events
893 * happening while we loop without locks are not lost. We cannot
894 * let the poll callback queue directly on ep->rdllist,
895 * because we are consuming it in the loop below, in a lockless way.
896 */
897 spin_lock_irqsave(&ep->lock, flags);
898 list_splice(&ep->rdllist, &txlist);
899 INIT_LIST_HEAD(&ep->rdllist);
900 ep->ovflist = NULL;
901 spin_unlock_irqrestore(&ep->lock, flags);
938 902
939 /* 903 /*
940 * We can loop without lock because this is a task private list. 904 * We can loop without lock because this is a task private list.
941 * We just spliced out the ep->rdllist. 905 * We just spliced out the ep->rdllist.
942 * Items cannot vanish during the loop because we are holding "sem" in 906 * Items cannot vanish during the loop because we are holding "mtx".
943 * read.
944 */ 907 */
945 for (eventcnt = 0; !list_empty(txlist) && eventcnt < maxevents;) { 908 for (eventcnt = 0; !list_empty(&txlist) && eventcnt < maxevents;) {
946 epi = list_first_entry(txlist, struct epitem, rdllink); 909 epi = list_first_entry(&txlist, struct epitem, rdllink);
947 prefetch(epi->rdllink.next); 910
911 list_del_init(&epi->rdllink);
948 912
949 /* 913 /*
950 * Get the ready file event set. We can safely use the file 914 * Get the ready file event set. We can safely use the file
951 * because we are holding the "sem" in read and this will 915 * because we are holding the "mtx" and this will guarantee
952 * guarantee that both the file and the item will not vanish. 916 * that both the file and the item will not vanish.
953 */ 917 */
954 revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL); 918 revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
955 revents &= epi->event.events; 919 revents &= epi->event.events;
@@ -957,8 +921,8 @@ static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
957 /* 921 /*
958 * If the event mask intersects the caller-requested one, 922 * If the event mask intersects the caller-requested one,
959 * deliver the event to userspace. Again, we are holding 923 * deliver the event to userspace. Again, we are holding
960 * "sem" in read, so no operations coming from userspace 924 * "mtx", so no operations coming from userspace can change
961 * can change the item. 925 * the item.
962 */ 926 */
963 if (revents) { 927 if (revents) {
964 if (__put_user(revents, 928 if (__put_user(revents,
@@ -970,59 +934,59 @@ static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
970 epi->event.events &= EP_PRIVATE_BITS; 934 epi->event.events &= EP_PRIVATE_BITS;
971 eventcnt++; 935 eventcnt++;
972 } 936 }
973
974 /* 937 /*
975 * This is tricky. We are holding the "sem" in read, and this 938 * At this point, noone can insert into ep->rdllist besides
976 * means that the operations that can change the "linked" status 939 * us. The epoll_ctl() callers are locked out by us holding
977 * of the epoll item (epi->rbn and epi->rdllink), cannot touch 940 * "mtx" and the poll callback will queue them in ep->ovflist.
978 * them. Also, since we are "linked" from a epi->rdllink POV
979 * (the item is linked to our transmission list we just
980 * spliced), the ep_poll_callback() cannot touch us either,
981 * because of the check present in there. Another parallel
982 * epoll_wait() will not get the same result set, since we
983 * spliced the ready list before. Note that list_del() still
984 * shows the item as linked to the test in ep_poll_callback().
985 */ 941 */
986 list_del(&epi->rdllink);
987 if (!(epi->event.events & EPOLLET) && 942 if (!(epi->event.events & EPOLLET) &&
988 (revents & epi->event.events)) 943 (revents & epi->event.events))
989 list_add_tail(&epi->rdllink, &injlist); 944 list_add_tail(&epi->rdllink, &ep->rdllist);
990 else {
991 /*
992 * Be sure the item is totally detached before re-init
993 * the list_head. After INIT_LIST_HEAD() is committed,
994 * the ep_poll_callback() can requeue the item again,
995 * but we don't care since we are already past it.
996 */
997 smp_mb();
998 INIT_LIST_HEAD(&epi->rdllink);
999 }
1000 } 945 }
1001 error = 0; 946 error = 0;
1002 947
1003 errxit: 948errxit:
1004 949
950 spin_lock_irqsave(&ep->lock, flags);
1005 /* 951 /*
1006 * If the re-injection list or the txlist are not empty, re-splice 952 * During the time we spent in the loop above, some other events
1007 * them to the ready list and do proper wakeups. 953 * might have been queued by the poll callback. We re-insert them
954 * here, unless they are already queued or disabled by EPOLLONESHOT.
1008 */ 955 */
1009 if (!list_empty(&injlist) || !list_empty(txlist)) { 956 for (nepi = ep->ovflist; (epi = nepi) != NULL;
1010 write_lock_irqsave(&ep->lock, flags); 957 nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
958 if (!ep_is_linked(&epi->rdllink) &&
959 (epi->event.events & ~EP_PRIVATE_BITS))
960 list_add_tail(&epi->rdllink, &ep->rdllist);
961 }
962 /*
963 * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
964 * releasing the lock, events will be queued in the normal way inside
965 * ep->rdllist.
966 */
967 ep->ovflist = EP_UNACTIVE_PTR;
968
969 /*
970 * In case of error in the event-send loop, or in case the number of
971 * ready events exceeds the userspace limit, we need to splice the
972 * "txlist" back inside ep->rdllist.
973 */
974 list_splice(&txlist, &ep->rdllist);
1011 975
1012 list_splice(txlist, &ep->rdllist); 976 if (!list_empty(&ep->rdllist)) {
1013 list_splice(&injlist, &ep->rdllist);
1014 /* 977 /*
1015 * Wake up ( if active ) both the eventpoll wait list and the ->poll() 978 * Wake up (if active) both the eventpoll wait list and the ->poll()
1016 * wait list. 979 * wait list (delayed after we release the lock).
1017 */ 980 */
1018 if (waitqueue_active(&ep->wq)) 981 if (waitqueue_active(&ep->wq))
1019 __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE | 982 __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
1020 TASK_INTERRUPTIBLE); 983 TASK_INTERRUPTIBLE);
1021 if (waitqueue_active(&ep->poll_wait)) 984 if (waitqueue_active(&ep->poll_wait))
1022 pwake++; 985 pwake++;
1023
1024 write_unlock_irqrestore(&ep->lock, flags);
1025 } 986 }
987 spin_unlock_irqrestore(&ep->lock, flags);
988
989 mutex_unlock(&ep->mtx);
1026 990
1027 /* We have to call this outside the lock */ 991 /* We have to call this outside the lock */
1028 if (pwake) 992 if (pwake)
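The whole transfer now follows a steal/replay shape: splice the ready list away and arm ovflist under the lock, walk the stolen items locklessly, then requeue whatever piled up and disarm. A minimal single-threaded model of that cycle, with made-up names; in the real code, step 2 interleaves with ep_poll_callback() running from wakeups:

        #include <stdio.h>

        #define UNACTIVE ((void *) -1L)

        struct item { struct item *next; int id; };

        int main(void)
        {
                struct item a = { UNACTIVE, 1 }, b = { UNACTIVE, 2 };
                struct item *ovflist;

                /* 1) Under the lock: steal the ready list (splice it onto a
                 *    private txlist) and arm the overflow list with NULL. */
                ovflist = NULL;

                /* 2) Lockless window: wakeups arrive for a and b, so the
                 *    callback chains them LIFO instead of touching rdllist. */
                a.next = ovflist; ovflist = &a;
                b.next = ovflist; ovflist = &b;

                /* 3) Under the lock again: replay the chain into the ready
                 *    list, resetting each next pointer to the "not queued"
                 *    sentinel, then disarm. Mirrors the loop above. */
                for (struct item *nepi = ovflist, *epi; (epi = nepi) != NULL;
                     nepi = epi->next, epi->next = UNACTIVE)
                        printf("requeue item %d\n", epi->id); /* 2, then 1 */
                ovflist = UNACTIVE;

                return 0;
        }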
@@ -1031,41 +995,6 @@ static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
1031 return eventcnt == 0 ? error: eventcnt; 995 return eventcnt == 0 ? error: eventcnt;
1032} 996}
1033 997
1034/*
1035 * Perform the transfer of events to user space.
1036 */
1037static int ep_events_transfer(struct eventpoll *ep,
1038 struct epoll_event __user *events, int maxevents)
1039{
1040 int eventcnt;
1041 unsigned long flags;
1042 struct list_head txlist;
1043
1044 INIT_LIST_HEAD(&txlist);
1045
1046 /*
1047 * We need to lock this because we could be hit by
1048 * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
1049 */
1050 down_read(&ep->sem);
1051
1052 /*
1053 * Steal the ready list, and re-init the original one to the
1054 * empty list.
1055 */
1056 write_lock_irqsave(&ep->lock, flags);
1057 list_splice(&ep->rdllist, &txlist);
1058 INIT_LIST_HEAD(&ep->rdllist);
1059 write_unlock_irqrestore(&ep->lock, flags);
1060
1061 /* Build result set in userspace */
1062 eventcnt = ep_send_events(ep, &txlist, events, maxevents);
1063
1064 up_read(&ep->sem);
1065
1066 return eventcnt;
1067}
1068
1069static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, 998static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
1070 int maxevents, long timeout) 999 int maxevents, long timeout)
1071{ 1000{
@@ -1083,7 +1012,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
1083 MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000; 1012 MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;
1084 1013
1085retry: 1014retry:
1086 write_lock_irqsave(&ep->lock, flags); 1015 spin_lock_irqsave(&ep->lock, flags);
1087 1016
1088 res = 0; 1017 res = 0;
1089 if (list_empty(&ep->rdllist)) { 1018 if (list_empty(&ep->rdllist)) {
@@ -1093,6 +1022,7 @@ retry:
1093 * ep_poll_callback() when events will become available. 1022 * ep_poll_callback() when events will become available.
1094 */ 1023 */
1095 init_waitqueue_entry(&wait, current); 1024 init_waitqueue_entry(&wait, current);
1025 wait.flags |= WQ_FLAG_EXCLUSIVE;
1096 __add_wait_queue(&ep->wq, &wait); 1026 __add_wait_queue(&ep->wq, &wait);
1097 1027
1098 for (;;) { 1028 for (;;) {
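The WQ_FLAG_EXCLUSIVE line added here opts the sleeper into wake-one semantics, so one event no longer wakes every thread blocked in epoll_wait() on the same set. The closest user-space analogue is pthread_cond_signal() (one waiter) versus pthread_cond_broadcast() (every waiter, the thundering herd). A sketch of that contrast, not kernel code:

        #include <pthread.h>

        static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
        static int nready;

        static void *waiter(void *arg)
        {
                pthread_mutex_lock(&m);
                while (nready == 0)
                        pthread_cond_wait(&c, &m); /* like sleeping in ep_poll() */
                nready--;                          /* consume one event */
                pthread_mutex_unlock(&m);
                return arg;
        }

        static void event_arrives(void)
        {
                pthread_mutex_lock(&m);
                nready++;
                pthread_cond_signal(&c);  /* "exclusive": one waiter suffices; */
                pthread_mutex_unlock(&m); /* broadcast() would wake them all.  */
        }

        int main(void)
        {
                pthread_t t;
                pthread_create(&t, NULL, waiter, NULL);
                event_arrives();
                pthread_join(t, NULL);
                return 0;
        }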
@@ -1109,9 +1039,9 @@ retry:
1109 break; 1039 break;
1110 } 1040 }
1111 1041
1112 write_unlock_irqrestore(&ep->lock, flags); 1042 spin_unlock_irqrestore(&ep->lock, flags);
1113 jtimeout = schedule_timeout(jtimeout); 1043 jtimeout = schedule_timeout(jtimeout);
1114 write_lock_irqsave(&ep->lock, flags); 1044 spin_lock_irqsave(&ep->lock, flags);
1115 } 1045 }
1116 __remove_wait_queue(&ep->wq, &wait); 1046 __remove_wait_queue(&ep->wq, &wait);
1117 1047
@@ -1121,7 +1051,7 @@ retry:
1121 /* Is it worth trying to dig for events? */ 1051 /* Is it worth trying to dig for events? */
1122 eavail = !list_empty(&ep->rdllist); 1052 eavail = !list_empty(&ep->rdllist);
1123 1053
1124 write_unlock_irqrestore(&ep->lock, flags); 1054 spin_unlock_irqrestore(&ep->lock, flags);
1125 1055
1126 /* 1056 /*
1127 * Try to transfer events to user space. In case we get 0 events and 1057 * Try to transfer events to user space. In case we get 0 events and
@@ -1129,18 +1059,17 @@ retry:
1129 * more luck. 1059 * more luck.
1130 */ 1060 */
1131 if (!res && eavail && 1061 if (!res && eavail &&
1132 !(res = ep_events_transfer(ep, events, maxevents)) && jtimeout) 1062 !(res = ep_send_events(ep, events, maxevents)) && jtimeout)
1133 goto retry; 1063 goto retry;
1134 1064
1135 return res; 1065 return res;
1136} 1066}
1137 1067
1138/* 1068/*
1139 * It opens an eventpoll file descriptor by suggesting a storage of "size" 1069 * It opens an eventpoll file descriptor. The "size" parameter is there
1140 * file descriptors. The size parameter is just an hint about how to size 1070 * for historical reasons, when epoll was using a hash instead of an
1141 * data structures. It won't prevent the user to store more than "size" 1071 * RB tree. With the current implementation, the "size" parameter is ignored
1142 * file descriptors inside the epoll interface. It is the kernel part of 1072 * (besides sanity checks).
1143 * the userspace epoll_create(2).
1144 */ 1073 */
1145asmlinkage long sys_epoll_create(int size) 1074asmlinkage long sys_epoll_create(int size)
1146{ 1075{
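In practice this means the size argument only has to pass the sanity check (it must be greater than zero); the set grows as fds are added. A short usage sketch, with "make_epfd" being our own name:

        #include <sys/epoll.h>

        int make_epfd(void)
        {
                return epoll_create(1);  /* as good as epoll_create(10000);
                                            returns -1 and sets errno on failure */
        }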
@@ -1176,7 +1105,6 @@ asmlinkage long sys_epoll_create(int size)
1176 1105
1177error_free: 1106error_free:
1178 ep_free(ep); 1107 ep_free(ep);
1179 kfree(ep);
1180error_return: 1108error_return:
1181 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n", 1109 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
1182 current, size, error)); 1110 current, size, error));
@@ -1186,8 +1114,7 @@ error_return:
1186/* 1114/*
1187 * The following function implements the controller interface for 1115 * The following function implements the controller interface for
1188 * the eventpoll file that enables the insertion/removal/change of 1116 * the eventpoll file that enables the insertion/removal/change of
1189 * file descriptors inside the interest set. It represents 1117 * file descriptors inside the interest set.
1190 * the kernel part of the user space epoll_ctl(2).
1191 */ 1118 */
1192asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, 1119asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
1193 struct epoll_event __user *event) 1120 struct epoll_event __user *event)
@@ -1237,9 +1164,13 @@ asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
1237 */ 1164 */
1238 ep = file->private_data; 1165 ep = file->private_data;
1239 1166
1240 down_write(&ep->sem); 1167 mutex_lock(&ep->mtx);
1241 1168
1242 /* Try to lookup the file inside our RB tree */ 1169 /*
1170 * Try to look up the file inside our RB tree. Since we grabbed "mtx"
1171 * above, we can be sure to be able to use the item looked up by
1172 * ep_find() till we release the mutex.
1173 */
1243 epi = ep_find(ep, tfile, fd); 1174 epi = ep_find(ep, tfile, fd);
1244 1175
1245 error = -EINVAL; 1176 error = -EINVAL;
@@ -1266,13 +1197,7 @@ asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
1266 error = -ENOENT; 1197 error = -ENOENT;
1267 break; 1198 break;
1268 } 1199 }
1269 /* 1200 mutex_unlock(&ep->mtx);
1270 * The function ep_find() increments the usage count of the structure
1271 * so, if this is not NULL, we need to release it.
1272 */
1273 if (epi)
1274 ep_release_epitem(epi);
1275 up_write(&ep->sem);
1276 1201
1277error_tgt_fput: 1202error_tgt_fput:
1278 fput(tfile); 1203 fput(tfile);
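The op dispatch above gives epoll_ctl() its documented error semantics: ADD of an fd that ep_find() already located fails with EEXIST, while MOD or DEL of an absent one fails with ENOENT. A small user-space check, with error handling reduced to asserts:

        #include <sys/epoll.h>
        #include <errno.h>
        #include <assert.h>
        #include <unistd.h>

        int main(void)
        {
                int fds[2];
                int epfd = epoll_create(1);

                if (epfd < 0 || pipe(fds))
                        return 1;

                struct epoll_event ev = { .events = EPOLLIN, .data.fd = fds[0] };

                assert(epoll_ctl(epfd, EPOLL_CTL_ADD, fds[0], &ev) == 0);
                assert(epoll_ctl(epfd, EPOLL_CTL_ADD, fds[0], &ev) == -1
                       && errno == EEXIST);       /* ep_find() found it      */
                assert(epoll_ctl(epfd, EPOLL_CTL_DEL, fds[0], &ev) == 0);
                assert(epoll_ctl(epfd, EPOLL_CTL_MOD, fds[0], &ev) == -1
                       && errno == ENOENT);       /* ep_find() came up empty */

                close(fds[0]); close(fds[1]); close(epfd);
                return 0;
        }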
@@ -1378,7 +1303,7 @@ asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
1378 if (sigmask) { 1303 if (sigmask) {
1379 if (error == -EINTR) { 1304 if (error == -EINTR) {
1380 memcpy(&current->saved_sigmask, &sigsaved, 1305 memcpy(&current->saved_sigmask, &sigsaved,
1381 sizeof(sigsaved)); 1306 sizeof(sigsaved));
1382 set_thread_flag(TIF_RESTORE_SIGMASK); 1307 set_thread_flag(TIF_RESTORE_SIGMASK);
1383 } else 1308 } else
1384 sigprocmask(SIG_SETMASK, &sigsaved, NULL); 1309 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
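For completeness, the sigmask save/restore above is what makes the classic epoll_pwait() pattern safe: keep a signal blocked during normal execution and atomically unblock it only for the duration of the wait. A sketch with error handling trimmed:

        #include <sys/epoll.h>
        #include <signal.h>

        int wait_with_sigint(int epfd, struct epoll_event *evs, int max)
        {
                sigset_t blocked, during_wait;

                /* Block SIGINT for normal execution... */
                sigemptyset(&blocked);
                sigaddset(&blocked, SIGINT);
                sigprocmask(SIG_BLOCK, &blocked, &during_wait);

                /* ...but let it interrupt the wait atomically: on -EINTR the
                 * kernel restores our mask only after the handler has run. */
                sigdelset(&during_wait, SIGINT);
                return epoll_pwait(epfd, evs, max, -1, &during_wait);
        }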