Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r--	fs/eventpoll.c	234
1 files changed, 73 insertions, 161 deletions
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 1dbedc71a28c..4c16127c96be 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1,6 +1,6 @@
 /*
- * fs/eventpoll.c ( Efficent event polling implementation )
- * Copyright (C) 2001,...,2006 Davide Libenzi
+ * fs/eventpoll.c (Efficent event polling implementation)
+ * Copyright (C) 2001,...,2007 Davide Libenzi
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -44,8 +44,8 @@
  * There are three level of locking required by epoll :
  *
  * 1) epmutex (mutex)
- * 2) ep->mtx (mutes)
- * 3) ep->lock (rw_lock)
+ * 2) ep->mtx (mutex)
+ * 3) ep->lock (spinlock)
  *
  * The acquire order is the one listed above, from 1 to 3.
  * We need a spinlock (ep->lock) because we manipulate objects
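As an illustration of the acquisition order described above (a sketch only, not code from this patch; the helper name is made up and a real path would rarely need all three locks at once):

static void example_nested_locking(struct eventpoll *ep)
{
	unsigned long flags;

	mutex_lock(&epmutex);			/* 1) global epoll mutex */
	mutex_lock(&ep->mtx);			/* 2) per-eventpoll mutex */
	spin_lock_irqsave(&ep->lock, flags);	/* 3) per-eventpoll spinlock */

	/* ... manipulate ep->rdllist / ep->ovflist here ... */

	spin_unlock_irqrestore(&ep->lock, flags);	/* release in reverse order */
	mutex_unlock(&ep->mtx);
	mutex_unlock(&epmutex);
}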
@@ -140,6 +140,12 @@ struct epitem {
 	/* List header used to link this structure to the eventpoll ready list */
 	struct list_head rdllink;
 
+	/*
+	 * Works together "struct eventpoll"->ovflist in keeping the
+	 * single linked chain of items.
+	 */
+	struct epitem *next;
+
 	/* The file descriptor information this item refers to */
 	struct epoll_filefd ffd;
 
@@ -152,23 +158,11 @@ struct epitem {
 	/* The "container" of this item */
 	struct eventpoll *ep;
 
-	/* The structure that describe the interested events and the source fd */
-	struct epoll_event event;
-
-	/*
-	 * Used to keep track of the usage count of the structure. This avoids
-	 * that the structure will desappear from underneath our processing.
-	 */
-	atomic_t usecnt;
-
 	/* List header used to link this item to the "struct file" items list */
 	struct list_head fllink;
 
-	/*
-	 * Works together "struct eventpoll"->ovflist in keeping the
-	 * single linked chain of items.
-	 */
-	struct epitem *next;
+	/* The structure that describe the interested events and the source fd */
+	struct epoll_event event;
 };
 
 /*
@@ -178,7 +172,7 @@ struct epitem {
  */
 struct eventpoll {
 	/* Protect the this structure access */
-	rwlock_t lock;
+	spinlock_t lock;
 
 	/*
 	 * This mutex is used to ensure that files are not removed
@@ -394,78 +388,11 @@ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
 }
 
 /*
- * Unlink the "struct epitem" from all places it might have been hooked up.
- * This function must be called with write IRQ lock on "ep->lock".
- */
-static int ep_unlink(struct eventpoll *ep, struct epitem *epi)
-{
-	int error;
-
-	/*
-	 * It can happen that this one is called for an item already unlinked.
-	 * The check protect us from doing a double unlink ( crash ).
-	 */
-	error = -ENOENT;
-	if (!ep_rb_linked(&epi->rbn))
-		goto error_return;
-
-	/*
-	 * Clear the event mask for the unlinked item. This will avoid item
-	 * notifications to be sent after the unlink operation from inside
-	 * the kernel->userspace event transfer loop.
-	 */
-	epi->event.events = 0;
-
-	/*
-	 * At this point is safe to do the job, unlink the item from our rb-tree.
-	 * This operation togheter with the above check closes the door to
-	 * double unlinks.
-	 */
-	ep_rb_erase(&epi->rbn, &ep->rbr);
-
-	/*
-	 * If the item we are going to remove is inside the ready file descriptors
-	 * we want to remove it from this list to avoid stale events.
-	 */
-	if (ep_is_linked(&epi->rdllink))
-		list_del_init(&epi->rdllink);
-
-	error = 0;
-error_return:
-
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n",
-		     current, ep, epi->ffd.file, error));
-
-	return error;
-}
-
-/*
- * Increment the usage count of the "struct epitem" making it sure
- * that the user will have a valid pointer to reference.
- */
-static void ep_use_epitem(struct epitem *epi)
-{
-	atomic_inc(&epi->usecnt);
-}
-
-/*
- * Decrement ( release ) the usage count by signaling that the user
- * has finished using the structure. It might lead to freeing the
- * structure itself if the count goes to zero.
- */
-static void ep_release_epitem(struct epitem *epi)
-{
-	if (atomic_dec_and_test(&epi->usecnt))
-		kmem_cache_free(epi_cache, epi);
-}
-
-/*
  * Removes a "struct epitem" from the eventpoll RB tree and deallocates
- * all the associated resources.
+ * all the associated resources. Must be called with "mtx" held.
  */
 static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 {
-	int error;
 	unsigned long flags;
 	struct file *file = epi->ffd.file;
 
@@ -485,26 +412,21 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 	list_del_init(&epi->fllink);
 	spin_unlock(&file->f_ep_lock);
 
-	/* We need to acquire the write IRQ lock before calling ep_unlink() */
-	write_lock_irqsave(&ep->lock, flags);
-
-	/* Really unlink the item from the RB tree */
-	error = ep_unlink(ep, epi);
-
-	write_unlock_irqrestore(&ep->lock, flags);
+	if (ep_rb_linked(&epi->rbn))
+		ep_rb_erase(&epi->rbn, &ep->rbr);
 
-	if (error)
-		goto error_return;
+	spin_lock_irqsave(&ep->lock, flags);
+	if (ep_is_linked(&epi->rdllink))
+		list_del_init(&epi->rdllink);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/* At this point it is safe to free the eventpoll item */
-	ep_release_epitem(epi);
+	kmem_cache_free(epi_cache, epi);
 
-	error = 0;
-error_return:
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p) = %d\n",
-		     current, ep, file, error));
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p)\n",
+		     current, ep, file));
 
-	return error;
+	return 0;
 }
 
 static void ep_free(struct eventpoll *ep)
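Both ep_remove() above and ep_insert()/ep_find() further down now state that they must run with "mtx" held. A simplified sketch of the caller-side pattern, loosely modelled on the EPOLL_CTL_ADD/DEL branches of sys_epoll_ctl() (variable names approximate, error handling trimmed):

	mutex_lock(&ep->mtx);
	epi = ep_find(ep, tfile, fd);	/* RB tree lookup is safe under mtx */
	switch (op) {
	case EPOLL_CTL_ADD:
		if (!epi)
			error = ep_insert(ep, &epds, tfile, fd);
		break;
	case EPOLL_CTL_DEL:
		if (epi)
			error = ep_remove(ep, epi);
		break;
	}
	mutex_unlock(&ep->mtx);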
@@ -574,10 +496,10 @@ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
 	poll_wait(file, &ep->poll_wait, wait);
 
 	/* Check our condition */
-	read_lock_irqsave(&ep->lock, flags);
+	spin_lock_irqsave(&ep->lock, flags);
 	if (!list_empty(&ep->rdllist))
 		pollflags = POLLIN | POLLRDNORM;
-	read_unlock_irqrestore(&ep->lock, flags);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	return pollflags;
 }
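ep_eventpoll_poll() is what makes an epoll descriptor itself pollable: it reports POLLIN whenever the ready list is non-empty. A small, self-contained userspace example that relies on this by nesting one epoll descriptor inside another (illustrative only):

#include <stdio.h>
#include <sys/epoll.h>

int main(void)
{
	int inner = epoll_create(1);
	int outer = epoll_create(1);
	struct epoll_event ev = { .events = EPOLLIN }, out;

	if (inner < 0 || outer < 0) {
		perror("epoll_create");
		return 1;
	}

	/* Watch stdin through the inner set. */
	ev.data.fd = 0;
	epoll_ctl(inner, EPOLL_CTL_ADD, 0, &ev);

	/* Nest: the inner epoll fd turns readable once its rdllist is non-empty. */
	ev.data.fd = inner;
	epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev);

	if (epoll_wait(outer, &out, 1, 5000) > 0)
		printf("inner epoll fd %d is ready\n", out.data.fd);
	return 0;
}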
@@ -636,7 +558,7 @@ static int ep_alloc(struct eventpoll **pep)
 	if (!ep)
 		return -ENOMEM;
 
-	rwlock_init(&ep->lock);
+	spin_lock_init(&ep->lock);
 	mutex_init(&ep->mtx);
 	init_waitqueue_head(&ep->wq);
 	init_waitqueue_head(&ep->poll_wait);
@@ -652,20 +574,18 @@ static int ep_alloc(struct eventpoll **pep)
 }
 
 /*
- * Search the file inside the eventpoll tree. It add usage count to
- * the returned item, so the caller must call ep_release_epitem()
- * after finished using the "struct epitem".
+ * Search the file inside the eventpoll tree. The RB tree operations
+ * are protected by the "mtx" mutex, and ep_find() must be called with
+ * "mtx" held.
  */
 static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
 {
 	int kcmp;
-	unsigned long flags;
 	struct rb_node *rbp;
 	struct epitem *epi, *epir = NULL;
 	struct epoll_filefd ffd;
 
 	ep_set_ffd(&ffd, file, fd);
-	read_lock_irqsave(&ep->lock, flags);
 	for (rbp = ep->rbr.rb_node; rbp; ) {
 		epi = rb_entry(rbp, struct epitem, rbn);
 		kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
@@ -674,12 +594,10 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
 		else if (kcmp < 0)
 			rbp = rbp->rb_left;
 		else {
-			ep_use_epitem(epi);
 			epir = epi;
 			break;
 		}
 	}
-	read_unlock_irqrestore(&ep->lock, flags);
 
 	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n",
 		     current, file, epir));
@@ -702,7 +620,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
 	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
 		     current, epi->ffd.file, epi, ep));
 
-	write_lock_irqsave(&ep->lock, flags);
+	spin_lock_irqsave(&ep->lock, flags);
 
 	/*
 	 * If the event mask does not contain any poll(2) event, we consider the
@@ -745,7 +663,7 @@ is_linked:
 		pwake++;
 
 out_unlock:
-	write_unlock_irqrestore(&ep->lock, flags);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/* We have to call this outside the lock */
 	if (pwake)
@@ -796,6 +714,9 @@ static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
 	rb_insert_color(&epi->rbn, &ep->rbr);
 }
 
+/*
+ * Must be called with "mtx" held.
+ */
 static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 		     struct file *tfile, int fd)
 {
@@ -816,7 +737,6 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	epi->ep = ep;
 	ep_set_ffd(&epi->ffd, tfile, fd);
 	epi->event = *event;
-	atomic_set(&epi->usecnt, 1);
 	epi->nwait = 0;
 	epi->next = EP_UNACTIVE_PTR;
 
@@ -827,7 +747,9 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	/*
 	 * Attach the item to the poll hooks and get current event bits.
 	 * We can safely use the file* here because its usage count has
-	 * been increased by the caller of this function.
+	 * been increased by the caller of this function. Note that after
+	 * this operation completes, the poll callback can start hitting
+	 * the new item.
 	 */
 	revents = tfile->f_op->poll(tfile, &epq.pt);
 
@@ -844,12 +766,15 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	list_add_tail(&epi->fllink, &tfile->f_ep_links);
 	spin_unlock(&tfile->f_ep_lock);
 
-	/* We have to drop the new item inside our item list to keep track of it */
-	write_lock_irqsave(&ep->lock, flags);
-
-	/* Add the current item to the rb-tree */
+	/*
+	 * Add the current item to the RB tree. All RB tree operations are
+	 * protected by "mtx", and ep_insert() is called with "mtx" held.
+	 */
 	ep_rbtree_insert(ep, epi);
 
+	/* We have to drop the new item inside our item list to keep track of it */
+	spin_lock_irqsave(&ep->lock, flags);
+
 	/* If the file is already "ready" we drop it inside the ready list */
 	if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
 		list_add_tail(&epi->rdllink, &ep->rdllist);
@@ -861,7 +786,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 			pwake++;
 	}
 
-	write_unlock_irqrestore(&ep->lock, flags);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/* We have to call this outside the lock */
 	if (pwake)
@@ -879,10 +804,10 @@ error_unregister:
 	 * We need to do this because an event could have been arrived on some
 	 * allocated wait queue.
 	 */
-	write_lock_irqsave(&ep->lock, flags);
+	spin_lock_irqsave(&ep->lock, flags);
 	if (ep_is_linked(&epi->rdllink))
 		list_del_init(&epi->rdllink);
-	write_unlock_irqrestore(&ep->lock, flags);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	kmem_cache_free(epi_cache, epi);
 error_return:
@@ -891,7 +816,7 @@ error_return:
 
 /*
  * Modify the interest event mask by dropping an event if the new mask
- * has a match in the current file status.
+ * has a match in the current file status. Must be called with "mtx" held.
  */
 static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
 {
@@ -913,36 +838,29 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 	 */
 	revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
 
-	write_lock_irqsave(&ep->lock, flags);
+	spin_lock_irqsave(&ep->lock, flags);
 
 	/* Copy the data member from inside the lock */
 	epi->event.data = event->data;
 
 	/*
-	 * If the item is not linked to the RB tree it means that it's on its
-	 * way toward the removal. Do nothing in this case.
+	 * If the item is "hot" and it is not registered inside the ready
+	 * list, push it inside. If the item is not "hot" and it is currently
+	 * registered inside the ready list, unlink it.
 	 */
-	if (ep_rb_linked(&epi->rbn)) {
-		/*
-		 * If the item is "hot" and it is not registered inside the ready
-		 * list, push it inside. If the item is not "hot" and it is currently
-		 * registered inside the ready list, unlink it.
-		 */
-		if (revents & event->events) {
-			if (!ep_is_linked(&epi->rdllink)) {
-				list_add_tail(&epi->rdllink, &ep->rdllist);
-
-				/* Notify waiting tasks that events are available */
-				if (waitqueue_active(&ep->wq))
-					__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-							 TASK_INTERRUPTIBLE);
-				if (waitqueue_active(&ep->poll_wait))
-					pwake++;
-			}
+	if (revents & event->events) {
+		if (!ep_is_linked(&epi->rdllink)) {
+			list_add_tail(&epi->rdllink, &ep->rdllist);
+
+			/* Notify waiting tasks that events are available */
+			if (waitqueue_active(&ep->wq))
+				__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
+						 TASK_INTERRUPTIBLE);
+			if (waitqueue_active(&ep->poll_wait))
+				pwake++;
 		}
 	}
-
-	write_unlock_irqrestore(&ep->lock, flags);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/* We have to call this outside the lock */
 	if (pwake)
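ep_modify() is the backend of EPOLL_CTL_MOD: it updates the mask and data, and immediately re-queues the item if the new mask already matches the file's current state. A short userspace illustration, re-arming a one-shot descriptor (a sketch; epfd and fd are assumed to exist):

#include <sys/epoll.h>

/* Re-arm an EPOLLONESHOT descriptor after its event has been handled. */
static int rearm_oneshot(int epfd, int fd)
{
	struct epoll_event ev;

	ev.events = EPOLLIN | EPOLLONESHOT;
	ev.data.fd = fd;
	/* If fd is already readable, ep_modify() links it straight back
	 * onto the ready list under ep->lock. */
	return epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev);
}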
@@ -975,11 +893,11 @@ static int ep_send_events(struct eventpoll *ep, struct epoll_event __user *event
 	 * have the poll callback to queue directly on ep->rdllist,
 	 * because we are doing it in the loop below, in a lockless way.
 	 */
-	write_lock_irqsave(&ep->lock, flags);
+	spin_lock_irqsave(&ep->lock, flags);
 	list_splice(&ep->rdllist, &txlist);
 	INIT_LIST_HEAD(&ep->rdllist);
 	ep->ovflist = NULL;
-	write_unlock_irqrestore(&ep->lock, flags);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/*
 	 * We can loop without lock because this is a task private list.
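The splice above cooperates with ep->ovflist and the epi->next field added earlier in this patch: while events are being copied to userspace without ep->lock held, the poll callback must not touch rdllist directly, so newly ready items are parked on the overflow chain instead. A paraphrased sketch of that callback-side diversion (not verbatim from the patch):

	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
		/* A transfer loop is in progress: chain the item onto the
		 * single linked ovflist; it is moved back onto rdllist once
		 * the loop re-takes ep->lock. */
		if (epi->next == EP_UNACTIVE_PTR) {
			epi->next = ep->ovflist;
			ep->ovflist = epi;
		}
		goto out_unlock;
	}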
@@ -1028,7 +946,7 @@ static int ep_send_events(struct eventpoll *ep, struct epoll_event __user *event
 
 errxit:
 
-	write_lock_irqsave(&ep->lock, flags);
+	spin_lock_irqsave(&ep->lock, flags);
 	/*
 	 * During the time we spent in the loop above, some other events
 	 * might have been queued by the poll callback. We re-insert them
@@ -1064,7 +982,7 @@ errxit:
 		if (waitqueue_active(&ep->poll_wait))
 			pwake++;
 	}
-	write_unlock_irqrestore(&ep->lock, flags);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	mutex_unlock(&ep->mtx);
 
@@ -1092,7 +1010,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;
 
 retry:
-	write_lock_irqsave(&ep->lock, flags);
+	spin_lock_irqsave(&ep->lock, flags);
 
 	res = 0;
 	if (list_empty(&ep->rdllist)) {
@@ -1119,9 +1037,9 @@ retry:
 				break;
 			}
 
-			write_unlock_irqrestore(&ep->lock, flags);
+			spin_unlock_irqrestore(&ep->lock, flags);
 			jtimeout = schedule_timeout(jtimeout);
-			write_lock_irqsave(&ep->lock, flags);
+			spin_lock_irqsave(&ep->lock, flags);
 		}
 		__remove_wait_queue(&ep->wq, &wait);
 
@@ -1131,7 +1049,7 @@ retry:
 	/* Is it worth to try to dig for events ? */
 	eavail = !list_empty(&ep->rdllist);
 
-	write_unlock_irqrestore(&ep->lock, flags);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/*
 	 * Try to transfer events to user space. In case we get 0 events and
@@ -1276,12 +1194,6 @@ asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
 			error = -ENOENT;
 		break;
 	}
-	/*
-	 * The function ep_find() increments the usage count of the structure
-	 * so, if this is not NULL, we need to release it.
-	 */
-	if (epi)
-		ep_release_epitem(epi);
 	mutex_unlock(&ep->mtx);
 
 error_tgt_fput:
@@ -1388,7 +1300,7 @@ asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
 	if (sigmask) {
 		if (error == -EINTR) {
 			memcpy(&current->saved_sigmask, &sigsaved,
-					sizeof(sigsaved));
+			       sizeof(sigsaved));
 			set_thread_flag(TIF_RESTORE_SIGMASK);
 		} else
 			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
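The sigmask handling above implements the epoll_pwait() contract: the caller's mask is swapped in only for the duration of the wait, and on -EINTR the original mask is restored via TIF_RESTORE_SIGMASK on the way back to userspace so the pending signal can still be delivered. Minimal userspace usage for reference (illustrative only):

#include <signal.h>
#include <sys/epoll.h>

/* Keep SIGINT blocked except while sleeping inside epoll_pwait(). */
static int wait_allowing_sigint(int epfd, struct epoll_event *events, int maxevents)
{
	sigset_t blocked, during_wait;

	sigemptyset(&blocked);
	sigaddset(&blocked, SIGINT);
	sigprocmask(SIG_BLOCK, &blocked, &during_wait);
	sigdelset(&during_wait, SIGINT);

	/* The kernel installs during_wait only while this call sleeps. */
	return epoll_pwait(epfd, events, maxevents, -1, &during_wait);
}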