diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-02-11 16:57:19 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-02-11 16:57:19 -0500 |
commit | ee5daa1361fceb6f482c005bcc9ba8d01b92ea5c (patch) | |
tree | 71736f733c7efec13218756ab8635b258b8a1c53 /fs/eventpoll.c | |
parent | 3fc928dc5308f6533d0e8004c8c63b46709a5276 (diff) | |
parent | d85e2aa2e34dac793e70b900d865f48c69ecbc27 (diff) |
Merge branch 'work.poll2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull more poll annotation updates from Al Viro:
"This is preparation to solving the problems you've mentioned in the
original poll series.
After this series, the kernel is ready for running
for V in IN OUT PRI ERR RDNORM RDBAND WRNORM WRBAND HUP RDHUP NVAL MSG; do
L=`git grep -l -w POLL$V | grep -v '^t' | grep -v /um/ | grep -v '^sa' | grep -v '/poll.h$'|grep -v '^D'`
for f in $L; do sed -i "-es/^\([^\"]*\)\(\<POLL$V\>\)/\\1E\\2/" $f; done
done
as a bulk search-and-replace.
After that, the kernel is ready to apply the patch to unify
{de,}mangle_poll(), and then get rid of kernel-side POLL... uses
entirely, and we should be all done with that stuff.
Basically, that's what you suggested wrt KPOLL..., except that we can
use EPOLL... instead - they already are arch-independent (and equal to
what is currently kernel-side POLL...).
After the preparations (in this series) switch to returning EPOLL...
from ->poll() instances is completely mechanical and kernel-side
POLL... can go away. The last step (killing kernel-side POLL... and
unifying {de,}mangle_poll()) has to be done after the
search-and-replace job, since we need userland-side POLL... for
unified {de,}mangle_poll(), thus the cherry-pick at the last step.
After that we will have:
- POLL{IN,OUT,...} *not* in __poll_t, so any stray instances of
->poll() still using those will be caught by sparse.
- eventpoll.c and select.c warning-free wrt __poll_t
- no more kernel-side definitions of POLL... - userland ones are
visible through the entire kernel (and used pretty much only for
mangle/demangle)
- same behavior as after the first series (i.e. sparc et al. epoll(2)
working correctly)"
* 'work.poll2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
annotate ep_scan_ready_list()
ep_send_events_proc(): return result via esed->res
preparation to switching ->poll() to returning EPOLL...
add EPOLLNVAL, annotate EPOLL... and event_poll->event
use linux/poll.h instead of asm/poll.h
xen: fix poll misannotation
smc: missing poll annotations
Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r-- | fs/eventpoll.c | 41 |
1 file changed, 23 insertions, 18 deletions
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 42e35a6977c9..d1a490c7e6c3 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -260,6 +260,7 @@ struct ep_pqueue { | |||
260 | struct ep_send_events_data { | 260 | struct ep_send_events_data { |
261 | int maxevents; | 261 | int maxevents; |
262 | struct epoll_event __user *events; | 262 | struct epoll_event __user *events; |
263 | int res; | ||
263 | }; | 264 | }; |
264 | 265 | ||
265 | /* | 266 | /* |
@@ -660,12 +661,13 @@ static inline void ep_pm_stay_awake_rcu(struct epitem *epi) | |||
660 | * | 661 | * |
661 | * Returns: The same integer error code returned by the @sproc callback. | 662 | * Returns: The same integer error code returned by the @sproc callback. |
662 | */ | 663 | */ |
663 | static int ep_scan_ready_list(struct eventpoll *ep, | 664 | static __poll_t ep_scan_ready_list(struct eventpoll *ep, |
664 | int (*sproc)(struct eventpoll *, | 665 | __poll_t (*sproc)(struct eventpoll *, |
665 | struct list_head *, void *), | 666 | struct list_head *, void *), |
666 | void *priv, int depth, bool ep_locked) | 667 | void *priv, int depth, bool ep_locked) |
667 | { | 668 | { |
668 | int error, pwake = 0; | 669 | __poll_t res; |
670 | int pwake = 0; | ||
669 | unsigned long flags; | 671 | unsigned long flags; |
670 | struct epitem *epi, *nepi; | 672 | struct epitem *epi, *nepi; |
671 | LIST_HEAD(txlist); | 673 | LIST_HEAD(txlist); |
@@ -694,7 +696,7 @@ static int ep_scan_ready_list(struct eventpoll *ep, | |||
694 | /* | 696 | /* |
695 | * Now call the callback function. | 697 | * Now call the callback function. |
696 | */ | 698 | */ |
697 | error = (*sproc)(ep, &txlist, priv); | 699 | res = (*sproc)(ep, &txlist, priv); |
698 | 700 | ||
699 | spin_lock_irqsave(&ep->lock, flags); | 701 | spin_lock_irqsave(&ep->lock, flags); |
700 | /* | 702 | /* |
@@ -747,7 +749,7 @@ static int ep_scan_ready_list(struct eventpoll *ep, | |||
747 | if (pwake) | 749 | if (pwake) |
748 | ep_poll_safewake(&ep->poll_wait); | 750 | ep_poll_safewake(&ep->poll_wait); |
749 | 751 | ||
750 | return error; | 752 | return res; |
751 | } | 753 | } |
752 | 754 | ||
753 | static void epi_rcu_free(struct rcu_head *head) | 755 | static void epi_rcu_free(struct rcu_head *head) |
@@ -864,7 +866,7 @@ static int ep_eventpoll_release(struct inode *inode, struct file *file) | |||
864 | return 0; | 866 | return 0; |
865 | } | 867 | } |
866 | 868 | ||
867 | static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head, | 869 | static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head, |
868 | void *priv); | 870 | void *priv); |
869 | static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead, | 871 | static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead, |
870 | poll_table *pt); | 872 | poll_table *pt); |
@@ -874,7 +876,7 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead, | |||
874 | * the ep->mtx so we need to start from depth=1, such that mutex_lock_nested() | 876 | * the ep->mtx so we need to start from depth=1, such that mutex_lock_nested() |
875 | * is correctly annotated. | 877 | * is correctly annotated. |
876 | */ | 878 | */ |
877 | static unsigned int ep_item_poll(const struct epitem *epi, poll_table *pt, | 879 | static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt, |
878 | int depth) | 880 | int depth) |
879 | { | 881 | { |
880 | struct eventpoll *ep; | 882 | struct eventpoll *ep; |
@@ -894,7 +896,7 @@ static unsigned int ep_item_poll(const struct epitem *epi, poll_table *pt, | |||
894 | locked) & epi->event.events; | 896 | locked) & epi->event.events; |
895 | } | 897 | } |
896 | 898 | ||
897 | static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head, | 899 | static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head, |
898 | void *priv) | 900 | void *priv) |
899 | { | 901 | { |
900 | struct epitem *epi, *tmp; | 902 | struct epitem *epi, *tmp; |
@@ -1414,7 +1416,8 @@ static noinline void ep_destroy_wakeup_source(struct epitem *epi) | |||
1414 | static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, | 1416 | static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, |
1415 | struct file *tfile, int fd, int full_check) | 1417 | struct file *tfile, int fd, int full_check) |
1416 | { | 1418 | { |
1417 | int error, revents, pwake = 0; | 1419 | int error, pwake = 0; |
1420 | __poll_t revents; | ||
1418 | unsigned long flags; | 1421 | unsigned long flags; |
1419 | long user_watches; | 1422 | long user_watches; |
1420 | struct epitem *epi; | 1423 | struct epitem *epi; |
@@ -1612,12 +1615,11 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, | |||
1612 | return 0; | 1615 | return 0; |
1613 | } | 1616 | } |
1614 | 1617 | ||
1615 | static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, | 1618 | static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head, |
1616 | void *priv) | 1619 | void *priv) |
1617 | { | 1620 | { |
1618 | struct ep_send_events_data *esed = priv; | 1621 | struct ep_send_events_data *esed = priv; |
1619 | int eventcnt; | 1622 | __poll_t revents; |
1620 | unsigned int revents; | ||
1621 | struct epitem *epi; | 1623 | struct epitem *epi; |
1622 | struct epoll_event __user *uevent; | 1624 | struct epoll_event __user *uevent; |
1623 | struct wakeup_source *ws; | 1625 | struct wakeup_source *ws; |
@@ -1630,8 +1632,8 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, | |||
1630 | * Items cannot vanish during the loop because ep_scan_ready_list() is | 1632 | * Items cannot vanish during the loop because ep_scan_ready_list() is |
1631 | * holding "mtx" during this call. | 1633 | * holding "mtx" during this call. |
1632 | */ | 1634 | */ |
1633 | for (eventcnt = 0, uevent = esed->events; | 1635 | for (esed->res = 0, uevent = esed->events; |
1634 | !list_empty(head) && eventcnt < esed->maxevents;) { | 1636 | !list_empty(head) && esed->res < esed->maxevents;) { |
1635 | epi = list_first_entry(head, struct epitem, rdllink); | 1637 | epi = list_first_entry(head, struct epitem, rdllink); |
1636 | 1638 | ||
1637 | /* | 1639 | /* |
@@ -1665,9 +1667,11 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, | |||
1665 | __put_user(epi->event.data, &uevent->data)) { | 1667 | __put_user(epi->event.data, &uevent->data)) { |
1666 | list_add(&epi->rdllink, head); | 1668 | list_add(&epi->rdllink, head); |
1667 | ep_pm_stay_awake(epi); | 1669 | ep_pm_stay_awake(epi); |
1668 | return eventcnt ? eventcnt : -EFAULT; | 1670 | if (!esed->res) |
1671 | esed->res = -EFAULT; | ||
1672 | return 0; | ||
1669 | } | 1673 | } |
1670 | eventcnt++; | 1674 | esed->res++; |
1671 | uevent++; | 1675 | uevent++; |
1672 | if (epi->event.events & EPOLLONESHOT) | 1676 | if (epi->event.events & EPOLLONESHOT) |
1673 | epi->event.events &= EP_PRIVATE_BITS; | 1677 | epi->event.events &= EP_PRIVATE_BITS; |
@@ -1689,7 +1693,7 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, | |||
1689 | } | 1693 | } |
1690 | } | 1694 | } |
1691 | 1695 | ||
1692 | return eventcnt; | 1696 | return 0; |
1693 | } | 1697 | } |
1694 | 1698 | ||
1695 | static int ep_send_events(struct eventpoll *ep, | 1699 | static int ep_send_events(struct eventpoll *ep, |
@@ -1700,7 +1704,8 @@ static int ep_send_events(struct eventpoll *ep, | |||
1700 | esed.maxevents = maxevents; | 1704 | esed.maxevents = maxevents; |
1701 | esed.events = events; | 1705 | esed.events = events; |
1702 | 1706 | ||
1703 | return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false); | 1707 | ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false); |
1708 | return esed.res; | ||
1704 | } | 1709 | } |
1705 | 1710 | ||
1706 | static inline struct timespec64 ep_set_mstimeout(long ms) | 1711 | static inline struct timespec64 ep_set_mstimeout(long ms) |