path: root/net/sctp/socket.c
author    Linus Torvalds <torvalds@linux-foundation.org>    2018-06-28 12:43:44 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-06-28 13:40:47 -0400
commit    a11e1d432b51f63ba698d044441284a661f01144 (patch)
tree      9f3c5a10bf0d7f9a342d5fb39c0c35ea14170124 /net/sctp/socket.c
parent    f57494321cbf5b1e7769b6135407d2995a369e28 (diff)
Revert changes to convert to ->poll_mask() and aio IOCB_CMD_POLL
The poll() changes were not well thought out, and completely unexplained. They also caused a huge performance regression, because "->poll()" was no longer a trivial file operation that just called down to the underlying file operations, but instead did at least two indirect calls. Indirect calls are sadly slow now with the Spectre mitigation, but the performance problem could at least be largely mitigated by changing the "->get_poll_head()" operation to just have a per-file-descriptor pointer to the poll head instead. That gets rid of one of the new indirections.

But that doesn't fix the new complexity that is completely unwarranted for the regular case. The (undocumented) reason for the poll() changes was some alleged AIO poll race fixing, but we don't make the common case slower and more complex for some uncommon special case, so this all really needs way more explanations and most likely a fundamental redesign.

[ This revert is a revert of about 30 different commits, not reverted individually because that would just be unnecessarily messy  - Linus ]

Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
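For reference, the indirection argument in the message above can be sketched in kernel-style C. Under the classic model this revert restores, a protocol's ->poll() both registers the caller on the socket's wait queue via poll_wait() and computes the ready mask in a single call; under the reverted model the core first called ->get_poll_head() to find the wait queue and then ->poll_mask() for the mask. The example_poll()/example_poll_mask() helpers below are hypothetical illustrations modeled on the signatures visible in this diff, not code from this commit.

    #include <linux/poll.h>
    #include <linux/net.h>
    #include <net/sock.h>

    /* Classic model (restored by this revert): one ->poll() call that both
     * registers on the socket's wait queue and returns the ready mask.
     */
    static __poll_t example_poll(struct file *file, struct socket *sock,
                                 poll_table *wait)
    {
            struct sock *sk = sock->sk;
            __poll_t mask = 0;

            poll_wait(file, sk_sleep(sk), wait);    /* register the waiter */

            if (!skb_queue_empty(&sk->sk_receive_queue))
                    mask |= EPOLLIN | EPOLLRDNORM;  /* data ready to read */
            if (sk_stream_is_writeable(sk))
                    mask |= EPOLLOUT | EPOLLWRNORM; /* room to write */
            return mask;
    }

    /* Reverted model: the core asked ->get_poll_head() for the wait queue
     * (one indirect call) and then ->poll_mask() for the mask (a second
     * indirect call); the driver no longer called poll_wait() itself.
     */
    static __poll_t example_poll_mask(struct socket *sock, __poll_t events)
    {
            struct sock *sk = sock->sk;
            __poll_t mask = 0;

            if (!skb_queue_empty(&sk->sk_receive_queue))
                    mask |= EPOLLIN | EPOLLRDNORM;
            if (sk_stream_is_writeable(sk))
                    mask |= EPOLLOUT | EPOLLWRNORM;
            return mask;
    }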
Diffstat (limited to 'net/sctp/socket.c')
-rw-r--r--    net/sctp/socket.c    4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index d20f7addee19..ce620e878538 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -7717,12 +7717,14 @@ out:
  * here, again, by modeling the current TCP/UDP code. We don't have
  * a good way to test with it yet.
  */
-__poll_t sctp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
 	struct sock *sk = sock->sk;
 	struct sctp_sock *sp = sctp_sk(sk);
 	__poll_t mask;
 
+	poll_wait(file, sk_sleep(sk), wait);
+
 	sock_rps_record_flow(sk);
 
 	/* A TCP-style listening socket becomes readable when the accept queue
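Beyond this hunk, the revert also switches SCTP's proto_ops entry back from the ->poll_mask() hook to the classic ->poll() hook (that change is not shown in this file's diff). A minimal sketch of that wiring, using an illustrative table name rather than the kernel's actual one:

    #include <linux/net.h>
    #include <net/sctp/sctp.h>      /* declares sctp_poll() */

    /* Hypothetical proto_ops fragment: the single ->poll() callback points
     * back at sctp_poll() instead of the ->poll_mask()/->get_poll_head()
     * pair.  "example_seqpacket_ops" is an illustrative name only.
     */
    static const struct proto_ops example_seqpacket_ops = {
            .family = PF_INET,
            .poll   = sctp_poll,    /* classic hook restored by the revert */
            /* ... other callbacks elided ... */
    };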