author     Linus Torvalds <torvalds@linux-foundation.org>  2018-06-28 12:43:44 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-06-28 13:40:47 -0400
commit     a11e1d432b51f63ba698d044441284a661f01144 (patch)
tree       9f3c5a10bf0d7f9a342d5fb39c0c35ea14170124 /net/ipv4/tcp.c
parent     f57494321cbf5b1e7769b6135407d2995a369e28 (diff)
Revert changes to convert to ->poll_mask() and aio IOCB_CMD_POLL
The poll() changes were not well thought out, and completely unexplained. They also caused a huge performance regression, because "->poll()" was no longer a trivial file operation that just called down to the underlying file operations, but instead did at least two indirect calls.

Indirect calls are sadly slow now with the Spectre mitigation, but the performance problem could at least be largely mitigated by changing the "->get_poll_head()" operation to just have a per-file-descriptor pointer to the poll head instead. That gets rid of one of the new indirections.

But that doesn't fix the new complexity that is completely unwarranted for the regular case. The (undocumented) reason for the poll() changes was some alleged AIO poll race fixing, but we don't make the common case slower and more complex for some uncommon special case, so this all really needs way more explanations and most likely a fundamental redesign.

[ This revert is a revert of about 30 different commits, not reverted individually because that would just be unnecessarily messy - Linus ]

Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
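To make the performance argument concrete, here is a small compilable toy model of the two dispatch shapes. It is only a model: every name in it (file_ops, wait_head, the dummy_* callbacks) is an invented stand-in, not the kernel's real struct file / struct file_operations. The classic path costs one indirect call per dispatch; the ->poll_mask() scheme costs two, and retpoline-style Spectre mitigations make each indirect call expensive.

#include <stdio.h>

typedef unsigned int poll_t;
struct wait_head { int unused; };
struct file;

/* Simplified stand-ins for the kernel's file_operations hooks. */
struct file_ops {
	poll_t (*poll)(struct file *f);	/* classic single hook */
	struct wait_head *(*get_poll_head)(struct file *f, poll_t events);
	poll_t (*poll_mask)(struct file *f, poll_t events);
};
struct file { const struct file_ops *f_op; };

/* Classic path: one indirect call straight into the protocol code. */
static poll_t classic_dispatch(struct file *f)
{
	return f->f_op->poll(f);
}

/* ->poll_mask() path: two indirect calls per dispatch. */
static poll_t poll_mask_dispatch(struct file *f, poll_t events)
{
	struct wait_head *head = f->f_op->get_poll_head(f, events);
	(void)head;	/* the real kernel registers the waiter on this queue */
	return f->f_op->poll_mask(f, events);
}

/* Dummy implementations so the model actually runs. */
static struct wait_head wq;
static poll_t dummy_poll(struct file *f) { (void)f; return 0x1; }
static struct wait_head *dummy_head(struct file *f, poll_t e) { (void)f; (void)e; return &wq; }
static poll_t dummy_mask(struct file *f, poll_t e) { (void)f; return e & 0x1; }

int main(void)
{
	const struct file_ops ops = { dummy_poll, dummy_head, dummy_mask };
	struct file f = { &ops };
	printf("classic: %#x, poll_mask: %#x\n",
	       classic_dispatch(&f), poll_mask_dispatch(&f, 0x1));
	return 0;
}

The per-file poll-head pointer mentioned above would remove the get_poll_head() call, bringing dispatch back to a single indirection, but the commit message's complexity objection still stands.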
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c | 23 +++++++++++++++++------
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 141acd92e58a..e7b53d2a971f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -494,21 +494,32 @@ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
 }
 
 /*
- * Socket is not locked. We are protected from async events by poll logic and
- * correct handling of state changes made by other threads is impossible in
- * any case.
+ * Wait for a TCP event.
+ *
+ * Note that we don't need to lock the socket, as the upper poll layers
+ * take care of normal races (between the test and the event) and we don't
+ * go look at any of the socket buffers directly.
  */
-__poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
+	__poll_t mask;
 	struct sock *sk = sock->sk;
 	const struct tcp_sock *tp = tcp_sk(sk);
-	__poll_t mask = 0;
 	int state;
 
+	sock_poll_wait(file, sk_sleep(sk), wait);
+
 	state = inet_sk_state_load(sk);
 	if (state == TCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
+	/* Socket is not locked. We are protected from async events
+	 * by poll logic and correct handling of state changes
+	 * made by other threads is impossible in any case.
+	 */
+
+	mask = 0;
+
 	/*
 	 * EPOLLHUP is certainly not done right. But poll() doesn't
 	 * have a notion of HUP in just one direction, and for a
@@ -589,7 +600,7 @@ __poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
 
 	return mask;
 }
-EXPORT_SYMBOL(tcp_poll_mask);
+EXPORT_SYMBOL(tcp_poll);
 
 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
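For reference, the restored tcp_poll() above is what services an ordinary userspace poll() on a TCP socket, reached through the socket file's ->poll operation. A minimal self-contained example (loopback only, error handling elided for brevity) that also exercises the TCP_LISTEN branch visible in the first hunk:

#include <netinet/in.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	/* Listening socket on an ephemeral loopback port. */
	int lfd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr;
	socklen_t alen = sizeof(addr);

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	addr.sin_port = 0;			/* kernel picks the port */
	bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
	listen(lfd, 1);
	getsockname(lfd, (struct sockaddr *)&addr, &alen);

	/* A client connect makes the listener readable. */
	int cfd = socket(AF_INET, SOCK_STREAM, 0);
	connect(cfd, (struct sockaddr *)&addr, sizeof(addr));

	/* poll() on the listener: in the kernel this reaches tcp_poll(),
	 * which sees TCP_LISTEN and returns inet_csk_listen_poll(sk).
	 */
	struct pollfd pfd = { .fd = lfd, .events = POLLIN };
	int n = poll(&pfd, 1, 5000);
	if (n > 0 && (pfd.revents & POLLIN))
		printf("pending connection: listener is readable\n");

	close(cfd);
	close(lfd);
	return 0;
}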