about summary refs log tree commit diff stats
path: root/net/tls/tls_sw.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-06-28 12:43:44 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-06-28 13:40:47 -0400
commita11e1d432b51f63ba698d044441284a661f01144 (patch)
tree9f3c5a10bf0d7f9a342d5fb39c0c35ea14170124 /net/tls/tls_sw.c
parentf57494321cbf5b1e7769b6135407d2995a369e28 (diff)
Revert changes to convert to ->poll_mask() and aio IOCB_CMD_POLL
The poll() changes were not well thought out, and completely unexplained. They also caused a huge performance regression, because "->poll()" was no longer a trivial file operation that just called down to the underlying file operations, but instead did at least two indirect calls. Indirect calls are sadly slow now with the Spectre mitigation, but the performance problem could at least be largely mitigated by changing the "->get_poll_head()" operation to just have a per-file-descriptor pointer to the poll head instead. That gets rid of one of the new indirections. But that doesn't fix the new complexity that is completely unwarranted for the regular case. The (undocumented) reason for the poll() changes was some alleged AIO poll race fixing, but we don't make the common case slower and more complex for some uncommon special case, so this all really needs way more explanations and most likely a fundamental redesign. [ This revert is a revert of about 30 different commits, not reverted individually because that would just be unnecessarily messy - Linus ] Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Christoph Hellwig <hch@lst.de> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'net/tls/tls_sw.c')
-rw-r--r--net/tls/tls_sw.c19
1 files changed, 10 insertions, 9 deletions
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index f127fac88acf..d2380548f8f6 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -919,22 +919,23 @@ splice_read_end:
 	return copied ? : err;
 }
 
-__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events)
+unsigned int tls_sw_poll(struct file *file, struct socket *sock,
+			 struct poll_table_struct *wait)
 {
+	unsigned int ret;
 	struct sock *sk = sock->sk;
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-	__poll_t mask;
 
-	/* Grab EPOLLOUT and EPOLLHUP from the underlying socket */
-	mask = ctx->sk_poll_mask(sock, events);
+	/* Grab POLLOUT and POLLHUP from the underlying socket */
+	ret = ctx->sk_poll(file, sock, wait);
 
-	/* Clear EPOLLIN bits, and set based on recv_pkt */
-	mask &= ~(EPOLLIN | EPOLLRDNORM);
+	/* Clear POLLIN bits, and set based on recv_pkt */
+	ret &= ~(POLLIN | POLLRDNORM);
 	if (ctx->recv_pkt)
-		mask |= EPOLLIN | EPOLLRDNORM;
+		ret |= POLLIN | POLLRDNORM;
 
-	return mask;
+	return ret;
 }
 
 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
@@ -1191,7 +1192,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 		sk->sk_data_ready = tls_data_ready;
 		write_unlock_bh(&sk->sk_callback_lock);
 
-		sw_ctx_rx->sk_poll_mask = sk->sk_socket->ops->poll_mask;
+		sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;
 
 		strp_check_rcv(&sw_ctx_rx->strp);
 	}