path: root/net/ipv4/tcp_timer.c
author	Florian Westphal <fw@strlen.de>	2017-07-29 21:57:18 -0400
committer	David S. Miller <davem@davemloft.net>	2017-07-31 17:37:49 -0400
commit	e7942d0633c47c791ece6afa038be9cf977226de
tree	27dddb46a5358137f6cb6e63bddab14a77a840ec /net/ipv4/tcp_timer.c
parent	764646b08d09d29adced740c26447ecdaabc9088
tcp: remove prequeue support
prequeue is a tcp receive optimization that moves part of rx processing
from bh to process context.

This only works if the socket being processed belongs to a process that
is blocked in recv on that socket.

In practice, this doesn't happen that often anymore because nowadays
servers tend to use an event-driven (epoll) model.

Even normal client applications (web browsers) commonly use many tcp
connections in parallel.

This has measurable impact only in netperf (which uses plain recv and
thus allows prequeue use) from host to locally running vm (~4%); however,
there were no changes when using netperf between two physical hosts with
ixgbe interfaces.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
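Illustrative user-space sketch (not part of the patch): prequeue could only
engage while a task sat blocked in recv() on the socket, as in the
netperf-style loop below; an epoll-driven loop never blocks in recv(), so it
never benefited. Socket setup is omitted and the function names are made up
for the example.

```c
#include <sys/epoll.h>
#include <sys/socket.h>

/* Pattern 1: blocking receive loop -- the only case prequeue could help,
 * because the task sleeps inside recv() between packets. */
static void blocking_reader(int fd)
{
	char buf[4096];

	while (recv(fd, buf, sizeof(buf), 0) > 0)
		; /* consume data */
}

/* Pattern 2: event-driven loop -- rx processing completes in softirq
 * context before epoll_wait() returns, so prequeue never kicks in. */
static void epoll_reader(int epfd)
{
	struct epoll_event ev[16];
	char buf[4096];

	for (;;) {
		int i, n = epoll_wait(epfd, ev, 16, -1);

		for (i = 0; i < n; i++)
			while (recv(ev[i].data.fd, buf, sizeof(buf),
				    MSG_DONTWAIT) > 0)
				; /* drain without blocking */
	}
}
```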
Diffstat (limited to 'net/ipv4/tcp_timer.c')
-rw-r--r--	net/ipv4/tcp_timer.c	12
1 file changed, 0 insertions, 12 deletions
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c0feeeef962a..f753f9d2fee3 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -239,7 +239,6 @@ static int tcp_write_timeout(struct sock *sk)
 /* Called with BH disabled */
 void tcp_delack_timer_handler(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	sk_mem_reclaim_partial(sk);
@@ -254,17 +253,6 @@ void tcp_delack_timer_handler(struct sock *sk)
 	}
 	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
 
-	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
-		struct sk_buff *skb;
-
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
-
-		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
-			sk_backlog_rcv(sk, skb);
-
-		tp->ucopy.memory = 0;
-	}
-
 	if (inet_csk_ack_scheduled(sk)) {
 		if (!icsk->icsk_ack.pingpong) {
 			/* Delayed ACK missed: inflate ATO. */
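The removed hunk bumped LINUX_MIB_TCPSCHEDULERFAILED whenever the delayed-ACK
timer had to drain a stale prequeue. A minimal sketch of how that counter can
be observed from user space on kernels that still carry prequeue, assuming the
field is exported as "TCPSchedulerFailed" in the TcpExt rows of
/proc/net/netstat (after this patch the counter simply stays at zero):

```c
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>

int main(void)
{
	char hdr[4096], val[4096];
	FILE *f = fopen("/proc/net/netstat", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Lines come in pairs: a row of field names followed by a row of
	 * values sharing the same "TcpExt:" prefix. */
	while (fgets(hdr, sizeof(hdr), f) && fgets(val, sizeof(val), f)) {
		char *hs, *vs;
		char *h, *v;

		if (strncmp(hdr, "TcpExt:", 7))
			continue;
		h = strtok_r(hdr, " \n", &hs);
		v = strtok_r(val, " \n", &vs);
		while (h && v) {
			if (!strcmp(h, "TCPSchedulerFailed"))
				printf("TCPSchedulerFailed = %s\n", v);
			h = strtok_r(NULL, " \n", &hs);
			v = strtok_r(NULL, " \n", &vs);
		}
	}
	fclose(f);
	return 0;
}
```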