author		Florian Westphal <fw@strlen.de>	2017-07-29 21:57:18 -0400
committer	David S. Miller <davem@davemloft.net>	2017-07-31 17:37:49 -0400
commit		e7942d0633c47c791ece6afa038be9cf977226de (patch)
tree		27dddb46a5358137f6cb6e63bddab14a77a840ec /net/ipv4/tcp_input.c
parent		764646b08d09d29adced740c26447ecdaabc9088 (diff)
tcp: remove prequeue support
prequeue is a tcp receive optimization that moves part of rx processing
from bh (bottom-half) to process context.

This only works if the socket being processed belongs to a process that
is blocked in recv on that socket. In practice, this rarely happens
anymore: servers tend to use an event-driven (epoll) model, and even
normal client applications (web browsers) commonly use many tcp
connections in parallel.

This has a measurable impact only in netperf (which uses plain recv and
thus allows prequeue use) from a host to a locally running vm (~4%);
there were no changes when running netperf between two physical hosts
with ixgbe interfaces.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
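For context, a minimal userspace sketch (not part of this patch; the
function names are hypothetical and socket setup is elided) of the two
receive patterns the message contrasts. Only the blocking-recv pattern
can satisfy prequeue's eligibility test, because the kernel then knows
which task is asleep in recv on that socket; an epoll-driven task blocks
in epoll_wait() instead, so its packets always take the normal
bottom-half path.

#include <stddef.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Pattern A: plain blocking recv, as used by netperf. The task sleeps
 * inside recv() on this one socket, which is the only situation the
 * (removed) prequeue fast path could exploit. */
static ssize_t read_blocking(int fd, char *buf, size_t len)
{
	return recv(fd, buf, len, 0);
}

/* Pattern B: event-driven receive. The task blocks in epoll_wait(), not
 * in recv() on a particular socket, so the prequeue eligibility check
 * (tp->ucopy.task == current, see the hunks below) can never match. */
static ssize_t read_event_driven(int epfd, char *buf, size_t len)
{
	struct epoll_event ev;

	if (epoll_wait(epfd, &ev, 1, -1) != 1)
		return -1;
	return recv(ev.data.fd, buf, len, MSG_DONTWAIT);
}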
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c | 62 --------------------------------
1 file changed, 0 insertions(+), 62 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index adc3f3e9468c..770ce6cb3eca 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4611,22 +4611,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 			goto out_of_window;
 
 		/* Ok. In sequence. In window. */
-		if (tp->ucopy.task == current &&
-		    tp->copied_seq == tp->rcv_nxt && tp->ucopy.len &&
-		    sock_owned_by_user(sk) && !tp->urg_data) {
-			int chunk = min_t(unsigned int, skb->len,
-					  tp->ucopy.len);
-
-			__set_current_state(TASK_RUNNING);
-
-			if (!skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk)) {
-				tp->ucopy.len -= chunk;
-				tp->copied_seq += chunk;
-				eaten = (chunk == skb->len);
-				tcp_rcv_space_adjust(sk);
-			}
-		}
-
 		if (eaten <= 0) {
 queue_and_out:
 			if (eaten < 0) {
@@ -5186,26 +5170,6 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t
 	}
 }
 
-static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	int chunk = skb->len - hlen;
-	int err;
-
-	if (skb_csum_unnecessary(skb))
-		err = skb_copy_datagram_msg(skb, hlen, tp->ucopy.msg, chunk);
-	else
-		err = skb_copy_and_csum_datagram_msg(skb, hlen, tp->ucopy.msg);
-
-	if (!err) {
-		tp->ucopy.len -= chunk;
-		tp->copied_seq += chunk;
-		tcp_rcv_space_adjust(sk);
-	}
-
-	return err;
-}
-
 /* Accept RST for rcv_nxt - 1 after a FIN.
  * When tcp connections are abruptly terminated from Mac OSX (via ^C), a
  * FIN is sent followed by a RST packet. The RST is sent with the same
@@ -5446,32 +5410,6 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			int eaten = 0;
 			bool fragstolen = false;
 
-			if (tp->ucopy.task == current &&
-			    tp->copied_seq == tp->rcv_nxt &&
-			    len - tcp_header_len <= tp->ucopy.len &&
-			    sock_owned_by_user(sk)) {
-				__set_current_state(TASK_RUNNING);
-
-				if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) {
-					/* Predicted packet is in window by definition.
-					 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
-					 * Hence, check seq<=rcv_wup reduces to:
-					 */
-					if (tcp_header_len ==
-					    (sizeof(struct tcphdr) +
-					     TCPOLEN_TSTAMP_ALIGNED) &&
-					    tp->rcv_nxt == tp->rcv_wup)
-						tcp_store_ts_recent(tp);
-
-					tcp_rcv_rtt_measure_ts(sk, skb);
-
-					__skb_pull(skb, tcp_header_len);
-					tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
-					NET_INC_STATS(sock_net(sk),
-						      LINUX_MIB_TCPHPHITSTOUSER);
-					eaten = 1;
-				}
-			}
 			if (!eaten) {
 				if (tcp_checksum_complete(skb))
 					goto csum_error;