about summary refs log tree commit diff stats
path: root/net/ipv4/tcp_input.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c | 33
1 files changed, 26 insertions, 7 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8e5522c6833a..814ea43dd12f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -78,6 +78,7 @@
 #include <linux/errqueue.h>
 #include <trace/events/tcp.h>
 #include <linux/static_key.h>
+#include <net/busy_poll.h>
 
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 
@@ -582,9 +583,12 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (tp->rx_opt.rcv_tsecr &&
-	    (TCP_SKB_CB(skb)->end_seq -
-	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) {
+	if (tp->rx_opt.rcv_tsecr == tp->rcv_rtt_last_tsecr)
+		return;
+	tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr;
+
+	if (TCP_SKB_CB(skb)->end_seq -
+	    TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) {
 		u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
 		u32 delta_us;
 
@@ -4617,8 +4621,10 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 	skb->data_len = data_len;
 	skb->len = size;
 
-	if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
+	if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
 		goto err_free;
+	}
 
 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
 	if (err)
@@ -4674,15 +4680,19 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 	 * Out of sequence packets to the out_of_order_queue.
 	 */
 	if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
-		if (tcp_receive_window(tp) == 0)
+		if (tcp_receive_window(tp) == 0) {
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
 			goto out_of_window;
+		}
 
 		/* Ok. In sequence. In window. */
 queue_and_out:
 		if (skb_queue_len(&sk->sk_receive_queue) == 0)
 			sk_forced_mem_schedule(sk, skb->truesize);
-		else if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
+		else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
 			goto drop;
+		}
 
 		eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
 		tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
@@ -4741,8 +4751,10 @@ drop:
 		/* If window is closed, drop tail of packet. But after
 		 * remembering D-SACK for its head made in previous line.
 		 */
-		if (!tcp_receive_window(tp))
+		if (!tcp_receive_window(tp)) {
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
 			goto out_of_window;
+		}
 		goto queue_and_out;
 	}
 
@@ -5484,6 +5496,11 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
 			tcp_ack(sk, skb, 0);
 			__kfree_skb(skb);
 			tcp_data_snd_check(sk);
+			/* When receiving pure ack in fast path, update
+			 * last ts ecr directly instead of calling
+			 * tcp_rcv_rtt_measure_ts()
+			 */
+			tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr;
 			return;
 		} else { /* Header too small */
 			TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
@@ -5585,6 +5602,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 	if (skb) {
 		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
 		security_inet_conn_established(sk, skb);
+		sk_mark_napi_id(sk, skb);
 	}
 
 	tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB);
@@ -6413,6 +6431,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	tcp_rsk(req)->snt_isn = isn;
 	tcp_rsk(req)->txhash = net_tx_rndhash();
 	tcp_openreq_init_rwin(req, sk, dst);
+	sk_rx_queue_set(req_to_sk(req), skb);
 	if (!want_cookie) {
 		tcp_reqsk_record_syn(sk, req, skb);
 		fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);