aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_input.c
diff options
context:
space:
mode:
authorYafang Shao <laoar.shao@gmail.com>2018-06-28 00:22:56 -0400
committerDavid S. Miller <davem@davemloft.net>2018-06-30 05:43:53 -0400
commitea5d0c32498e1a08ff5f3dbeafa4d74895851b0d (patch)
tree75f4e63b128510f799324bc81ce59fedb930ffb1 /net/ipv4/tcp_input.c
parent83607344d667315687e1a5ddd2ad2fbbff22cc43 (diff)
tcp: add new SNMP counter for drops when try to queue in rcv queue
When sk_rmem_alloc is larger than the receive buffer and we can't schedule more memory for it, the skb will be dropped. In the above situation, if this skb is put into the ofo queue, LINUX_MIB_TCPOFODROP is incremented to track it. However, if this skb is put into the receive queue, there's no record. So a new SNMP counter is introduced to track this behavior. LINUX_MIB_TCPRCVQDROP: Number of packets meant to be queued in the rcv queue but dropped because the socket rcvbuf limit was hit. Signed-off-by: Yafang Shao <laoar.shao@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--net/ipv4/tcp_input.c8
1 file changed, 6 insertions, 2 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9c5b3415413f..eecd359595fc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4611,8 +4611,10 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
4611 skb->data_len = data_len; 4611 skb->data_len = data_len;
4612 skb->len = size; 4612 skb->len = size;
4613 4613
4614 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) 4614 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
4615 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
4615 goto err_free; 4616 goto err_free;
4617 }
4616 4618
4617 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); 4619 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
4618 if (err) 4620 if (err)
@@ -4677,8 +4679,10 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
4677queue_and_out: 4679queue_and_out:
4678 if (skb_queue_len(&sk->sk_receive_queue) == 0) 4680 if (skb_queue_len(&sk->sk_receive_queue) == 0)
4679 sk_forced_mem_schedule(sk, skb->truesize); 4681 sk_forced_mem_schedule(sk, skb->truesize);
4680 else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) 4682 else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
4683 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
4681 goto drop; 4684 goto drop;
4685 }
4682 4686
4683 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); 4687 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
4684 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); 4688 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);