aboutsummaryrefslogtreecommitdiffstats
path: root/net/tipc/socket.c
diff options
context:
space:
mode:
authorGhantaKrishnamurthy MohanKrishna <mohan.krishna.ghanta.krishnamurthy@ericsson.com>2018-03-21 09:37:45 -0400
committerDavid S. Miller <davem@davemloft.net>2018-03-22 14:43:37 -0400
commit872619d8cf810c17279335ef531a2a34f3b4e589 (patch)
tree832466ed602f57586e25a2707d201e47d7bc2a9d /net/tipc/socket.c
parentc30b70deb5f4861f590031c33fd3ec6cc63f1df1 (diff)
tipc: step sk->sk_drops when rcv buffer is full
Currently when tipc is unable to queue a received message on a socket, the message is rejected back to the sender with error TIPC_ERR_OVERLOAD. However, the application on this socket has no knowledge about these discards.

In this commit, we try to step the sk_drops counter when tipc is unable to queue a received message. Export sk_drops using tipc socket diagnostics.

Acked-by: Jon Maloy <jon.maloy@ericsson.com>
Acked-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: GhantaKrishnamurthy MohanKrishna <mohan.krishna.ghanta.krishnamurthy@ericsson.com>
Signed-off-by: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc/socket.c')
-rw-r--r--	net/tipc/socket.c	| 9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 07559ce4b8ba..732ec894f69f 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2122,8 +2122,10 @@ static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2122 (!sk_conn && msg_connected(hdr)) || 2122 (!sk_conn && msg_connected(hdr)) ||
2123 (!grp && msg_in_group(hdr))) 2123 (!grp && msg_in_group(hdr)))
2124 err = TIPC_ERR_NO_PORT; 2124 err = TIPC_ERR_NO_PORT;
2125 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) 2125 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2126 atomic_inc(&sk->sk_drops);
2126 err = TIPC_ERR_OVERLOAD; 2127 err = TIPC_ERR_OVERLOAD;
2128 }
2127 2129
2128 if (unlikely(err)) { 2130 if (unlikely(err)) {
2129 tipc_skb_reject(net, err, skb, xmitq); 2131 tipc_skb_reject(net, err, skb, xmitq);
@@ -2202,6 +2204,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2202 2204
2203 /* Overload => reject message back to sender */ 2205 /* Overload => reject message back to sender */
2204 onode = tipc_own_addr(sock_net(sk)); 2206 onode = tipc_own_addr(sock_net(sk));
2207 atomic_inc(&sk->sk_drops);
2205 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) 2208 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
2206 __skb_queue_tail(xmitq, skb); 2209 __skb_queue_tail(xmitq, skb);
2207 break; 2210 break;
@@ -3293,7 +3296,9 @@ int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct tipc_sock *tsk,
3293 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ, 3296 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3294 skb_queue_len(&sk->sk_receive_queue)) || 3297 skb_queue_len(&sk->sk_receive_queue)) ||
3295 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ, 3298 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3296 skb_queue_len(&sk->sk_write_queue))) 3299 skb_queue_len(&sk->sk_write_queue)) ||
3300 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3301 atomic_read(&sk->sk_drops)))
3297 goto stat_msg_cancel; 3302 goto stat_msg_cancel;
3298 3303
3299 if (tsk->cong_link_cnt && 3304 if (tsk->cong_link_cnt &&