summaryrefslogtreecommitdiffstats
path: root/net/tipc/socket.c
diff options
context:
space:
mode:
authorJon Maloy <jon.maloy@ericsson.com>2019-08-15 10:42:50 -0400
committerDavid S. Miller <davem@davemloft.net>2019-08-18 17:01:07 -0400
commite654f9f53b45fde3fcc8051830b212c7a8f36148 (patch)
tree1b3b3af06442bae94101ca20f1c0b6ca06d54e44 /net/tipc/socket.c
parent10086b345385bc1ca67b260aff236e742e2e630e (diff)
tipc: clean up skb list lock handling on send path
The policy for handling the skb list locks on the send and receive paths is simple. - On the send path we never need to grab the lock on the 'xmitq' list when the destination is an external node. - On the receive path we always need to grab the lock on the 'inputq' list, irrespective of source node. However, when transmitting node local messages those will eventually end up on the receive path of a local socket, meaning that the argument 'xmitq' in tipc_node_xmit() will become the 'inputq' argument in the function tipc_sk_rcv(). This has been handled by always initializing the spinlock of the 'xmitq' list at message creation, just in case it may end up on the receive path later, and despite knowing that the lock in most cases never will be used. This approach is inaccurate and confusing, and has also concealed the fact that the stated 'no lock grabbing' policy for the send path is violated in some cases. We now clean this up by never initializing the lock at message creation, instead doing this at the moment we find that the message actually will enter the receive path. At the same time we fix the four locations where we incorrectly access the spinlock on the send/error path. This patch also reverts commit d12cffe9329f ("tipc: ensure head->lock is initialised") which has now become redundant. CC: Eric Dumazet <edumazet@google.com> Reported-by: Chris Packham <chris.packham@alliedtelesis.co.nz> Acked-by: Ying Xue <ying.xue@windriver.com> Signed-off-by: Jon Maloy <jon.maloy@ericsson.com> Reviewed-by: Xin Long <lucien.xin@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc/socket.c')
-rw-r--r--net/tipc/socket.c14
1 files changed, 7 insertions, 7 deletions
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 83ae41d7e554..3b9f8cc328f5 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -809,7 +809,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
809 msg_set_nameupper(hdr, seq->upper); 809 msg_set_nameupper(hdr, seq->upper);
810 810
811 /* Build message as chain of buffers */ 811 /* Build message as chain of buffers */
812 skb_queue_head_init(&pkts); 812 __skb_queue_head_init(&pkts);
813 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts); 813 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
814 814
815 /* Send message if build was successful */ 815 /* Send message if build was successful */
@@ -853,7 +853,7 @@ static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
853 msg_set_grp_bc_seqno(hdr, bc_snd_nxt); 853 msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
854 854
855 /* Build message as chain of buffers */ 855 /* Build message as chain of buffers */
856 skb_queue_head_init(&pkts); 856 __skb_queue_head_init(&pkts);
857 mtu = tipc_node_get_mtu(net, dnode, tsk->portid); 857 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
858 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 858 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
859 if (unlikely(rc != dlen)) 859 if (unlikely(rc != dlen))
@@ -1058,7 +1058,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1058 msg_set_grp_bc_ack_req(hdr, ack); 1058 msg_set_grp_bc_ack_req(hdr, ack);
1059 1059
1060 /* Build message as chain of buffers */ 1060 /* Build message as chain of buffers */
1061 skb_queue_head_init(&pkts); 1061 __skb_queue_head_init(&pkts);
1062 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 1062 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1063 if (unlikely(rc != dlen)) 1063 if (unlikely(rc != dlen))
1064 return rc; 1064 return rc;
@@ -1387,7 +1387,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1387 if (unlikely(rc)) 1387 if (unlikely(rc))
1388 return rc; 1388 return rc;
1389 1389
1390 skb_queue_head_init(&pkts); 1390 __skb_queue_head_init(&pkts);
1391 mtu = tipc_node_get_mtu(net, dnode, tsk->portid); 1391 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
1392 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); 1392 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1393 if (unlikely(rc != dlen)) 1393 if (unlikely(rc != dlen))
@@ -1445,7 +1445,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1445 int send, sent = 0; 1445 int send, sent = 0;
1446 int rc = 0; 1446 int rc = 0;
1447 1447
1448 skb_queue_head_init(&pkts); 1448 __skb_queue_head_init(&pkts);
1449 1449
1450 if (unlikely(dlen > INT_MAX)) 1450 if (unlikely(dlen > INT_MAX))
1451 return -EMSGSIZE; 1451 return -EMSGSIZE;
@@ -1805,7 +1805,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1805 1805
1806 /* Send group flow control advertisement when applicable */ 1806 /* Send group flow control advertisement when applicable */
1807 if (tsk->group && msg_in_group(hdr) && !grp_evt) { 1807 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1808 skb_queue_head_init(&xmitq); 1808 __skb_queue_head_init(&xmitq);
1809 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen), 1809 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1810 msg_orignode(hdr), msg_origport(hdr), 1810 msg_orignode(hdr), msg_origport(hdr),
1811 &xmitq); 1811 &xmitq);
@@ -2674,7 +2674,7 @@ static void tipc_sk_timeout(struct timer_list *t)
2674 struct sk_buff_head list; 2674 struct sk_buff_head list;
2675 int rc = 0; 2675 int rc = 0;
2676 2676
2677 skb_queue_head_init(&list); 2677 __skb_queue_head_init(&list);
2678 bh_lock_sock(sk); 2678 bh_lock_sock(sk);
2679 2679
2680 /* Try again later if socket is busy */ 2680 /* Try again later if socket is busy */