summaryrefslogtreecommitdiffstats
path: root/net/tipc/node.c
diff options
context:
space:
mode:
authorJon Maloy <jon.maloy@ericsson.com>2019-08-15 10:42:50 -0400
committerDavid S. Miller <davem@davemloft.net>2019-08-18 17:01:07 -0400
commite654f9f53b45fde3fcc8051830b212c7a8f36148 (patch)
tree1b3b3af06442bae94101ca20f1c0b6ca06d54e44 /net/tipc/node.c
parent10086b345385bc1ca67b260aff236e742e2e630e (diff)
tipc: clean up skb list lock handling on send path
The policy for handling the skb list locks on the send and receive paths is simple. - On the send path we never need to grab the lock on the 'xmitq' list when the destination is an external node. - On the receive path we always need to grab the lock on the 'inputq' list, irrespective of source node. However, when transmitting node local messages those will eventually end up on the receive path of a local socket, meaning that the argument 'xmitq' in tipc_node_xmit() will become the 'inputq' argument in the function tipc_sk_rcv(). This has been handled by always initializing the spinlock of the 'xmitq' list at message creation, just in case it may end up on the receive path later, and despite knowing that the lock in most cases never will be used. This approach is inaccurate and confusing, and has also concealed the fact that the stated 'no lock grabbing' policy for the send path is violated in some cases. We now clean up this by never initializing the lock at message creation, instead doing this at the moment we find that the message actually will enter the receive path. At the same time we fix the four locations where we incorrectly access the spinlock on the send/error path. This patch also reverts commit d12cffe9329f ("tipc: ensure head->lock is initialised") which has now become redundant. CC: Eric Dumazet <edumazet@google.com> Reported-by: Chris Packham <chris.packham@alliedtelesis.co.nz> Acked-by: Ying Xue <ying.xue@windriver.com> Signed-off-by: Jon Maloy <jon.maloy@ericsson.com> Reviewed-by: Xin Long <lucien.xin@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--net/tipc/node.c7
1 files changed, 4 insertions, 3 deletions
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 1bdcf0fc1a4d..c8f6177dd5a2 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1444,13 +1444,14 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
1444 1444
1445 if (in_own_node(net, dnode)) { 1445 if (in_own_node(net, dnode)) {
1446 tipc_loopback_trace(net, list); 1446 tipc_loopback_trace(net, list);
1447 spin_lock_init(&list->lock);
1447 tipc_sk_rcv(net, list); 1448 tipc_sk_rcv(net, list);
1448 return 0; 1449 return 0;
1449 } 1450 }
1450 1451
1451 n = tipc_node_find(net, dnode); 1452 n = tipc_node_find(net, dnode);
1452 if (unlikely(!n)) { 1453 if (unlikely(!n)) {
1453 skb_queue_purge(list); 1454 __skb_queue_purge(list);
1454 return -EHOSTUNREACH; 1455 return -EHOSTUNREACH;
1455 } 1456 }
1456 1457
@@ -1459,7 +1460,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
1459 if (unlikely(bearer_id == INVALID_BEARER_ID)) { 1460 if (unlikely(bearer_id == INVALID_BEARER_ID)) {
1460 tipc_node_read_unlock(n); 1461 tipc_node_read_unlock(n);
1461 tipc_node_put(n); 1462 tipc_node_put(n);
1462 skb_queue_purge(list); 1463 __skb_queue_purge(list);
1463 return -EHOSTUNREACH; 1464 return -EHOSTUNREACH;
1464 } 1465 }
1465 1466
@@ -1491,7 +1492,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
1491{ 1492{
1492 struct sk_buff_head head; 1493 struct sk_buff_head head;
1493 1494
1494 skb_queue_head_init(&head); 1495 __skb_queue_head_init(&head);
1495 __skb_queue_tail(&head, skb); 1496 __skb_queue_tail(&head, skb);
1496 tipc_node_xmit(net, &head, dnode, selector); 1497 tipc_node_xmit(net, &head, dnode, selector);
1497 return 0; 1498 return 0;