path: root/net/tipc/bcast.c
author	Jon Maloy <jon.maloy@ericsson.com>	2019-08-15 10:42:50 -0400
committer	David S. Miller <davem@davemloft.net>	2019-08-18 17:01:07 -0400
commit	e654f9f53b45fde3fcc8051830b212c7a8f36148 (patch)
tree	1b3b3af06442bae94101ca20f1c0b6ca06d54e44 /net/tipc/bcast.c
parent	10086b345385bc1ca67b260aff236e742e2e630e (diff)
tipc: clean up skb list lock handling on send path
The policy for handling the skb list locks on the send and receive paths
is simple.

- On the send path we never need to grab the lock on the 'xmitq' list
  when the destination is an external node.

- On the receive path we always need to grab the lock on the 'inputq'
  list, irrespective of source node.

However, when transmitting node local messages those will eventually
end up on the receive path of a local socket, meaning that the argument
'xmitq' in tipc_node_xmit() will become the 'inputq' argument in the
function tipc_sk_rcv(). This has been handled by always initializing
the spinlock of the 'xmitq' list at message creation, just in case it
may end up on the receive path later, and despite knowing that the lock
in most cases never will be used.

This approach is inaccurate and confusing, and has also concealed the
fact that the stated 'no lock grabbing' policy for the send path is
violated in some cases.

We now clean this up by never initializing the lock at message
creation, instead doing so at the moment we find that the message
actually will enter the receive path. At the same time we fix the four
locations where we incorrectly access the spinlock on the send/error
path.

This patch also reverts commit d12cffe9329f ("tipc: ensure head->lock
is initialised") which has now become redundant.

CC: Eric Dumazet <edumazet@google.com>
Reported-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
Acked-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Reviewed-by: Xin Long <lucien.xin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
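For reference, the difference between the two initializers the patch
switches between is that skb_queue_head_init() also initializes the
queue's spinlock, while __skb_queue_head_init() sets up only the list
head. A sketch, paraphrased from include/linux/skbuff.h (exact code may
differ between kernel versions):

	/* List-only init: for queues never touched concurrently,
	 * e.g. on-stack queues on the send path.
	 */
	static inline void __skb_queue_head_init(struct sk_buff_head *list)
	{
		list->prev = list->next = (struct sk_buff *)list;
		list->qlen = 0;
	}

	/* Full init: also initializes list->lock, needed once the
	 * queue can be accessed concurrently, i.e. on the receive path.
	 */
	static inline void skb_queue_head_init(struct sk_buff_head *list)
	{
		spin_lock_init(&list->lock);
		__skb_queue_head_init(list);
	}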
Diffstat (limited to 'net/tipc/bcast.c')
-rw-r--r--	net/tipc/bcast.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 34f3e5641438..6ef1abdd525f 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -185,7 +185,7 @@ static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
 	}
 
 	/* We have to transmit across all bearers */
-	skb_queue_head_init(&_xmitq);
+	__skb_queue_head_init(&_xmitq);
 	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
 		if (!bb->dests[bearer_id])
 			continue;
@@ -256,7 +256,7 @@ static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
 	struct sk_buff_head xmitq;
 	int rc = 0;
 
-	skb_queue_head_init(&xmitq);
+	__skb_queue_head_init(&xmitq);
 	tipc_bcast_lock(net);
 	if (tipc_link_bc_peers(l))
 		rc = tipc_link_xmit(l, pkts, &xmitq);
@@ -286,7 +286,7 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
 	u32 dnode, selector;
 
 	selector = msg_link_selector(buf_msg(skb_peek(pkts)));
-	skb_queue_head_init(&_pkts);
+	__skb_queue_head_init(&_pkts);
 
 	list_for_each_entry_safe(dst, tmp, &dests->list, list) {
 		dnode = dst->node;
@@ -344,7 +344,7 @@ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb,
 	msg_set_size(_hdr, MCAST_H_SIZE);
 	msg_set_is_rcast(_hdr, !msg_is_rcast(hdr));
 
-	skb_queue_head_init(&tmpq);
+	__skb_queue_head_init(&tmpq);
 	__skb_queue_tail(&tmpq, _skb);
 	if (method->rcast)
 		tipc_bcast_xmit(net, &tmpq, cong_link_cnt);
@@ -378,7 +378,7 @@ int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
 	int rc = 0;
 
 	skb_queue_head_init(&inputq);
-	skb_queue_head_init(&localq);
+	__skb_queue_head_init(&localq);
 
 	/* Clone packets before they are consumed by next call */
 	if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {
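The last hunk shows the policy in miniature: 'inputq' keeps the locking
skb_queue_head_init() because it feeds the local receive path, while
'localq' is send-path only and gets the lock-free variant. A minimal
sketch of the resulting pattern, with a hypothetical function name and
structure, not the actual TIPC code:

	/* Hypothetical illustration of the lock policy this patch
	 * enforces; example_xmit() is not a TIPC function.
	 */
	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	static void example_xmit(struct sk_buff_head *pkts, bool deliver_locally)
	{
		struct sk_buff_head q;

		/* Send path: q is private to this context, so the
		 * lock-free initializer suffices and q.lock stays
		 * uninitialized.
		 */
		__skb_queue_head_init(&q);
		skb_queue_splice_init(pkts, &q);

		if (deliver_locally) {
			/* q is about to become an 'inputq' on the
			 * receive path, where its lock will be taken;
			 * initialize the spinlock only now.
			 */
			spin_lock_init(&q.lock);
			/* ... hand q over to the local receive path ... */
		}
	}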