Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--  net/tipc/node.c | 42
1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 27753325e06e..e9295fa3a554 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1172,7 +1172,7 @@ msg_full:
  * @list: chain of buffers containing message
  * @dnode: address of destination node
  * @selector: a number used for deterministic link selection
- * Consumes the buffer chain, except when returning -ELINKCONG
+ * Consumes the buffer chain.
  * Returns 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUF
  */
 int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
@@ -1211,10 +1211,10 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
 	spin_unlock_bh(&le->lock);
 	tipc_node_read_unlock(n);
 
-	if (likely(rc == 0))
-		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
-	else if (rc == -ENOBUFS)
+	if (unlikely(rc == -ENOBUFS))
 		tipc_node_link_down(n, bearer_id, false);
+	else
+		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
 
 	tipc_node_put(n);
 
@@ -1226,20 +1226,15 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
  * messages, which will not be rejected
  * The only exception is datagram messages rerouted after secondary
  * lookup, which are rare and safe to dispose of anyway.
- * TODO: Return real return value, and let callers use
- * tipc_wait_for_sendpkt() where applicable
  */
 int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
 		       u32 selector)
 {
 	struct sk_buff_head head;
-	int rc;
 
 	skb_queue_head_init(&head);
 	__skb_queue_tail(&head, skb);
-	rc = tipc_node_xmit(net, &head, dnode, selector);
-	if (rc == -ELINKCONG)
-		kfree_skb(skb);
+	tipc_node_xmit(net, &head, dnode, selector);
 	return 0;
 }
 
@@ -1267,6 +1262,19 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
 	kfree_skb(skb);
 }
 
+static void tipc_node_mcast_rcv(struct tipc_node *n)
+{
+	struct tipc_bclink_entry *be = &n->bc_entry;
+
+	/* 'arrvq' is under inputq2's lock protection */
+	spin_lock_bh(&be->inputq2.lock);
+	spin_lock_bh(&be->inputq1.lock);
+	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
+	spin_unlock_bh(&be->inputq1.lock);
+	spin_unlock_bh(&be->inputq2.lock);
+	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
+}
+
 static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
 				  int bearer_id, struct sk_buff_head *xmitq)
 {
@@ -1340,15 +1348,8 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
 	if (!skb_queue_empty(&xmitq))
 		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
 
-	/* Deliver. 'arrvq' is under inputq2's lock protection */
-	if (!skb_queue_empty(&be->inputq1)) {
-		spin_lock_bh(&be->inputq2.lock);
-		spin_lock_bh(&be->inputq1.lock);
-		skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
-		spin_unlock_bh(&be->inputq1.lock);
-		spin_unlock_bh(&be->inputq2.lock);
-		tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
-	}
+	if (!skb_queue_empty(&be->inputq1))
+		tipc_node_mcast_rcv(n);
 
 	if (rc & TIPC_LINK_DOWN_EVT) {
 		/* Reception reassembly failure => reset all links to peer */
@@ -1575,6 +1576,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
 	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
 		tipc_named_rcv(net, &n->bc_entry.namedq);
 
+	if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
+		tipc_node_mcast_rcv(n);
+
 	if (!skb_queue_empty(&le->inputq))
 		tipc_sk_rcv(net, &le->inputq);
 