author		Richard Alpe <richard.alpe@ericsson.com>	2016-02-11 04:43:15 -0500
committer	David S. Miller <davem@davemloft.net>	2016-02-16 15:58:40 -0500
commit		4952cd3e7b47dfe8f7d6c69973b13eb487eb2bd0 (patch)
tree		17bef7045a199c4c0a6e6fd7e1da9d90a6e47ff5 /net/tipc
parent		37ace20a3c99c54ebffb4b13671a01adb20926ca (diff)
tipc: refactor node xmit and fix memory leaks
Refactor tipc_node_xmit() to fail fast and fail early. Fix several
potential memory leaks in unexpected error paths.

Reported-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: Richard Alpe <richard.alpe@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
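The leak mechanism is worth spelling out: once a caller hands its buffer chain to a function that consumes it, the caller no longer owns the buffers, so any error path inside the callee that returns without freeing the chain leaks it outright. A minimal sketch of the rule the patch enforces (hypothetical helper, not code from the patch):

#include <linux/skbuff.h>
#include <linux/errno.h>

/* Sketch only: a function that consumes the chain must purge it on its
 * own error paths, because the caller has already given up ownership. */
static int consume_or_purge_sketch(struct sk_buff_head *list,
				   unsigned int pktsz, unsigned int mtu)
{
	if (unlikely(pktsz > mtu)) {
		skb_queue_purge(list);	/* fail fast, but free what we own */
		return -EMSGSIZE;
	}
	/* ...otherwise queue the buffers onward; ownership moves with them... */
	return 0;
}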
Diffstat (limited to 'net/tipc')
-rw-r--r--	net/tipc/link.c	8
-rw-r--r--	net/tipc/node.c	54
2 files changed, 38 insertions, 24 deletions
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 6f4a6d9b0149..3e513daecf80 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -903,8 +903,10 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
 			return link_schedule_user(l, list);
 	}
-	if (unlikely(msg_size(hdr) > mtu))
+	if (unlikely(msg_size(hdr) > mtu)) {
+		skb_queue_purge(list);
 		return -EMSGSIZE;
+	}
 
 	/* Prepare each packet for sending, and add to relevant queue: */
 	while (skb_queue_len(list)) {
@@ -916,8 +918,10 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 
 		if (likely(skb_queue_len(transmq) < maxwin)) {
 			_skb = skb_clone(skb, GFP_ATOMIC);
-			if (!_skb)
+			if (!_skb) {
+				skb_queue_purge(list);
 				return -ENOBUFS;
+			}
 			__skb_dequeue(list);
 			__skb_queue_tail(transmq, skb);
 			__skb_queue_tail(xmitq, _skb);
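Both link.c hunks serve the same "consumes the chain, except on -ELINKCONG" contract that the node.c comment below documents for tipc_node_xmit(). A caller-side sketch of what that implies (hypothetical wrapper, not part of the patch):

/* With the purges above in place, -ELINKCONG is the only return code
 * after which *list is still valid; a caller that does not want to
 * retry must free the chain itself in exactly that one case. */
static int xmit_once_sketch(struct tipc_link *l, struct sk_buff_head *list,
			    struct sk_buff_head *xmitq)
{
	int rc = tipc_link_xmit(l, list, xmitq);

	if (rc == -ELINKCONG)
		skb_queue_purge(list);	/* give up instead of retrying */
	return rc;			/* all other paths: chain consumed */
}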
diff --git a/net/tipc/node.c b/net/tipc/node.c
index f8a8255a7182..10a1e8717c6f 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1166,7 +1166,7 @@ msg_full:
  * @dnode: address of destination node
  * @selector: a number used for deterministic link selection
  * Consumes the buffer chain, except when returning -ELINKCONG
- * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
+ * Returns 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUF
  */
 int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
 		   u32 dnode, int selector)
@@ -1174,33 +1174,43 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
 	struct tipc_link_entry *le = NULL;
 	struct tipc_node *n;
 	struct sk_buff_head xmitq;
-	int bearer_id = -1;
-	int rc = -EHOSTUNREACH;
+	int bearer_id;
+	int rc;
+
+	if (in_own_node(net, dnode)) {
+		tipc_sk_rcv(net, list);
+		return 0;
+	}
 
-	__skb_queue_head_init(&xmitq);
 	n = tipc_node_find(net, dnode);
-	if (likely(n)) {
-		tipc_node_read_lock(n);
-		bearer_id = n->active_links[selector & 1];
-		if (bearer_id >= 0) {
-			le = &n->links[bearer_id];
-			spin_lock_bh(&le->lock);
-			rc = tipc_link_xmit(le->link, list, &xmitq);
-			spin_unlock_bh(&le->lock);
-		}
+	if (unlikely(!n)) {
+		skb_queue_purge(list);
+		return -EHOSTUNREACH;
+	}
+
+	tipc_node_read_lock(n);
+	bearer_id = n->active_links[selector & 1];
+	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
 		tipc_node_read_unlock(n);
-		if (likely(!rc))
-			tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
-		else if (rc == -ENOBUFS)
-			tipc_node_link_down(n, bearer_id, false);
 		tipc_node_put(n);
-		return rc;
+		skb_queue_purge(list);
+		return -EHOSTUNREACH;
 	}
 
-	if (likely(in_own_node(net, dnode))) {
-		tipc_sk_rcv(net, list);
-		return 0;
-	}
+	__skb_queue_head_init(&xmitq);
+	le = &n->links[bearer_id];
+	spin_lock_bh(&le->lock);
+	rc = tipc_link_xmit(le->link, list, &xmitq);
+	spin_unlock_bh(&le->lock);
+	tipc_node_read_unlock(n);
+
+	if (likely(rc == 0))
+		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+	else if (rc == -ENOBUFS)
+		tipc_node_link_down(n, bearer_id, false);
+
+	tipc_node_put(n);
+
 	return rc;
 }
 
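Taken together, the node.c rewrite is a guard-clause refactor: local delivery, a failed node lookup, and a missing active link each fail fast with one early return that releases exactly what has been acquired so far (node reference, read lock, buffer chain), and setup such as __skb_queue_head_init() is deferred until every guard has passed. A stripped-down skeleton of that shape (the stub predicates are placeholders, not TIPC code):

#include <linux/skbuff.h>
#include <linux/errno.h>

static bool dest_is_local_stub(void) { return false; }	/* placeholder */
static bool node_found_stub(void) { return true; }	/* placeholder */

/* Guard-clause skeleton: each precondition cleans up what it owns and
 * returns; the happy path below carries no pending error state. */
static int node_xmit_skeleton(struct sk_buff_head *list)
{
	struct sk_buff_head xmitq;

	if (dest_is_local_stub())
		return 0;		/* chain consumed by local receive */

	if (!node_found_stub()) {
		skb_queue_purge(list);	/* chain is still ours: free it */
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);	/* setup deferred past the guards */
	/* ...lock, transmit into xmitq, unlock, drain xmitq to the bearer... */
	return 0;
}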