author	Ying Xue <ying.xue@windriver.com>	2014-03-27 00:54:32 -0400
committer	David S. Miller <davem@davemloft.net>	2014-03-27 13:08:36 -0400
commit	78dfb789b69f161703ef322a0c2e3e61c7f7573a (patch)
tree	56d0b040948715a0e941a6a6c0b2440d14d731ed /net
parent	5902385a2440a55f005b266c93e0bb9398e5a62b (diff)
tipc: acquire necessary locks in named_cluster_distribute routine
The 'tipc_node_list' is guarded by tipc_net_lock, and the 'links' array defined in the 'tipc_node' structure is protected by the node lock. Without acquiring these two locks in named_cluster_distribute(), a fatal oops may occur if a destroyed link is obtained and then accessed. Therefore, both locks must be held in named_cluster_distribute() to prevent this from happening.

As the 'links' array in the node structure must be protected by the node lock, the code that selects an active link is moved from tipc_link_xmit() to named_cluster_distribute(), which then calls __tipc_link_xmit() with the selected link to deliver name messages.

Signed-off-by: Ying Xue <ying.xue@windriver.com>
Reviewed-by: Erik Hugne <erik.hugne@ericsson.com>
Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
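For readability, here is a sketch of named_cluster_distribute() as it reads with this patch applied, reconstructed from the hunk below (nothing outside that hunk is assumed to change); the comments summarize the locking described above:

static void named_cluster_distribute(struct sk_buff *buf)
{
	struct sk_buff *buf_copy;
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;

	/* tipc_net_lock guards the global tipc_node_list */
	read_lock_bh(&tipc_net_lock);
	list_for_each_entry(n_ptr, &tipc_node_list, list) {
		/* the node lock guards the node's links, so the active link
		 * is selected here rather than inside tipc_link_xmit()
		 */
		spin_lock_bh(&n_ptr->lock);
		l_ptr = n_ptr->active_links[n_ptr->addr & 1];
		if (l_ptr) {
			buf_copy = skb_copy(buf, GFP_ATOMIC);
			if (!buf_copy) {
				spin_unlock_bh(&n_ptr->lock);
				break;
			}
			msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
			/* deliver via the link chosen under the node lock */
			__tipc_link_xmit(l_ptr, buf_copy);
		}
		spin_unlock_bh(&n_ptr->lock);
	}
	read_unlock_bh(&tipc_net_lock);

	kfree_skb(buf);
}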
Diffstat (limited to 'net')
-rw-r--r--	net/tipc/name_distr.c	14
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 893c49a3d98a..c5904d196cd3 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -131,16 +131,24 @@ static void named_cluster_distribute(struct sk_buff *buf)
 {
 	struct sk_buff *buf_copy;
 	struct tipc_node *n_ptr;
+	struct tipc_link *l_ptr;
 
+	read_lock_bh(&tipc_net_lock);
 	list_for_each_entry(n_ptr, &tipc_node_list, list) {
-		if (tipc_node_active_links(n_ptr)) {
+		spin_lock_bh(&n_ptr->lock);
+		l_ptr = n_ptr->active_links[n_ptr->addr & 1];
+		if (l_ptr) {
 			buf_copy = skb_copy(buf, GFP_ATOMIC);
-			if (!buf_copy)
+			if (!buf_copy) {
+				spin_unlock_bh(&n_ptr->lock);
 				break;
+			}
 			msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
-			tipc_link_xmit(buf_copy, n_ptr->addr, n_ptr->addr);
+			__tipc_link_xmit(l_ptr, buf_copy);
 		}
+		spin_unlock_bh(&n_ptr->lock);
 	}
+	read_unlock_bh(&tipc_net_lock);
 
 	kfree_skb(buf);
 }