author    Marek Lindner <lindner_marek@yahoo.de>  2011-02-10 09:33:53 -0500
committer Marek Lindner <lindner_marek@yahoo.de>  2011-03-05 06:50:03 -0500
commit    44524fcdf6ca19b58c24f7622c4af1d8d8fe59f8 (patch)
tree      297c76f80d68d56e3c65a23c70de645a1c93df47 /net/batman-adv/unicast.c
parent    a4c135c561106c397bae33455acfca4aa8065a30 (diff)
batman-adv: Correct rcu refcounting for neigh_node
It might be possible that two threads access the same data in the same
rcu grace period: the first thread calls call_rcu() to decrement the
refcount and free the data, while the second thread increases the
refcount to use the data. To avoid this race condition, all refcount
operations have to be atomic.

Reported-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
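For illustration, here is a minimal userspace sketch of the pattern the fix
relies on. This is plain C11 atomics, not batman-adv code; every identifier
below (obj_get, obj_put, struct obj) is hypothetical. The point is that the
"get" side must be an atomic increment-if-not-zero: a non-atomic increment
could resurrect an object whose last reference was dropped in the same grace
period.

/*
 * Illustrative sketch only -- NOT batman-adv code. Shows why taking a
 * reference must atomically refuse objects whose count already hit zero.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;		/* starts at 1 for the creator */
};

/* Take a reference only if the object is still alive; the CAS loop is
 * the userspace analogue of the kernel's atomic_inc_not_zero(). */
static bool obj_get(struct obj *o)
{
	int cur = atomic_load(&o->refcount);

	while (cur > 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &cur, cur + 1))
			return true;	/* reference taken */
	}
	return false;	/* already dying; caller must not touch it */
}

/* Drop a reference; the last put frees. A kernel would defer the free
 * past the grace period with call_rcu() instead of freeing directly. */
static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refcount, 1);

	if (obj_get(o))		/* second user takes a reference */
		obj_put(o);	/* ... and drops it again */
	obj_put(o);		/* creator's put frees the object */
	return 0;
}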
Diffstat (limited to 'net/batman-adv/unicast.c')
 net/batman-adv/unicast.c | 57 ++++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 34 insertions(+), 23 deletions(-)
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 00bfeaf9ece3..7ca994ccac1d 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -285,38 +285,42 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
 	struct unicast_packet *unicast_packet;
 	struct orig_node *orig_node = NULL;
 	struct batman_if *batman_if;
-	struct neigh_node *router;
+	struct neigh_node *neigh_node;
 	int data_len = skb->len;
 	uint8_t dstaddr[6];
+	int ret = 1;
 
 	spin_lock_bh(&bat_priv->orig_hash_lock);
 
 	/* get routing information */
 	if (is_multicast_ether_addr(ethhdr->h_dest))
 		orig_node = (struct orig_node *)gw_get_selected(bat_priv);
+	if (orig_node) {
+		kref_get(&orig_node->refcount);
+		goto find_router;
+	}
 
-	/* check for hna host */
-	if (!orig_node)
-		orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+	/* check for hna host - increases orig_node refcount */
+	orig_node = transtable_search(bat_priv, ethhdr->h_dest);
 
+find_router:
 	/* find_router() increases neigh_nodes refcount if found. */
-	router = find_router(bat_priv, orig_node, NULL);
+	neigh_node = find_router(bat_priv, orig_node, NULL);
 
-	if (!router)
+	if (!neigh_node)
 		goto unlock;
 
-	/* don't lock while sending the packets ... we therefore
-	 * copy the required data before sending */
-	batman_if = router->if_incoming;
-	memcpy(dstaddr, router->addr, ETH_ALEN);
-
-	spin_unlock_bh(&bat_priv->orig_hash_lock);
-
-	if (batman_if->if_status != IF_ACTIVE)
-		goto dropped;
+	if (neigh_node->if_incoming->if_status != IF_ACTIVE)
+		goto unlock;
 
 	if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
-		goto dropped;
+		goto unlock;
+
+	/* don't lock while sending the packets ... we therefore
+	 * copy the required data before sending */
+	batman_if = neigh_node->if_incoming;
+	memcpy(dstaddr, neigh_node->addr, ETH_ALEN);
+	spin_unlock_bh(&bat_priv->orig_hash_lock);
 
 	unicast_packet = (struct unicast_packet *)skb->data;
 
@@ -330,18 +334,25 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
 
 	if (atomic_read(&bat_priv->fragmentation) &&
 	    data_len + sizeof(struct unicast_packet) >
 		batman_if->net_dev->mtu) {
 		/* send frag skb decreases ttl */
 		unicast_packet->ttl++;
-		return frag_send_skb(skb, bat_priv, batman_if,
-				     dstaddr);
+		ret = frag_send_skb(skb, bat_priv, batman_if, dstaddr);
+		goto out;
 	}
+
 	send_skb_packet(skb, batman_if, dstaddr);
-	return 0;
+	ret = 0;
+	goto out;
 
 unlock:
 	spin_unlock_bh(&bat_priv->orig_hash_lock);
-dropped:
-	kfree_skb(skb);
-	return 1;
+out:
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (orig_node)
+		kref_put(&orig_node->refcount, orig_node_free_ref);
+	if (ret == 1)
+		kfree_skb(skb);
+	return ret;
 }
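A note on the control-flow change visible above: the patch replaces the
separate dropped:/early-return exits with a single out: label that releases
the neigh_node and orig_node references and frees the skb exactly once on
every path. A condensed, self-contained sketch of that shape follows; the
stand-in types and helpers (skb_stub, neigh_stub, lookup_neigh, neigh_put,
transmit, send_example) are hypothetical, not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the kernel objects involved. */
struct skb_stub   { int len; };
struct neigh_stub { int refcount; };

static struct neigh_stub *lookup_neigh(void)
{
	struct neigh_stub *n = malloc(sizeof(*n));

	if (n)
		n->refcount = 1;	/* lookup hands back a reference */
	return n;
}

static void neigh_put(struct neigh_stub *n)
{
	if (--n->refcount == 0)
		free(n);
}

static int transmit(struct skb_stub *skb, struct neigh_stub *n)
{
	(void)n;
	printf("sent %d bytes\n", skb->len);
	free(skb);			/* the send path consumes the skb */
	return 0;			/* 0 on success, as in the patch */
}

/* Single-exit shape: every failure jumps to out:, where the reference
 * and the skb are released exactly once, whichever path was taken. */
static int send_example(struct skb_stub *skb)
{
	struct neigh_stub *neigh = NULL;
	int ret = 1;			/* pessimistic default: failure */

	neigh = lookup_neigh();
	if (!neigh)
		goto out;

	ret = transmit(skb, neigh);
out:
	if (neigh)
		neigh_put(neigh);	/* release exactly once */
	if (ret == 1)
		free(skb);		/* failed paths still own the skb */
	return ret;
}

int main(void)
{
	struct skb_stub *skb = malloc(sizeof(*skb));

	if (!skb)
		return 1;
	skb->len = 100;
	return send_example(skb);
}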