path: root/net/batman-adv/originator.c
author		Marek Lindner <lindner_marek@yahoo.de>	2011-02-10 09:33:53 -0500
committer	Marek Lindner <lindner_marek@yahoo.de>	2011-03-05 06:50:03 -0500
commit		44524fcdf6ca19b58c24f7622c4af1d8d8fe59f8 (patch)
tree		297c76f80d68d56e3c65a23c70de645a1c93df47 /net/batman-adv/originator.c
parent		a4c135c561106c397bae33455acfca4aa8065a30 (diff)
batman-adv: Correct rcu refcounting for neigh_node
It might be possible that 2 threads access the same data in the same rcu
grace period. The first thread calls call_rcu() to decrement the refcount
and free the data while the second thread increases the refcount to use
the data. To avoid this race condition all refcount operations have to be
atomic.

Reported-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
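For context, a minimal sketch of the reader side the message alludes to ("the second thread increases the refcount to use the data"): the reference has to be taken atomically while the object is still protected by the RCU read-side critical section, so that the release path below (atomic_dec_and_test() plus call_rcu()) can never free the object out from under a user. The lookup helper and the atomic_inc_not_zero() variant are illustrative assumptions, not code from this patch:

	/* Illustrative reader (not part of this diff): take a reference
	 * while still inside the RCU read-side critical section, use the
	 * object afterwards, then drop it through neigh_node_free_ref(),
	 * which defers the kfree() to an RCU callback.  find_neigh_node()
	 * is a hypothetical lookup helper.
	 */
	struct neigh_node *neigh_node;

	rcu_read_lock();
	neigh_node = find_neigh_node(orig_node, addr);	/* hypothetical */
	if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
		neigh_node = NULL;	/* lost the race: object is being freed */
	rcu_read_unlock();

	if (neigh_node) {
		/* ... use neigh_node safely outside the RCU section ... */
		neigh_node_free_ref(neigh_node);
	}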
Diffstat (limited to 'net/batman-adv/originator.c')
-rw-r--r--	net/batman-adv/originator.c	26
1 file changed, 8 insertions, 18 deletions
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index a85eadca6b2d..61299da82c6b 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -59,28 +59,18 @@ err:
 	return 0;
 }
 
-void neigh_node_free_ref(struct kref *refcount)
-{
-	struct neigh_node *neigh_node;
-
-	neigh_node = container_of(refcount, struct neigh_node, refcount);
-	kfree(neigh_node);
-}
-
 static void neigh_node_free_rcu(struct rcu_head *rcu)
 {
 	struct neigh_node *neigh_node;
 
 	neigh_node = container_of(rcu, struct neigh_node, rcu);
-	kref_put(&neigh_node->refcount, neigh_node_free_ref);
+	kfree(neigh_node);
 }
 
-void neigh_node_free_rcu_bond(struct rcu_head *rcu)
+void neigh_node_free_ref(struct neigh_node *neigh_node)
 {
-	struct neigh_node *neigh_node;
-
-	neigh_node = container_of(rcu, struct neigh_node, rcu_bond);
-	kref_put(&neigh_node->refcount, neigh_node_free_ref);
+	if (atomic_dec_and_test(&neigh_node->refcount))
+		call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
 }
 
 struct neigh_node *create_neighbor(struct orig_node *orig_node,
@@ -104,7 +94,7 @@ struct neigh_node *create_neighbor(struct orig_node *orig_node,
 	memcpy(neigh_node->addr, neigh, ETH_ALEN);
 	neigh_node->orig_node = orig_neigh_node;
 	neigh_node->if_incoming = if_incoming;
-	kref_init(&neigh_node->refcount);
+	atomic_set(&neigh_node->refcount, 1);
 
 	spin_lock_bh(&orig_node->neigh_list_lock);
 	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
@@ -126,14 +116,14 @@ void orig_node_free_ref(struct kref *refcount)
 	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
 				 &orig_node->bond_list, bonding_list) {
 		list_del_rcu(&neigh_node->bonding_list);
-		call_rcu(&neigh_node->rcu_bond, neigh_node_free_rcu_bond);
+		neigh_node_free_ref(neigh_node);
 	}
 
 	/* for all neighbors towards this originator ... */
 	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
 				  &orig_node->neigh_list, list) {
 		hlist_del_rcu(&neigh_node->list);
-		call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
+		neigh_node_free_ref(neigh_node);
 	}
 
 	spin_unlock_bh(&orig_node->neigh_list_lock);
@@ -315,7 +305,7 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
 
 			hlist_del_rcu(&neigh_node->list);
 			bonding_candidate_del(orig_node, neigh_node);
-			call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
+			neigh_node_free_ref(neigh_node);
 		} else {
 			if ((!*best_neigh_node) ||
 			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
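For reference, a rough sketch of the struct neigh_node members this diff relies on; the real declaration lives in types.h, which is outside this page's path filter, and field types other than refcount and rcu are inferred from usage in originator.c rather than quoted:

	struct neigh_node {
		struct hlist_node list;		/* hlist_add_head_rcu()/hlist_del_rcu() */
		uint8_t addr[ETH_ALEN];		/* memcpy() in create_neighbor() */
		struct list_head bonding_list;	/* list_del_rcu() in orig_node_free_ref() */
		atomic_t refcount;		/* was a struct kref before this patch */
		struct rcu_head rcu;		/* single rcu_head; rcu_bond no longer needed */
		struct orig_node *orig_node;
		/* ... tq_avg, if_incoming and other members omitted ... */
	};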