author    Marek Lindner <lindner_marek@yahoo.de>    2011-02-10 09:33:50 -0500
committer Marek Lindner <lindner_marek@yahoo.de>    2011-03-05 06:50:05 -0500
commit    7d2b554826195372764910da2f0dcb0d9b869108 (patch)
tree      364a7b2cdca991b3c6c120e0a1e3bf2c261fb1a1 /net/batman-adv
parent    25b6d3c17eaa92ae9700eb8235bc79782613354a (diff)
batman-adv: Correct rcu refcounting for softif_neigh
It might be possible that two threads access the same data in the same RCU grace period: the first thread calls call_rcu() to decrement the refcount and free the data, while the second thread increases the refcount to use the data. To avoid this race condition, all refcount operations have to be atomic.

Reported-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
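The scheme the patch converts to is the usual RCU-plus-atomic-refcount pattern: a reader that found the object under rcu_read_lock() may only keep using it if atomic_inc_not_zero() succeeds, and the final atomic_dec_and_test() defers the kfree() behind a grace period via call_rcu(). Below is a minimal standalone sketch of that pattern, not the batman-adv sources: the struct is trimmed to the two members the refcounting touches, and softif_neigh_get_ref() is an illustrative helper added here only for clarity (the patch open-codes atomic_inc_not_zero() inside softif_neigh_get()).

/*
 * Illustrative sketch only -- not part of the patch.  Struct trimmed to
 * the two members the refcounting uses; names follow the patched helpers.
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct softif_neigh {
	atomic_t refcount;	/* was "struct kref refcount" before the patch */
	struct rcu_head rcu;
};

/* RCU callback: runs after a grace period, once no rcu_read_lock()
 * section that started before the last put can still see the object. */
static void softif_neigh_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct softif_neigh, rcu));
}

/* Drop a reference; only the final put schedules the deferred free. */
static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
{
	if (atomic_dec_and_test(&softif_neigh->refcount))
		call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
}

/* Reader side (illustrative helper): take a reference only if the object
 * is not already on its way out.  atomic_inc_not_zero() fails once the
 * count has hit zero, so a lookup racing with the final put cannot
 * revive the object. */
static int softif_neigh_get_ref(struct softif_neigh *softif_neigh)
{
	return atomic_inc_not_zero(&softif_neigh->refcount);
}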
Diffstat (limited to 'net/batman-adv')
-rw-r--r--  net/batman-adv/soft-interface.c  31
-rw-r--r--  net/batman-adv/types.h            2
2 files changed, 16 insertions(+), 17 deletions(-)
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 7e37077ed816..152beaafae1d 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -76,20 +76,18 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
 	return 0;
 }
 
-static void softif_neigh_free_ref(struct kref *refcount)
+static void softif_neigh_free_rcu(struct rcu_head *rcu)
 {
 	struct softif_neigh *softif_neigh;
 
-	softif_neigh = container_of(refcount, struct softif_neigh, refcount);
+	softif_neigh = container_of(rcu, struct softif_neigh, rcu);
 	kfree(softif_neigh);
 }
 
-static void softif_neigh_free_rcu(struct rcu_head *rcu)
+static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
 {
-	struct softif_neigh *softif_neigh;
-
-	softif_neigh = container_of(rcu, struct softif_neigh, rcu);
-	kref_put(&softif_neigh->refcount, softif_neigh_free_ref);
+	if (atomic_dec_and_test(&softif_neigh->refcount))
+		call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
 }
 
 void softif_neigh_purge(struct bat_priv *bat_priv)
@@ -116,11 +114,10 @@ void softif_neigh_purge(struct bat_priv *bat_priv)
 				 softif_neigh->addr, softif_neigh->vid);
 			softif_neigh_tmp = bat_priv->softif_neigh;
 			bat_priv->softif_neigh = NULL;
-			kref_put(&softif_neigh_tmp->refcount,
-				softif_neigh_free_ref);
+			softif_neigh_free_ref(softif_neigh_tmp);
 		}
 
-		call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
+		softif_neigh_free_ref(softif_neigh);
 	}
 
 	spin_unlock_bh(&bat_priv->softif_neigh_lock);
@@ -141,8 +138,11 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
 		if (softif_neigh->vid != vid)
 			continue;
 
+		if (!atomic_inc_not_zero(&softif_neigh->refcount))
+			continue;
+
 		softif_neigh->last_seen = jiffies;
-		goto found;
+		goto out;
 	}
 
 	softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC);
@@ -152,15 +152,14 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
 	memcpy(softif_neigh->addr, addr, ETH_ALEN);
 	softif_neigh->vid = vid;
 	softif_neigh->last_seen = jiffies;
-	kref_init(&softif_neigh->refcount);
+	/* initialize with 2 - caller decrements counter by one */
+	atomic_set(&softif_neigh->refcount, 2);
 
 	INIT_HLIST_NODE(&softif_neigh->list);
 	spin_lock_bh(&bat_priv->softif_neigh_lock);
 	hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list);
 	spin_unlock_bh(&bat_priv->softif_neigh_lock);
 
-found:
-	kref_get(&softif_neigh->refcount);
 out:
 	rcu_read_unlock();
 	return softif_neigh;
@@ -264,7 +263,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
 			 softif_neigh->addr, softif_neigh->vid);
 		softif_neigh_tmp = bat_priv->softif_neigh;
 		bat_priv->softif_neigh = softif_neigh;
-		kref_put(&softif_neigh_tmp->refcount, softif_neigh_free_ref);
+		softif_neigh_free_ref(softif_neigh_tmp);
 		/* we need to hold the additional reference */
 		goto err;
 	}
@@ -282,7 +281,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
 	}
 
 out:
-	kref_put(&softif_neigh->refcount, softif_neigh_free_ref);
+	softif_neigh_free_ref(softif_neigh);
 err:
 	kfree_skb(skb);
 	return;
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index cfbeb45cd9b3..96f7c224975b 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -268,7 +268,7 @@ struct softif_neigh {
 	uint8_t addr[ETH_ALEN];
 	unsigned long last_seen;
 	short vid;
-	struct kref refcount;
+	atomic_t refcount;
 	struct rcu_head rcu;
 };
 
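After this change the caller-side contract is: softif_neigh_get() returns the neighbour with one reference already held (that is why the count starts at 2: one for the hash list, one for the caller), and the caller drops it with softif_neigh_free_ref() when done. A hypothetical caller inside soft-interface.c, sketched purely to show that contract; the parameter types of softif_neigh_get() are inferred from the hunks above:

/* Hypothetical caller, for illustration only -- not part of the patch. */
static void softif_neigh_example(struct bat_priv *bat_priv,
				 uint8_t *addr, short vid)
{
	struct softif_neigh *softif_neigh;

	softif_neigh = softif_neigh_get(bat_priv, addr, vid);
	if (!softif_neigh)
		return;

	/* ... use softif_neigh->addr / softif_neigh->vid here ... */

	/* release the reference softif_neigh_get() handed us */
	softif_neigh_free_ref(softif_neigh);
}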