path: root/net/batman-adv/send.c
author     Marek Lindner <lindner_marek@yahoo.de>  2011-04-20 09:40:58 -0400
committer  Sven Eckelmann <sven@narfation.org>     2011-05-01 16:49:03 -0400
commit     32ae9b221e788413ce68feaae2ca39e406211a0a (patch)
tree       d827f989976a28fea5cdcb349c308baa98182c35 /net/batman-adv/send.c
parent     71e4aa9c465fd66c110667ab5d620fb6a4ef2157 (diff)
batman-adv: Make bat_priv->primary_if an rcu protected pointer
The rcu protected macros rcu_dereference() and rcu_assign_pointer() for the bat_priv->primary_if need to be used, as well as spin/rcu locking. Otherwise we might end up using a primary_if pointer pointing to already freed memory.

Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
Signed-off-by: Sven Eckelmann <sven@narfation.org>
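The helpers used in the hunks below, primary_if_get_selected() and hardif_free_ref(), are introduced elsewhere in this patch (in hardif.h) and are not part of this file's diff. As a rough sketch of the reader-side pattern the commit message describes (rcu_dereference() under rcu_read_lock(), plus a reference count so the pointer stays usable after the RCU read side is unlocked), they look approximately like this; the field names refcount and rcu and the hardif_free_rcu() callback are assumptions for illustration, not taken from this diff:

#include <linux/rcupdate.h>
#include <linux/atomic.h>

/* Sketch only: fetch the currently selected primary interface and take a
 * reference on it, so it cannot be freed while the caller is using it. */
static inline struct hard_iface *primary_if_get_selected(struct bat_priv *bat_priv)
{
	struct hard_iface *hard_iface;

	rcu_read_lock();
	hard_iface = rcu_dereference(bat_priv->primary_if);
	if (!hard_iface)
		goto out;

	/* skip an interface that is already on its way out */
	if (!atomic_inc_not_zero(&hard_iface->refcount))
		hard_iface = NULL;

out:
	rcu_read_unlock();
	return hard_iface;
}

/* Sketch only: drop a reference; free via RCU once the last one is gone. */
static inline void hardif_free_ref(struct hard_iface *hard_iface)
{
	if (atomic_dec_and_test(&hard_iface->refcount))
		call_rcu(&hard_iface->rcu, hardif_free_rcu);
}

Every successful primary_if_get_selected() has to be paired with a hardif_free_ref(), which is exactly what the new hunks do at the end of schedule_own_packet() and in the out: path of add_bcast_packet_to_list().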
Diffstat (limited to 'net/batman-adv/send.c')
-rw-r--r--  net/batman-adv/send.c  17
1 file changed, 13 insertions, 4 deletions
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 7650e2bf187d..02b541a6dfef 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -244,6 +244,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 void schedule_own_packet(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+	struct hard_iface *primary_if;
 	unsigned long send_time;
 	struct batman_packet *batman_packet;
 	int vis_server;
@@ -253,6 +254,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 		return;
 
 	vis_server = atomic_read(&bat_priv->vis_mode);
+	primary_if = primary_if_get_selected(bat_priv);
 
 	/**
 	 * the interface gets activated here to avoid race conditions between
@@ -266,7 +268,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 
 	/* if local hna has changed and interface is a primary interface */
 	if ((atomic_read(&bat_priv->hna_local_changed)) &&
-	    (hard_iface == bat_priv->primary_if))
+	    (hard_iface == primary_if))
 		rebuild_batman_packet(bat_priv, hard_iface);
 
 	/**
@@ -284,7 +286,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 	else
 		batman_packet->flags &= ~VIS_SERVER;
 
-	if ((hard_iface == bat_priv->primary_if) &&
+	if ((hard_iface == primary_if) &&
 	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
 		batman_packet->gw_flags =
 			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
@@ -299,6 +301,9 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 			   hard_iface->packet_buff,
 			   hard_iface->packet_len,
 			   hard_iface, 1, send_time);
+
+	if (primary_if)
+		hardif_free_ref(primary_if);
 }
 
 void schedule_forward_packet(struct orig_node *orig_node,
@@ -403,6 +408,7 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
  * skb is freed. */
 int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
 {
+	struct hard_iface *primary_if = NULL;
 	struct forw_packet *forw_packet;
 	struct bcast_packet *bcast_packet;
 
@@ -411,7 +417,8 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
 		goto out;
 	}
 
-	if (!bat_priv->primary_if)
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
 		goto out;
 
 	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
@@ -430,7 +437,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
 	skb_reset_mac_header(skb);
 
 	forw_packet->skb = skb;
-	forw_packet->if_incoming = bat_priv->primary_if;
+	forw_packet->if_incoming = primary_if;
 
 	/* how often did we send the bcast packet ? */
 	forw_packet->num_packets = 0;
@@ -443,6 +450,8 @@ packet_free:
 out_and_inc:
 	atomic_inc(&bat_priv->bcast_queue_left);
 out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 	return NETDEV_TX_BUSY;
 }
 
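The hunks above only cover the read side. The write side mentioned in the commit message, rcu_assign_pointer() together with spin locking, lives outside send.c and is not shown here; a minimal sketch of that publishing step, with the lock name and the reference handling assumed for illustration, could look like this:

/* Sketch only: publish a new primary interface. The lock (hardif_list_lock)
 * and the reference juggling are assumptions, not taken from this diff. */
static void primary_if_select(struct bat_priv *bat_priv,
			      struct hard_iface *new_hard_iface)
{
	struct hard_iface *curr_hard_iface;

	spin_lock_bh(&hardif_list_lock);

	/* take a reference on the new interface before publishing it */
	if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount))
		new_hard_iface = NULL;

	curr_hard_iface = bat_priv->primary_if;
	rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);

	spin_unlock_bh(&hardif_list_lock);

	/* readers that grabbed the old pointer hold their own reference,
	 * so dropping ours here is safe */
	if (curr_hard_iface)
		hardif_free_ref(curr_hard_iface);
}

Readers never block the writer: rcu_assign_pointer() makes the new pointer visible with the required memory ordering, and the per-interface reference count keeps the old hard_iface alive until the last reader calls hardif_free_ref().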