diff options
author | Sven Eckelmann <sven@narfation.org> | 2011-05-11 14:59:06 -0400 |
---|---|---|
committer | Sven Eckelmann <sven@narfation.org> | 2011-05-14 18:02:06 -0400 |
commit | 6d5808d4ae1b0851c3b732d9ec2860d5f7804294 (patch) | |
tree | 48e9dd4ba240daf2c252d715b89835f066e85fe7 /net/batman-adv/send.c | |
parent | 27aea2128ec09924dfe08e97739b2bf8b15c8619 (diff) |
batman-adv: Add missing hardif_free_ref in forw_packet_free
add_bcast_packet_to_list increases the refcount for if_incoming, but the
reference count is never decreased. The reference count must be
increased for all kinds of forwarded packets that have the primary
interface stored, and forw_packet_free must decrease it. Also,
purge_outstanding_packets has to invoke forw_packet_free when a work
item was actually cancelled.
This regression was introduced in commit
32ae9b221e788413ce68feaae2ca39e406211a0a.
Reported-by: Antonio Quartulli <ordex@autistici.org>
Signed-off-by: Sven Eckelmann <sven@narfation.org>
Diffstat (limited to 'net/batman-adv/send.c')
-rw-r--r-- | net/batman-adv/send.c | 17 |
1 files changed, 15 insertions, 2 deletions
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index f30d0c69ccbb..76daa46efe19 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c | |||
@@ -377,6 +377,8 @@ static void forw_packet_free(struct forw_packet *forw_packet) | |||
377 | { | 377 | { |
378 | if (forw_packet->skb) | 378 | if (forw_packet->skb) |
379 | kfree_skb(forw_packet->skb); | 379 | kfree_skb(forw_packet->skb); |
380 | if (forw_packet->if_incoming) | ||
381 | hardif_free_ref(forw_packet->if_incoming); | ||
380 | kfree(forw_packet); | 382 | kfree(forw_packet); |
381 | } | 383 | } |
382 | 384 | ||
@@ -539,6 +541,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv, | |||
539 | { | 541 | { |
540 | struct forw_packet *forw_packet; | 542 | struct forw_packet *forw_packet; |
541 | struct hlist_node *tmp_node, *safe_tmp_node; | 543 | struct hlist_node *tmp_node, *safe_tmp_node; |
544 | bool pending; | ||
542 | 545 | ||
543 | if (hard_iface) | 546 | if (hard_iface) |
544 | bat_dbg(DBG_BATMAN, bat_priv, | 547 | bat_dbg(DBG_BATMAN, bat_priv, |
@@ -567,8 +570,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv, | |||
567 | * send_outstanding_bcast_packet() will lock the list to | 570 | * send_outstanding_bcast_packet() will lock the list to |
568 | * delete the item from the list | 571 | * delete the item from the list |
569 | */ | 572 | */ |
570 | cancel_delayed_work_sync(&forw_packet->delayed_work); | 573 | pending = cancel_delayed_work_sync(&forw_packet->delayed_work); |
571 | spin_lock_bh(&bat_priv->forw_bcast_list_lock); | 574 | spin_lock_bh(&bat_priv->forw_bcast_list_lock); |
575 | |||
576 | if (pending) { | ||
577 | hlist_del(&forw_packet->list); | ||
578 | forw_packet_free(forw_packet); | ||
579 | } | ||
572 | } | 580 | } |
573 | spin_unlock_bh(&bat_priv->forw_bcast_list_lock); | 581 | spin_unlock_bh(&bat_priv->forw_bcast_list_lock); |
574 | 582 | ||
@@ -591,8 +599,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv, | |||
591 | * send_outstanding_bat_packet() will lock the list to | 599 | * send_outstanding_bat_packet() will lock the list to |
592 | * delete the item from the list | 600 | * delete the item from the list |
593 | */ | 601 | */ |
594 | cancel_delayed_work_sync(&forw_packet->delayed_work); | 602 | pending = cancel_delayed_work_sync(&forw_packet->delayed_work); |
595 | spin_lock_bh(&bat_priv->forw_bat_list_lock); | 603 | spin_lock_bh(&bat_priv->forw_bat_list_lock); |
604 | |||
605 | if (pending) { | ||
606 | hlist_del(&forw_packet->list); | ||
607 | forw_packet_free(forw_packet); | ||
608 | } | ||
596 | } | 609 | } |
597 | spin_unlock_bh(&bat_priv->forw_bat_list_lock); | 610 | spin_unlock_bh(&bat_priv->forw_bat_list_lock); |
598 | } | 611 | } |