diff options
author | Sven Eckelmann <sven@narfation.org> | 2011-05-11 14:59:06 -0400 |
---|---|---|
committer | Sven Eckelmann <sven@narfation.org> | 2011-05-14 18:02:06 -0400 |
commit | 6d5808d4ae1b0851c3b732d9ec2860d5f7804294 (patch) | |
tree | 48e9dd4ba240daf2c252d715b89835f066e85fe7 | |
parent | 27aea2128ec09924dfe08e97739b2bf8b15c8619 (diff) |
batman-adv: Add missing hardif_free_ref in forw_packet_free
add_bcast_packet_to_list increases the refcount for if_incoming but the
reference count is never decreased. The reference count must be
increased for all kinds of forwarded packets which have the primary
interface stored, and forw_packet_free must decrease them. Also,
purge_outstanding_packets has to invoke forw_packet_free only when a work
item was actually cancelled (i.e. cancel_delayed_work_sync() returned
true); otherwise the work function itself runs and frees the packet.
This regression was introduced in commit
32ae9b221e788413ce68feaae2ca39e406211a0a.
Reported-by: Antonio Quartulli <ordex@autistici.org>
Signed-off-by: Sven Eckelmann <sven@narfation.org>
-rw-r--r-- | net/batman-adv/aggregation.c | 14 | ||||
-rw-r--r-- | net/batman-adv/send.c | 17 |
2 files changed, 26 insertions(+), 5 deletions(-)
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c index 9b9459024479..a8c32030527c 100644 --- a/net/batman-adv/aggregation.c +++ b/net/batman-adv/aggregation.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include "aggregation.h" | 23 | #include "aggregation.h" |
24 | #include "send.h" | 24 | #include "send.h" |
25 | #include "routing.h" | 25 | #include "routing.h" |
26 | #include "hard-interface.h" | ||
26 | 27 | ||
27 | /* calculate the size of the tt information for a given packet */ | 28 | /* calculate the size of the tt information for a given packet */ |
28 | static int tt_len(struct batman_packet *batman_packet) | 29 | static int tt_len(struct batman_packet *batman_packet) |
@@ -105,12 +106,15 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len, | |||
105 | struct forw_packet *forw_packet_aggr; | 106 | struct forw_packet *forw_packet_aggr; |
106 | unsigned char *skb_buff; | 107 | unsigned char *skb_buff; |
107 | 108 | ||
109 | if (!atomic_inc_not_zero(&if_incoming->refcount)) | ||
110 | return; | ||
111 | |||
108 | /* own packet should always be scheduled */ | 112 | /* own packet should always be scheduled */ |
109 | if (!own_packet) { | 113 | if (!own_packet) { |
110 | if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) { | 114 | if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) { |
111 | bat_dbg(DBG_BATMAN, bat_priv, | 115 | bat_dbg(DBG_BATMAN, bat_priv, |
112 | "batman packet queue full\n"); | 116 | "batman packet queue full\n"); |
113 | return; | 117 | goto out; |
114 | } | 118 | } |
115 | } | 119 | } |
116 | 120 | ||
@@ -118,7 +122,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len, | |||
118 | if (!forw_packet_aggr) { | 122 | if (!forw_packet_aggr) { |
119 | if (!own_packet) | 123 | if (!own_packet) |
120 | atomic_inc(&bat_priv->batman_queue_left); | 124 | atomic_inc(&bat_priv->batman_queue_left); |
121 | return; | 125 | goto out; |
122 | } | 126 | } |
123 | 127 | ||
124 | if ((atomic_read(&bat_priv->aggregated_ogms)) && | 128 | if ((atomic_read(&bat_priv->aggregated_ogms)) && |
@@ -133,7 +137,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len, | |||
133 | if (!own_packet) | 137 | if (!own_packet) |
134 | atomic_inc(&bat_priv->batman_queue_left); | 138 | atomic_inc(&bat_priv->batman_queue_left); |
135 | kfree(forw_packet_aggr); | 139 | kfree(forw_packet_aggr); |
136 | return; | 140 | goto out; |
137 | } | 141 | } |
138 | skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr)); | 142 | skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr)); |
139 | 143 | ||
@@ -164,6 +168,10 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len, | |||
164 | queue_delayed_work(bat_event_workqueue, | 168 | queue_delayed_work(bat_event_workqueue, |
165 | &forw_packet_aggr->delayed_work, | 169 | &forw_packet_aggr->delayed_work, |
166 | send_time - jiffies); | 170 | send_time - jiffies); |
171 | |||
172 | return; | ||
173 | out: | ||
174 | hardif_free_ref(if_incoming); | ||
167 | } | 175 | } |
168 | 176 | ||
169 | /* aggregate a new packet into the existing aggregation */ | 177 | /* aggregate a new packet into the existing aggregation */ |
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index f30d0c69ccbb..76daa46efe19 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c | |||
@@ -377,6 +377,8 @@ static void forw_packet_free(struct forw_packet *forw_packet) | |||
377 | { | 377 | { |
378 | if (forw_packet->skb) | 378 | if (forw_packet->skb) |
379 | kfree_skb(forw_packet->skb); | 379 | kfree_skb(forw_packet->skb); |
380 | if (forw_packet->if_incoming) | ||
381 | hardif_free_ref(forw_packet->if_incoming); | ||
380 | kfree(forw_packet); | 382 | kfree(forw_packet); |
381 | } | 383 | } |
382 | 384 | ||
@@ -539,6 +541,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv, | |||
539 | { | 541 | { |
540 | struct forw_packet *forw_packet; | 542 | struct forw_packet *forw_packet; |
541 | struct hlist_node *tmp_node, *safe_tmp_node; | 543 | struct hlist_node *tmp_node, *safe_tmp_node; |
544 | bool pending; | ||
542 | 545 | ||
543 | if (hard_iface) | 546 | if (hard_iface) |
544 | bat_dbg(DBG_BATMAN, bat_priv, | 547 | bat_dbg(DBG_BATMAN, bat_priv, |
@@ -567,8 +570,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv, | |||
567 | * send_outstanding_bcast_packet() will lock the list to | 570 | * send_outstanding_bcast_packet() will lock the list to |
568 | * delete the item from the list | 571 | * delete the item from the list |
569 | */ | 572 | */ |
570 | cancel_delayed_work_sync(&forw_packet->delayed_work); | 573 | pending = cancel_delayed_work_sync(&forw_packet->delayed_work); |
571 | spin_lock_bh(&bat_priv->forw_bcast_list_lock); | 574 | spin_lock_bh(&bat_priv->forw_bcast_list_lock); |
575 | |||
576 | if (pending) { | ||
577 | hlist_del(&forw_packet->list); | ||
578 | forw_packet_free(forw_packet); | ||
579 | } | ||
572 | } | 580 | } |
573 | spin_unlock_bh(&bat_priv->forw_bcast_list_lock); | 581 | spin_unlock_bh(&bat_priv->forw_bcast_list_lock); |
574 | 582 | ||
@@ -591,8 +599,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv, | |||
591 | * send_outstanding_bat_packet() will lock the list to | 599 | * send_outstanding_bat_packet() will lock the list to |
592 | * delete the item from the list | 600 | * delete the item from the list |
593 | */ | 601 | */ |
594 | cancel_delayed_work_sync(&forw_packet->delayed_work); | 602 | pending = cancel_delayed_work_sync(&forw_packet->delayed_work); |
595 | spin_lock_bh(&bat_priv->forw_bat_list_lock); | 603 | spin_lock_bh(&bat_priv->forw_bat_list_lock); |
604 | |||
605 | if (pending) { | ||
606 | hlist_del(&forw_packet->list); | ||
607 | forw_packet_free(forw_packet); | ||
608 | } | ||
596 | } | 609 | } |
597 | spin_unlock_bh(&bat_priv->forw_bat_list_lock); | 610 | spin_unlock_bh(&bat_priv->forw_bat_list_lock); |
598 | } | 611 | } |