path: root/net/batman-adv/send.c
Diffstat (limited to 'net/batman-adv/send.c')
-rw-r--r--  net/batman-adv/send.c  237
1 files changed, 85 insertions, 152 deletions
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index f47299f22c68..3b4b2daa3b3e 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -16,7 +15,6 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  * 02110-1301, USA
- *
  */
 
 #include "main.h"
@@ -29,16 +27,18 @@
 #include "gateway_common.h"
 #include "originator.h"
 
-static void send_outstanding_bcast_packet(struct work_struct *work);
+static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
 
 /* send out an already prepared packet to the given address via the
- * specified batman interface */
-int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
-		    const uint8_t *dst_addr)
+ * specified batman interface
+ */
+int batadv_send_skb_packet(struct sk_buff *skb,
+			   struct batadv_hard_iface *hard_iface,
+			   const uint8_t *dst_addr)
 {
 	struct ethhdr *ethhdr;
 
-	if (hard_iface->if_status != IF_ACTIVE)
+	if (hard_iface->if_status != BATADV_IF_ACTIVE)
 		goto send_skb_err;
 
 	if (unlikely(!hard_iface->net_dev))
@@ -51,7 +51,7 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
 	}
 
 	/* push to the ethernet header. */
-	if (my_skb_head_push(skb, ETH_HLEN) < 0)
+	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
 		goto send_skb_err;
 
 	skb_reset_mac_header(skb);
@@ -59,129 +59,57 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
 	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
 	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
-	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
+	ethhdr->h_proto = __constant_htons(BATADV_ETH_P_BATMAN);
 
 	skb_set_network_header(skb, ETH_HLEN);
 	skb->priority = TC_PRIO_CONTROL;
-	skb->protocol = __constant_htons(ETH_P_BATMAN);
+	skb->protocol = __constant_htons(BATADV_ETH_P_BATMAN);
 
 	skb->dev = hard_iface->net_dev;
 
 	/* dev_queue_xmit() returns a negative result on error. However on
 	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
-	 * (which is > 0). This will not be treated as an error. */
-
+	 * (which is > 0). This will not be treated as an error.
+	 */
 	return dev_queue_xmit(skb);
 send_skb_err:
 	kfree_skb(skb);
 	return NET_XMIT_DROP;
 }
 
-static void realloc_packet_buffer(struct hard_iface *hard_iface,
-				  int new_len)
+void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
 {
-	unsigned char *new_buff;
-
-	new_buff = kmalloc(new_len, GFP_ATOMIC);
-
-	/* keep old buffer if kmalloc should fail */
-	if (new_buff) {
-		memcpy(new_buff, hard_iface->packet_buff,
-		       BATMAN_OGM_HLEN);
-
-		kfree(hard_iface->packet_buff);
-		hard_iface->packet_buff = new_buff;
-		hard_iface->packet_len = new_len;
-	}
-}
+	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 
-/* when calling this function (hard_iface == primary_if) has to be true */
-static int prepare_packet_buffer(struct bat_priv *bat_priv,
-				 struct hard_iface *hard_iface)
-{
-	int new_len;
-
-	new_len = BATMAN_OGM_HLEN +
-		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
-
-	/* if we have too many changes for one packet don't send any
-	 * and wait for the tt table request which will be fragmented */
-	if (new_len > hard_iface->soft_iface->mtu)
-		new_len = BATMAN_OGM_HLEN;
-
-	realloc_packet_buffer(hard_iface, new_len);
-
-	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
-
-	/* reset the sending counter */
-	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
-
-	return tt_changes_fill_buffer(bat_priv,
-				      hard_iface->packet_buff + BATMAN_OGM_HLEN,
-				      hard_iface->packet_len - BATMAN_OGM_HLEN);
-}
-
-static int reset_packet_buffer(struct bat_priv *bat_priv,
-			       struct hard_iface *hard_iface)
-{
-	realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN);
-	return 0;
-}
-
-void schedule_bat_ogm(struct hard_iface *hard_iface)
-{
-	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
-	struct hard_iface *primary_if;
-	int tt_num_changes = -1;
-
-	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
-	    (hard_iface->if_status == IF_TO_BE_REMOVED))
+	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
+	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
 		return;
 
-	/**
-	 * the interface gets activated here to avoid race conditions between
+	/* the interface gets activated here to avoid race conditions between
 	 * the moment of activating the interface in
 	 * hardif_activate_interface() where the originator mac is set and
 	 * outdated packets (especially uninitialized mac addresses) in the
 	 * packet queue
 	 */
-	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
-		hard_iface->if_status = IF_ACTIVE;
-
-	primary_if = primary_if_get_selected(bat_priv);
-
-	if (hard_iface == primary_if) {
-		/* if at least one change happened */
-		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
-			tt_commit_changes(bat_priv);
-			tt_num_changes = prepare_packet_buffer(bat_priv,
-							       hard_iface);
-		}
-
-		/* if the changes have been sent often enough */
-		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
-			tt_num_changes = reset_packet_buffer(bat_priv,
-							     hard_iface);
-	}
-
-	if (primary_if)
-		hardif_free_ref(primary_if);
+	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
+		hard_iface->if_status = BATADV_IF_ACTIVE;
 
-	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes);
+	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
 }
 
-static void forw_packet_free(struct forw_packet *forw_packet)
+static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
 {
 	if (forw_packet->skb)
 		kfree_skb(forw_packet->skb);
 	if (forw_packet->if_incoming)
-		hardif_free_ref(forw_packet->if_incoming);
+		batadv_hardif_free_ref(forw_packet->if_incoming);
 	kfree(forw_packet);
 }
 
-static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
-				      struct forw_packet *forw_packet,
-				      unsigned long send_time)
+static void
+_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
+				 struct batadv_forw_packet *forw_packet,
+				 unsigned long send_time)
 {
 	INIT_HLIST_NODE(&forw_packet->list);
 
@@ -192,8 +120,8 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
 
 	/* start timer for this packet */
 	INIT_DELAYED_WORK(&forw_packet->delayed_work,
-			  send_outstanding_bcast_packet);
-	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
+			  batadv_send_outstanding_bcast_packet);
+	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
 			   send_time);
 }
 
@@ -204,21 +132,24 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
  * errors.
  *
  * The skb is not consumed, so the caller should make sure that the
- * skb is freed. */
-int add_bcast_packet_to_list(struct bat_priv *bat_priv,
-			     const struct sk_buff *skb, unsigned long delay)
+ * skb is freed.
+ */
+int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
+				    const struct sk_buff *skb,
+				    unsigned long delay)
 {
-	struct hard_iface *primary_if = NULL;
-	struct forw_packet *forw_packet;
-	struct bcast_packet *bcast_packet;
+	struct batadv_hard_iface *primary_if = NULL;
+	struct batadv_forw_packet *forw_packet;
+	struct batadv_bcast_packet *bcast_packet;
 	struct sk_buff *newskb;
 
-	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
-		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
+	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "bcast packet queue full\n");
 		goto out;
 	}
 
-	primary_if = primary_if_get_selected(bat_priv);
+	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
 		goto out_and_inc;
 
@@ -232,7 +163,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv,
 		goto packet_free;
 
 	/* as we have a copy now, it is safe to decrease the TTL */
-	bcast_packet = (struct bcast_packet *)newskb->data;
+	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
 	bcast_packet->header.ttl--;
 
 	skb_reset_mac_header(newskb);
@@ -243,7 +174,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv,
 	/* how often did we send the bcast packet ? */
 	forw_packet->num_packets = 0;
 
-	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
+	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
 	return NETDEV_TX_OK;
 
 packet_free:
@@ -252,38 +183,43 @@ out_and_inc:
 	atomic_inc(&bat_priv->bcast_queue_left);
 out:
 	if (primary_if)
-		hardif_free_ref(primary_if);
+		batadv_hardif_free_ref(primary_if);
 	return NETDEV_TX_BUSY;
 }
 
-static void send_outstanding_bcast_packet(struct work_struct *work)
+static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
 {
-	struct hard_iface *hard_iface;
+	struct batadv_hard_iface *hard_iface;
 	struct delayed_work *delayed_work =
 		container_of(work, struct delayed_work, work);
-	struct forw_packet *forw_packet =
-		container_of(delayed_work, struct forw_packet, delayed_work);
+	struct batadv_forw_packet *forw_packet;
 	struct sk_buff *skb1;
-	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
-	struct bat_priv *bat_priv = netdev_priv(soft_iface);
+	struct net_device *soft_iface;
+	struct batadv_priv *bat_priv;
+
+	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
+				   delayed_work);
+	soft_iface = forw_packet->if_incoming->soft_iface;
+	bat_priv = netdev_priv(soft_iface);
 
 	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
 	hlist_del(&forw_packet->list);
 	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 
-	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
+	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
 		goto out;
 
 	/* rebroadcast packet */
 	rcu_read_lock();
-	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
 		if (hard_iface->soft_iface != soft_iface)
 			continue;
 
 		/* send a copy of the saved skb */
 		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
 		if (skb1)
-			send_skb_packet(skb1, hard_iface, broadcast_addr);
+			batadv_send_skb_packet(skb1, hard_iface,
+					       batadv_broadcast_addr);
 	}
 	rcu_read_unlock();
 
@@ -291,72 +227,72 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
 
 	/* if we still have some more bcasts to send */
 	if (forw_packet->num_packets < 3) {
-		_add_bcast_packet_to_list(bat_priv, forw_packet,
-					  msecs_to_jiffies(5));
+		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
+						 msecs_to_jiffies(5));
 		return;
 	}
 
 out:
-	forw_packet_free(forw_packet);
+	batadv_forw_packet_free(forw_packet);
 	atomic_inc(&bat_priv->bcast_queue_left);
 }
 
-void send_outstanding_bat_ogm_packet(struct work_struct *work)
+void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
 {
 	struct delayed_work *delayed_work =
 		container_of(work, struct delayed_work, work);
-	struct forw_packet *forw_packet =
-		container_of(delayed_work, struct forw_packet, delayed_work);
-	struct bat_priv *bat_priv;
+	struct batadv_forw_packet *forw_packet;
+	struct batadv_priv *bat_priv;
 
+	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
+				   delayed_work);
 	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
 	spin_lock_bh(&bat_priv->forw_bat_list_lock);
 	hlist_del(&forw_packet->list);
 	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
 
-	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
+	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
 		goto out;
 
 	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);
 
-	/**
-	 * we have to have at least one packet in the queue
+	/* we have to have at least one packet in the queue
 	 * to determine the queues wake up time unless we are
 	 * shutting down
 	 */
 	if (forw_packet->own)
-		schedule_bat_ogm(forw_packet->if_incoming);
+		batadv_schedule_bat_ogm(forw_packet->if_incoming);
 
 out:
 	/* don't count own packet */
 	if (!forw_packet->own)
 		atomic_inc(&bat_priv->batman_queue_left);
 
-	forw_packet_free(forw_packet);
+	batadv_forw_packet_free(forw_packet);
 }
 
-void purge_outstanding_packets(struct bat_priv *bat_priv,
-			       const struct hard_iface *hard_iface)
+void
+batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+				 const struct batadv_hard_iface *hard_iface)
 {
-	struct forw_packet *forw_packet;
+	struct batadv_forw_packet *forw_packet;
 	struct hlist_node *tmp_node, *safe_tmp_node;
 	bool pending;
 
 	if (hard_iface)
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"purge_outstanding_packets(): %s\n",
-			hard_iface->net_dev->name);
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "purge_outstanding_packets(): %s\n",
+			   hard_iface->net_dev->name);
 	else
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"purge_outstanding_packets()\n");
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "purge_outstanding_packets()\n");
 
 	/* free bcast list */
 	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
 	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
 				  &bat_priv->forw_bcast_list, list) {
 
-		/**
-		 * if purge_outstanding_packets() was called with an argument
+		/* if purge_outstanding_packets() was called with an argument
 		 * we delete only packets belonging to the given interface
 		 */
 		if ((hard_iface) &&
@@ -365,8 +301,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 
 		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 
-		/**
-		 * send_outstanding_bcast_packet() will lock the list to
+		/* batadv_send_outstanding_bcast_packet() will lock the list to
 		 * delete the item from the list
 		 */
 		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
@@ -374,7 +309,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 
 		if (pending) {
 			hlist_del(&forw_packet->list);
-			forw_packet_free(forw_packet);
+			batadv_forw_packet_free(forw_packet);
 		}
 	}
 	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
@@ -384,8 +319,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
 				  &bat_priv->forw_bat_list, list) {
 
-		/**
-		 * if purge_outstanding_packets() was called with an argument
+		/* if purge_outstanding_packets() was called with an argument
 		 * we delete only packets belonging to the given interface
 		 */
 		if ((hard_iface) &&
@@ -394,8 +328,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 
 		spin_unlock_bh(&bat_priv->forw_bat_list_lock);
 
-		/**
-		 * send_outstanding_bat_packet() will lock the list to
+		/* send_outstanding_bat_packet() will lock the list to
 		 * delete the item from the list
 		 */
 		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
@@ -403,7 +336,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 
 		if (pending) {
 			hlist_del(&forw_packet->list);
-			forw_packet_free(forw_packet);
+			batadv_forw_packet_free(forw_packet);
 		}
 	}
 	spin_unlock_bh(&bat_priv->forw_bat_list_lock);