Diffstat (limited to 'net/batman-adv/send.c')
-rw-r--r--	net/batman-adv/send.c	72
1 files changed, 49 insertions, 23 deletions
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index d49e54d932af..33779278f1b2 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -121,7 +121,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 	/* adjust all flags and log packets */
 	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
-				 batman_packet->num_hna)) {
+				 batman_packet->num_tt)) {
 
 		/* we might have aggregated direct link packets with an
 		 * ordinary base packet */
@@ -146,7 +146,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 			hard_iface->net_dev->dev_addr);
 
 		buff_pos += sizeof(struct batman_packet) +
-			(batman_packet->num_hna * ETH_ALEN);
+			(batman_packet->num_tt * ETH_ALEN);
 		packet_num++;
 		batman_packet = (struct batman_packet *)
 			(forw_packet->skb->data + buff_pos);
@@ -222,7 +222,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 	struct batman_packet *batman_packet;
 
 	new_len = sizeof(struct batman_packet) +
-		(bat_priv->num_local_hna * ETH_ALEN);
+		(bat_priv->num_local_tt * ETH_ALEN);
 	new_buff = kmalloc(new_len, GFP_ATOMIC);
 
 	/* keep old buffer if kmalloc should fail */
@@ -231,7 +231,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 		       sizeof(struct batman_packet));
 		batman_packet = (struct batman_packet *)new_buff;
 
-		batman_packet->num_hna = hna_local_fill_buffer(bat_priv,
+		batman_packet->num_tt = tt_local_fill_buffer(bat_priv,
 				new_buff + sizeof(struct batman_packet),
 				new_len - sizeof(struct batman_packet));
 
@@ -244,6 +244,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 void schedule_own_packet(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+	struct hard_iface *primary_if;
 	unsigned long send_time;
 	struct batman_packet *batman_packet;
 	int vis_server;
@@ -253,6 +254,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 		return;
 
 	vis_server = atomic_read(&bat_priv->vis_mode);
+	primary_if = primary_if_get_selected(bat_priv);
 
 	/**
 	 * the interface gets activated here to avoid race conditions between
@@ -264,9 +266,9 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
 		hard_iface->if_status = IF_ACTIVE;
 
-	/* if local hna has changed and interface is a primary interface */
-	if ((atomic_read(&bat_priv->hna_local_changed)) &&
-	    (hard_iface == bat_priv->primary_if))
+	/* if local tt has changed and interface is a primary interface */
+	if ((atomic_read(&bat_priv->tt_local_changed)) &&
+	    (hard_iface == primary_if))
 		rebuild_batman_packet(bat_priv, hard_iface);
 
 	/**
@@ -284,7 +286,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 	else
 		batman_packet->flags &= ~VIS_SERVER;
 
-	if ((hard_iface == bat_priv->primary_if) &&
+	if ((hard_iface == primary_if) &&
 	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
 		batman_packet->gw_flags =
 			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
@@ -299,15 +301,19 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 			       hard_iface->packet_buff,
 			       hard_iface->packet_len,
 			       hard_iface, 1, send_time);
+
+	if (primary_if)
+		hardif_free_ref(primary_if);
 }
 
 void schedule_forward_packet(struct orig_node *orig_node,
 			     struct ethhdr *ethhdr,
 			     struct batman_packet *batman_packet,
-			     uint8_t directlink, int hna_buff_len,
+			     uint8_t directlink, int tt_buff_len,
 			     struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+	struct neigh_node *router;
 	unsigned char in_tq, in_ttl, tq_avg = 0;
 	unsigned long send_time;
 
@@ -316,6 +322,8 @@ void schedule_forward_packet(struct orig_node *orig_node,
 		return;
 	}
 
+	router = orig_node_get_router(orig_node);
+
 	in_tq = batman_packet->tq;
 	in_ttl = batman_packet->ttl;
 
@@ -324,20 +332,22 @@ void schedule_forward_packet(struct orig_node *orig_node,
 
 	/* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
 	 * of our best tq value */
-	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {
+	if (router && router->tq_avg != 0) {
 
 		/* rebroadcast ogm of best ranking neighbor as is */
-		if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) {
-			batman_packet->tq = orig_node->router->tq_avg;
+		if (!compare_eth(router->addr, ethhdr->h_source)) {
+			batman_packet->tq = router->tq_avg;
 
-			if (orig_node->router->last_ttl)
-				batman_packet->ttl = orig_node->router->last_ttl
-							- 1;
+			if (router->last_ttl)
+				batman_packet->ttl = router->last_ttl - 1;
 		}
 
-		tq_avg = orig_node->router->tq_avg;
+		tq_avg = router->tq_avg;
 	}
 
+	if (router)
+		neigh_node_free_ref(router);
+
 	/* apply hop penalty */
 	batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);
 
@@ -359,7 +369,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
 	send_time = forward_send_time();
 	add_bat_packet_to_list(bat_priv,
 			       (unsigned char *)batman_packet,
-			       sizeof(struct batman_packet) + hna_buff_len,
+			       sizeof(struct batman_packet) + tt_buff_len,
 			       if_incoming, 0, send_time);
 }
 
@@ -367,6 +377,8 @@ static void forw_packet_free(struct forw_packet *forw_packet)
 {
 	if (forw_packet->skb)
 		kfree_skb(forw_packet->skb);
+	if (forw_packet->if_incoming)
+		hardif_free_ref(forw_packet->if_incoming);
 	kfree(forw_packet);
 }
 
@@ -388,7 +400,6 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
 			   send_time);
 }
 
-#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
 /* add a broadcast packet to the queue and setup timers. broadcast packets
  * are sent multiple times to increase probability for beeing received.
  *
@@ -399,6 +410,7 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
  * skb is freed. */
 int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
 {
+	struct hard_iface *primary_if = NULL;
 	struct forw_packet *forw_packet;
 	struct bcast_packet *bcast_packet;
 
@@ -407,8 +419,9 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
 		goto out;
 	}
 
-	if (!bat_priv->primary_if)
-		goto out;
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out_and_inc;
 
 	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
 
@@ -426,7 +439,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
 	skb_reset_mac_header(skb);
 
 	forw_packet->skb = skb;
-	forw_packet->if_incoming = bat_priv->primary_if;
+	forw_packet->if_incoming = primary_if;
 
 	/* how often did we send the bcast packet ? */
 	forw_packet->num_packets = 0;
@@ -439,6 +452,8 @@ packet_free:
 out_and_inc:
 	atomic_inc(&bat_priv->bcast_queue_left);
 out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 	return NETDEV_TX_BUSY;
 }
 
@@ -526,6 +541,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 {
 	struct forw_packet *forw_packet;
 	struct hlist_node *tmp_node, *safe_tmp_node;
+	bool pending;
 
 	if (hard_iface)
 		bat_dbg(DBG_BATMAN, bat_priv,
@@ -554,8 +570,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 		 * send_outstanding_bcast_packet() will lock the list to
 		 * delete the item from the list
 		 */
-		cancel_delayed_work_sync(&forw_packet->delayed_work);
+		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
 		spin_lock_bh(&bat_priv->forw_bcast_list_lock);
+
+		if (pending) {
+			hlist_del(&forw_packet->list);
+			forw_packet_free(forw_packet);
+		}
 	}
 	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 
@@ -578,8 +599,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 		 * send_outstanding_bat_packet() will lock the list to
 		 * delete the item from the list
 		 */
-		cancel_delayed_work_sync(&forw_packet->delayed_work);
+		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
 		spin_lock_bh(&bat_priv->forw_bat_list_lock);
+
+		if (pending) {
+			hlist_del(&forw_packet->list);
+			forw_packet_free(forw_packet);
+		}
 	}
 	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
 }