author    Marek Lindner <lindner_marek@yahoo.de>    2010-05-22 11:48:44 -0400
committer Greg Kroah-Hartman <gregkh@suse.de>       2010-06-04 16:38:55 -0400
commit    5f411a90ee163801434775264b4f9932f1de9e4c (patch)
tree      3837fcba82e6c9d6786330240d6b9385fac31650 /drivers/staging
parent    9d20015391dfc47f6371492925cc0333ac403414 (diff)
Staging: batman-adv: fix rogue packets on shutdown
On module shutdown batman-adv would purge the internal packet queue by
sending all remaining packets, which could confuse other nodes. Now the
packets are silently discarded.

Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
Signed-off-by: Sven Eckelmann <sven.eckelmann@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/staging')
-rw-r--r--  drivers/staging/batman-adv/send.c | 24
1 file changed, 15 insertions, 9 deletions
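In short, both delayed-send workers now test module_state first and jump to a shared cleanup label instead of transmitting during teardown. The standalone sketch below illustrates the resulting control flow for the broadcast path; it uses made-up userspace stand-ins (shutting_down, packet_free, requeue, queue_left), not batman-adv symbols, and simplifies everything outside the hunks shown further down.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool shutting_down;
static int queue_left;

struct packet {
	int num_packets;	/* how often this broadcast went out */
};

static void packet_free(struct packet *p)
{
	free(p);
}

static void requeue(struct packet *p)
{
	printf("requeued after %d sends\n", p->num_packets);
}

static void send_outstanding(struct packet *p)
{
	/* new behaviour: on shutdown, skip the transmit entirely */
	if (shutting_down)
		goto out;

	printf("broadcast #%d\n", ++p->num_packets);

	/* if we still have some more bcasts to send */
	if (p->num_packets < 3) {
		requeue(p);
		return;		/* packet stays queued, nothing to free */
	}

out:
	/* one cleanup path for "done" and "shutting down" alike */
	packet_free(p);
	queue_left++;
}

int main(void)
{
	struct packet *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	shutting_down = true;	/* simulate module teardown */
	send_outstanding(p);	/* no "broadcast" line: discarded silently */
	printf("queue_left=%d\n", queue_left);
	return 0;
}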
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index d8536e277a2..ac69ed871a7 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -440,6 +440,9 @@ void send_outstanding_bcast_packet(struct work_struct *work)
 	hlist_del(&forw_packet->list);
 	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
 
+	if (atomic_read(&module_state) == MODULE_DEACTIVATING)
+		goto out;
+
 	/* rebroadcast packet */
 	rcu_read_lock();
 	list_for_each_entry_rcu(batman_if, &if_list, list) {
@@ -453,15 +456,15 @@ void send_outstanding_bcast_packet(struct work_struct *work)
 
 	forw_packet->num_packets++;
 
-	/* if we still have some more bcasts to send and we are not shutting
-	 * down */
-	if ((forw_packet->num_packets < 3) &&
-	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
+	/* if we still have some more bcasts to send */
+	if (forw_packet->num_packets < 3) {
 		_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
-	else {
-		forw_packet_free(forw_packet);
-		atomic_inc(&bcast_queue_left);
+		return;
 	}
+
+out:
+	forw_packet_free(forw_packet);
+	atomic_inc(&bcast_queue_left);
 }
 
 void send_outstanding_bat_packet(struct work_struct *work)
@@ -476,6 +479,9 @@ void send_outstanding_bat_packet(struct work_struct *work)
 	hlist_del(&forw_packet->list);
 	spin_unlock_irqrestore(&forw_bat_list_lock, flags);
 
+	if (atomic_read(&module_state) == MODULE_DEACTIVATING)
+		goto out;
+
 	send_packet(forw_packet);
 
 	/**
@@ -483,10 +489,10 @@ void send_outstanding_bat_packet(struct work_struct *work)
 	 * to determine the queues wake up time unless we are
 	 * shutting down
 	 */
-	if ((forw_packet->own) &&
-	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
+	if (forw_packet->own)
 		schedule_own_packet(forw_packet->if_incoming);
 
+out:
 	/* don't count own packet */
 	if (!forw_packet->own)
 		atomic_inc(&batman_queue_left);
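A side note on the bat-packet hunks above: the out: label lands before the accounting, so a packet discarded on shutdown still returns its queue slot, while own packets, which never consumed one, do not. The minimal stand-alone sketch below illustrates that balance; drain_one and queue_left are stand-in names for this illustration, not driver symbols.

#include <stdbool.h>
#include <stdio.h>

static int queue_left = 8;	/* slots available for forwarded packets */

static void drain_one(bool own, bool shutting_down)
{
	if (shutting_down)
		goto out;	/* skip transmit and reschedule */

	/* ... transmit here, and reschedule if it is our own packet ... */

out:
	/* don't count own packet */
	if (!own)
		queue_left++;	/* give the slot back, sent or discarded */
}

int main(void)
{
	queue_left--;			/* a forwarded packet took a slot */
	drain_one(false, true);		/* discarded on shutdown: slot returned */
	drain_one(true, true);		/* own packet: no slot to return */
	printf("queue_left=%d\n", queue_left);	/* back to 8: balanced */
	return 0;
}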