author     Johannes Berg <johannes@sipsolutions.net>        2008-03-31 13:23:03 -0400
committer  John W. Linville <linville@tuxdriver.com>        2008-04-01 17:14:10 -0400
commit     dc6676b7f2c2072ec05254aaca32e99f87a8a417 (patch)
tree       6caf7e007063f9ae6a16fdcb1912bf72d31237c2 /net/mac80211/sta_info.c
parent     4f6fab472c4c7c21d577f85fabec7628d4a05637 (diff)
mac80211: sta_info_flush() fixes
When the IBSS code tries to flush the STA list, it does so in
an atomic context. Flushing isn't safe there, however, and
requires the RTNL, so we need to defer it to a workqueue.
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
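
The shape of the fix is a standard defer-to-process-context pattern: atomic callers only move entries onto a spinlock-protected list and schedule a work item, and the work item later takes the RTNL and performs the sleeping destruction. The sketch below is illustrative only and is not mac80211 code; every name in it (struct flush_ctx, struct entry, entry_destroy(), flush_work_fn(), and so on) is an invented stand-in for the real symbols that appear in the diff further down.

/* Illustrative sketch of the defer-to-workqueue pattern -- not mac80211 code. */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>

struct entry {
        struct list_head list;
        /* ... payload ... */
};

struct flush_ctx {
        spinlock_t lock;                /* protects the pending list */
        struct list_head pending;       /* entries queued for destruction */
        struct work_struct flush_work;  /* runs in process context */
};

/* Stand-in for sta_info_destroy(): destruction may sleep. */
static void entry_destroy(struct entry *e)
{
        might_sleep();
        kfree(e);
}

/* Runs in process context: may take the RTNL and sleep. */
static void flush_work_fn(struct work_struct *work)
{
        struct flush_ctx *ctx = container_of(work, struct flush_ctx, flush_work);
        struct entry *e;
        unsigned long flags;

        rtnl_lock();
        spin_lock_irqsave(&ctx->lock, flags);
        while (!list_empty(&ctx->pending)) {
                e = list_first_entry(&ctx->pending, struct entry, list);
                list_del(&e->list);
                /* drop the spinlock: destruction may sleep */
                spin_unlock_irqrestore(&ctx->lock, flags);
                entry_destroy(e);
                spin_lock_irqsave(&ctx->lock, flags);
        }
        spin_unlock_irqrestore(&ctx->lock, flags);
        rtnl_unlock();
}

/* Safe from atomic context: only list manipulation under a spinlock,
 * plus scheduling the work item. */
static void entry_flush_delayed(struct flush_ctx *ctx, struct entry *e)
{
        unsigned long flags;

        spin_lock_irqsave(&ctx->lock, flags);
        list_add_tail(&e->list, &ctx->pending);
        spin_unlock_irqrestore(&ctx->lock, flags);
        schedule_work(&ctx->flush_work);
}

static void flush_ctx_init(struct flush_ctx *ctx)
{
        spin_lock_init(&ctx->lock);
        INIT_LIST_HEAD(&ctx->pending);
        INIT_WORK(&ctx->flush_work, flush_work_fn);
}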
Diffstat (limited to 'net/mac80211/sta_info.c')
-rw-r--r--   net/mac80211/sta_info.c   70
1 file changed, 70 insertions, 0 deletions
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index dfca96e05d69..f5c65e891288 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -644,10 +644,41 @@ static void sta_info_debugfs_add_work(struct work_struct *work)
 }
 #endif
 
+void __ieee80211_run_pending_flush(struct ieee80211_local *local)
+{
+        struct sta_info *sta;
+        unsigned long flags;
+
+        ASSERT_RTNL();
+
+        spin_lock_irqsave(&local->sta_lock, flags);
+        while (!list_empty(&local->sta_flush_list)) {
+                sta = list_first_entry(&local->sta_flush_list,
+                                       struct sta_info, list);
+                list_del(&sta->list);
+                spin_unlock_irqrestore(&local->sta_lock, flags);
+                sta_info_destroy(sta);
+                spin_lock_irqsave(&local->sta_lock, flags);
+        }
+        spin_unlock_irqrestore(&local->sta_lock, flags);
+}
+
+static void ieee80211_sta_flush_work(struct work_struct *work)
+{
+        struct ieee80211_local *local =
+                container_of(work, struct ieee80211_local, sta_flush_work);
+
+        rtnl_lock();
+        __ieee80211_run_pending_flush(local);
+        rtnl_unlock();
+}
+
 void sta_info_init(struct ieee80211_local *local)
 {
         spin_lock_init(&local->sta_lock);
         INIT_LIST_HEAD(&local->sta_list);
+        INIT_LIST_HEAD(&local->sta_flush_list);
+        INIT_WORK(&local->sta_flush_work, ieee80211_sta_flush_work);
 
         setup_timer(&local->sta_cleanup, sta_info_cleanup,
                     (unsigned long)local);
@@ -668,7 +699,12 @@ int sta_info_start(struct ieee80211_local *local)
 void sta_info_stop(struct ieee80211_local *local)
 {
         del_timer(&local->sta_cleanup);
+        cancel_work_sync(&local->sta_flush_work);
+
+        rtnl_lock();
         sta_info_flush(local, NULL);
+        __ieee80211_run_pending_flush(local);
+        rtnl_unlock();
 }
 
 /**
@@ -688,6 +724,7 @@ int sta_info_flush(struct ieee80211_local *local,
         unsigned long flags;
 
         might_sleep();
+        ASSERT_RTNL();
 
         spin_lock_irqsave(&local->sta_lock, flags);
         list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
@@ -706,3 +743,36 @@ int sta_info_flush(struct ieee80211_local *local,
 
         return ret;
 }
+
+/**
+ * sta_info_flush_delayed - flush matching STA entries from the STA table
+ *
+ * This function unlinks all stations for a given interface and queues
+ * them for freeing. Note that the workqueue function scheduled here has
+ * to run before any new keys can be added to the system to avoid set_key()
+ * callback ordering issues.
+ *
+ * @sdata: the interface
+ */
+void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata)
+{
+        struct ieee80211_local *local = sdata->local;
+        struct sta_info *sta, *tmp;
+        unsigned long flags;
+        bool work = false;
+
+        spin_lock_irqsave(&local->sta_lock, flags);
+        list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
+                if (sdata == sta->sdata) {
+                        __sta_info_unlink(&sta);
+                        if (sta) {
+                                list_add_tail(&sta->list,
+                                              &local->sta_flush_list);
+                                work = true;
+                        }
+                }
+        }
+        if (work)
+                schedule_work(&local->sta_flush_work);
+        spin_unlock_irqrestore(&local->sta_lock, flags);
+}