author     Ivo van Doorn <ivdoorn@gmail.com>            2010-12-13 06:35:17 -0500
committer  John W. Linville <linville@tuxdriver.com>    2010-12-13 15:23:35 -0500
commit     0b7fde54f94979edc67bbf86b5adba702ebfefe8
tree       90c21b9040c5c43c98d931ca7ca14328412a4a89 /drivers/net/wireless/rt2x00/rt2x00queue.c
parent     dbba306f2ae574450a7a5133d6637fe6f5fafc72
rt2x00: Protect queue control with mutex
Add wrapper functions in rt2x00queue.c to start and stop queues.
This control must be protected by a mutex.

Queues can also be paused, which halts the flow of packets between
the driver and mac80211. This doesn't require mutex protection.
Signed-off-by: Ivo van Doorn <IvDoorn@gmail.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
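As an aside (not part of the commit itself), the intended call pattern of the new wrappers looks roughly like the sketch below. The caller functions are illustrative assumptions; this page is filtered to rt2x00queue.c and does not show the real call sites elsewhere in rt2x00lib.

/*
 * Illustrative sketch only: the callers below are assumed for this example
 * and are not part of the patch. They show the intended split between the
 * mutex-protected start/stop wrappers and the lock-free pause/unpause.
 */
#include "rt2x00.h"

/* Bring-up (process context, may sleep): start all TX queues plus RX.
 * Serialization happens inside the wrappers via queue->status_lock. */
static void example_radio_enable(struct rt2x00_dev *rt2x00dev)
{
        rt2x00queue_start_queues(rt2x00dev);
}

/* Shutdown: rt2x00queue_stop_queues() first stops all mac80211 TX queues
 * with ieee80211_stop_queues(), then stops every queue individually. */
static void example_radio_disable(struct rt2x00_dev *rt2x00dev)
{
        rt2x00queue_stop_queues(rt2x00dev);
}

/* Flow control from a hot path: pausing only flips the QUEUE_PAUSED bit and
 * calls ieee80211_stop_queue(), so no mutex is taken and it can be used
 * where sleeping is not allowed. */
static void example_tx_backpressure(struct data_queue *queue, bool queue_full)
{
        if (queue_full)
                rt2x00queue_pause_queue(queue);
        else
                rt2x00queue_unpause_queue(queue);
}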
Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c')
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 130
1 file changed, 121 insertions(+), 9 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 2af6cea0d2da..558965fb41b3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -585,7 +585,7 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
 	rt2x00queue_free_skb(intf->beacon);
 
 	if (!enable_beacon) {
-		rt2x00dev->ops->lib->stop_queue(intf->beacon->queue);
+		rt2x00queue_stop_queue(intf->beacon->queue);
 		mutex_unlock(&intf->beacon_skb_mutex);
 		return 0;
 	}
@@ -738,6 +738,125 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
 	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 }
 
+void rt2x00queue_pause_queue(struct data_queue *queue)
+{
+	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+	    !test_bit(QUEUE_STARTED, &queue->flags) ||
+	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
+		return;
+
+	switch (queue->qid) {
+	case QID_AC_BE:
+	case QID_AC_BK:
+	case QID_AC_VI:
+	case QID_AC_VO:
+		/*
+		 * For TX queues, we have to disable the queue
+		 * inside mac80211.
+		 */
+		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
+		break;
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
+
+void rt2x00queue_unpause_queue(struct data_queue *queue)
+{
+	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+	    !test_bit(QUEUE_STARTED, &queue->flags) ||
+	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
+		return;
+
+	switch (queue->qid) {
+	case QID_AC_BE:
+	case QID_AC_BK:
+	case QID_AC_VI:
+	case QID_AC_VO:
+		/*
+		 * For TX queues, we have to enable the queue
+		 * inside mac80211.
+		 */
+		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
+		break;
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
+
+void rt2x00queue_start_queue(struct data_queue *queue)
+{
+	mutex_lock(&queue->status_lock);
+
+	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
+		mutex_unlock(&queue->status_lock);
+		return;
+	}
+
+	set_bit(QUEUE_PAUSED, &queue->flags);
+
+	queue->rt2x00dev->ops->lib->start_queue(queue);
+
+	rt2x00queue_unpause_queue(queue);
+
+	mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
+
+void rt2x00queue_stop_queue(struct data_queue *queue)
+{
+	mutex_lock(&queue->status_lock);
+
+	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
+		mutex_unlock(&queue->status_lock);
+		return;
+	}
+
+	rt2x00queue_pause_queue(queue);
+
+	queue->rt2x00dev->ops->lib->stop_queue(queue);
+
+	mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
+
+void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
+{
+	struct data_queue *queue;
+
+	/*
+	 * rt2x00queue_start_queue will call ieee80211_wake_queue
+	 * for each queue after it has been properly initialized.
+	 */
+	tx_queue_for_each(rt2x00dev, queue)
+		rt2x00queue_start_queue(queue);
+
+	rt2x00queue_start_queue(rt2x00dev->rx);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
+
+void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
+{
+	struct data_queue *queue;
+
+	/*
+	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
+	 * as well, but we are completely shutting down everything
+	 * now, so it is much safer to stop all TX queues at once,
+	 * and use rt2x00queue_stop_queue for cleaning up.
+	 */
+	ieee80211_stop_queues(rt2x00dev->hw);
+
+	tx_queue_for_each(rt2x00dev, queue)
+		rt2x00queue_stop_queue(queue);
+
+	rt2x00queue_stop_queue(rt2x00dev->rx);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
+
 static void rt2x00queue_reset(struct data_queue *queue)
 {
 	unsigned long irqflags;
@@ -756,14 +875,6 @@ static void rt2x00queue_reset(struct data_queue *queue)
 	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 }
 
-void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
-{
-	struct data_queue *queue;
-
-	txall_queue_for_each(rt2x00dev, queue)
-		rt2x00dev->ops->lib->stop_queue(queue);
-}
-
 void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
 {
 	struct data_queue *queue;
@@ -905,6 +1016,7 @@ void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
 static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
 			     struct data_queue *queue, enum data_queue_qid qid)
 {
+	mutex_init(&queue->status_lock);
 	spin_lock_init(&queue->index_lock);
 
 	queue->rt2x00dev = rt2x00dev;
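Note: the diffstat is filtered to rt2x00queue.c, so the matching header changes are not part of this page. Judging from the fields and flags the new code relies on (queue->status_lock, QUEUE_STARTED, QUEUE_PAUSED), the rt2x00queue.h side of the commit presumably adds roughly the following; this is a reconstruction for context, not the literal hunk.

/* Assumed rt2x00queue.h counterpart (reconstruction, not shown in the
 * filtered diff above). */

/*
 * enum data_queue_flags: queue status bits kept in data_queue.flags
 *
 * QUEUE_STARTED: the queue has been started and may be paused/unpaused.
 * QUEUE_PAUSED: the queue is started, but mac80211 is currently not
 *      allowed to push new frames to the driver.
 */
enum data_queue_flags {
        QUEUE_STARTED,
        QUEUE_PAUSED,
};

struct data_queue {
        struct rt2x00_dev *rt2x00dev;
        /* ... existing members unchanged ... */

        unsigned long flags;            /* data_queue_flags bits */
        struct mutex status_lock;       /* protects start/stop transitions */
        spinlock_t index_lock;
};

void rt2x00queue_pause_queue(struct data_queue *queue);
void rt2x00queue_unpause_queue(struct data_queue *queue);
void rt2x00queue_start_queue(struct data_queue *queue);
void rt2x00queue_stop_queue(struct data_queue *queue);
void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev);
void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev);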