Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c'):

 drivers/net/wireless/rt2x00/rt2x00queue.c | 239 ++++++++++++++++++++++-----
 1 file changed, 218 insertions(+), 21 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index a3d79c7a21c6..ca82b3a91697 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -199,15 +199,18 @@ void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
 
 void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
 {
-	unsigned int l2pad = L2PAD_SIZE(header_length);
+	/*
+	 * L2 padding is only present if the skb contains more than just the
+	 * IEEE 802.11 header.
+	 */
+	unsigned int l2pad = (skb->len > header_length) ?
+			     L2PAD_SIZE(header_length) : 0;
 
 	if (!l2pad)
 		return;
 
-	memmove(skb->data + header_length, skb->data + header_length + l2pad,
-		skb->len - header_length - l2pad);
-
-	skb_trim(skb, skb->len - l2pad);
+	memmove(skb->data + l2pad, skb->data, header_length);
+	skb_pull(skb, l2pad);
 }
 
 static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
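
The rewritten rt2x00queue_remove_l2pad() above strips the alignment padding by moving the short 802.11 header forward over the pad and pulling the skb head, instead of copying the (typically much larger) payload backward and trimming the tail. A minimal userspace sketch of the new strategy, with a plain byte buffer standing in for the skb and L2PAD_SIZE() reduced to its 4-byte-alignment definition:

#include <stdio.h>
#include <string.h>

/*
 * Pad inserted after the header so the payload starts 4-byte aligned
 * (mirrors the kernel's L2PAD_SIZE() macro).
 */
#define L2PAD_SIZE(hdrlen)	(-(hdrlen) & 3)

/*
 * New approach: move the hdrlen header bytes up by l2pad, then advance
 * the start of the buffer (the skb_pull() equivalent).  Only the header
 * is copied, no matter how large the payload is.
 */
static char *remove_l2pad(char *data, size_t len, size_t hdrlen)
{
	size_t l2pad = (len > hdrlen) ? L2PAD_SIZE(hdrlen) : 0;

	if (!l2pad)
		return data;

	memmove(data + l2pad, data, hdrlen);
	return data + l2pad;
}

int main(void)
{
	/* 6-byte "header", 2 bytes of L2 pad, then the payload. */
	char buf[] = "HEADERxxPAYLOAD";
	char *frame = remove_l2pad(buf, sizeof(buf) - 1, 6);

	printf("%s\n", frame);	/* prints HEADERPAYLOAD */
	return 0;
}

Only header_length bytes move, independent of the payload size, which is the point of the change.
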
@@ -468,7 +471,7 @@ static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
 	 */
 	if (rt2x00queue_threshold(queue) ||
 	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
-		queue->rt2x00dev->ops->lib->kick_tx_queue(queue);
+		queue->rt2x00dev->ops->lib->kick_queue(queue);
 }
 
 int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
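
This hunk and the ones below replace the TX-specific driver hooks (kick_tx_queue, kill_tx_queue) with generic per-queue callbacks. A hypothetical reduction of that callback set, using only the names as they appear in the calls in this patch (the real table is struct rt2x00lib_ops in rt2x00.h):

/*
 * Each hook takes the queue itself, so one set of callbacks covers
 * TX, RX and beacon queues alike.
 */
struct data_queue;

struct queue_ops {
	void (*start_queue)(struct data_queue *queue); /* enable HW/DMA */
	void (*kick_queue)(struct data_queue *queue);  /* push out pending frames */
	void (*stop_queue)(struct data_queue *queue);  /* disable HW/DMA */
	void (*flush_queue)(struct data_queue *queue); /* cancel pending frames */
};
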
@@ -582,7 +585,7 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
 	rt2x00queue_free_skb(intf->beacon);
 
 	if (!enable_beacon) {
-		rt2x00dev->ops->lib->kill_tx_queue(intf->beacon->queue);
+		rt2x00queue_stop_queue(intf->beacon->queue);
 		mutex_unlock(&intf->beacon_skb_mutex);
 		return 0;
 	}
@@ -735,6 +738,210 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
 	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 }
 
+void rt2x00queue_pause_queue(struct data_queue *queue)
+{
+	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+	    !test_bit(QUEUE_STARTED, &queue->flags) ||
+	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
+		return;
+
+	switch (queue->qid) {
+	case QID_AC_VO:
+	case QID_AC_VI:
+	case QID_AC_BE:
+	case QID_AC_BK:
+		/*
+		 * For TX queues, we have to disable the queue
+		 * inside mac80211.
+		 */
+		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
+		break;
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
+
+void rt2x00queue_unpause_queue(struct data_queue *queue)
+{
+	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+	    !test_bit(QUEUE_STARTED, &queue->flags) ||
+	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
+		return;
+
+	switch (queue->qid) {
+	case QID_AC_VO:
+	case QID_AC_VI:
+	case QID_AC_BE:
+	case QID_AC_BK:
+		/*
+		 * For TX queues, we have to enable the queue
+		 * inside mac80211.
+		 */
+		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
+		break;
+	case QID_RX:
+		/*
+		 * For RX we need to kick the queue now in order to
+		 * receive frames.
+		 */
+		queue->rt2x00dev->ops->lib->kick_queue(queue);
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
+
+void rt2x00queue_start_queue(struct data_queue *queue)
+{
+	mutex_lock(&queue->status_lock);
+
+	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
+		mutex_unlock(&queue->status_lock);
+		return;
+	}
+
+	set_bit(QUEUE_PAUSED, &queue->flags);
+
+	queue->rt2x00dev->ops->lib->start_queue(queue);
+
+	rt2x00queue_unpause_queue(queue);
+
+	mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
+
+void rt2x00queue_stop_queue(struct data_queue *queue)
+{
+	mutex_lock(&queue->status_lock);
+
+	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
+		mutex_unlock(&queue->status_lock);
+		return;
+	}
+
+	rt2x00queue_pause_queue(queue);
+
+	queue->rt2x00dev->ops->lib->stop_queue(queue);
+
+	mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
+
+void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
+{
+	unsigned int i;
+	bool started;
+	bool tx_queue =
+		(queue->qid == QID_AC_VO) ||
+		(queue->qid == QID_AC_VI) ||
+		(queue->qid == QID_AC_BE) ||
+		(queue->qid == QID_AC_BK);
+
+	mutex_lock(&queue->status_lock);
+
+	/*
+	 * If the queue has been started, we must stop it temporarily
+	 * to prevent any new frames from being queued on the device. If
+	 * we are not dropping the pending frames, the queue must
+	 * only be stopped in the software and not the hardware,
+	 * otherwise the queue will never become empty on its own.
+	 */
+	started = test_bit(QUEUE_STARTED, &queue->flags);
+	if (started) {
+		/*
+		 * Pause the queue
+		 */
+		rt2x00queue_pause_queue(queue);
+
+		/*
+		 * If we are not supposed to drop any pending
+		 * frames, this means we must force a start (=kick)
+		 * of the queue to make sure the hardware will
+		 * start transmitting.
+		 */
+		if (!drop && tx_queue)
+			queue->rt2x00dev->ops->lib->kick_queue(queue);
+	}
+
+	/*
+	 * Check if driver supports flushing, we can only guarantee
+	 * full support for flushing if the driver is able
+	 * to cancel all pending frames (drop = true).
+	 */
+	if (drop && queue->rt2x00dev->ops->lib->flush_queue)
+		queue->rt2x00dev->ops->lib->flush_queue(queue);
+
+	/*
+	 * When we don't want to drop any frames, or when
+	 * the driver doesn't fully flush the queue correctly,
+	 * we must wait for the queue to become empty.
+	 */
+	for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
+		msleep(10);
+
+	/*
+	 * The queue flush has failed...
+	 */
+	if (unlikely(!rt2x00queue_empty(queue)))
+		WARNING(queue->rt2x00dev, "Queue %d failed to flush", queue->qid);
+
+	/*
+	 * Restore the queue to the previous status
+	 */
+	if (started)
+		rt2x00queue_unpause_queue(queue);
+
+	mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
+
+void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
+{
+	struct data_queue *queue;
+
+	/*
+	 * rt2x00queue_start_queue will call ieee80211_wake_queue
+	 * for each queue after it has been properly initialized.
+	 */
+	tx_queue_for_each(rt2x00dev, queue)
+		rt2x00queue_start_queue(queue);
+
+	rt2x00queue_start_queue(rt2x00dev->rx);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
+
+void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
+{
+	struct data_queue *queue;
+
+	/*
+	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
+	 * as well, but since we are completely shutting everything
+	 * down now, it is much safer to stop all TX queues at once,
+	 * and use rt2x00queue_stop_queue for cleaning up.
+	 */
+	ieee80211_stop_queues(rt2x00dev->hw);
+
+	tx_queue_for_each(rt2x00dev, queue)
+		rt2x00queue_stop_queue(queue);
+
+	rt2x00queue_stop_queue(rt2x00dev->rx);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
+
+void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
+{
+	struct data_queue *queue;
+
+	tx_queue_for_each(rt2x00dev, queue)
+		rt2x00queue_flush_queue(queue, drop);
+
+	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
+
 static void rt2x00queue_reset(struct data_queue *queue)
 {
 	unsigned long irqflags;
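
The new pause/unpause pair is made idempotent by test_and_set_bit()/test_and_clear_bit(): only the caller that actually flips QUEUE_PAUSED talks to mac80211, so repeated or nested calls are harmless, and rt2x00queue_flush_queue() can pause unconditionally. A compressed userspace model of that flow and of the flush poll loop, using C11 atomics in place of the kernel bitops and nanosleep() for msleep(); the empty and kick hooks are stand-ins:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define QUEUE_PAUSED	1

struct queue {
	atomic_uint flags;
	bool (*empty)(struct queue *q);	/* stand-in for rt2x00queue_empty() */
	void (*kick)(struct queue *q);	/* stand-in for ops->lib->kick_queue */
};

/* Like the kernel helpers: set/clear a bit, return its previous value. */
static bool test_and_set_bit(int bit, atomic_uint *flags)
{
	return atomic_fetch_or(flags, 1u << bit) & (1u << bit);
}

static bool test_and_clear_bit(int bit, atomic_uint *flags)
{
	return atomic_fetch_and(flags, ~(1u << bit)) & (1u << bit);
}

static void pause_queue(struct queue *q)
{
	/* Idempotent: only the call that sets QUEUE_PAUSED proceeds. */
	if (test_and_set_bit(QUEUE_PAUSED, &q->flags))
		return;
	/* ... ieee80211_stop_queue() would go here for TX queues ... */
}

static void unpause_queue(struct queue *q)
{
	if (!test_and_clear_bit(QUEUE_PAUSED, &q->flags))
		return;
	/* ... ieee80211_wake_queue() or the RX kick would go here ... */
}

/*
 * Flush: pause so no new frames are queued, kick when not dropping so
 * the hardware keeps draining, then poll until empty (at most 1s).
 */
static void flush_queue(struct queue *q, bool drop)
{
	pause_queue(q);
	if (!drop)
		q->kick(q);	/* the kernel also checks it is a TX queue */

	for (int i = 0; !q->empty(q) && i < 100; i++) {
		struct timespec ts = { 0, 10 * 1000 * 1000 };

		nanosleep(&ts, NULL);	/* msleep(10) */
	}

	if (!q->empty(q))
		fprintf(stderr, "queue failed to flush\n");

	unpause_queue(q);
}

static bool always_empty(struct queue *q) { (void)q; return true; }
static void noop_kick(struct queue *q) { (void)q; }

int main(void)
{
	struct queue q = { .empty = always_empty, .kick = noop_kick };

	pause_queue(&q);
	pause_queue(&q);	/* no-op: the bit is already set */
	flush_queue(&q, false);
	return 0;
}
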
@@ -753,14 +960,6 @@ static void rt2x00queue_reset(struct data_queue *queue)
 	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 }
 
-void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
-{
-	struct data_queue *queue;
-
-	txall_queue_for_each(rt2x00dev, queue)
-		rt2x00dev->ops->lib->kill_tx_queue(queue);
-}
-
 void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
 {
 	struct data_queue *queue;
@@ -769,11 +968,8 @@ void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
 	queue_for_each(rt2x00dev, queue) {
 		rt2x00queue_reset(queue);
 
-		for (i = 0; i < queue->limit; i++) {
+		for (i = 0; i < queue->limit; i++)
 			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
-			if (queue->qid == QID_RX)
-				rt2x00queue_index_inc(queue, Q_INDEX);
-		}
 	}
 }
 
@@ -902,6 +1098,7 @@ void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
 static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
 			     struct data_queue *queue, enum data_queue_qid qid)
 {
+	mutex_init(&queue->status_lock);
 	spin_lock_init(&queue->index_lock);
 
 	queue->rt2x00dev = rt2x00dev;
@@ -944,7 +1141,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
 	/*
 	 * Initialize queue parameters.
 	 * RX: qid = QID_RX
-	 * TX: qid = QID_AC_BE + index
+	 * TX: qid = QID_AC_VO + index
 	 * TX: cw_min: 2^5 = 32.
 	 * TX: cw_max: 2^10 = 1024.
 	 * BCN: qid = QID_BEACON
@@ -952,7 +1149,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
 	 */
 	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);
 
-	qid = QID_AC_BE;
+	qid = QID_AC_VO;
 	tx_queue_for_each(rt2x00dev, queue)
 		rt2x00queue_init(rt2x00dev, queue, qid++);
 
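
Starting the TX qids at QID_AC_VO matches mac80211's access-category numbering, where queue index 0 is the highest-priority category; that is what allows the pause/unpause code above to pass queue->qid directly to ieee80211_stop_queue()/ieee80211_wake_queue(). A sketch of the assumed correspondence (IEEE80211_AC_* is the real mac80211 enum; the QID_* values mirror the ordering this patch relies on):

/*
 * mac80211's access-category numbering: index 0 is highest priority.
 */
enum ieee80211_ac_numbers {
	IEEE80211_AC_VO	= 0,	/* voice */
	IEEE80211_AC_VI	= 1,	/* video */
	IEEE80211_AC_BE	= 2,	/* best effort */
	IEEE80211_AC_BK	= 3,	/* background */
};

/*
 * With the TX queues initialized from QID_AC_VO upward, the rt2x00 qids
 * line up one-to-one with the mac80211 queue numbers, so queue->qid can
 * be handed straight to ieee80211_stop_queue()/ieee80211_wake_queue().
 */
enum data_queue_qid {
	QID_AC_VO = 0,
	QID_AC_VI = 1,
	QID_AC_BE = 2,
	QID_AC_BK = 3,
	/* QID_RX, QID_BEACON, ... follow */
};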