Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c')
 drivers/net/wireless/rt2x00/rt2x00queue.c | 177 +++++++++++++++++----------
 1 file changed, 123 insertions(+), 54 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 4358051bfe1..ab8c16f8bca 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -60,7 +60,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
          * at least 8 bytes bytes available in headroom for IV/EIV
          * and 8 bytes for ICV data as tailroon.
          */
-        if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
+        if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
                 head_size += 8;
                 tail_size += 8;
         }
@@ -86,7 +86,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
         memset(skbdesc, 0, sizeof(*skbdesc));
         skbdesc->entry = entry;
 
-        if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
+        if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
                 skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
                                                   skb->data,
                                                   skb->len,
@@ -148,19 +148,6 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
         skb_trim(skb, frame_length);
 }
 
-void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
-{
-        unsigned int frame_length = skb->len;
-        unsigned int align = ALIGN_SIZE(skb, header_length);
-
-        if (!align)
-                return;
-
-        skb_push(skb, align);
-        memmove(skb->data, skb->data + align, frame_length);
-        skb_trim(skb, frame_length);
-}
-
 void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
 {
         unsigned int payload_length = skb->len - header_length;
@@ -226,7 +213,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
 
         __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 
-        if (!test_bit(DRIVER_REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->flags))
+        if (!test_bit(REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->cap_flags))
                 return;
 
         /*
@@ -315,6 +302,85 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
         }
 }
 
+static void rt2x00queue_create_tx_descriptor_ht(struct queue_entry *entry,
+                                                struct txentry_desc *txdesc,
+                                                const struct rt2x00_rate *hwrate)
+{
+        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
+        struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
+        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
+
+        if (tx_info->control.sta)
+                txdesc->u.ht.mpdu_density =
+                    tx_info->control.sta->ht_cap.ampdu_density;
+
+        txdesc->u.ht.ba_size = 7;        /* FIXME: What value is needed? */
+
+        /*
+         * Only one STBC stream is supported for now.
+         */
+        if (tx_info->flags & IEEE80211_TX_CTL_STBC)
+                txdesc->u.ht.stbc = 1;
+
+        /*
+         * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
+         * mcs rate to be used
+         */
+        if (txrate->flags & IEEE80211_TX_RC_MCS) {
+                txdesc->u.ht.mcs = txrate->idx;
+
+                /*
+                 * MIMO PS should be set to 1 for STA's using dynamic SM PS
+                 * when using more then one tx stream (>MCS7).
+                 */
+                if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
+                    ((tx_info->control.sta->ht_cap.cap &
+                      IEEE80211_HT_CAP_SM_PS) >>
+                     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
+                    WLAN_HT_CAP_SM_PS_DYNAMIC)
+                        __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
+        } else {
+                txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
+                if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+                        txdesc->u.ht.mcs |= 0x08;
+        }
+
+        /*
+         * This frame is eligible for an AMPDU, however, don't aggregate
+         * frames that are intended to probe a specific tx rate.
+         */
+        if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
+            !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
+                __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
+
+        /*
+         * Set 40Mhz mode if necessary (for legacy rates this will
+         * duplicate the frame to both channels).
+         */
+        if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
+            txrate->flags & IEEE80211_TX_RC_DUP_DATA)
+                __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
+        if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
+                __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
+
+        /*
+         * Determine IFS values
+         * - Use TXOP_BACKOFF for management frames except beacons
+         * - Use TXOP_SIFS for fragment bursts
+         * - Use TXOP_HTTXOP for everything else
+         *
+         * Note: rt2800 devices won't use CTS protection (if used)
+         * for frames not transmitted with TXOP_HTTXOP
+         */
+        if (ieee80211_is_mgmt(hdr->frame_control) &&
+            !ieee80211_is_beacon(hdr->frame_control))
+                txdesc->u.ht.txop = TXOP_BACKOFF;
+        else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
+                txdesc->u.ht.txop = TXOP_SIFS;
+        else
+                txdesc->u.ht.txop = TXOP_HTTXOP;
+}
+
 static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
                                              struct txentry_desc *txdesc)
 {
@@ -409,8 +475,8 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
         rt2x00crypto_create_tx_descriptor(entry, txdesc);
         rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
 
-        if (test_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags))
-                rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
+        if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
+                rt2x00queue_create_tx_descriptor_ht(entry, txdesc, hwrate);
         else
                 rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
 }
@@ -449,7 +515,7 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
         /*
          * Map the skb to DMA.
          */
-        if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
+        if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
                 rt2x00queue_map_txskb(entry);
 
         return 0;
@@ -495,8 +561,11 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
         struct skb_frame_desc *skbdesc;
         u8 rate_idx, rate_flags;
 
-        if (unlikely(rt2x00queue_full(queue)))
+        if (unlikely(rt2x00queue_full(queue))) {
+                ERROR(queue->rt2x00dev,
+                      "Dropping frame due to full tx queue %d.\n", queue->qid);
                 return -ENOBUFS;
+        }
 
         if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
                                       &entry->flags))) {
@@ -539,7 +608,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
          */
         if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
             !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
-                if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
+                if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
                         rt2x00crypto_tx_copy_iv(skb, &txdesc);
                 else
                         rt2x00crypto_tx_remove_iv(skb, &txdesc);
@@ -553,9 +622,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
          * PCI and USB devices, while header alignment only is valid
          * for PCI devices.
          */
-        if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
+        if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
                 rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
-        else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
+        else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
                 rt2x00queue_align_frame(entry->skb);
 
         /*
@@ -571,7 +640,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 
         set_bit(ENTRY_DATA_PENDING, &entry->flags);
 
-        rt2x00queue_index_inc(queue, Q_INDEX);
+        rt2x00queue_index_inc(entry, Q_INDEX);
         rt2x00queue_write_tx_descriptor(entry, &txdesc);
         rt2x00queue_kick_tx_queue(queue, &txdesc);
 
@@ -660,10 +729,12 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
         return ret;
 }
 
-void rt2x00queue_for_each_entry(struct data_queue *queue,
+bool rt2x00queue_for_each_entry(struct data_queue *queue,
                                 enum queue_index start,
                                 enum queue_index end,
-                                void (*fn)(struct queue_entry *entry))
+                                void *data,
+                                bool (*fn)(struct queue_entry *entry,
+                                           void *data))
 {
         unsigned long irqflags;
         unsigned int index_start;
@@ -674,7 +745,7 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
                 ERROR(queue->rt2x00dev,
                       "Entry requested from invalid index range (%d - %d)\n",
                       start, end);
-                return;
+                return true;
         }
 
         /*
@@ -693,15 +764,23 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
          * send out all frames in the correct order.
          */
         if (index_start < index_end) {
-                for (i = index_start; i < index_end; i++)
-                        fn(&queue->entries[i]);
+                for (i = index_start; i < index_end; i++) {
+                        if (fn(&queue->entries[i], data))
+                                return true;
+                }
         } else {
-                for (i = index_start; i < queue->limit; i++)
-                        fn(&queue->entries[i]);
+                for (i = index_start; i < queue->limit; i++) {
+                        if (fn(&queue->entries[i], data))
+                                return true;
+                }
 
-                for (i = 0; i < index_end; i++)
-                        fn(&queue->entries[i]);
+                for (i = 0; i < index_end; i++) {
+                        if (fn(&queue->entries[i], data))
+                                return true;
+                }
         }
+
+        return false;
 }
 EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
 
@@ -727,8 +806,9 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
 }
 EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
 
-void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
+void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
 {
+        struct data_queue *queue = entry->queue;
         unsigned long irqflags;
 
         if (unlikely(index >= Q_INDEX_MAX)) {
@@ -743,7 +823,7 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
         if (queue->index[index] >= queue->limit)
                 queue->index[index] = 0;
 
-        queue->last_action[index] = jiffies;
+        entry->last_action = jiffies;
 
         if (index == Q_INDEX) {
                 queue->length++;
@@ -848,7 +928,6 @@ EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
 
 void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
 {
-        unsigned int i;
         bool started;
         bool tx_queue =
                 (queue->qid == QID_AC_VO) ||
@@ -883,20 +962,12 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
         }
 
         /*
-         * Check if driver supports flushing, we can only guarantee
-         * full support for flushing if the driver is able
-         * to cancel all pending frames (drop = true).
-         */
-        if (drop && queue->rt2x00dev->ops->lib->flush_queue)
-                queue->rt2x00dev->ops->lib->flush_queue(queue);
-
-        /*
-         * When we don't want to drop any frames, or when
-         * the driver doesn't fully flush the queue correcly,
-         * we must wait for the queue to become empty.
+         * Check if driver supports flushing, if that is the case we can
+         * defer the flushing to the driver. Otherwise we must use the
+         * alternative which just waits for the queue to become empty.
          */
-        for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
-                msleep(10);
+        if (likely(queue->rt2x00dev->ops->lib->flush_queue))
+                queue->rt2x00dev->ops->lib->flush_queue(queue, drop);
 
         /*
          * The queue flush has failed...
@@ -969,10 +1040,8 @@ static void rt2x00queue_reset(struct data_queue *queue)
         queue->count = 0;
         queue->length = 0;
 
-        for (i = 0; i < Q_INDEX_MAX; i++) {
+        for (i = 0; i < Q_INDEX_MAX; i++)
                 queue->index[i] = 0;
-                queue->last_action[i] = jiffies;
-        }
 
         spin_unlock_irqrestore(&queue->index_lock, irqflags);
 }
@@ -1079,7 +1148,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
         if (status)
                 goto exit;
 
-        if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
+        if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
                 status = rt2x00queue_alloc_entries(rt2x00dev->atim,
                                                    rt2x00dev->ops->atim);
                 if (status)
@@ -1131,7 +1200,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
         struct data_queue *queue;
         enum data_queue_qid qid;
         unsigned int req_atim =
-            !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
+            !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
 
         /*
          * We need the following queues:
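
Usage note (not part of the patch): the hunks above change rt2x00queue_for_each_entry() so that the callback returns bool (true stops the walk early) and receives an opaque data pointer passed through from the caller. A minimal caller sketch follows; the callback name, the pending counter, and the choice of Q_INDEX_DONE/Q_INDEX and ENTRY_OWNER_DEVICE_DATA as the iteration window and ownership test are illustrative assumptions, not code from this diff.

/* Illustrative only -- not part of the patch above. */
static bool rt2x00queue_entry_still_owned(struct queue_entry *entry, void *data)
{
        unsigned int *pending = data;

        /* Stop walking as soon as an entry is no longer owned by the device. */
        if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return true;

        (*pending)++;
        return false;        /* keep iterating */
}

static unsigned int rt2x00queue_count_pending(struct data_queue *queue)
{
        unsigned int pending = 0;

        /* Walk from the last completed entry up to the current write index. */
        rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
                                   &pending, rt2x00queue_entry_still_owned);
        return pending;
}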