author    John W. Linville <linville@tuxdriver.com>  2011-04-25 14:34:25 -0400
committer John W. Linville <linville@tuxdriver.com>  2011-04-25 14:34:25 -0400
commit    cfef6047c4027a8448ec8dafeaf2bb362cc882e4 (patch)
tree      c254bd25aa8b4b0696b5b5cc45d8e30c7c1bb9dd /drivers/net/wireless/rt2x00/rt2x00queue.c
parent    b71d1d426d263b0b6cb5760322efebbfc89d4463 (diff)
parent    73b48099cc265f88fa1255f3f43e52fe6a94fd5c (diff)

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6 into for-davem

Conflicts:
	drivers/net/wireless/iwlwifi/iwl-core.c
	drivers/net/wireless/rt2x00/rt2x00queue.c
	drivers/net/wireless/rt2x00/rt2x00queue.h
Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c')
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c  159
1 file changed, 119 insertions(+), 40 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 94b8bbb7ad80..ab8c16f8bcaf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -60,7 +60,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
 	 * at least 8 bytes bytes available in headroom for IV/EIV
 	 * and 8 bytes for ICV data as tailroon.
 	 */
-	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
+	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
 		head_size += 8;
 		tail_size += 8;
 	}
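[Annotation] The reservations above only make room; they are consumed when the rx skb is allocated. A minimal sketch of the usual sk_buff pattern (illustrative only: the function name and parameters are assumptions, not code from this file):

struct sk_buff *example_alloc_rx_skb(unsigned int frame_size,
				     unsigned int head_size,
				     unsigned int tail_size)
{
	struct sk_buff *skb;

	/* Allocate the frame plus the crypto head/tail reservations. */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/* Advance the data pointer so IV/EIV can later be inserted in front. */
	skb_reserve(skb, head_size);
	/* The ICV tailroom is left implicitly at the end of the buffer. */
	skb_put(skb, frame_size);

	return skb;
}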
@@ -86,7 +86,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
 	memset(skbdesc, 0, sizeof(*skbdesc));
 	skbdesc->entry = entry;
 
-	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
+	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
 		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
 						  skb->data,
 						  skb->len,
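[Annotation] A buffer mapped with dma_map_single() must eventually be released with the matching unmap call. A hedged sketch of the counterpart (the helper name is illustrative; DMA_FROM_DEVICE is assumed since this is an rx buffer):

static void example_unmap_rxskb(struct rt2x00_dev *rt2x00dev,
				struct sk_buff *skb, dma_addr_t skb_dma)
{
	/* Release the streaming mapping created by dma_map_single() above. */
	dma_unmap_single(rt2x00dev->dev, skb_dma, skb->len, DMA_FROM_DEVICE);
}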
@@ -213,7 +213,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
 
 	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 
-	if (!test_bit(DRIVER_REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->flags))
+	if (!test_bit(REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->cap_flags))
 		return;
 
 	/*
@@ -302,6 +302,85 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
 	}
 }
 
+static void rt2x00queue_create_tx_descriptor_ht(struct queue_entry *entry,
+						struct txentry_desc *txdesc,
+						const struct rt2x00_rate *hwrate)
+{
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
+	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
+
+	if (tx_info->control.sta)
+		txdesc->u.ht.mpdu_density =
+		    tx_info->control.sta->ht_cap.ampdu_density;
+
+	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */
+
+	/*
+	 * Only one STBC stream is supported for now.
+	 */
+	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
+		txdesc->u.ht.stbc = 1;
+
+	/*
+	 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
+	 * mcs rate to be used
+	 */
+	if (txrate->flags & IEEE80211_TX_RC_MCS) {
+		txdesc->u.ht.mcs = txrate->idx;
+
+		/*
+		 * MIMO PS should be set to 1 for STA's using dynamic SM PS
+		 * when using more then one tx stream (>MCS7).
+		 */
+		if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
+		    ((tx_info->control.sta->ht_cap.cap &
+		      IEEE80211_HT_CAP_SM_PS) >>
+		     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
+		    WLAN_HT_CAP_SM_PS_DYNAMIC)
+			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
+	} else {
+		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
+		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+			txdesc->u.ht.mcs |= 0x08;
+	}
+
+	/*
+	 * This frame is eligible for an AMPDU, however, don't aggregate
+	 * frames that are intended to probe a specific tx rate.
+	 */
+	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
+	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
+		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
+
+	/*
+	 * Set 40Mhz mode if necessary (for legacy rates this will
+	 * duplicate the frame to both channels).
+	 */
+	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
+	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
+		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
+	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
+		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
+
+	/*
+	 * Determine IFS values
+	 * - Use TXOP_BACKOFF for management frames except beacons
+	 * - Use TXOP_SIFS for fragment bursts
+	 * - Use TXOP_HTTXOP for everything else
+	 *
+	 * Note: rt2800 devices won't use CTS protection (if used)
+	 * for frames not transmitted with TXOP_HTTXOP
+	 */
+	if (ieee80211_is_mgmt(hdr->frame_control) &&
+	    !ieee80211_is_beacon(hdr->frame_control))
+		txdesc->u.ht.txop = TXOP_BACKOFF;
+	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
+		txdesc->u.ht.txop = TXOP_SIFS;
+	else
+		txdesc->u.ht.txop = TXOP_HTTXOP;
+}
+
 static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
 					     struct txentry_desc *txdesc)
 {
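[Annotation] The SM PS test in the new helper packs a lot into one condition. Broken out, it reads the two-bit SM power save field (IEEE80211_HT_CAP_SM_PS, bits 2-3 of the HT capability word) and compares it against the dynamic mode. A sketch with an illustrative helper name, using the constants from <linux/ieee80211.h>:

static bool sta_uses_dynamic_smps(struct ieee80211_sta *sta)
{
	/* Extract the two-bit SM power save mode from the capability word. */
	u8 smps = (sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >>
		  IEEE80211_HT_CAP_SM_PS_SHIFT;

	return smps == WLAN_HT_CAP_SM_PS_DYNAMIC;
}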
@@ -396,8 +475,8 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
 	rt2x00crypto_create_tx_descriptor(entry, txdesc);
 	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
 
-	if (test_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags))
-		rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
+	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
+		rt2x00queue_create_tx_descriptor_ht(entry, txdesc, hwrate);
 	else
 		rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
 }
@@ -436,7 +515,7 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
 	/*
 	 * Map the skb to DMA.
 	 */
-	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
+	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
 		rt2x00queue_map_txskb(entry);
 
 	return 0;
@@ -529,7 +608,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 	 */
 	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
 	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
-		if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
+		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
 			rt2x00crypto_tx_copy_iv(skb, &txdesc);
 		else
 			rt2x00crypto_tx_remove_iv(skb, &txdesc);
@@ -543,9 +622,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 	 * PCI and USB devices, while header alignment only is valid
 	 * for PCI devices.
 	 */
-	if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
+	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
 		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
-	else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
+	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
 		rt2x00queue_align_frame(entry->skb);
 
 	/*
@@ -561,7 +640,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 
 	set_bit(ENTRY_DATA_PENDING, &entry->flags);
 
-	rt2x00queue_index_inc(queue, Q_INDEX);
+	rt2x00queue_index_inc(entry, Q_INDEX);
 	rt2x00queue_write_tx_descriptor(entry, &txdesc);
 	rt2x00queue_kick_tx_queue(queue, &txdesc);
 
@@ -650,10 +729,12 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
 	return ret;
 }
 
-void rt2x00queue_for_each_entry(struct data_queue *queue,
-				enum queue_index start,
-				enum queue_index end,
-				void (*fn)(struct queue_entry *entry))
+bool rt2x00queue_for_each_entry(struct data_queue *queue,
+				enum queue_index start,
+				enum queue_index end,
+				void *data,
+				bool (*fn)(struct queue_entry *entry,
+					   void *data))
 {
 	unsigned long irqflags;
 	unsigned int index_start;
@@ -664,7 +745,7 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
 		ERROR(queue->rt2x00dev,
 		      "Entry requested from invalid index range (%d - %d)\n",
 		      start, end);
-		return;
+		return true;
 	}
 
 	/*
@@ -683,15 +764,23 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
 	 * send out all frames in the correct order.
 	 */
 	if (index_start < index_end) {
-		for (i = index_start; i < index_end; i++)
-			fn(&queue->entries[i]);
+		for (i = index_start; i < index_end; i++) {
+			if (fn(&queue->entries[i], data))
+				return true;
+		}
 	} else {
-		for (i = index_start; i < queue->limit; i++)
-			fn(&queue->entries[i]);
+		for (i = index_start; i < queue->limit; i++) {
+			if (fn(&queue->entries[i], data))
+				return true;
+		}
 
-		for (i = 0; i < index_end; i++)
-			fn(&queue->entries[i]);
+		for (i = 0; i < index_end; i++) {
+			if (fn(&queue->entries[i], data))
+				return true;
+		}
 	}
+
+	return false;
 }
 EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
 
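[Annotation] With the new signature, callers pass an opaque cookie and a callback that can abort the walk early; the return value tells the caller whether that happened. A hypothetical caller (the callback name and the counting logic are illustrative, not from this patch):

static bool rt2x00queue_count_pending(struct queue_entry *entry, void *data)
{
	unsigned int *count = data;

	if (!test_bit(ENTRY_DATA_PENDING, &entry->flags))
		return true;	/* first non-pending entry: abort the walk */

	(*count)++;
	return false;		/* continue with the next entry */
}

static unsigned int example_count_pending(struct data_queue *queue)
{
	unsigned int count = 0;

	/* Walks Q_INDEX_DONE..Q_INDEX; returns true if aborted early. */
	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
				   &count, rt2x00queue_count_pending);
	return count;
}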
@@ -717,8 +806,9 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
 }
 EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
 
-void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
+void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
 {
+	struct data_queue *queue = entry->queue;
 	unsigned long irqflags;
 
 	if (unlikely(index >= Q_INDEX_MAX)) {
@@ -733,7 +823,7 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
 	if (queue->index[index] >= queue->limit)
 		queue->index[index] = 0;
 
-	queue->last_action[index] = jiffies;
+	entry->last_action = jiffies;
 
 	if (index == Q_INDEX) {
 		queue->length++;
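[Annotation] Moving last_action from the queue's per-index array into the entry means age can now be judged per frame rather than per queue position. An illustrative staleness check (the helper name and the 100ms threshold are assumptions, not part of this patch):

static bool example_entry_stalled(struct queue_entry *entry)
{
	/* True if the entry has seen no activity for over 100ms. */
	return time_after(jiffies,
			  entry->last_action + msecs_to_jiffies(100));
}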
@@ -838,7 +928,6 @@ EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
 
 void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
 {
-	unsigned int i;
 	bool started;
 	bool tx_queue =
 		(queue->qid == QID_AC_VO) ||
@@ -873,20 +962,12 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
 	}
 
 	/*
-	 * Check if driver supports flushing, we can only guarantee
-	 * full support for flushing if the driver is able
-	 * to cancel all pending frames (drop = true).
+	 * Check if driver supports flushing, if that is the case we can
+	 * defer the flushing to the driver. Otherwise we must use the
+	 * alternative which just waits for the queue to become empty.
 	 */
-	if (drop && queue->rt2x00dev->ops->lib->flush_queue)
-		queue->rt2x00dev->ops->lib->flush_queue(queue);
-
-	/*
-	 * When we don't want to drop any frames, or when
-	 * the driver doesn't fully flush the queue correcly,
-	 * we must wait for the queue to become empty.
-	 */
-	for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
-		msleep(10);
+	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
+		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);
 
 	/*
 	 * The queue flush has failed...
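[Annotation] The polling fallback deleted above does not vanish from the stack: with flush_queue() now taking the drop flag, a driver without a hardware-level flush can implement the callback as the same wait loop. A sketch (illustrative; not any specific driver's implementation):

static void example_flush_queue(struct data_queue *queue, bool drop)
{
	unsigned int i;

	/* Poll for up to ~1 second while the hardware drains the queue. */
	for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
		msleep(10);
}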
@@ -959,10 +1040,8 @@ static void rt2x00queue_reset(struct data_queue *queue)
 	queue->count = 0;
 	queue->length = 0;
 
-	for (i = 0; i < Q_INDEX_MAX; i++) {
+	for (i = 0; i < Q_INDEX_MAX; i++)
 		queue->index[i] = 0;
-		queue->last_action[i] = jiffies;
-	}
 
 	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 }
@@ -1069,7 +1148,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
 	if (status)
 		goto exit;
 
-	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
+	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
 		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
 						   rt2x00dev->ops->atim);
 		if (status)
@@ -1121,7 +1200,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
 	struct data_queue *queue;
 	enum data_queue_qid qid;
 	unsigned int req_atim =
-		!!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
+		!!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
 
 	/*
 	 * We need the following queues: