Diffstat (limited to 'drivers/net/wireless/ath/ar9170/main.c')
-rw-r--r--  drivers/net/wireless/ath/ar9170/main.c | 587
1 file changed, 22 insertions(+), 565 deletions(-)
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index c53692980990..2abc87578994 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -50,10 +50,6 @@ static int modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
-static int modparam_ht;
-module_param_named(ht, modparam_ht, bool, S_IRUGO);
-MODULE_PARM_DESC(ht, "enable MPDU aggregation.");
-
 #define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
 	.bitrate	= (_bitrate), \
 	.flags		= (_flags), \
@@ -182,7 +178,6 @@ static struct ieee80211_supported_band ar9170_band_5GHz = {
 };
 
 static void ar9170_tx(struct ar9170 *ar);
-static bool ar9170_tx_ampdu(struct ar9170 *ar);
 
 static inline u16 ar9170_get_seq_h(struct ieee80211_hdr *hdr)
 {
@@ -195,21 +190,7 @@ static inline u16 ar9170_get_seq(struct sk_buff *skb)
 	return ar9170_get_seq_h((void *) txc->frame_data);
 }
 
-static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr)
-{
-	return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
-}
-
-static inline u16 ar9170_get_tid(struct sk_buff *skb)
-{
-	struct ar9170_tx_control *txc = (void *) skb->data;
-	return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data);
-}
-
-#define GET_NEXT_SEQ(seq)	((seq + 1) & 0x0fff)
-#define GET_NEXT_SEQ_FROM_SKB(skb)	(GET_NEXT_SEQ(ar9170_get_seq(skb)))
-
-#if (defined AR9170_QUEUE_DEBUG) || (defined AR9170_TXAGG_DEBUG)
+#ifdef AR9170_QUEUE_DEBUG
 static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
 {
 	struct ar9170_tx_control *txc = (void *) skb->data;
@@ -236,7 +217,7 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar,
 	       wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
 
 	skb_queue_walk(queue, skb) {
-		printk(KERN_DEBUG "index:%d => \n", i++);
+		printk(KERN_DEBUG "index:%d =>\n", i++);
 		ar9170_print_txheader(ar, skb);
 	}
 	if (i != skb_queue_len(queue))
@@ -244,7 +225,7 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar,
244 "mismatch %d != %d\n", skb_queue_len(queue), i); 225 "mismatch %d != %d\n", skb_queue_len(queue), i);
245 printk(KERN_DEBUG "---[ end ]---\n"); 226 printk(KERN_DEBUG "---[ end ]---\n");
246} 227}
247#endif /* AR9170_QUEUE_DEBUG || AR9170_TXAGG_DEBUG */ 228#endif /* AR9170_QUEUE_DEBUG */
248 229
249#ifdef AR9170_QUEUE_DEBUG 230#ifdef AR9170_QUEUE_DEBUG
250static void ar9170_dump_txqueue(struct ar9170 *ar, 231static void ar9170_dump_txqueue(struct ar9170 *ar,
@@ -275,20 +256,6 @@ static void __ar9170_dump_txstats(struct ar9170 *ar)
 }
 #endif /* AR9170_QUEUE_STOP_DEBUG */
 
-#ifdef AR9170_TXAGG_DEBUG
-static void ar9170_dump_tx_status_ampdu(struct ar9170 *ar)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ar->tx_status_ampdu.lock, flags);
-	printk(KERN_DEBUG "%s: A-MPDU tx_status queue => \n",
-	       wiphy_name(ar->hw->wiphy));
-	__ar9170_dump_txqueue(ar, &ar->tx_status_ampdu);
-	spin_unlock_irqrestore(&ar->tx_status_ampdu.lock, flags);
-}
-
-#endif /* AR9170_TXAGG_DEBUG */
-
 /* caller must guarantee exclusive access for _bin_ queue. */
 static void ar9170_recycle_expired(struct ar9170 *ar,
 				   struct sk_buff_head *queue,
@@ -308,7 +275,7 @@ static void ar9170_recycle_expired(struct ar9170 *ar,
 		if (time_is_before_jiffies(arinfo->timeout)) {
 #ifdef AR9170_QUEUE_DEBUG
 			printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
-			       "recycle \n", wiphy_name(ar->hw->wiphy),
+			       "recycle\n", wiphy_name(ar->hw->wiphy),
 			       jiffies, arinfo->timeout);
 			ar9170_print_txheader(ar, skb);
 #endif /* AR9170_QUEUE_DEBUG */
@@ -360,70 +327,6 @@ static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
 	ieee80211_tx_status_irqsafe(ar->hw, skb);
 }
 
-static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar)
-{
-	struct sk_buff_head success;
-	struct sk_buff *skb;
-	unsigned int i;
-	unsigned long queue_bitmap = 0;
-
-	skb_queue_head_init(&success);
-
-	while (skb_queue_len(&ar->tx_status_ampdu) > AR9170_NUM_TX_STATUS)
-		__skb_queue_tail(&success, skb_dequeue(&ar->tx_status_ampdu));
-
-	ar9170_recycle_expired(ar, &ar->tx_status_ampdu, &success);
-
-#ifdef AR9170_TXAGG_DEBUG
-	printk(KERN_DEBUG "%s: collected %d A-MPDU frames.\n",
-	       wiphy_name(ar->hw->wiphy), skb_queue_len(&success));
-	__ar9170_dump_txqueue(ar, &success);
-#endif /* AR9170_TXAGG_DEBUG */
-
-	while ((skb = __skb_dequeue(&success))) {
-		struct ieee80211_tx_info *txinfo;
-
-		queue_bitmap |= BIT(skb_get_queue_mapping(skb));
-
-		txinfo = IEEE80211_SKB_CB(skb);
-		ieee80211_tx_info_clear_status(txinfo);
-
-		txinfo->flags |= IEEE80211_TX_STAT_ACK;
-		txinfo->status.rates[0].count = 1;
-
-		skb_pull(skb, sizeof(struct ar9170_tx_control));
-		ieee80211_tx_status_irqsafe(ar->hw, skb);
-	}
-
-	for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) {
-#ifdef AR9170_QUEUE_STOP_DEBUG
-		printk(KERN_DEBUG "%s: wake queue %d\n",
-		       wiphy_name(ar->hw->wiphy), i);
-		__ar9170_dump_txstats(ar);
-#endif /* AR9170_QUEUE_STOP_DEBUG */
-		ieee80211_wake_queue(ar->hw, i);
-	}
-
-	if (queue_bitmap)
-		ar9170_tx(ar);
-}
-
-static void ar9170_tx_ampdu_callback(struct ar9170 *ar, struct sk_buff *skb)
-{
-	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
-	struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
-
-	arinfo->timeout = jiffies +
-			  msecs_to_jiffies(AR9170_BA_TIMEOUT);
-
-	skb_queue_tail(&ar->tx_status_ampdu, skb);
-	ar9170_tx_fake_ampdu_status(ar);
-
-	if (atomic_dec_and_test(&ar->tx_ampdu_pending) &&
-	    !list_empty(&ar->tx_ampdu_list))
-		ar9170_tx_ampdu(ar);
-}
-
 void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -447,14 +350,10 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
 	if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
 		ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
 	} else {
-		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
-			ar9170_tx_ampdu_callback(ar, skb);
-		} else {
-			arinfo->timeout = jiffies +
-				msecs_to_jiffies(AR9170_TX_TIMEOUT);
+		arinfo->timeout = jiffies +
+				  msecs_to_jiffies(AR9170_TX_TIMEOUT);
 
-			skb_queue_tail(&ar->tx_status[queue], skb);
-		}
+		skb_queue_tail(&ar->tx_status[queue], skb);
 	}
 
 	if (!ar->tx_stats[queue].len &&
@@ -524,38 +423,6 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
 	return NULL;
 }
 
-static void ar9170_handle_block_ack(struct ar9170 *ar, u16 count, u16 r)
-{
-	struct sk_buff *skb;
-	struct ieee80211_tx_info *txinfo;
-
-	while (count) {
-		skb = ar9170_get_queued_skb(ar, NULL, &ar->tx_status_ampdu, r);
-		if (!skb)
-			break;
-
-		txinfo = IEEE80211_SKB_CB(skb);
-		ieee80211_tx_info_clear_status(txinfo);
-
-		/* FIXME: maybe more ? */
-		txinfo->status.rates[0].count = 1;
-
-		skb_pull(skb, sizeof(struct ar9170_tx_control));
-		ieee80211_tx_status_irqsafe(ar->hw, skb);
-		count--;
-	}
-
-#ifdef AR9170_TXAGG_DEBUG
-	if (count) {
-		printk(KERN_DEBUG "%s: got %d more failed mpdus, but no more "
-		       "suitable frames left in tx_status queue.\n",
-		       wiphy_name(ar->hw->wiphy), count);
-
-		ar9170_dump_tx_status_ampdu(ar);
-	}
-#endif /* AR9170_TXAGG_DEBUG */
-}
-
 /*
  * This worker tries to keeps an maintain tx_status queues.
  * So we can guarantee that incoming tx_status reports are
@@ -592,8 +459,6 @@ static void ar9170_tx_janitor(struct work_struct *work)
 		resched = true;
 	}
 
-	ar9170_tx_fake_ampdu_status(ar);
-
 	if (!resched)
 		return;
 
@@ -673,10 +538,6 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
 
 	case 0xc5:
 		/* BlockACK events */
-		ar9170_handle_block_ack(ar,
-					le16_to_cpu(cmd->ba_fail_cnt.failed),
-					le16_to_cpu(cmd->ba_fail_cnt.rate));
-		ar9170_tx_fake_ampdu_status(ar);
 		break;
 
 	case 0xc6:
@@ -689,7 +550,8 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
 
 	/* firmware debug */
 	case 0xca:
-		printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4, (char *)buf + 4);
+		printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4,
+		       (char *)buf + 4);
 		break;
 	case 0xcb:
 		len -= 4;
@@ -926,7 +788,6 @@ static void ar9170_rx_phy_status(struct ar9170 *ar,
 
 	/* TODO: we could do something with phy_errors */
 	status->signal = ar->noise[0] + phy->rssi_combined;
-	status->noise = ar->noise[0];
 }
 
 static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len)
@@ -1247,7 +1108,6 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
 	ar->global_ampdu_density = 6;
 	ar->global_ampdu_factor = 3;
 
-	atomic_set(&ar->tx_ampdu_pending, 0);
 	ar->bad_hw_nagger = jiffies;
 
 	err = ar->open(ar);
@@ -1310,40 +1170,10 @@ static void ar9170_op_stop(struct ieee80211_hw *hw)
 		skb_queue_purge(&ar->tx_pending[i]);
 		skb_queue_purge(&ar->tx_status[i]);
 	}
-	skb_queue_purge(&ar->tx_status_ampdu);
 
 	mutex_unlock(&ar->mutex);
 }
 
-static void ar9170_tx_indicate_immba(struct ar9170 *ar, struct sk_buff *skb)
-{
-	struct ar9170_tx_control *txc = (void *) skb->data;
-
-	txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_AMPDU);
-}
-
-static void ar9170_tx_copy_phy(struct ar9170 *ar, struct sk_buff *dst,
-			       struct sk_buff *src)
-{
-	struct ar9170_tx_control *dst_txc, *src_txc;
-	struct ieee80211_tx_info *dst_info, *src_info;
-	struct ar9170_tx_info *dst_arinfo, *src_arinfo;
-
-	src_txc = (void *) src->data;
-	src_info = IEEE80211_SKB_CB(src);
-	src_arinfo = (void *) src_info->rate_driver_data;
-
-	dst_txc = (void *) dst->data;
-	dst_info = IEEE80211_SKB_CB(dst);
-	dst_arinfo = (void *) dst_info->rate_driver_data;
-
-	dst_txc->phy_control = src_txc->phy_control;
-
-	/* same MCS for the whole aggregate */
-	memcpy(dst_info->driver_rates, src_info->driver_rates,
-	       sizeof(dst_info->driver_rates));
-}
-
 static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr;
@@ -1420,14 +1250,7 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
 		txc->phy_control |=
 			cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
 
-		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
-			if (unlikely(!info->control.sta))
-				goto err_out;
-
-			txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
-		} else {
-			txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
-		}
+		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
 	}
 
 	return 0;
@@ -1537,158 +1360,6 @@ static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
 	txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
 }
 
-static bool ar9170_tx_ampdu(struct ar9170 *ar)
-{
-	struct sk_buff_head agg;
-	struct ar9170_sta_tid *tid_info = NULL, *tmp;
-	struct sk_buff *skb, *first = NULL;
-	unsigned long flags, f2;
-	unsigned int i = 0;
-	u16 seq, queue, tmpssn;
-	bool run = false;
-
-	skb_queue_head_init(&agg);
-
-	spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
-	if (list_empty(&ar->tx_ampdu_list)) {
-#ifdef AR9170_TXAGG_DEBUG
-		printk(KERN_DEBUG "%s: aggregation list is empty.\n",
-		       wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
-		goto out_unlock;
-	}
-
-	list_for_each_entry_safe(tid_info, tmp, &ar->tx_ampdu_list, list) {
-		if (tid_info->state != AR9170_TID_STATE_COMPLETE) {
-#ifdef AR9170_TXAGG_DEBUG
-			printk(KERN_DEBUG "%s: dangling aggregation entry!\n",
-			       wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
-			continue;
-		}
-
-		if (++i > 64) {
-#ifdef AR9170_TXAGG_DEBUG
-			printk(KERN_DEBUG "%s: enough frames aggregated.\n",
-			       wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
-			break;
-		}
-
-		queue = TID_TO_WME_AC(tid_info->tid);
-
-		if (skb_queue_len(&ar->tx_pending[queue]) >=
-		    AR9170_NUM_TX_AGG_MAX) {
-#ifdef AR9170_TXAGG_DEBUG
-			printk(KERN_DEBUG "%s: queue %d full.\n",
-			       wiphy_name(ar->hw->wiphy), queue);
-#endif /* AR9170_TXAGG_DEBUG */
-			continue;
-		}
-
-		list_del_init(&tid_info->list);
-
-		spin_lock_irqsave(&tid_info->queue.lock, f2);
-		tmpssn = seq = tid_info->ssn;
-		first = skb_peek(&tid_info->queue);
-
-		if (likely(first))
-			tmpssn = ar9170_get_seq(first);
-
-		if (unlikely(tmpssn != seq)) {
-#ifdef AR9170_TXAGG_DEBUG
-			printk(KERN_DEBUG "%s: ssn mismatch [%d != %d]\n.",
-			       wiphy_name(ar->hw->wiphy), seq, tmpssn);
-#endif /* AR9170_TXAGG_DEBUG */
-			tid_info->ssn = tmpssn;
-		}
-
-#ifdef AR9170_TXAGG_DEBUG
-		printk(KERN_DEBUG "%s: generate A-MPDU for tid:%d ssn:%d with "
-		       "%d queued frames.\n", wiphy_name(ar->hw->wiphy),
-		       tid_info->tid, tid_info->ssn,
-		       skb_queue_len(&tid_info->queue));
-		__ar9170_dump_txqueue(ar, &tid_info->queue);
-#endif /* AR9170_TXAGG_DEBUG */
-
-		while ((skb = skb_peek(&tid_info->queue))) {
-			if (unlikely(ar9170_get_seq(skb) != seq))
-				break;
-
-			__skb_unlink(skb, &tid_info->queue);
-			tid_info->ssn = seq = GET_NEXT_SEQ(seq);
-
-			if (unlikely(skb_get_queue_mapping(skb) != queue)) {
-#ifdef AR9170_TXAGG_DEBUG
-				printk(KERN_DEBUG "%s: tid:%d(q:%d) queue:%d "
-				       "!match.\n", wiphy_name(ar->hw->wiphy),
-				       tid_info->tid,
-				       TID_TO_WME_AC(tid_info->tid),
-				       skb_get_queue_mapping(skb));
-#endif /* AR9170_TXAGG_DEBUG */
-				dev_kfree_skb_any(skb);
-				continue;
-			}
-
-			if (unlikely(first == skb)) {
-				ar9170_tx_prepare_phy(ar, skb);
-				__skb_queue_tail(&agg, skb);
-				first = skb;
-			} else {
-				ar9170_tx_copy_phy(ar, skb, first);
-				__skb_queue_tail(&agg, skb);
-			}
-
-			if (unlikely(skb_queue_len(&agg) ==
-			    AR9170_NUM_TX_AGG_MAX))
-				break;
-		}
-
-		if (skb_queue_empty(&tid_info->queue))
-			tid_info->active = false;
-		else
-			list_add_tail(&tid_info->list,
-				      &ar->tx_ampdu_list);
-
-		spin_unlock_irqrestore(&tid_info->queue.lock, f2);
-
-		if (unlikely(skb_queue_empty(&agg))) {
-#ifdef AR9170_TXAGG_DEBUG
-			printk(KERN_DEBUG "%s: queued empty list!\n",
-			       wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
-			continue;
-		}
-
-		/*
-		 * tell the FW/HW that this is the last frame,
-		 * that way it will wait for the immediate block ack.
-		 */
-		ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
-
-#ifdef AR9170_TXAGG_DEBUG
-		printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
-		       wiphy_name(ar->hw->wiphy));
-		__ar9170_dump_txqueue(ar, &agg);
-#endif /* AR9170_TXAGG_DEBUG */
-
-		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-
-		spin_lock_irqsave(&ar->tx_pending[queue].lock, flags);
-		skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
-		spin_unlock_irqrestore(&ar->tx_pending[queue].lock, flags);
-		run = true;
-
-		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
-	}
-
-out_unlock:
-	spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-	__skb_queue_purge(&agg);
-
-	return run;
-}
-
 static void ar9170_tx(struct ar9170 *ar)
 {
 	struct sk_buff *skb;
@@ -1728,7 +1399,7 @@ static void ar9170_tx(struct ar9170 *ar)
 			printk(KERN_DEBUG "%s: queue %d full\n",
 			       wiphy_name(ar->hw->wiphy), i);
 
-			printk(KERN_DEBUG "%s: stuck frames: ===> \n",
+			printk(KERN_DEBUG "%s: stuck frames: ===>\n",
 			       wiphy_name(ar->hw->wiphy));
 			ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
 			ar9170_dump_txqueue(ar, &ar->tx_status[i]);
@@ -1763,9 +1434,6 @@ static void ar9170_tx(struct ar9170 *ar)
 		arinfo->timeout = jiffies +
 				  msecs_to_jiffies(AR9170_TX_TIMEOUT);
 
-		if (info->flags & IEEE80211_TX_CTL_AMPDU)
-			atomic_inc(&ar->tx_ampdu_pending);
-
 #ifdef AR9170_QUEUE_DEBUG
 		printk(KERN_DEBUG "%s: send frame q:%d =>\n",
 		       wiphy_name(ar->hw->wiphy), i);
@@ -1774,9 +1442,6 @@ static void ar9170_tx(struct ar9170 *ar)
 
 		err = ar->tx(ar, skb);
 		if (unlikely(err)) {
-			if (info->flags & IEEE80211_TX_CTL_AMPDU)
-				atomic_dec(&ar->tx_ampdu_pending);
-
 			frames_failed++;
 			dev_kfree_skb_any(skb);
 		} else {
@@ -1823,94 +1488,11 @@ static void ar9170_tx(struct ar9170 *ar)
 				     msecs_to_jiffies(AR9170_JANITOR_DELAY));
 }
 
-static bool ar9170_tx_ampdu_queue(struct ar9170 *ar, struct sk_buff *skb)
-{
-	struct ieee80211_tx_info *txinfo;
-	struct ar9170_sta_info *sta_info;
-	struct ar9170_sta_tid *agg;
-	struct sk_buff *iter;
-	unsigned long flags, f2;
-	unsigned int max;
-	u16 tid, seq, qseq;
-	bool run = false, queue = false;
-
-	tid = ar9170_get_tid(skb);
-	seq = ar9170_get_seq(skb);
-	txinfo = IEEE80211_SKB_CB(skb);
-	sta_info = (void *) txinfo->control.sta->drv_priv;
-	agg = &sta_info->agg[tid];
-	max = sta_info->ampdu_max_len;
-
-	spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
-
-	if (unlikely(agg->state != AR9170_TID_STATE_COMPLETE)) {
-#ifdef AR9170_TXAGG_DEBUG
-		printk(KERN_DEBUG "%s: BlockACK session not fully initialized "
-		       "for ESS:%pM tid:%d state:%d.\n",
-		       wiphy_name(ar->hw->wiphy), agg->addr, agg->tid,
-		       agg->state);
-#endif /* AR9170_TXAGG_DEBUG */
-		goto err_unlock;
-	}
-
-	if (!agg->active) {
-		agg->active = true;
-		agg->ssn = seq;
-		queue = true;
-	}
-
-	/* check if seq is within the BA window */
-	if (unlikely(!BAW_WITHIN(agg->ssn, max, seq))) {
-#ifdef AR9170_TXAGG_DEBUG
-		printk(KERN_DEBUG "%s: frame with tid:%d seq:%d does not "
-		       "fit into BA window (%d - %d)\n",
-		       wiphy_name(ar->hw->wiphy), tid, seq, agg->ssn,
-		       (agg->ssn + max) & 0xfff);
-#endif /* AR9170_TXAGG_DEBUG */
-		goto err_unlock;
-	}
-
-	spin_lock_irqsave(&agg->queue.lock, f2);
-
-	skb_queue_reverse_walk(&agg->queue, iter) {
-		qseq = ar9170_get_seq(iter);
-
-		if (GET_NEXT_SEQ(qseq) == seq) {
-			__skb_queue_after(&agg->queue, iter, skb);
-			goto queued;
-		}
-	}
-
-	__skb_queue_head(&agg->queue, skb);
-
-queued:
-	spin_unlock_irqrestore(&agg->queue.lock, f2);
-
-#ifdef AR9170_TXAGG_DEBUG
-	printk(KERN_DEBUG "%s: new aggregate %p queued.\n",
-	       wiphy_name(ar->hw->wiphy), skb);
-	__ar9170_dump_txqueue(ar, &agg->queue);
-#endif /* AR9170_TXAGG_DEBUG */
-
-	if (skb_queue_len(&agg->queue) >= AR9170_NUM_TX_AGG_MAX)
-		run = true;
-
-	if (queue)
-		list_add_tail(&agg->list, &ar->tx_ampdu_list);
-
-	spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-	return run;
-
-err_unlock:
-	spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-	dev_kfree_skb_irq(skb);
-	return false;
-}
-
 int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
 	struct ar9170 *ar = hw->priv;
 	struct ieee80211_tx_info *info;
+	unsigned int queue;
 
 	if (unlikely(!IS_STARTED(ar)))
 		goto err_free;
@@ -1918,18 +1500,10 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	if (unlikely(ar9170_tx_prepare(ar, skb)))
 		goto err_free;
 
+	queue = skb_get_queue_mapping(skb);
 	info = IEEE80211_SKB_CB(skb);
-	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
-		bool run = ar9170_tx_ampdu_queue(ar, skb);
-
-		if (run || !atomic_read(&ar->tx_ampdu_pending))
-			ar9170_tx_ampdu(ar);
-	} else {
-		unsigned int queue = skb_get_queue_mapping(skb);
-
-		ar9170_tx_prepare_phy(ar, skb);
-		skb_queue_tail(&ar->tx_pending[queue], skb);
-	}
+	ar9170_tx_prepare_phy(ar, skb);
+	skb_queue_tail(&ar->tx_pending[queue], skb);
 
 	ar9170_tx(ar);
 	return NETDEV_TX_OK;
@@ -2046,21 +1620,17 @@ out:
 	return err;
 }
 
-static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
-				       struct dev_addr_list *mclist)
+static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw,
+				       struct netdev_hw_addr_list *mc_list)
 {
 	u64 mchash;
-	int i;
+	struct netdev_hw_addr *ha;
 
 	/* always get broadcast frames */
 	mchash = 1ULL << (0xff >> 2);
 
-	for (i = 0; i < mc_count; i++) {
-		if (WARN_ON(!mclist))
-			break;
-		mchash |= 1ULL << (mclist->dmi_addr[5] >> 2);
-		mclist = mclist->next;
-	}
+	netdev_hw_addr_list_for_each(ha, mc_list)
+		mchash |= 1ULL << (ha->addr[5] >> 2);
 
 	return mchash;
 }
@@ -2330,57 +1900,6 @@ out:
 	return err;
 }
 
-static int ar9170_sta_add(struct ieee80211_hw *hw,
-			  struct ieee80211_vif *vif,
-			  struct ieee80211_sta *sta)
-{
-	struct ar9170 *ar = hw->priv;
-	struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
-	unsigned int i;
-
-	memset(sta_info, 0, sizeof(*sta_info));
-
-	if (!sta->ht_cap.ht_supported)
-		return 0;
-
-	if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
-		ar->global_ampdu_density = sta->ht_cap.ampdu_density;
-
-	if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
-		ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;
-
-	for (i = 0; i < AR9170_NUM_TID; i++) {
-		sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
-		sta_info->agg[i].active = false;
-		sta_info->agg[i].ssn = 0;
-		sta_info->agg[i].tid = i;
-		INIT_LIST_HEAD(&sta_info->agg[i].list);
-		skb_queue_head_init(&sta_info->agg[i].queue);
-	}
-
-	sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
-
-	return 0;
-}
-
-static int ar9170_sta_remove(struct ieee80211_hw *hw,
-			     struct ieee80211_vif *vif,
-			     struct ieee80211_sta *sta)
-{
-	struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
-	unsigned int i;
-
-	if (!sta->ht_cap.ht_supported)
-		return 0;
-
-	for (i = 0; i < AR9170_NUM_TID; i++) {
-		sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
-		skb_queue_purge(&sta_info->agg[i].queue);
-	}
-
-	return 0;
-}
-
 static int ar9170_get_stats(struct ieee80211_hw *hw,
 			    struct ieee80211_low_level_stats *stats)
 {
@@ -2423,55 +1942,7 @@ static int ar9170_ampdu_action(struct ieee80211_hw *hw,
 			    enum ieee80211_ampdu_mlme_action action,
 			    struct ieee80211_sta *sta, u16 tid, u16 *ssn)
 {
-	struct ar9170 *ar = hw->priv;
-	struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
-	struct ar9170_sta_tid *tid_info = &sta_info->agg[tid];
-	unsigned long flags;
-
-	if (!modparam_ht)
-		return -EOPNOTSUPP;
-
 	switch (action) {
-	case IEEE80211_AMPDU_TX_START:
-		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
-		if (tid_info->state != AR9170_TID_STATE_SHUTDOWN ||
-		    !list_empty(&tid_info->list)) {
-			spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-#ifdef AR9170_TXAGG_DEBUG
-			printk(KERN_INFO "%s: A-MPDU [ESS:[%pM] tid:[%d]] "
-			       "is in a very bad state!\n",
-			       wiphy_name(hw->wiphy), sta->addr, tid);
-#endif /* AR9170_TXAGG_DEBUG */
-			return -EBUSY;
-		}
-
-		*ssn = tid_info->ssn;
-		tid_info->state = AR9170_TID_STATE_PROGRESS;
-		tid_info->active = false;
-		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-		break;
-
-	case IEEE80211_AMPDU_TX_STOP:
-		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
-		tid_info->state = AR9170_TID_STATE_SHUTDOWN;
-		list_del_init(&tid_info->list);
-		tid_info->active = false;
-		skb_queue_purge(&tid_info->queue);
-		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-		break;
-
-	case IEEE80211_AMPDU_TX_OPERATIONAL:
-#ifdef AR9170_TXAGG_DEBUG
-		printk(KERN_INFO "%s: A-MPDU for %pM [tid:%d] Operational.\n",
-		       wiphy_name(hw->wiphy), sta->addr, tid);
-#endif /* AR9170_TXAGG_DEBUG */
-		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
-		sta_info->agg[tid].state = AR9170_TID_STATE_COMPLETE;
-		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-		break;
-
 	case IEEE80211_AMPDU_RX_START:
 	case IEEE80211_AMPDU_RX_STOP:
 		/* Handled by firmware */
@@ -2497,8 +1968,6 @@ static const struct ieee80211_ops ar9170_ops = {
 	.bss_info_changed = ar9170_op_bss_info_changed,
 	.get_tsf = ar9170_op_get_tsf,
 	.set_key = ar9170_set_key,
-	.sta_add = ar9170_sta_add,
-	.sta_remove = ar9170_sta_remove,
 	.get_stats = ar9170_get_stats,
 	.ampdu_action = ar9170_ampdu_action,
 };
@@ -2516,7 +1985,7 @@ void *ar9170_alloc(size_t priv_size)
 	 * tends to split the streams into separate rx descriptors.
 	 */
 
-	skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
+	skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
 	if (!skb)
 		goto err_nomem;
 
@@ -2531,8 +2000,6 @@ void *ar9170_alloc(size_t priv_size)
 	mutex_init(&ar->mutex);
 	spin_lock_init(&ar->cmdlock);
 	spin_lock_init(&ar->tx_stats_lock);
-	spin_lock_init(&ar->tx_ampdu_list_lock);
-	skb_queue_head_init(&ar->tx_status_ampdu);
 	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
 		skb_queue_head_init(&ar->tx_status[i]);
 		skb_queue_head_init(&ar->tx_pending[i]);
@@ -2540,7 +2007,6 @@ void *ar9170_alloc(size_t priv_size)
 	ar9170_rx_reset_rx_mpdu(ar);
 	INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
 	INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
-	INIT_LIST_HEAD(&ar->tx_ampdu_list);
 
 	/* all hw supports 2.4 GHz, so set channel to 1 by default */
 	ar->channel = &ar9170_2ghz_chantable[0];
@@ -2551,19 +2017,10 @@ void *ar9170_alloc(size_t priv_size)
 			       BIT(NL80211_IFTYPE_ADHOC);
 	ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
 			 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
-			 IEEE80211_HW_SIGNAL_DBM |
-			 IEEE80211_HW_NOISE_DBM;
-
-	if (modparam_ht) {
-		ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
-	} else {
-		ar9170_band_2GHz.ht_cap.ht_supported = false;
-		ar9170_band_5GHz.ht_cap.ht_supported = false;
-	}
+			 IEEE80211_HW_SIGNAL_DBM;
 
 	ar->hw->queues = __AR9170_NUM_TXQ;
 	ar->hw->extra_tx_headroom = 8;
-	ar->hw->sta_data_size = sizeof(struct ar9170_sta_info);
 
 	ar->hw->max_rates = 1;
 	ar->hw->max_rate_tries = 3;