author		Johannes Berg <johannes.berg@intel.com>		2012-03-15 16:26:52 -0400
committer	John W. Linville <linville@tuxdriver.com>	2012-04-09 16:37:22 -0400
commit		9eae88fa9a02e31af69a215beaa5e1194da3a5a1 (patch)
tree		147ed04cc911430969a4d256a0c41ae33f0a1b06
parent		e56103823716039418d099221dd2059fa7547fbf (diff)
iwlwifi: move queue mapping out of transport
The queue mapping is not only dynamic, it is also dependent on the
uCode, as we can already see today with the dual-mode and non-dual-mode
being different. Move the queue mapping out of the transport layer and
let the higher layer manage it. Part of the transport configuration is
how to set up the queues.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
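In short: the op mode now resolves the hardware queue itself and hands the
transport a plain queue number. A condensed sketch of the selection order
added to iwlagn_tx_skb() in the iwl-agn-tx.c hunk below (iwlagn_select_txq
is a hypothetical helper name used here for illustration only):

	/* Hypothetical helper, condensed from the iwlagn_tx_skb() hunk. */
	static int iwlagn_select_txq(struct iwl_priv *priv,
				     struct iwl_rxon_context *ctx,
				     struct sk_buff *skb,
				     struct ieee80211_tx_info *info,
				     bool is_agg, u8 sta_id, u8 tid)
	{
		if (is_agg)	/* per-session queue picked at AGG start */
			return priv->tid_data[sta_id][tid].agg.txq_id;
		if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
			return ctx->mcast_queue;	/* AP multicast-after-DTIM */
		if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
			return IWL_AUX_QUEUE;		/* TX during scan dwell */
		/* default: per-context AC -> hw queue mapping */
		return ctx->ac_to_queue[skb_get_queue_mapping(skb)];
	}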
Diffstat:
 drivers/net/wireless/iwlwifi/iwl-1000.c           |   1
 drivers/net/wireless/iwlwifi/iwl-2000.c           |   2
 drivers/net/wireless/iwlwifi/iwl-5000.c           |   1
 drivers/net/wireless/iwlwifi/iwl-6000.c           |   3
 drivers/net/wireless/iwlwifi/iwl-agn-hw.h         |   3
 drivers/net/wireless/iwlwifi/iwl-agn-tx.c         | 132
 drivers/net/wireless/iwlwifi/iwl-agn.c            | 164
 drivers/net/wireless/iwlwifi/iwl-agn.h            |   7
 drivers/net/wireless/iwlwifi/iwl-debugfs.c        |  11
 drivers/net/wireless/iwlwifi/iwl-dev.h            |  12
 drivers/net/wireless/iwlwifi/iwl-mac80211.c       |   2
 drivers/net/wireless/iwlwifi/iwl-op-mode.h        |  17
 drivers/net/wireless/iwlwifi/iwl-shared.h         |   3
 drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h | 146
 drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c  | 201
 drivers/net/wireless/iwlwifi/iwl-trans-pcie.c     | 218
 drivers/net/wireless/iwlwifi/iwl-trans.h          |  71
 17 files changed, 419 insertions, 575 deletions
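Queue setup itself now travels through the transport configuration rather
than being hardcoded in the transport. A sketch of the configuration step,
assembled from the iwl-agn.c hunks below; the struct type name
iwl_trans_config is assumed from context and the surrounding code is
illustrative only:

	struct iwl_trans_config trans_cfg;

	/* PAN/dual-mode uCode: use the IPAN command queue and mappings */
	trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
	trans_cfg.queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	trans_cfg.n_queue_to_fifo = ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);

	iwl_trans_configure(trans(priv), &trans_cfg);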
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 5b0d888f746b..95c59e39b803 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -157,7 +157,6 @@ static struct iwl_lib_ops iwl1000_lib = {
 
 static const struct iwl_base_params iwl1000_base_params = {
 	.num_of_queues = IWLAGN_NUM_QUEUES,
-	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
 	.max_ll_items = OTP_MAX_LL_ITEMS_1000,
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 5b898db4d3d7..e1329a13f0fd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -171,7 +171,6 @@ static struct iwl_lib_ops iwl2030_lib = {
 static const struct iwl_base_params iwl2000_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
-	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 	.pll_cfg_val = 0,
 	.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
 	.shadow_ram_support = true,
@@ -190,7 +189,6 @@ static const struct iwl_base_params iwl2000_base_params = {
 static const struct iwl_base_params iwl2030_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
-	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 	.pll_cfg_val = 0,
 	.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
 	.shadow_ram_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index a805e97b89af..34bc8dd0064b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -308,7 +308,6 @@ static struct iwl_lib_ops iwl5150_lib = {
 static const struct iwl_base_params iwl5000_base_params = {
 	.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
-	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
 	.led_compensation = 51,
 	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 64060cd738b5..7075570a0f2c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -269,7 +269,6 @@ static struct iwl_lib_ops iwl6030_lib = {
 static const struct iwl_base_params iwl6000_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
-	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 	.pll_cfg_val = 0,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
@@ -286,7 +285,6 @@ static const struct iwl_base_params iwl6000_base_params = {
 static const struct iwl_base_params iwl6050_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
-	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 	.pll_cfg_val = 0,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
 	.shadow_ram_support = true,
@@ -303,7 +301,6 @@ static const struct iwl_base_params iwl6050_base_params = {
 static const struct iwl_base_params iwl6000_g2_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
-	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 	.pll_cfg_val = 0,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index d0ec0abd3c89..c797ab19d933 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -103,9 +103,6 @@
 /* EEPROM */
 #define IWLAGN_EEPROM_IMG_SIZE		2048
 
-#define IWLAGN_CMD_FIFO_NUM		7
 #define IWLAGN_NUM_QUEUES		20
-#define IWLAGN_NUM_AMPDU_QUEUES		9
-#define IWLAGN_FIRST_AMPDU_QUEUE	11
 
 #endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 453d8808f716..07563a68d32a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -40,6 +40,17 @@
 #include "iwl-agn.h"
 #include "iwl-trans.h"
 
+static const u8 tid_to_ac[] = {
+	IEEE80211_AC_BE,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VO,
+	IEEE80211_AC_VO,
+};
+
 static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
 				     struct ieee80211_tx_info *info,
 				     __le16 fc, __le32 *tx_flags)
@@ -293,6 +304,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	u16 len, seq_number = 0;
 	u8 sta_id, tid = IWL_MAX_TID_COUNT;
 	bool is_agg = false;
+	int txq_id;
 
 	if (info->control.vif)
 		ctx = iwl_rxon_ctx_from_vif(info->control.vif);
@@ -435,7 +447,27 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	/* Copy MAC header from skb into command buffer */
 	memcpy(tx_cmd->hdr, hdr, hdr_len);
 
-	if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id, tid))
+	if (is_agg)
+		txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
+	else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+		/*
+		 * Send this frame after DTIM -- there's a special queue
+		 * reserved for this for contexts that support AP mode.
+		 */
+		txq_id = ctx->mcast_queue;
+
+		/*
+		 * The microcode will clear the more data
+		 * bit in the last frame it transmits.
+		 */
+		hdr->frame_control |=
+			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+		txq_id = IWL_AUX_QUEUE;
+	else
+		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+
+	if (iwl_trans_tx(trans(priv), skb, dev_cmd, txq_id))
 		goto drop_unlock_sta;
 
 	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
@@ -464,11 +496,32 @@ drop_unlock_priv:
 	return -1;
 }
 
+static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int ac)
+{
+	int q;
+
+	for (q = IWLAGN_FIRST_AMPDU_QUEUE;
+	     q < cfg(priv)->base_params->num_of_queues; q++) {
+		if (!test_and_set_bit(q, priv->agg_q_alloc)) {
+			priv->queue_to_ac[q] = ac;
+			return q;
+		}
+	}
+
+	return -ENOSPC;
+}
+
+static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
+{
+	clear_bit(q, priv->agg_q_alloc);
+	priv->queue_to_ac[q] = IWL_INVALID_AC;
+}
+
 int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta, u16 tid)
 {
 	struct iwl_tid_data *tid_data;
-	int sta_id;
+	int sta_id, txq_id;
 
 	sta_id = iwl_sta_id(sta);
 
@@ -480,6 +533,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	spin_lock_bh(&priv->sta_lock);
 
 	tid_data = &priv->tid_data[sta_id][tid];
+	txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
 
 	switch (priv->tid_data[sta_id][tid].agg.state) {
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -504,9 +558,13 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
 
 	/* There are still packets for this RA / TID in the HW */
-	if (tid_data->agg.ssn != tid_data->next_reclaimed) {
+	if (!test_bit(txq_id, priv->agg_q_alloc)) {
+		IWL_DEBUG_TX_QUEUES(priv,
+			"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
+			sta_id, tid, txq_id);
+	} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
 		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
-				    "next_recl = %d",
+				    "next_recl = %d\n",
 				    tid_data->agg.ssn,
 				    tid_data->next_reclaimed);
 		priv->tid_data[sta_id][tid].agg.state =
@@ -522,7 +580,10 @@ turn_off:
 
 	spin_unlock_bh(&priv->sta_lock);
 
-	iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
+	if (test_bit(txq_id, priv->agg_q_alloc)) {
+		iwl_trans_tx_agg_disable(trans(priv), txq_id);
+		iwlagn_dealloc_agg_txq(priv, txq_id);
+	}
 
 	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 
@@ -533,8 +594,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta, u16 tid, u16 *ssn)
 {
 	struct iwl_tid_data *tid_data;
-	int sta_id;
-	int ret;
+	int sta_id, txq_id, ret;
 
 	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
 		     sta->addr, tid);
@@ -552,23 +612,25 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 		return -ENXIO;
 	}
 
+	txq_id = iwlagn_alloc_agg_txq(priv, tid_to_ac[tid]);
+	if (txq_id < 0) {
+		IWL_DEBUG_TX_QUEUES(priv,
+			"No free aggregation queue for %pM/%d\n",
+			sta->addr, tid);
+		return txq_id;
+	}
+
 	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
 	if (ret)
 		return ret;
 
 	spin_lock_bh(&priv->sta_lock);
-
 	tid_data = &priv->tid_data[sta_id][tid];
 	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
+	tid_data->agg.txq_id = txq_id;
 
 	*ssn = tid_data->agg.ssn;
 
-	ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid);
-	if (ret) {
-		spin_unlock_bh(&priv->sta_lock);
-		return ret;
-	}
-
 	if (*ssn == tid_data->next_reclaimed) {
 		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
 				    tid_data->agg.ssn);
@@ -581,7 +643,6 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 			    tid_data->next_reclaimed);
 		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
 	}
-
 	spin_unlock_bh(&priv->sta_lock);
 
 	return ret;
@@ -592,15 +653,20 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
 {
 	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
 	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+	int q, fifo;
 	u16 ssn;
 
 	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
 
 	spin_lock_bh(&priv->sta_lock);
 	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
+	q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
 	spin_unlock_bh(&priv->sta_lock);
 
-	iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid,
+	fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
+
+	iwl_trans_tx_agg_setup(trans(priv), q, fifo,
+			       sta_priv->sta_id, tid,
 			       buf_size, ssn);
 
 	/*
@@ -666,7 +732,9 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
 		IWL_DEBUG_TX_QUEUES(priv,
 			"Can continue DELBA flow ssn = next_recl ="
 			" %d", tid_data->next_reclaimed);
-		iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
+		iwl_trans_tx_agg_disable(trans(priv),
+					 tid_data->agg.txq_id);
+		iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
 		tid_data->agg.state = IWL_AGG_OFF;
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
 	}
@@ -1005,6 +1073,29 @@ static void iwl_check_abort_status(struct iwl_priv *priv,
 	}
 }
 
+static int iwl_reclaim(struct iwl_priv *priv, int sta_id, int tid,
+		       int txq_id, int ssn, struct sk_buff_head *skbs)
+{
+	if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
+		     tid != IWL_TID_NON_QOS &&
+		     txq_id != priv->tid_data[sta_id][tid].agg.txq_id)) {
+		/*
+		 * FIXME: this is a uCode bug which needs to be addressed;
+		 * log the information and return for now.
+		 * Since it can possibly happen very often, and in order
+		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN.
+		 */
+		IWL_DEBUG_TX_QUEUES(priv,
+			"Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
+			txq_id, sta_id, tid,
+			priv->tid_data[sta_id][tid].agg.txq_id);
+		return 1;
+	}
+
+	iwl_trans_reclaim(trans(priv), txq_id, ssn, skbs);
+	return 0;
+}
+
 int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 		       struct iwl_device_cmd *cmd)
 {
@@ -1064,8 +1155,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 	}
 
 	/*we can free until ssn % q.n_bd not inclusive */
-	WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid,
-				  txq_id, ssn, &skbs));
+	WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs));
 	iwlagn_check_ratid_empty(priv, sta_id, tid);
 	freed = 0;
 
@@ -1183,8 +1273,8 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
 	/* Release all TFDs before the SSN, i.e. all TFDs in front of
 	 * block-ack window (we assume that they've been successfully
 	 * transmitted ... if not, it's too late anyway). */
-	if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow,
+	if (iwl_reclaim(priv, sta_id, tid, scd_flow,
 			ba_resp_scd_ssn, &reclaimed_skbs)) {
 		spin_unlock(&priv->sta_lock);
 		return 0;
 	}
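
Taken together, the hunks above give each aggregation queue a simple
lifecycle owned by the op mode; the transport only programs the scheduler
for the queue number it is handed. A condensed sketch (example_agg_session
is a hypothetical wrapper; the real flow is split across
iwlagn_tx_agg_start(), iwlagn_tx_agg_oper() and iwlagn_tx_agg_stop()):

	static int example_agg_session(struct iwl_priv *priv,
				       struct iwl_rxon_context *ctx,
				       int sta_id, u16 tid, u16 ssn,
				       int buf_size)
	{
		/* AGG start: pick a free hw queue for the session */
		int txq_id = iwlagn_alloc_agg_txq(priv, tid_to_ac[tid]);

		if (txq_id < 0)
			return txq_id;	/* -ENOSPC: all agg queues in use */
		priv->tid_data[sta_id][tid].agg.txq_id = txq_id;

		/* AGG oper: tell the transport which queue/FIFO to program */
		iwl_trans_tx_agg_setup(trans(priv), txq_id,
				       ctx->ac_to_fifo[tid_to_ac[tid]],
				       sta_id, tid, buf_size, ssn);

		/* AGG stop: disable in the transport, recycle the queue */
		iwl_trans_tx_agg_disable(trans(priv), txq_id);
		iwlagn_dealloc_agg_txq(priv, txq_id);
		return 0;
	}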
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index e7da3a50d82c..c9079af61b1a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -488,6 +488,93 @@ static void iwl_bg_tx_flush(struct work_struct *work)
 	iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
 }
 
+/*
+ * queue/FIFO/AC mapping definitions
+ */
+
+#define IWL_TX_FIFO_BK		0	/* shared */
+#define IWL_TX_FIFO_BE		1
+#define IWL_TX_FIFO_VI		2	/* shared */
+#define IWL_TX_FIFO_VO		3
+#define IWL_TX_FIFO_BK_IPAN	IWL_TX_FIFO_BK
+#define IWL_TX_FIFO_BE_IPAN	4
+#define IWL_TX_FIFO_VI_IPAN	IWL_TX_FIFO_VI
+#define IWL_TX_FIFO_VO_IPAN	5
+/* re-uses the VO FIFO, uCode will properly flush/schedule */
+#define IWL_TX_FIFO_AUX		5
+#define IWL_TX_FIFO_UNUSED	-1
+
+#define IWLAGN_CMD_FIFO_NUM	7
+
+/*
+ * This queue number is required for proper operation
+ * because the ucode will stop/start the scheduler as
+ * required.
+ */
+#define IWL_IPAN_MCAST_QUEUE	8
+
+static const u8 iwlagn_default_queue_to_tx_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+	IWLAGN_CMD_FIFO_NUM,
+};
+
+static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+	IWL_TX_FIFO_BK_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWL_TX_FIFO_VI_IPAN,
+	IWL_TX_FIFO_VO_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWLAGN_CMD_FIFO_NUM,
+	IWL_TX_FIFO_AUX,
+};
+
+static const u8 iwlagn_bss_ac_to_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+};
+
+static const u8 iwlagn_bss_ac_to_queue[] = {
+	0, 1, 2, 3,
+};
+
+static const u8 iwlagn_pan_ac_to_fifo[] = {
+	IWL_TX_FIFO_VO_IPAN,
+	IWL_TX_FIFO_VI_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWL_TX_FIFO_BK_IPAN,
+};
+
+static const u8 iwlagn_pan_ac_to_queue[] = {
+	7, 6, 5, 4,
+};
+
+static const u8 iwlagn_bss_queue_to_ac[] = {
+	IEEE80211_AC_VO,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_BK,
+};
+
+static const u8 iwlagn_pan_queue_to_ac[] = {
+	IEEE80211_AC_VO,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VO,
+};
+
 static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
 {
 	int i;
@@ -520,6 +607,10 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
 	priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
 	priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
 	priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
+	memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue,
+	       iwlagn_bss_ac_to_queue, sizeof(iwlagn_bss_ac_to_queue));
+	memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo,
+	       iwlagn_bss_ac_to_fifo, sizeof(iwlagn_bss_ac_to_fifo));
 
 	priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
 	priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
@@ -542,6 +633,11 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
 	priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
 	priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
 	priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
+	memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue,
+	       iwlagn_pan_ac_to_queue, sizeof(iwlagn_pan_ac_to_queue));
+	memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo,
+	       iwlagn_pan_ac_to_fifo, sizeof(iwlagn_pan_ac_to_fifo));
+	priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
 
 	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
 }
@@ -869,6 +965,7 @@ void iwlagn_prepare_restart(struct iwl_priv *priv)
 	u8 bt_load;
 	u8 bt_status;
 	bool bt_is_sco;
+	int i;
 
 	lockdep_assert_held(&priv->mutex);
 
@@ -898,6 +995,15 @@ void iwlagn_prepare_restart(struct iwl_priv *priv)
 	priv->bt_traffic_load = bt_load;
 	priv->bt_status = bt_status;
 	priv->bt_is_sco = bt_is_sco;
+
+	/* reset all queues */
+	for (i = 0; i < IEEE80211_NUM_ACS; i++)
+		atomic_set(&priv->ac_stop_count[i], 0);
+
+	for (i = IWLAGN_FIRST_AMPDU_QUEUE; i < IWL_MAX_HW_QUEUES; i++)
+		priv->queue_to_ac[i] = IWL_INVALID_AC;
+
+	memset(priv->agg_q_alloc, 0, sizeof(priv->agg_q_alloc));
 }
 
 static void iwl_bg_restart(struct work_struct *data)
@@ -1130,8 +1236,6 @@ static void iwl_set_hw_params(struct iwl_priv *priv)
 	if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
 		hw_params(priv).sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
 
-	hw_params(priv).num_ampdu_queues =
-		cfg(priv)->base_params->num_of_ampdu_queues;
 	hw_params(priv).wd_timeout = cfg(priv)->base_params->wd_timeout;
 
 	/* Device-specific setup */
@@ -1192,6 +1296,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 		STATISTICS_NOTIFICATION,
 		REPLY_TX,
 	};
+	const u8 *q_to_ac;
+	int n_q_to_ac;
+	int i;
 
 	/************************
 	 * 1. Allocating HW data
@@ -1228,9 +1335,19 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 	if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
 		priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
 		trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
+		trans_cfg.queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
+		trans_cfg.n_queue_to_fifo =
+			ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
+		q_to_ac = iwlagn_pan_queue_to_ac;
+		n_q_to_ac = ARRAY_SIZE(iwlagn_pan_queue_to_ac);
 	} else {
 		priv->sta_key_max_num = STA_KEY_MAX_NUM;
 		trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+		trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
+		trans_cfg.n_queue_to_fifo =
+			ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
+		q_to_ac = iwlagn_bss_queue_to_ac;
+		n_q_to_ac = ARRAY_SIZE(iwlagn_bss_queue_to_ac);
 	}
 
 	/* Configure transport layer */
@@ -1319,6 +1436,11 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 		ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
 		priv->sta_key_max_num = STA_KEY_MAX_NUM;
 		trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+		trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
+		trans_cfg.n_queue_to_fifo =
+			ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
+		q_to_ac = iwlagn_bss_queue_to_ac;
+		n_q_to_ac = ARRAY_SIZE(iwlagn_bss_queue_to_ac);
 
 		/* Configure transport layer again*/
 		iwl_trans_configure(trans(priv), &trans_cfg);
@@ -1327,6 +1449,18 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 	/*******************
 	 * 5. Setup priv
 	 *******************/
+	for (i = 0; i < IEEE80211_NUM_ACS; i++)
+		atomic_set(&priv->ac_stop_count[i], 0);
+
+	for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
+		if (i < n_q_to_ac)
+			priv->queue_to_ac[i] = q_to_ac[i];
+		else
+			priv->queue_to_ac[i] = IWL_INVALID_AC;
+	}
+
+	WARN_ON(trans_cfg.queue_to_fifo[trans_cfg.cmd_queue] !=
+		IWLAGN_CMD_FIFO_NUM);
 
 	if (iwl_init_drv(priv))
 		goto out_free_eeprom;
@@ -1439,17 +1573,39 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
 	cfg(priv)->lib->nic_config(priv);
 }
 
-static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, u8 ac)
+static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
 {
 	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+	int ac = priv->queue_to_ac[queue];
+
+	if (WARN_ON_ONCE(ac == IWL_INVALID_AC))
+		return;
+
+	if (atomic_inc_return(&priv->ac_stop_count[ac]) > 1) {
+		IWL_DEBUG_TX_QUEUES(priv,
+			"queue %d (AC %d) already stopped\n",
+			queue, ac);
+		return;
+	}
 
 	set_bit(ac, &priv->transport_queue_stop);
 	ieee80211_stop_queue(priv->hw, ac);
 }
 
-static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, u8 ac)
+static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
 {
 	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+	int ac = priv->queue_to_ac[queue];
+
+	if (WARN_ON_ONCE(ac == IWL_INVALID_AC))
+		return;
+
+	if (atomic_dec_return(&priv->ac_stop_count[ac]) > 0) {
+		IWL_DEBUG_TX_QUEUES(priv,
+			"queue %d (AC %d) already awake\n",
+			queue, ac);
+		return;
+	}
 
 	clear_bit(ac, &priv->transport_queue_stop);
 
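
Several hardware queues can feed one mac80211 AC queue -- the AC's own
queue plus any aggregation queues mapped to that AC -- hence the per-AC
stop refcount above. A worked example of the intended behaviour, with the
aggregation queue number (12) chosen purely for illustration:

	/* hw queue 2 is the BSS BE queue; say hw queue 12 is a BE agg queue */
	iwl_stop_sw_queue(op_mode, 2);	/* count 0 -> 1: stop BE in mac80211 */
	iwl_stop_sw_queue(op_mode, 12);	/* count 1 -> 2: already stopped */
	iwl_wake_sw_queue(op_mode, 12);	/* count 2 -> 1: BE stays stopped */
	iwl_wake_sw_queue(op_mode, 2);	/* count 1 -> 0: wake BE again */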
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 2dfa5642366a..436611c32ff1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -65,6 +65,13 @@
 
 #include "iwl-dev.h"
 
+/* The first 11 queues (0-10) are used otherwise */
+#define IWLAGN_FIRST_AMPDU_QUEUE	11
+
+/* AUX (TX during scan dwell) queue */
+#define IWL_AUX_QUEUE		10
+
+
 struct iwl_ucode_capabilities;
 
 extern struct ieee80211_ops iwlagn_hw_ops;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index b7b1c04f2fba..bc0bed88c8ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -375,14 +375,19 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
 				i, station->sta.sta.addr,
 				station->sta.station_flags_msk);
 		pos += scnprintf(buf + pos, bufsz - pos,
-				"TID\tseq_num\trate_n_flags\n");
+				"TID seqno next_rclmd "
+				"rate_n_flags state txq\n");
 
 		for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
 			tid_data = &priv->tid_data[i][j];
 			pos += scnprintf(buf + pos, bufsz - pos,
-				"%d:\t%#x\t%#x",
+				"%d: 0x%.4x 0x%.4x 0x%.8x "
+				"%d %.2d",
 				j, tid_data->seq_number,
-				tid_data->agg.rate_n_flags);
+				tid_data->next_reclaimed,
+				tid_data->agg.rate_n_flags,
+				tid_data->agg.state,
+				tid_data->agg.txq_id);
 
 			if (tid_data->agg.wait_for_ba)
 				pos += scnprintf(buf + pos, bufsz - pos,
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 16956b777f96..297508df36bd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -220,8 +220,7 @@ enum iwl_agg_state {
  * Tx response (REPLY_TX), and the block ack notification
  * (REPLY_COMPRESSED_BA).
  * @state: state of the BA agreement establishment / tear down.
- * @txq_id: Tx queue used by the BA session - used by the transport layer.
- *	Needed by the upper layer for debugfs only.
+ * @txq_id: Tx queue used by the BA session
  * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
  *	the first packet to be sent in legacy HW queue in Tx AGG stop flow.
  *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
@@ -623,6 +622,10 @@ struct iwl_force_reset {
 struct iwl_rxon_context {
 	struct ieee80211_vif *vif;
 
+	u8 mcast_queue;
+	u8 ac_to_queue[IEEE80211_NUM_ACS];
+	u8 ac_to_fifo[IEEE80211_NUM_ACS];
+
 	/*
 	 * We could use the vif to indicate active, but we
 	 * also need it to be active during disabling when
@@ -720,6 +723,11 @@ struct iwl_priv {
 
 	unsigned long transport_queue_stop;
 	bool passive_no_rx;
+#define IWL_INVALID_AC	0xff
+	u8 queue_to_ac[IWL_MAX_HW_QUEUES];
+	atomic_t ac_stop_count[IEEE80211_NUM_ACS];
+
+	unsigned long agg_q_alloc[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
 
 	/* ieee device used by generic ieee processing code */
 	struct ieee80211_hw *hw;
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index d2be4b60488d..1bd021a24a8a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -654,6 +654,8 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
 		ret = iwl_sta_rx_agg_stop(priv, sta, tid);
 		break;
 	case IEEE80211_AMPDU_TX_START:
+		if (!trans(priv)->ops->tx_agg_setup)
+			break;
 		if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
 			break;
 		IWL_DEBUG_HT(priv, "start Tx\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 6ea4163ff56a..b1fd251e88d5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -111,10 +111,10 @@ struct iwl_fw;
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
  *	HCMD this Rx responds to.
  *	Must be atomic.
- * @queue_full: notifies that a HW queue is full. Ac is the ac of the queue
+ * @queue_full: notifies that a HW queue is full.
  *	Must be atomic
  * @queue_not_full: notifies that a HW queue is not full any more.
- *	Ac is the ac of the queue. Must be atomic
+ *	Must be atomic
  * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
  *	the radio is killed. Must be atomic.
  * @free_skb: allows the transport layer to free skbs that haven't been
@@ -132,8 +132,8 @@ struct iwl_op_mode_ops {
 	void (*stop)(struct iwl_op_mode *op_mode);
 	int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
 		  struct iwl_device_cmd *cmd);
-	void (*queue_full)(struct iwl_op_mode *op_mode, u8 ac);
-	void (*queue_not_full)(struct iwl_op_mode *op_mode, u8 ac);
+	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
+	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
 	void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
 	void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
 	void (*nic_error)(struct iwl_op_mode *op_mode);
@@ -169,15 +169,16 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
 	return op_mode->ops->rx(op_mode, rxb, cmd);
 }
 
-static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, u8 ac)
+static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
+					  int queue)
 {
-	op_mode->ops->queue_full(op_mode, ac);
+	op_mode->ops->queue_full(op_mode, queue);
 }
 
 static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
-					      u8 ac)
+					      int queue)
 {
-	op_mode->ops->queue_not_full(op_mode, ac);
+	op_mode->ops->queue_not_full(op_mode, queue);
 }
 
 static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
index f53feb8b5e85..e4f619c6ec9f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -160,7 +160,6 @@ struct iwl_mod_params {
  *
  * Holds the module parameters
  *
- * @num_ampdu_queues: num of ampdu queues
  * @tx_chains_num: Number of TX chains
  * @rx_chains_num: Number of RX chains
  * @valid_tx_ant: usable antennas for TX
@@ -176,7 +175,6 @@ struct iwl_mod_params {
  * @use_rts_for_aggregation: use rts/cts protection for HT traffic
  */
 struct iwl_hw_params {
-	u8 num_ampdu_queues;
 	u8 tx_chains_num;
 	u8 rx_chains_num;
 	u8 valid_tx_ant;
@@ -230,7 +228,6 @@ enum iwl_led_mode {
 struct iwl_base_params {
 	int eeprom_size;
 	int num_of_queues;	/* def: HW dependent */
-	int num_of_ampdu_queues;/* def: HW dependent */
 	/* for iwl_apm_init() */
 	u32 pll_cfg_val;
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 1c2fe87bd7e2..5325ff7cf5ae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -136,13 +136,6 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
 	return --index & (n_bd - 1);
 }
 
-/*
- * This queue number is required for proper operation
- * because the ucode will stop/start the scheduler as
- * required.
- */
-#define IWL_IPAN_MCAST_QUEUE		8
-
 struct iwl_cmd_meta {
 	/* only for SYNC commands, iff the reply skb is wanted */
 	struct iwl_host_cmd *source;
@@ -199,9 +192,6 @@ struct iwl_queue {
  * lock: queue lock
  * @time_stamp: time (in jiffies) of last read_ptr change
  * @need_update: indicates need to update read/write index
- * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
- * @sta_id: valid if sched_retry is set
- * @tid: valid if sched_retry is set
  *
  * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
  * descriptors) and required locking structures.
@@ -218,12 +208,7 @@ struct iwl_tx_queue {
 	spinlock_t lock;
 	unsigned long time_stamp;
 	u8 need_update;
-	u8 sched_retry;
 	u8 active;
-	u8 swq_id;
-
-	u16 sta_id;
-	u16 tid;
 };
 
 /**
@@ -236,13 +221,6 @@ struct iwl_tx_queue {
  * @scd_base_addr: scheduler sram base address in SRAM
  * @scd_bc_tbls: pointer to the byte count table of the scheduler
  * @kw: keep warm address
- * @ac_to_fifo: to what fifo is a specifc AC mapped ?
- * @ac_to_queue: to what tx queue is a specifc AC mapped ?
- * @mcast_queue:
- * @txq: Tx DMA processing queues
- * @txq_ctx_active_msk: what queue is active
- * queue_stopped: tracks what queue is stopped
- * queue_stop_count: tracks what SW queue is stopped
  * @pci_dev: basic pci-network driver stuff
  * @hw_base: pci hardware address support
  * @ucode_write_complete: indicates that the ucode has been copied.
@@ -272,16 +250,9 @@ struct iwl_trans_pcie {
 	struct iwl_dma_ptr scd_bc_tbls;
 	struct iwl_dma_ptr kw;
 
-	const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
-	const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
-	u8 mcast_queue[NUM_IWL_RXON_CTX];
-	u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
-
 	struct iwl_tx_queue *txq;
-	unsigned long txq_ctx_active_msk;
-#define IWL_MAX_HW_QUEUES	32
+	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
 	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
-	atomic_t queue_stop_count[4];
 
 	/* PCI bus related data */
 	struct pci_dev *pci_dev;
@@ -293,6 +264,8 @@ struct iwl_trans_pcie {
 	u8 cmd_queue;
 	u8 n_no_reclaim_cmds;
 	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
+	u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
+	u8 n_q_to_fifo;
 };
 
 #define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
@@ -331,15 +304,12 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans,
 void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 				       struct iwl_tx_queue *txq,
 				       u16 byte_cnt);
-int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
-				  int sta_id, int tid);
+void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int queue);
 void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
 void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
 				   struct iwl_tx_queue *txq,
-				   int tx_fifo_id, int scd_retry);
-int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid);
-void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
-				 enum iwl_rxon_context_id ctx,
+				   int tx_fifo_id, bool active);
+void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
 				 int sta_id, int tid, int frame_limit, u16 ssn);
 void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 			 int index, enum dma_data_direction dma_dir);
@@ -388,91 +358,28 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
 	iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
 }
 
-/*
- * we have 8 bits used like this:
- *
- * 7 6 5 4 3 2 1 0
- * | | | | | | | |
- * | | | | | | +-+-------- AC queue (0-3)
- * | | | | | |
- * | +-+-+-+-+------------ HW queue ID
- * |
- * +---------------------- unused
- */
-static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
-{
-	BUG_ON(ac > 3);   /* only have 2 bits */
-	BUG_ON(hwq > 31); /* only use 5 bits */
-
-	txq->swq_id = (hwq << 2) | ac;
-}
-
-static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
-{
-	return txq->swq_id & 0x3;
-}
-
 static inline void iwl_wake_queue(struct iwl_trans *trans,
 				  struct iwl_tx_queue *txq)
 {
-	u8 queue = txq->swq_id;
-	u8 ac = queue & 3;
-	u8 hwq = (queue >> 2) & 0x1f;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
-		if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
-			iwl_op_mode_queue_not_full(trans->op_mode, ac);
-			IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d",
-					    hwq, ac);
-		} else {
-			IWL_DEBUG_TX_QUEUES(trans,
-				"Don't wake hwq %d ac %d stop count %d",
-				hwq, ac,
-				atomic_read(&trans_pcie->queue_stop_count[ac]));
-		}
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
+		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
+		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
 	}
 }
 
 static inline void iwl_stop_queue(struct iwl_trans *trans,
 				  struct iwl_tx_queue *txq)
 {
-	u8 queue = txq->swq_id;
-	u8 ac = queue & 3;
-	u8 hwq = (queue >> 2) & 0x1f;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
-		if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
-			iwl_op_mode_queue_full(trans->op_mode, ac);
-			IWL_DEBUG_TX_QUEUES(trans,
-				"Stop hwq %d ac %d stop count %d",
-				hwq, ac,
-				atomic_read(&trans_pcie->queue_stop_count[ac]));
-		} else {
-			IWL_DEBUG_TX_QUEUES(trans,
-				"Don't stop hwq %d ac %d stop count %d",
-				hwq, ac,
-				atomic_read(&trans_pcie->queue_stop_count[ac]));
-		}
-	} else {
-		IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped",
-				    hwq);
-	}
-}
-
-static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
-					int txq_id)
-{
-	set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
-}
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie,
-					  int txq_id)
-{
-	clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
+	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
+		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
+		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
+	} else
+		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
+				    txq->q.id);
 }
 
 static inline int iwl_queue_used(const struct iwl_queue *q, int i)
@@ -487,19 +394,4 @@ static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
 	return index & (q->n_window - 1);
 }
 
-#define IWL_TX_FIFO_BK		0	/* shared */
-#define IWL_TX_FIFO_BE		1
-#define IWL_TX_FIFO_VI		2	/* shared */
-#define IWL_TX_FIFO_VO		3
-#define IWL_TX_FIFO_BK_IPAN	IWL_TX_FIFO_BK
-#define IWL_TX_FIFO_BE_IPAN	4
-#define IWL_TX_FIFO_VI_IPAN	IWL_TX_FIFO_VI
-#define IWL_TX_FIFO_VO_IPAN	5
-/* re-uses the VO FIFO, uCode will properly flush/schedule */
-#define IWL_TX_FIFO_AUX		5
-#define IWL_TX_FIFO_UNUSED	-1
-
-/* AUX (TX during scan dwell) queue */
-#define IWL_AUX_QUEUE		10
-
 #endif /* __iwl_trans_int_pcie_h__ */
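
The hunks above also retire the 8-bit swq_id packing; the transport now
passes the raw hardware queue id through and the op mode maps it back to
an AC. For reference, a worked example of the encoding that the removed
bit diagram described:

	/* old scheme: hw queue 12 carrying AC 2 (BE) packed into one byte */
	u8 swq_id = (12 << 2) | 2;	/* == 50 */
	u8 ac  = swq_id & 0x3;		/* low 2 bits  -> 2  */
	u8 hwq = (swq_id >> 2) & 0x1f;	/* next 5 bits -> 12 */
	/* new scheme: just use txq->q.id directly */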
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index a1c4550334b7..105c093bae3f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -41,43 +41,6 @@
 #define IWL_TX_CRC_SIZE 4
 #define IWL_TX_DELIMITER_SIZE 4
 
-/*
- * mac80211 queues, ACs, hardware queues, FIFOs.
- *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
- *
- * Mac80211 uses the following numbers, which we get as from it
- * by way of skb_get_queue_mapping(skb):
- *
- *	VO	0
- *	VI	1
- *	BE	2
- *	BK	3
- *
- *
- * Regular (not A-MPDU) frames are put into hardware queues corresponding
- * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
- * own queue per aggregation session (RA/TID combination), such queues are
- * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
- * order to map frames to the right queue, we also need an AC->hw queue
- * mapping. This is implemented here.
- *
- * Due to the way hw queues are set up (by the hw specific code), the AC->hw
- * queue mapping is the identity mapping.
- */
-
-static const u8 tid_to_ac[] = {
-	IEEE80211_AC_BE,
-	IEEE80211_AC_BK,
-	IEEE80211_AC_BK,
-	IEEE80211_AC_BE,
-	IEEE80211_AC_VI,
-	IEEE80211_AC_VI,
-	IEEE80211_AC_VO,
-	IEEE80211_AC_VO
-};
-
-
 /**
  * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
  */
@@ -455,13 +418,10 @@ void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
 }
 
 void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
 				   struct iwl_tx_queue *txq,
-				   int tx_fifo_id, int scd_retry)
+				   int tx_fifo_id, bool active)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int txq_id = txq->q.id;
-	int active =
-		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;
 
 	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
 		       (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
@@ -469,77 +429,22 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
 		    (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
 		    SCD_QUEUE_STTS_REG_MSK);
 
-	txq->sched_retry = scd_retry;
-
 	if (active)
-		IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n",
-				    scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
+		IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
+				    txq_id, tx_fifo_id);
 	else
-		IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n",
-				    scd_retry ? "BA" : "AC/CMD", txq_id);
+		IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
 }
 
-static inline int get_ac_from_tid(u16 tid)
+void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo,
+				 int sta_id, int tid, int frame_limit, u16 ssn)
 {
-	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
-		return tid_to_ac[tid];
-
-	/* no support for TIDs 8-15 yet */
-	return -EINVAL;
-}
-
-static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
-				    u8 ctx, u16 tid)
-{
-	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
-	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
-		return ac_to_fifo[tid_to_ac[tid]];
-
-	/* no support for TIDs 8-15 yet */
-	return -EINVAL;
-}
-
-static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
-{
-	if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
-		return false;
-	return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
-			 hw_params(trans).num_ampdu_queues);
-}
-
-void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
-				 enum iwl_rxon_context_id ctx, int sta_id,
-				 int tid, int frame_limit, u16 ssn)
-{
-	int tx_fifo, txq_id;
-	u16 ra_tid;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	unsigned long flags;
+	u16 ra_tid = BUILD_RAxTID(sta_id, tid);
 
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	if (WARN_ON(sta_id == IWL_INVALID_STATION))
-		return;
-	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
-		return;
-
-	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
-	if (WARN_ON(tx_fifo < 0)) {
-		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
-		return;
-	}
-
-	txq_id = trans_pcie->agg_txq[sta_id][tid];
-	if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
-		IWL_ERR(trans,
-			"queue number out of range: %d, must be %d to %d\n",
-			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
-			IWLAGN_FIRST_AMPDU_QUEUE +
-			hw_params(trans).num_ampdu_queues - 1);
-		return;
-	}
-
-	ra_tid = BUILD_RAxTID(sta_id, tid);
+	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
+		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
 
 	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
 
@@ -550,10 +455,10 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
 	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
 
 	/* Set this queue as a chain-building queue */
-	iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id));
+	iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
 
 	/* enable aggregations for the queue */
-	iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id));
+	iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
 
 	/* Place first TFD at index corresponding to start sequence number.
 	 * Assumes that ssn_idx is valid (!= 0xFFF) */
@@ -563,92 +468,42 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
563 468
564 /* Set up Tx window size and frame limit for this queue */ 469 /* Set up Tx window size and frame limit for this queue */
565 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + 470 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
566 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + 471 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
567 sizeof(u32), 472 ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
568 ((frame_limit << 473 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
569 SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & 474 ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
570 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | 475 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
571 ((frame_limit <<
572 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
573 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
574 476
575 iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id)); 477 iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
576 478
577 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ 479 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
578 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 480 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
579 tx_fifo, 1); 481 fifo, true);
580
581 trans_pcie->txq[txq_id].sta_id = sta_id;
582 trans_pcie->txq[txq_id].tid = tid;
583 482
584 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 483 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
585} 484}
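Note how the reworked setup function derives nothing itself anymore: txq_id, fifo, sta_id and tid all arrive from the op mode, and the per-queue sta_id/tid bookkeeping is dropped along with the agg_txq[] lookup. The arithmetic it keeps, packing the RA/TID pair and the scheduler's window/frame-limit word, is shown below as a standalone model; the constants are copied from the driver's iwl-prph.h and iwl-trans-pcie-int.h definitions as best understood, so treat the exact values as illustrative:

/*
 * Hedged sketch, user-space model: the two bit-packing steps the new
 * tx_agg_setup() performs before writing scheduler SRAM.
 */
#include <stdint.h>
#include <stdio.h>

#define SCD_QUEUE_CTX_REG2_WIN_SIZE_POS		(0)
#define SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK		(0x0000007F)
#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS	(16)
#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK	(0x007F0000)
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))

static uint32_t scd_reg2(int frame_limit)
{
	/* same expression the new tx_agg_setup() writes to SRAM */
	return ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	       ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
}

int main(void)
{
	printf("ra_tid=%#x reg2=%#010x\n",
	       BUILD_RAxTID(3, 5), scd_reg2(64));
	return 0;
}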
586 485
587/* 486void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int txq_id)
588 * Find first available (lowest unused) Tx Queue, mark it "active".
589 * Called only when finding queue for aggregation.
590 * Should never return anything < 7, because they should already
591 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
592 */
593static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
594{ 487{
595 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 488 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
596 int txq_id;
597
598 for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
599 txq_id++)
600 if (!test_and_set_bit(txq_id,
601 &trans_pcie->txq_ctx_active_msk))
602 return txq_id;
603 return -1;
604}
605 489
606int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, 490 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
607 int sta_id, int tid) 491 WARN_ONCE(1, "queue %d not used", txq_id);
608{ 492 return;
609 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
610 int txq_id;
611
612 txq_id = iwlagn_txq_ctx_activate_free(trans);
613 if (txq_id == -1) {
614 IWL_ERR(trans, "No free aggregation queue available\n");
615 return -ENXIO;
616 }
617
618 trans_pcie->agg_txq[sta_id][tid] = txq_id;
619 iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
620
621 return 0;
622}
623
624int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
625{
626 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
627 u8 txq_id = trans_pcie->agg_txq[sta_id][tid];
628
629 if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
630 IWL_ERR(trans,
631 "queue number out of range: %d, must be %d to %d\n",
632 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
633 IWLAGN_FIRST_AMPDU_QUEUE +
634 hw_params(trans).num_ampdu_queues - 1);
635 return -EINVAL;
636 } 493 }
637 494
638 iwlagn_tx_queue_stop_scheduler(trans, txq_id); 495 iwlagn_tx_queue_stop_scheduler(trans, txq_id);
639 496
640 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id)); 497 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
641 498
642 trans_pcie->agg_txq[sta_id][tid] = 0;
643 trans_pcie->txq[txq_id].q.read_ptr = 0; 499 trans_pcie->txq[txq_id].q.read_ptr = 0;
644 trans_pcie->txq[txq_id].q.write_ptr = 0; 500 trans_pcie->txq[txq_id].q.write_ptr = 0;
645 /* supposes that ssn_idx is valid (!= 0xFFF) */
646 iwl_trans_set_wr_ptrs(trans, txq_id, 0); 501 iwl_trans_set_wr_ptrs(trans, txq_id, 0);
647 502
648 iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id)); 503 iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, BIT(txq_id));
649 iwl_txq_ctx_deactivate(trans_pcie, txq_id); 504
650 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0); 505 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
651 return 0; 506 0, false);
652} 507}
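Together with the removal of tx_agg_alloc (queue selection is now the op mode's business), the queue_used bitmap becomes the transport's whole notion of queue ownership: setup must find the bit clear, disable must find it set, and either mismatch is a caller bug flagged with WARN_ONCE. A runnable user-space model of that protocol (plain, non-atomic bit helpers standing in for the kernel's atomic ones):

#include <stdbool.h>
#include <stdio.h>

static unsigned long queue_used;	/* 32 queues fit in one word */

static bool test_and_set_bit(int nr, unsigned long *addr)
{
	bool old = *addr & (1UL << nr);
	*addr |= 1UL << nr;
	return old;
}

static bool test_and_clear_bit(int nr, unsigned long *addr)
{
	bool old = *addr & (1UL << nr);
	*addr &= ~(1UL << nr);
	return old;
}

static void agg_setup(int txq_id)
{
	if (test_and_set_bit(txq_id, &queue_used))
		fprintf(stderr, "queue %d already used - expect issues\n",
			txq_id);
}

static void agg_disable(int txq_id)
{
	if (!test_and_clear_bit(txq_id, &queue_used)) {
		fprintf(stderr, "queue %d not used\n", txq_id);
		return;		/* nothing to tear down */
	}
}

int main(void)
{
	agg_setup(10);
	agg_setup(10);		/* warns: double setup */
	agg_disable(10);
	agg_disable(10);	/* warns: double disable */
	return 0;
}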
653 508
654/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 509/*************** HOST COMMAND QUEUE FUNCTIONS *****/
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 98cd71fb385e..0a2337253532 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -369,21 +369,13 @@ error:
369} 369}
370 370
371static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq, 371static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
372 int slots_num, u32 txq_id) 372 int slots_num, u32 txq_id)
373{ 373{
374 int ret; 374 int ret;
375 375
376 txq->need_update = 0; 376 txq->need_update = 0;
377 memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num); 377 memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
378 378
379 /*
380 * For the default queues 0-3, set up the swq_id
381 * already -- all others need to get one later
382 * (if they need one at all).
383 */
384 if (txq_id < 4)
385 iwl_set_swq_id(txq, txq_id, txq_id);
386
387 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 379 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
388 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ 380 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
389 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 381 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
@@ -894,59 +886,6 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
894 return ret; 886 return ret;
895} 887}
896 888
897#define IWL_AC_UNSET -1
898
899struct queue_to_fifo_ac {
900 s8 fifo, ac;
901};
902
903static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
904 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
905 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
906 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
907 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
908 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
909 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
910 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
911 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
912 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
913 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
914 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
915};
916
917static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
918 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
919 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
920 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
921 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
922 { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
923 { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
924 { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
925 { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
926 { IWL_TX_FIFO_BE_IPAN, 2, },
927 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
928 { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
929};
930
931static const u8 iwlagn_bss_ac_to_fifo[] = {
932 IWL_TX_FIFO_VO,
933 IWL_TX_FIFO_VI,
934 IWL_TX_FIFO_BE,
935 IWL_TX_FIFO_BK,
936};
937static const u8 iwlagn_bss_ac_to_queue[] = {
938 0, 1, 2, 3,
939};
940static const u8 iwlagn_pan_ac_to_fifo[] = {
941 IWL_TX_FIFO_VO_IPAN,
942 IWL_TX_FIFO_VI_IPAN,
943 IWL_TX_FIFO_BE_IPAN,
944 IWL_TX_FIFO_BK_IPAN,
945};
946static const u8 iwlagn_pan_ac_to_queue[] = {
947 7, 6, 5, 4,
948};
949
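These static tables do not disappear, they migrate to the op mode (see the iwl-agn.c entry in the diffstat) and come back through the new iwl_trans_config fields. A self-contained sketch of how a default mapping might be declared and handed over, reusing the removed default table's layout; the FIFO values and the trimmed struct are illustrative stand-ins for the driver's real definitions:

#include <stdint.h>
#include <stdio.h>
typedef uint8_t u8;
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* FIFO numbers as used by the removed tables (values illustrative) */
enum iwl_tx_fifo { IWL_TX_FIFO_BK, IWL_TX_FIFO_BE, IWL_TX_FIFO_VI,
		   IWL_TX_FIFO_VO, IWLAGN_CMD_FIFO_NUM = 7 };

/* trimmed mirror of the iwl_trans_config fields this patch adds */
struct trans_config_model {
	const u8 *queue_to_fifo;
	u8 n_queue_to_fifo;
	u8 cmd_queue;
};

static const u8 default_queue_to_fifo[] = {
	IWL_TX_FIFO_VO,		/* queue 0 */
	IWL_TX_FIFO_VI,		/* queue 1 */
	IWL_TX_FIFO_BE,		/* queue 2 */
	IWL_TX_FIFO_BK,		/* queue 3 */
	IWLAGN_CMD_FIFO_NUM,	/* queue 4, the command queue */
};

int main(void)
{
	struct trans_config_model cfg = {
		.queue_to_fifo = default_queue_to_fifo,
		.n_queue_to_fifo = ARRAY_SIZE(default_queue_to_fifo),
		.cmd_queue = 4,	/* matches the mapping above */
	};
	printf("mapping %d queues, cmd on %d\n",
	       cfg.n_queue_to_fifo, cfg.cmd_queue);
	return 0;
}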
950/* 889/*
951 * ucode 890 * ucode
952 */ 891 */
@@ -1027,19 +966,8 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1027 const struct fw_img *fw) 966 const struct fw_img *fw)
1028{ 967{
1029 int ret; 968 int ret;
1030 struct iwl_trans_pcie *trans_pcie =
1031 IWL_TRANS_GET_PCIE_TRANS(trans);
1032 bool hw_rfkill; 969 bool hw_rfkill;
1033 970
1034 trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
1035 trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;
1036
1037 trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
1038 trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;
1039
1040 trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
1041 trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;
1042
1043 /* This may fail if AMT took ownership of the device */ 971 /* This may fail if AMT took ownership of the device */
1044 if (iwl_prepare_card_hw(trans)) { 972 if (iwl_prepare_card_hw(trans)) {
1045 IWL_WARN(trans, "Exit HW not ready\n"); 973 IWL_WARN(trans, "Exit HW not ready\n");
@@ -1097,9 +1025,7 @@ static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
1097 1025
1098static void iwl_tx_start(struct iwl_trans *trans) 1026static void iwl_tx_start(struct iwl_trans *trans)
1099{ 1027{
1100 const struct queue_to_fifo_ac *queue_to_fifo; 1028 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1101 struct iwl_trans_pcie *trans_pcie =
1102 IWL_TRANS_GET_PCIE_TRANS(trans);
1103 u32 a; 1029 u32 a;
1104 unsigned long flags; 1030 unsigned long flags;
1105 int i, chan; 1031 int i, chan;
@@ -1165,41 +1091,19 @@ static void iwl_tx_start(struct iwl_trans *trans)
1165 /* Activate all Tx DMA/FIFO channels */ 1091 /* Activate all Tx DMA/FIFO channels */
1166 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7)); 1092 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
1167 1093
1168 /* map queues to FIFOs */
1169 if (trans->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))
1170 queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
1171 else
1172 queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
1173
1174 iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0); 1094 iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
1175 1095
1176 /* make sure all queues are not stopped */ 1096 /* make sure all queues are not stopped/used */
1177 memset(&trans_pcie->queue_stopped[0], 0, 1097 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
1178 sizeof(trans_pcie->queue_stopped)); 1098 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
1179 for (i = 0; i < 4; i++)
1180 atomic_set(&trans_pcie->queue_stop_count[i], 0);
1181
1182 /* reset to 0 to enable all the queue first */
1183 trans_pcie->txq_ctx_active_msk = 0;
1184 1099
1185 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) < 1100 for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
1186 IWLAGN_FIRST_AMPDU_QUEUE); 1101 int fifo = trans_pcie->setup_q_to_fifo[i];
1187 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
1188 IWLAGN_FIRST_AMPDU_QUEUE);
1189 1102
1190 for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) { 1103 set_bit(i, trans_pcie->queue_used);
1191 int fifo = queue_to_fifo[i].fifo;
1192 int ac = queue_to_fifo[i].ac;
1193 1104
1194 iwl_txq_ctx_activate(trans_pcie, i);
1195
1196 if (fifo == IWL_TX_FIFO_UNUSED)
1197 continue;
1198
1199 if (ac != IWL_AC_UNSET)
1200 iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
1201 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i], 1105 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
1202 fifo, 0); 1106 fifo, true);
1203 } 1107 }
1204 1108
1205 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 1109 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
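iwl_tx_start() no longer consults the static queue-to-FIFO/AC tables, skips IWL_TX_FIFO_UNUSED entries, or seeds swq_id and the per-AC stop counters; it simply clears both bitmaps and activates exactly the queues the op mode configured. A compact user-space model of that sequence (the sample mapping is invented):

#include <stdio.h>
#include <string.h>

static unsigned long queue_stopped, queue_used;

static void set_bit(int nr, unsigned long *addr) { *addr |= 1UL << nr; }

int main(void)
{
	const unsigned char setup_q_to_fifo[] = { 3, 2, 1, 0, 7 };
	int n_q_to_fifo = sizeof(setup_q_to_fifo), i;

	/* make sure all queues are neither stopped nor used */
	memset(&queue_stopped, 0, sizeof(queue_stopped));
	memset(&queue_used, 0, sizeof(queue_used));

	/* activate exactly the queues the op mode configured */
	for (i = 0; i < n_q_to_fifo; i++) {
		set_bit(i, &queue_used);
		printf("queue %d -> fifo %d active\n", i, setup_q_to_fifo[i]);
	}
	return 0;
}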
@@ -1324,70 +1228,32 @@ static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
1324} 1228}
1325 1229
1326static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, 1230static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1327 struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, 1231 struct iwl_device_cmd *dev_cmd, int txq_id)
1328 u8 sta_id, u8 tid)
1329{ 1232{
1330 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1233 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1331 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1234 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1332 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1333 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload; 1235 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
1334 struct iwl_cmd_meta *out_meta; 1236 struct iwl_cmd_meta *out_meta;
1335 struct iwl_tx_queue *txq; 1237 struct iwl_tx_queue *txq;
1336 struct iwl_queue *q; 1238 struct iwl_queue *q;
1337
1338 dma_addr_t phys_addr = 0; 1239 dma_addr_t phys_addr = 0;
1339 dma_addr_t txcmd_phys; 1240 dma_addr_t txcmd_phys;
1340 dma_addr_t scratch_phys; 1241 dma_addr_t scratch_phys;
1341 u16 len, firstlen, secondlen; 1242 u16 len, firstlen, secondlen;
1342 u8 wait_write_ptr = 0; 1243 u8 wait_write_ptr = 0;
1343 u8 txq_id;
1344 bool is_agg = false;
1345 __le16 fc = hdr->frame_control; 1244 __le16 fc = hdr->frame_control;
1346 u8 hdr_len = ieee80211_hdrlen(fc); 1245 u8 hdr_len = ieee80211_hdrlen(fc);
1347 u16 __maybe_unused wifi_seq; 1246 u16 __maybe_unused wifi_seq;
1348 1247
1349 /*
1350 * Send this frame after DTIM -- there's a special queue
1351 * reserved for this for contexts that support AP mode.
1352 */
1353 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1354 txq_id = trans_pcie->mcast_queue[ctx];
1355
1356 /*
1357 * The microcode will clear the more data
1358 * bit in the last frame it transmits.
1359 */
1360 hdr->frame_control |=
1361 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1362 } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
1363 txq_id = IWL_AUX_QUEUE;
1364 else
1365 txq_id =
1366 trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];
1367
1368 /* aggregation is on for this <sta,tid> */
1369 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1370 WARN_ON(tid >= IWL_MAX_TID_COUNT);
1371 txq_id = trans_pcie->agg_txq[sta_id][tid];
1372 is_agg = true;
1373 }
1374
1375 txq = &trans_pcie->txq[txq_id]; 1248 txq = &trans_pcie->txq[txq_id];
1376 q = &txq->q; 1249 q = &txq->q;
1377 1250
1378 spin_lock(&txq->lock); 1251 if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
1252 WARN_ON_ONCE(1);
1253 return -EINVAL;
1254 }
1379 1255
1380 /* In AGG mode, the index in the ring must correspond to the WiFi 1256 spin_lock(&txq->lock);
1381 * sequence number. This is a HW requirements to help the SCD to parse
1382 * the BA.
1383 * Check here that the packets are in the right place on the ring.
1384 */
1385#ifdef CONFIG_IWLWIFI_DEBUG
1386 wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1387 WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr),
1388 "Q: %d WiFi Seq %d tfdNum %d",
1389 txq_id, wifi_seq, q->write_ptr);
1390#endif
1391 1257
1392 /* Set up driver data for this TFD */ 1258 /* Set up driver data for this TFD */
1393 txq->skbs[q->write_ptr] = skb; 1259 txq->skbs[q->write_ptr] = skb;
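The transmit path now takes txq_id from the caller and merely refuses queues it does not own (the queue_used test above); all of the routing that used to live here, DTIM-deferred multicast, off-channel, and per-<sta,tid> aggregation queues, becomes the op mode's job. A hedged user-space sketch of that decision tree, reusing the flag semantics of the removed code (names and the IWL_AUX_QUEUE value are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum { IWL_AUX_QUEUE = 10 };	/* value illustrative */

struct tx_route {
	bool send_after_dtim, offchannel, ampdu;
	int mcast_queue, agg_queue, ac_queue;
};

static int select_queue(const struct tx_route *r)
{
	if (r->send_after_dtim)
		return r->mcast_queue;	/* uCode clears MOREDATA on last frame */
	if (r->offchannel)
		return IWL_AUX_QUEUE;
	if (r->ampdu)
		return r->agg_queue;	/* per-<sta,tid> aggregation queue */
	return r->ac_queue;		/* plain AC-to-queue mapping */
}

int main(void)
{
	struct tx_route r = { .ampdu = true, .agg_queue = 12, .ac_queue = 2 };
	printf("txq %d\n", select_queue(&r));
	return 0;
}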
@@ -1564,8 +1430,8 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans)
1564 iwl_enable_rfkill_int(trans); 1430 iwl_enable_rfkill_int(trans);
1565} 1431}
1566 1432
1567static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid, 1433static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1568 int txq_id, int ssn, struct sk_buff_head *skbs) 1434 struct sk_buff_head *skbs)
1569{ 1435{
1570 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1436 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1571 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; 1437 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
@@ -1577,33 +1443,15 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
1577 1443
1578 txq->time_stamp = jiffies; 1444 txq->time_stamp = jiffies;
1579 1445
1580 if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
1581 tid != IWL_TID_NON_QOS &&
1582 txq_id != trans_pcie->agg_txq[sta_id][tid])) {
1583 /*
1584 * FIXME: this is a uCode bug which needs to be addressed,
1585 * log the information and return for now.
1586 * Since it can possibly happen very often and in order
1587 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
1588 */
1589 IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
1590 "agg_txq[sta_id[tid] %d", txq_id,
1591 trans_pcie->agg_txq[sta_id][tid]);
1592 spin_unlock(&txq->lock);
1593 return 1;
1594 }
1595
1596 if (txq->q.read_ptr != tfd_num) { 1446 if (txq->q.read_ptr != tfd_num) {
1597 IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n", 1447 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
1598 txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr, 1448 txq_id, txq->q.read_ptr, tfd_num, ssn);
1599 tfd_num, ssn);
1600 freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs); 1449 freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
1601 if (iwl_queue_space(&txq->q) > txq->q.low_mark) 1450 if (iwl_queue_space(&txq->q) > txq->q.low_mark)
1602 iwl_wake_queue(trans, txq); 1451 iwl_wake_queue(trans, txq);
1603 } 1452 }
1604 1453
1605 spin_unlock(&txq->lock); 1454 spin_unlock(&txq->lock);
1606 return 0;
1607} 1455}
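iwl_trans_pcie_reclaim() loses its return value along with the sta/tid cross-check: with the op mode owning the queue mapping, a mismatched queue is no longer something the transport can detect, so the uCode-bug workaround above moves out of this layer. What remains is the plain ring walk; a user-space model of that walk, relying on the power-of-two ring size the BUILD_BUG_ON in this file enforces:

#include <stdio.h>

#define RING_SIZE 256	/* must be a power of two */

static int freed_between(int read_ptr, int tfd_num)
{
	int freed = 0;

	/* free entries from read_ptr up to, not including, tfd_num */
	while (read_ptr != tfd_num) {
		read_ptr = (read_ptr + 1) & (RING_SIZE - 1); /* inc_wrap */
		freed++;
	}
	return freed;
}

int main(void)
{
	printf("%d\n", freed_between(250, 4));	/* 10, across the wrap */
	return 0;
}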
1608 1456
1609static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) 1457static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1622,7 +1470,7 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1622} 1470}
1623 1471
1624static void iwl_trans_pcie_configure(struct iwl_trans *trans, 1472static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1625 const struct iwl_trans_config *trans_cfg) 1473 const struct iwl_trans_config *trans_cfg)
1626{ 1474{
1627 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1475 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1628 1476
@@ -1634,6 +1482,17 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1634 if (trans_pcie->n_no_reclaim_cmds) 1482 if (trans_pcie->n_no_reclaim_cmds)
1635 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, 1483 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
1636 trans_pcie->n_no_reclaim_cmds * sizeof(u8)); 1484 trans_pcie->n_no_reclaim_cmds * sizeof(u8));
1485
1486 trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
1487
1488 if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
1489 trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;
1490
1491 /* at least the command queue must be mapped */
1492 WARN_ON(!trans_pcie->n_q_to_fifo);
1493
1494 memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
1495 trans_pcie->n_q_to_fifo * sizeof(u8));
1637} 1496}
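iwl_trans_pcie_configure() snapshots the op mode's mapping rather than keeping the caller's pointer, clamps the count to the new IWL_MAX_HW_QUEUES bound, and insists that at least the command queue is mapped. A user-space model of that defensive copy:

#include <assert.h>
#include <stdint.h>
#include <string.h>
typedef uint8_t u8;

#define IWL_MAX_HW_QUEUES 32

struct pcie_model {
	u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
	u8 n_q_to_fifo;
};

static void configure(struct pcie_model *p, const u8 *map, u8 n)
{
	/* clamp instead of overrunning the fixed-size snapshot */
	p->n_q_to_fifo = n > IWL_MAX_HW_QUEUES ? IWL_MAX_HW_QUEUES : n;

	/* at least the command queue must be mapped */
	assert(p->n_q_to_fifo);

	memcpy(p->setup_q_to_fifo, map, p->n_q_to_fifo * sizeof(u8));
}

int main(void)
{
	static const u8 map[] = { 3, 2, 1, 0, 7 };
	struct pcie_model m;

	configure(&m, map, sizeof(map));
	return 0;
}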
1638 1497
1639static void iwl_trans_pcie_free(struct iwl_trans *trans) 1498static void iwl_trans_pcie_free(struct iwl_trans *trans)
@@ -1957,18 +1816,10 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1957 txq = &trans_pcie->txq[cnt]; 1816 txq = &trans_pcie->txq[cnt];
1958 q = &txq->q; 1817 q = &txq->q;
1959 pos += scnprintf(buf + pos, bufsz - pos, 1818 pos += scnprintf(buf + pos, bufsz - pos,
1960 "hwq %.2d: read=%u write=%u stop=%d" 1819 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
1961 " swq_id=%#.2x (ac %d/hwq %d)\n",
1962 cnt, q->read_ptr, q->write_ptr, 1820 cnt, q->read_ptr, q->write_ptr,
1963 !!test_bit(cnt, trans_pcie->queue_stopped), 1821 !!test_bit(cnt, trans_pcie->queue_used),
1964 txq->swq_id, txq->swq_id & 3, 1822 !!test_bit(cnt, trans_pcie->queue_stopped));
1965 (txq->swq_id >> 2) & 0x1f);
1966 if (cnt >= 4)
1967 continue;
1968 /* for the ACs, display the stop count too */
1969 pos += scnprintf(buf + pos, bufsz - pos,
1970 " stop-count: %d\n",
1971 atomic_read(&trans_pcie->queue_stop_count[cnt]));
1972 } 1823 }
1973 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1824 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1974 kfree(buf); 1825 kfree(buf);
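With swq_id and the per-AC stop counters gone, the tx_queue debugfs dump prints one uniform line per hardware queue. Under the new format string, the output would render along these lines (values invented for illustration):

hwq 00: read=12 write=12 use=1 stop=0
hwq 04: read=7 write=9 use=1 stop=0
hwq 10: read=0 write=0 use=0 stop=0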
@@ -2210,7 +2061,6 @@ const struct iwl_trans_ops trans_ops_pcie = {
2210 .reclaim = iwl_trans_pcie_reclaim, 2061 .reclaim = iwl_trans_pcie_reclaim,
2211 2062
2212 .tx_agg_disable = iwl_trans_pcie_tx_agg_disable, 2063 .tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
2213 .tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
2214 .tx_agg_setup = iwl_trans_pcie_tx_agg_setup, 2064 .tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
2215 2065
2216 .free = iwl_trans_pcie_free, 2066 .free = iwl_trans_pcie_free,
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 57d8ae7b7ba9..27853087a803 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -285,11 +285,19 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
285 285
286#define MAX_NO_RECLAIM_CMDS 6 286#define MAX_NO_RECLAIM_CMDS 6
287 287
288/*
289 * Maximum number of HW queues the transport layer
290 * currently supports
291 */
292#define IWL_MAX_HW_QUEUES 32
293
288/** 294/**
289 * struct iwl_trans_config - transport configuration 295 * struct iwl_trans_config - transport configuration
290 * 296 *
291 * @op_mode: pointer to the upper layer. 297 * @op_mode: pointer to the upper layer.
292 * Must be set before any other call. 298 * @queue_to_fifo: queue to FIFO mapping to set up by
299 * default
300 * @n_queue_to_fifo: number of queues to set up
293 * @cmd_queue: the index of the command queue. 301 * @cmd_queue: the index of the command queue.
294 * Must be set before start_fw. 302 * Must be set before start_fw.
295 * @no_reclaim_cmds: Some devices erroneously don't set the 303 * @no_reclaim_cmds: Some devices erroneously don't set the
@@ -300,6 +308,9 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
300 */ 308 */
301struct iwl_trans_config { 309struct iwl_trans_config {
302 struct iwl_op_mode *op_mode; 310 struct iwl_op_mode *op_mode;
311 const u8 *queue_to_fifo;
312 u8 n_queue_to_fifo;
313
303 u8 cmd_queue; 314 u8 cmd_queue;
304 const u8 *no_reclaim_cmds; 315 const u8 *no_reclaim_cmds;
305 int n_no_reclaim_cmds; 316 int n_no_reclaim_cmds;
@@ -331,8 +342,6 @@ struct iwl_trans_config {
331 * Must be atomic 342 * Must be atomic
332 * @reclaim: free packet until ssn. Returns a list of freed packets. 343 * @reclaim: free packet until ssn. Returns a list of freed packets.
333 * Must be atomic 344 * Must be atomic
334 * @tx_agg_alloc: allocate resources for a TX BA session
335 * Must be atomic
336 * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is 345 * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
337 * ready and a successful ADDBA response has been received. 346 * ready and a successful ADDBA response has been received.
338 * May sleep 347 * May sleep
@@ -369,18 +378,13 @@ struct iwl_trans_ops {
369 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); 378 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
370 379
371 int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, 380 int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
372 struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, 381 struct iwl_device_cmd *dev_cmd, int queue);
373 u8 sta_id, u8 tid); 382 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
374 int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid, 383 struct sk_buff_head *skbs);
375 int txq_id, int ssn, struct sk_buff_head *skbs); 384
376 385 void (*tx_agg_setup)(struct iwl_trans *trans, int queue, int fifo,
377 int (*tx_agg_disable)(struct iwl_trans *trans, 386 int sta_id, int tid, int frame_limit, u16 ssn);
378 int sta_id, int tid); 387 void (*tx_agg_disable)(struct iwl_trans *trans, int queue);
379 int (*tx_agg_alloc)(struct iwl_trans *trans,
380 int sta_id, int tid);
381 void (*tx_agg_setup)(struct iwl_trans *trans,
382 enum iwl_rxon_context_id ctx, int sta_id, int tid,
383 int frame_limit, u16 ssn);
384 388
385 void (*free)(struct iwl_trans *trans); 389 void (*free)(struct iwl_trans *trans);
386 390
@@ -516,55 +520,42 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
516} 520}
517 521
518static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, 522static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
519 struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, 523 struct iwl_device_cmd *dev_cmd, int queue)
520 u8 sta_id, u8 tid)
521{
522 if (trans->state != IWL_TRANS_FW_ALIVE)
523 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
524
525 return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id, tid);
526}
527
528static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
529 int tid, int txq_id, int ssn,
530 struct sk_buff_head *skbs)
531{ 524{
532 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 525 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
533 "%s bad state = %d", __func__, trans->state); 526 "%s bad state = %d", __func__, trans->state);
534 527
535 return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, skbs); 528 return trans->ops->tx(trans, skb, dev_cmd, queue);
536} 529}
537 530
538static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans, 531static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
539 int sta_id, int tid) 532 int ssn, struct sk_buff_head *skbs)
540{ 533{
541 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 534 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
542 "%s bad state = %d", __func__, trans->state); 535 "%s bad state = %d", __func__, trans->state);
543 536
544 return trans->ops->tx_agg_disable(trans, sta_id, tid); 537 trans->ops->reclaim(trans, queue, ssn, skbs);
545} 538}
546 539
547static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans, 540static inline void iwl_trans_tx_agg_disable(struct iwl_trans *trans, int queue)
548 int sta_id, int tid)
549{ 541{
550 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 542 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
551 "%s bad state = %d", __func__, trans->state); 543 "%s bad state = %d", __func__, trans->state);
552 544
553 return trans->ops->tx_agg_alloc(trans, sta_id, tid); 545 trans->ops->tx_agg_disable(trans, queue);
554} 546}
555 547
556 548static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, int queue,
557static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, 549 int fifo, int sta_id, int tid,
558 enum iwl_rxon_context_id ctx, 550 int frame_limit, u16 ssn)
559 int sta_id, int tid,
560 int frame_limit, u16 ssn)
561{ 551{
562 might_sleep(); 552 might_sleep();
563 553
564 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 554 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
565 "%s bad state = %d", __func__, trans->state); 555 "%s bad state = %d", __func__, trans->state);
566 556
567 trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn); 557 trans->ops->tx_agg_setup(trans, queue, fifo, sta_id, tid,
558 frame_limit, ssn);
568} 559}
569 560
570static inline void iwl_trans_free(struct iwl_trans *trans) 561static inline void iwl_trans_free(struct iwl_trans *trans)