about summary refs log tree commit diff stats
path: root/drivers/net/wireless/intel/iwlwifi
diff options
context:
space:
mode:
author: Liad Kaufman <liad.kaufman@intel.com> 2015-08-13 12:16:08 -0400
committer: Luca Coelho <luciano.coelho@intel.com> 2016-05-10 15:34:09 -0400
commit: cf961e16620f88686e0662753bd92d8383f36862 (patch)
tree: 1ea121d82600c6bef205956ea7e6d3e472259458 /drivers/net/wireless/intel/iwlwifi
parent: 192185d68dcc9b4517001fcec645111946f84d40 (diff)
iwlwifi: mvm: support dqa-mode agg on non-shared queue
In non-shared queues, DQA requires re-configuring existing queues to become
aggregated rather than allocating a new one. It also requires "un-aggregating"
an existing queue when aggregations are turned off.

Support this requirement for non-shared queues.

Signed-off-by: Liad Kaufman <liad.kaufman@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi')
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c37
6 files changed, 207 insertions, 40 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index cd710176cea0..e5f267b21316 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -452,7 +452,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
452 if (mvm->trans->max_skb_frags) 452 if (mvm->trans->max_skb_frags)
453 hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; 453 hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
454 454
455 hw->queues = mvm->first_agg_queue; 455 if (!iwl_mvm_is_dqa_supported(mvm))
456 hw->queues = mvm->first_agg_queue;
457 else
458 hw->queues = IEEE80211_MAX_QUEUES;
456 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; 459 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
457 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | 460 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
458 IEEE80211_RADIOTAP_MCS_HAVE_STBC; 461 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 820f8d661e15..ffbd41dcc0d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -671,6 +671,28 @@ struct iwl_mvm_baid_data {
671 struct iwl_mvm_reorder_buffer reorder_buf[]; 671 struct iwl_mvm_reorder_buffer reorder_buf[];
672}; 672};
673 673
674/*
675 * enum iwl_mvm_queue_status - queue status
676 * @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved
677 * Basically, this means that this queue can be used for any purpose
678 * @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use
679 * This is the state of a queue that has been dedicated for some RATID
680 * (agg'd or not), but that hasn't yet gone through the actual enablement
681 * of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet.
682 * Note that in this state there is no requirement to already know what TID
683 * should be used with this queue, it is just marked as a queue that will
684 * be used, and shouldn't be allocated to anyone else.
685 * @IWL_MVM_QUEUE_READY: queue is ready to be used
686 * This is the state of a queue that has been fully configured (including
687 * SCD pointers, etc), has a specific RA/TID assigned to it, and can be
688 * used to send traffic.
689 */
690enum iwl_mvm_queue_status {
691 IWL_MVM_QUEUE_FREE,
692 IWL_MVM_QUEUE_RESERVED,
693 IWL_MVM_QUEUE_READY,
694};
695
674struct iwl_mvm { 696struct iwl_mvm {
675 /* for logger access */ 697 /* for logger access */
676 struct device *dev; 698 struct device *dev;
@@ -726,13 +748,8 @@ struct iwl_mvm {
726 u32 hw_queue_to_mac80211; 748 u32 hw_queue_to_mac80211;
727 u8 hw_queue_refcount; 749 u8 hw_queue_refcount;
728 u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ 750 u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
729 /*
730 * This is to mark that queue is reserved for a STA but not yet
731 * allocated. This is needed to make sure we have at least one
732 * available queue to use when adding a new STA
733 */
734 bool setup_reserved;
735 u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ 751 u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
752 enum iwl_mvm_queue_status status;
736 } queue_info[IWL_MAX_HW_QUEUES]; 753 } queue_info[IWL_MAX_HW_QUEUES];
737 spinlock_t queue_info_lock; /* For syncing queue mgmt operations */ 754 spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
738 struct work_struct add_stream_wk; /* To add streams to queues */ 755 struct work_struct add_stream_wk; /* To add streams to queues */
@@ -1631,6 +1648,10 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
1631void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq); 1648void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
1632void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq); 1649void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
1633 1650
1651/* Re-configure the SCD for a queue that has already been configured */
1652int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
1653 int tid, int frame_limit, u16 ssn);
1654
1634/* Thermal management and CT-kill */ 1655/* Thermal management and CT-kill */
1635void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); 1656void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
1636void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); 1657void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 6f91c5bdfd62..a68054f127fa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -554,8 +554,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
554 mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0; 554 mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
555 555
556 mvm->aux_queue = 15; 556 mvm->aux_queue = 15;
557 mvm->first_agg_queue = 16; 557 if (!iwl_mvm_is_dqa_supported(mvm)) {
558 mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1; 558 mvm->first_agg_queue = 16;
559 mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
560 } else {
561 mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE;
562 mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
563 }
559 if (mvm->cfg->base_params->num_of_queues == 16) { 564 if (mvm->cfg->base_params->num_of_queues == 16) {
560 mvm->aux_queue = 11; 565 mvm->aux_queue = 11;
561 mvm->first_agg_queue = 12; 566 mvm->first_agg_queue = 12;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 855684ace030..fea4d3437e2f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -326,6 +326,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
326 u8 mac_queue = mvmsta->vif->hw_queue[ac]; 326 u8 mac_queue = mvmsta->vif->hw_queue[ac];
327 int queue = -1; 327 int queue = -1;
328 int ssn; 328 int ssn;
329 int ret;
329 330
330 lockdep_assert_held(&mvm->mutex); 331 lockdep_assert_held(&mvm->mutex);
331 332
@@ -354,8 +355,15 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
354 if (queue < 0) 355 if (queue < 0)
355 queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE, 356 queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
356 IWL_MVM_DQA_MAX_DATA_QUEUE); 357 IWL_MVM_DQA_MAX_DATA_QUEUE);
358
359 /*
360 * Mark TXQ as ready, even though it hasn't been fully configured yet,
361 * to make sure no one else takes it.
362 * This will allow avoiding re-acquiring the lock at the end of the
363 * configuration. On error we'll mark it back as free.
364 */
357 if (queue >= 0) 365 if (queue >= 0)
358 mvm->queue_info[queue].setup_reserved = false; 366 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
359 367
360 spin_unlock_bh(&mvm->queue_info_lock); 368 spin_unlock_bh(&mvm->queue_info_lock);
361 369
@@ -387,7 +395,16 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
387 mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; 395 mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
388 spin_unlock_bh(&mvmsta->lock); 396 spin_unlock_bh(&mvmsta->lock);
389 397
390 return iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); 398 ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
399 if (ret)
400 goto out_err;
401
402 return 0;
403
404out_err:
405 iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
406
407 return ret;
391} 408}
392 409
393static inline u8 iwl_mvm_tid_to_ac_queue(int tid) 410static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
@@ -493,7 +510,8 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
493 /* Make sure we have free resources for this STA */ 510 /* Make sure we have free resources for this STA */
494 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && 511 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
495 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount && 512 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
496 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].setup_reserved) 513 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
514 IWL_MVM_QUEUE_FREE))
497 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; 515 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
498 else 516 else
499 queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE, 517 queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
@@ -503,7 +521,7 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
503 IWL_ERR(mvm, "No available queues for new station\n"); 521 IWL_ERR(mvm, "No available queues for new station\n");
504 return -ENOSPC; 522 return -ENOSPC;
505 } 523 }
506 mvm->queue_info[queue].setup_reserved = true; 524 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
507 525
508 spin_unlock_bh(&mvm->queue_info_lock); 526 spin_unlock_bh(&mvm->queue_info_lock);
509 527
@@ -1398,7 +1416,9 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1398 mvm_sta->tfd_queue_msk |= BIT(queue); 1416 mvm_sta->tfd_queue_msk |= BIT(queue);
1399 mvm_sta->tid_disable_agg &= ~BIT(tid); 1417 mvm_sta->tid_disable_agg &= ~BIT(tid);
1400 } else { 1418 } else {
1401 mvm_sta->tfd_queue_msk &= ~BIT(queue); 1419 /* In DQA-mode the queue isn't removed on agg termination */
1420 if (!iwl_mvm_is_dqa_supported(mvm))
1421 mvm_sta->tfd_queue_msk &= ~BIT(queue);
1402 mvm_sta->tid_disable_agg |= BIT(tid); 1422 mvm_sta->tid_disable_agg |= BIT(tid);
1403 } 1423 }
1404 1424
@@ -1481,17 +1501,35 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1481 1501
1482 spin_lock_bh(&mvm->queue_info_lock); 1502 spin_lock_bh(&mvm->queue_info_lock);
1483 1503
1484 txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue, 1504 /*
1485 mvm->last_agg_queue); 1505 * Note the possible cases:
1486 if (txq_id < 0) { 1506 * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
1487 ret = txq_id; 1507 * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
1488 spin_unlock_bh(&mvm->queue_info_lock); 1508 * one and mark it as reserved
1489 IWL_ERR(mvm, "Failed to allocate agg queue\n"); 1509 * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
1490 goto release_locks; 1510 * non-DQA mode, since the TXQ hasn't yet been allocated
1511 */
1512 txq_id = mvmsta->tid_data[tid].txq_id;
1513 if (!iwl_mvm_is_dqa_supported(mvm) ||
1514 mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
1515 txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
1516 mvm->last_agg_queue);
1517 if (txq_id < 0) {
1518 ret = txq_id;
1519 spin_unlock_bh(&mvm->queue_info_lock);
1520 IWL_ERR(mvm, "Failed to allocate agg queue\n");
1521 goto release_locks;
1522 }
1523
1524 /* TXQ hasn't yet been enabled, so mark it only as reserved */
1525 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
1491 } 1526 }
1492 mvm->queue_info[txq_id].setup_reserved = true;
1493 spin_unlock_bh(&mvm->queue_info_lock); 1527 spin_unlock_bh(&mvm->queue_info_lock);
1494 1528
1529 IWL_DEBUG_TX_QUEUES(mvm,
1530 "AGG for tid %d will be on queue #%d\n",
1531 tid, txq_id);
1532
1495 tid_data = &mvmsta->tid_data[tid]; 1533 tid_data = &mvmsta->tid_data[tid];
1496 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); 1534 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1497 tid_data->txq_id = txq_id; 1535 tid_data->txq_id = txq_id;
@@ -1526,6 +1564,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1526 unsigned int wdg_timeout = 1564 unsigned int wdg_timeout =
1527 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); 1565 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
1528 int queue, ret; 1566 int queue, ret;
1567 bool alloc_queue = true;
1529 u16 ssn; 1568 u16 ssn;
1530 1569
1531 struct iwl_trans_txq_scd_cfg cfg = { 1570 struct iwl_trans_txq_scd_cfg cfg = {
@@ -1551,8 +1590,46 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1551 1590
1552 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; 1591 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
1553 1592
1554 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[tid_to_mac80211_ac[tid]], 1593 /* In DQA mode, the existing queue might need to be reconfigured */
1555 ssn, &cfg, wdg_timeout); 1594 if (iwl_mvm_is_dqa_supported(mvm)) {
1595 spin_lock_bh(&mvm->queue_info_lock);
1596 /* Maybe there is no need to even alloc a queue... */
1597 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
1598 alloc_queue = false;
1599 spin_unlock_bh(&mvm->queue_info_lock);
1600
1601 /*
1602 * Only reconfig the SCD for the queue if the window size has
1603 * changed from current (become smaller)
1604 */
1605 if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
1606 /*
1607 * If reconfiguring an existing queue, it first must be
1608 * drained
1609 */
1610 ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
1611 BIT(queue));
1612 if (ret) {
1613 IWL_ERR(mvm,
1614 "Error draining queue before reconfig\n");
1615 return ret;
1616 }
1617
1618 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
1619 mvmsta->sta_id, tid,
1620 buf_size, ssn);
1621 if (ret) {
1622 IWL_ERR(mvm,
1623 "Error reconfiguring TXQ #%d\n", queue);
1624 return ret;
1625 }
1626 }
1627 }
1628
1629 if (alloc_queue)
1630 iwl_mvm_enable_txq(mvm, queue,
1631 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
1632 &cfg, wdg_timeout);
1556 1633
1557 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); 1634 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1558 if (ret) 1635 if (ret)
@@ -1560,7 +1637,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1560 1637
1561 /* No need to mark as reserved */ 1638 /* No need to mark as reserved */
1562 spin_lock_bh(&mvm->queue_info_lock); 1639 spin_lock_bh(&mvm->queue_info_lock);
1563 mvm->queue_info[queue].setup_reserved = false; 1640 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1564 spin_unlock_bh(&mvm->queue_info_lock); 1641 spin_unlock_bh(&mvm->queue_info_lock);
1565 1642
1566 /* 1643 /*
@@ -1607,9 +1684,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1607 1684
1608 mvmsta->agg_tids &= ~BIT(tid); 1685 mvmsta->agg_tids &= ~BIT(tid);
1609 1686
1610 /* No need to mark as reserved anymore */
1611 spin_lock_bh(&mvm->queue_info_lock); 1687 spin_lock_bh(&mvm->queue_info_lock);
1612 mvm->queue_info[txq_id].setup_reserved = false; 1688 /*
1689 * The TXQ is marked as reserved only if no traffic came through yet
1690 * This means no traffic has been sent on this TID (agg'd or not), so
1691 * we no longer have use for the queue. Since it hasn't even been
1692 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
1693 * free.
1694 */
1695 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
1696 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
1613 spin_unlock_bh(&mvm->queue_info_lock); 1697 spin_unlock_bh(&mvm->queue_info_lock);
1614 1698
1615 switch (tid_data->state) { 1699 switch (tid_data->state) {
@@ -1635,9 +1719,11 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1635 1719
1636 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); 1720 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
1637 1721
1638 iwl_mvm_disable_txq(mvm, txq_id, 1722 if (!iwl_mvm_is_dqa_supported(mvm)) {
1639 vif->hw_queue[tid_to_mac80211_ac[tid]], tid, 1723 int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
1640 0); 1724
1725 iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
1726 }
1641 return 0; 1727 return 0;
1642 case IWL_AGG_STARTING: 1728 case IWL_AGG_STARTING:
1643 case IWL_EMPTYING_HW_QUEUE_ADDBA: 1729 case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1688,9 +1774,16 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1688 mvmsta->agg_tids &= ~BIT(tid); 1774 mvmsta->agg_tids &= ~BIT(tid);
1689 spin_unlock_bh(&mvmsta->lock); 1775 spin_unlock_bh(&mvmsta->lock);
1690 1776
1691 /* No need to mark as reserved */
1692 spin_lock_bh(&mvm->queue_info_lock); 1777 spin_lock_bh(&mvm->queue_info_lock);
1693 mvm->queue_info[txq_id].setup_reserved = false; 1778 /*
1779 * The TXQ is marked as reserved only if no traffic came through yet
1780 * This means no traffic has been sent on this TID (agg'd or not), so
1781 * we no longer have use for the queue. Since it hasn't even been
1782 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
1783 * free.
1784 */
1785 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
1786 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
1694 spin_unlock_bh(&mvm->queue_info_lock); 1787 spin_unlock_bh(&mvm->queue_info_lock);
1695 1788
1696 if (old_state >= IWL_AGG_ON) { 1789 if (old_state >= IWL_AGG_ON) {
@@ -1703,9 +1796,12 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1703 1796
1704 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); 1797 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
1705 1798
1706 iwl_mvm_disable_txq(mvm, tid_data->txq_id, 1799 if (!iwl_mvm_is_dqa_supported(mvm)) {
1707 vif->hw_queue[tid_to_mac80211_ac[tid]], tid, 1800 int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
1708 0); 1801
1802 iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
1803 tid, 0);
1804 }
1709 } 1805 }
1710 1806
1711 return 0; 1807 return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index ff615b92e4ef..779bafcbc9a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -933,7 +933,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
933 933
934 spin_unlock(&mvmsta->lock); 934 spin_unlock(&mvmsta->lock);
935 935
936 if (txq_id < mvm->first_agg_queue) 936 /* Increase pending frames count if this isn't AMPDU */
937 if (!is_ampdu)
937 atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); 938 atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
938 939
939 return 0; 940 return 0;
@@ -1181,6 +1182,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1181 u8 skb_freed = 0; 1182 u8 skb_freed = 0;
1182 u16 next_reclaimed, seq_ctl; 1183 u16 next_reclaimed, seq_ctl;
1183 bool is_ndp = false; 1184 bool is_ndp = false;
1185 bool txq_agg = false; /* Is this TXQ aggregated */
1184 1186
1185 __skb_queue_head_init(&skbs); 1187 __skb_queue_head_init(&skbs);
1186 1188
@@ -1311,6 +1313,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1311 bool send_eosp_ndp = false; 1313 bool send_eosp_ndp = false;
1312 1314
1313 spin_lock_bh(&mvmsta->lock); 1315 spin_lock_bh(&mvmsta->lock);
1316 txq_agg = (mvmsta->tid_data[tid].state == IWL_AGG_ON);
1317
1314 if (!is_ndp) { 1318 if (!is_ndp) {
1315 tid_data->next_reclaimed = next_reclaimed; 1319 tid_data->next_reclaimed = next_reclaimed;
1316 IWL_DEBUG_TX_REPLY(mvm, 1320 IWL_DEBUG_TX_REPLY(mvm,
@@ -1366,11 +1370,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1366 * If the txq is not an AMPDU queue, there is no chance we freed 1370 * If the txq is not an AMPDU queue, there is no chance we freed
1367 * several skbs. Check that out... 1371 * several skbs. Check that out...
1368 */ 1372 */
1369 if (txq_id >= mvm->first_agg_queue) 1373 if (txq_agg)
1370 goto out; 1374 goto out;
1371 1375
1372 /* We can't free more than one frame at once on a shared queue */ 1376 /* We can't free more than one frame at once on a shared queue */
1373 WARN_ON(skb_freed > 1); 1377 WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1));
1374 1378
1375 /* If we have still frames for this STA nothing to do here */ 1379 /* If we have still frames for this STA nothing to do here */
1376 if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) 1380 if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
@@ -1465,8 +1469,11 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
1465 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); 1469 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1466 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1470 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1467 struct iwl_mvm_sta *mvmsta; 1471 struct iwl_mvm_sta *mvmsta;
1472 int queue = SEQ_TO_QUEUE(sequence);
1468 1473
1469 if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue)) 1474 if (WARN_ON_ONCE(queue < mvm->first_agg_queue &&
1475 (!iwl_mvm_is_dqa_supported(mvm) ||
1476 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))))
1470 return; 1477 return;
1471 1478
1472 if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS)) 1479 if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index eb41d3bd8059..161b99efd63d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -587,12 +587,45 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
587 587
588 for (i = minq; i <= maxq; i++) 588 for (i = minq; i <= maxq; i++)
589 if (mvm->queue_info[i].hw_queue_refcount == 0 && 589 if (mvm->queue_info[i].hw_queue_refcount == 0 &&
590 !mvm->queue_info[i].setup_reserved) 590 mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
591 return i; 591 return i;
592 592
593 return -ENOSPC; 593 return -ENOSPC;
594} 594}
595 595
596int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
597 int tid, int frame_limit, u16 ssn)
598{
599 struct iwl_scd_txq_cfg_cmd cmd = {
600 .scd_queue = queue,
601 .enable = 1,
602 .window = frame_limit,
603 .sta_id = sta_id,
604 .ssn = cpu_to_le16(ssn),
605 .tx_fifo = fifo,
606 .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
607 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
608 .tid = tid,
609 };
610 int ret;
611
612 spin_lock_bh(&mvm->queue_info_lock);
613 if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
614 "Trying to reconfig unallocated queue %d\n", queue)) {
615 spin_unlock_bh(&mvm->queue_info_lock);
616 return -ENXIO;
617 }
618 spin_unlock_bh(&mvm->queue_info_lock);
619
620 IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
621
622 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
623 WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
624 queue, fifo, ret);
625
626 return ret;
627}
628
596void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, 629void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
597 u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, 630 u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
598 unsigned int wdg_timeout) 631 unsigned int wdg_timeout)
@@ -688,6 +721,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
688 mvm->queue_info[queue].hw_queue_refcount--; 721 mvm->queue_info[queue].hw_queue_refcount--;
689 722
690 cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0; 723 cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
724 if (!cmd.enable)
725 mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
691 726
692 IWL_DEBUG_TX_QUEUES(mvm, 727 IWL_DEBUG_TX_QUEUES(mvm,
693 "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", 728 "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",