path: root/drivers/net/wireless/ath/ath10k/htt_tx.c
author		Michal Kazior <michal.kazior@tieto.com>	2016-03-06 09:14:43 -0500
committer	Kalle Valo <kvalo@qca.qualcomm.com>	2016-03-06 09:31:12 -0500
commit		426e10eaf76d7229ed6c2978f0d473d04ba0b377 (patch)
tree		61e542d7a2334099f00092a69e5a8ed38a61d985 /drivers/net/wireless/ath/ath10k/htt_tx.c
parent		3cc0fef6170dce8e7d4ec29afb4f34267fb9bf14 (diff)
ath10k: implement push-pull tx
The current/old tx path design was that the host, at its own leisure, pushed tx frames to the device. For HTT there was a ~1000-1400 msdu queue depth. After reaching that limit the driver would request mac80211 to stop queues. There was little control over which packets got in there as far as DA/RA was concerned, so it was rather easy to starve per-station traffic flows.

With MU-MIMO this became a significant problem because the queue depth was insufficient to buffer frames from multiple clients (which could have different signal quality and capabilities) in an efficient fashion.

Hence the new tx path in 10.4 was introduced: a pull-push mode. Firmware and host can share tx queue state via DMA. The state is logically a two-dimensional array addressed via a peer_id+tid pair. Each entry is a counter (either a number of bytes or of packets). The host keeps it updated and the firmware uses it for scheduling tx pull requests to the host. This allows MU-MIMO to become a lot more effective with 10+ clients.

Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
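For illustration, a minimal sketch of the shared queue state described above. All names and sizes here are hypothetical, not the driver's actual definitions; the real layout (and its endianness handling) is defined by the driver's HTT headers.

#include <linux/types.h>

/* Sketch only: a DMA-shared, two-dimensional counter array addressed by
 * (peer_id, tid). The host keeps the counters current; the firmware reads
 * them to decide which flows to pull frames from.
 */
#define SKETCH_NUM_PEERS	32	/* hypothetical size */
#define SKETCH_NUM_TIDS		8	/* hypothetical size */

struct sketch_tx_q_state {
	u8  count[SKETCH_NUM_TIDS][SKETCH_NUM_PEERS]; /* bytes or packets */
	u32 seq;	/* bumped on every host update */
};

/* Host side: record the current queue depth for one (peer, tid) flow and
 * bump the sequence number. A real driver would then DMA-sync the structure
 * toward the device so the firmware sees a consistent snapshot.
 */
static void sketch_txq_recalc(struct sketch_tx_q_state *state,
			      u16 peer_id, u8 tid, u8 depth)
{
	state->count[tid][peer_id] = depth;
	state->seq++;
}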
Diffstat (limited to 'drivers/net/wireless/ath/ath10k/htt_tx.c')
-rw-r--r--	drivers/net/wireless/ath/ath10k/htt_tx.c	39
1 file changed, 29 insertions(+), 10 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 6643be8692b5..a30c34eae0a7 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -64,6 +64,9 @@ static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
 	if (!ar->htt.tx_q_state.enabled)
 		return;
 
+	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+		return;
+
 	if (txq->sta)
 		peer_id = arsta->peer_id;
 	else
@@ -101,6 +104,9 @@ static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
 	if (!ar->htt.tx_q_state.enabled)
 		return;
 
+	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+		return;
+
 	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
 	seq++;
 	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);
@@ -115,6 +121,23 @@ static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
 				   DMA_TO_DEVICE);
 }
 
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+			      struct ieee80211_txq *txq)
+{
+	struct ath10k *ar = hw->priv;
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	__ath10k_htt_tx_txq_recalc(hw, txq);
+	spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+	spin_lock_bh(&ar->htt.tx_lock);
+	__ath10k_htt_tx_txq_sync(ar);
+	spin_unlock_bh(&ar->htt.tx_lock);
+}
+
 void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
 			      struct ieee80211_txq *txq)
 {
@@ -638,10 +661,14 @@ int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
 {
 	struct sk_buff *skb;
 	struct htt_cmd *cmd;
-	u16 resp_id;
+	const u16 resp_id = 0;
 	int len = 0;
 	int ret;
 
+	/* Response IDs are echo-ed back only for host driver convienence
+	 * purposes. They aren't used for anything in the driver yet so use 0.
+	 */
+
 	len += sizeof(cmd->hdr);
 	len += sizeof(cmd->tx_fetch_resp);
 	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;
@@ -650,11 +677,6 @@ int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
 	if (!skb)
 		return -ENOMEM;
 
-	resp_id = 0; /* TODO: allocate resp_id */
-	ret = 0;
-	if (ret)
-		goto err_free_skb;
-
 	skb_put(skb, len);
 	cmd = (struct htt_cmd *)skb->data;
 	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
@@ -669,14 +691,11 @@ int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
 	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
 	if (ret) {
 		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
-		goto err_free_resp_id;
+		goto err_free_skb;
 	}
 
 	return 0;
 
-err_free_resp_id:
-	(void)resp_id; /* TODO: free resp_id */
-
 err_free_skb:
 	dev_kfree_skb_any(skb);
 