author    Michal Kazior <michal.kazior@tieto.com>  2015-01-24 05:14:48 -0500
committer Kalle Valo <kvalo@qca.qualcomm.com>      2015-01-27 08:55:58 -0500
commit    c545070e404bfb20e5b72ae725332fe512e5d22c
tree      f99ece68c4b041fa671bfd649d013c9fa0cbbf7e /drivers/net/wireless/ath
parent    8582bf3be70f35b6150da37eed9549a585498363
ath10k: implement rx reorder support
New firmware and hardware (qca6174 hw3.0+ and fw 266+) are capable of full
aggregation rx reordering. If it's enabled then Rx is handled via a new,
separate htt event.

The rx ring behaviour is changed a little to support the new rx scheme.
These changes shouldn't affect qca988x performance.

Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
Diffstat (limited to 'drivers/net/wireless/ath')
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c    |  12
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h    |   4
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.h     |  79
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c  | 393
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.c |  11
5 files changed, 463 insertions(+), 36 deletions(-)
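
The heart of the patch is in htt_rx.c below: with Full Rx Reorder the firmware returns buffers by physical address rather than by ring position, so the host must be able to map a DMA address back to the sk_buff it belongs to. The driver does this with a small hashtable keyed by paddr. The following is a minimal userspace sketch of that lookup scheme, not the kernel implementation; the names and the toy hash function are invented, and a plain bucket array stands in for <linux/hashtable.h>.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for sk_buff plus ath10k_skb_rxcb. */
struct buf {
        uint32_t paddr;     /* DMA address the firmware reports back */
        struct buf *next;   /* bucket chaining, like hlist in the patch */
        char data[2048];
};

#define HASH_BITS 4         /* the patch uses DECLARE_HASHTABLE(skb_table, 4) */
#define NBUCKETS (1 << HASH_BITS)
static struct buf *table[NBUCKETS];

static unsigned int hash(uint32_t paddr)
{
        return (paddr >> 11) & (NBUCKETS - 1); /* toy hash; kernel uses hash_32() */
}

/* Mirrors hash_add(): remember which buffer lives at a given address. */
static void buf_add(struct buf *b)
{
        unsigned int i = hash(b->paddr);

        b->next = table[i];
        table[i] = b;
}

/* Mirrors ath10k_htt_rx_find_skb_paddr() plus hash_del(): find and unlink. */
static struct buf *buf_pop(uint32_t paddr)
{
        unsigned int i = hash(paddr);
        struct buf **p;

        for (p = &table[i]; *p; p = &(*p)->next) {
                if ((*p)->paddr == paddr) {
                        struct buf *b = *p;
                        *p = b->next;
                        return b;
                }
        }
        return NULL; /* the driver WARNs and bails out in this case */
}

int main(void)
{
        struct buf *b = calloc(1, sizeof(*b));

        b->paddr = 0xdeadb800;
        buf_add(b);
        printf("found: %p\n", (void *)buf_pop(0xdeadb800));
        free(b);
        return 0;
}

With only 4 hash bits (16 buckets) against a ring that can hold on the order of a thousand outstanding buffers, chains get long; that is what the FIXME about hash collisions in the htt.h hunk below refers to.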
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index a5465752a3ff..6860afbe68d9 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1061,6 +1061,18 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
 		goto err_hif_stop;
 	}
 
+	/* If firmware indicates Full Rx Reorder support it must be used in a
+	 * slightly different manner. Let HTT code know.
+	 */
+	ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
+						ar->wmi.svc_map));
+
+	status = ath10k_htt_rx_ring_refill(ar);
+	if (status) {
+		ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
+		goto err_hif_stop;
+	}
+
 	/* we don't care about HTT in UTF mode */
 	if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
 		status = ath10k_htt_setup(&ar->htt);
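
Note how the whole feature is gated on a single firmware service flag, WMI_SERVICE_RX_FULL_REORDER, tested against the service bitmap the firmware advertises at boot. A toy sketch of that gating pattern follows; the service numbering here is hypothetical and the kernel's real test_bit() operates on unsigned long bitmaps rather than a single u64.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical service id; the real WMI_SERVICE_RX_FULL_REORDER value
 * comes from the WMI service bitmap the firmware reports at boot.
 */
enum { SVC_RX_FULL_REORDER = 7 };

/* Stand-in for ar->wmi.svc_map. */
static uint64_t svc_map;

static int test_bit(int nr, const uint64_t *map)
{
        return (*map >> nr) & 1;
}

int main(void)
{
        svc_map |= 1ULL << SVC_RX_FULL_REORDER; /* pretend fw advertises it */

        /* Mirrors the core.c hunk: in_ord_rx flips the htt rx path, and the
         * wmi-tlv.c hunk at the end reserves offload resources on the same bit.
         */
        int in_ord_rx = !!test_bit(SVC_RX_FULL_REORDER, &svc_map);

        printf("in_ord_rx=%d\n", in_ord_rx);
        return 0;
}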
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 739d9d69cf1c..774d8ceb4053 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -99,6 +99,7 @@ struct ath10k_skb_cb {
 
 struct ath10k_skb_rxcb {
 	dma_addr_t paddr;
+	struct hlist_node hlist;
 };
 
 static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -114,6 +115,9 @@ static inline struct ath10k_skb_rxcb *ATH10K_SKB_RXCB(struct sk_buff *skb)
 	return (struct ath10k_skb_rxcb *)skb->cb;
 }
 
+#define ATH10K_RXCB_SKB(rxcb) \
+	container_of((void *)rxcb, struct sk_buff, cb)
+
 static inline u32 host_interest_item_address(u32 item_offset)
 {
 	return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
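
ATH10K_RXCB_SKB() is the inverse of ATH10K_SKB_RXCB(): since the rxcb lives inside the sk_buff's cb[] scratch area at a fixed offset, container_of() can walk back from the hlist node stored in the hashtable to the owning sk_buff. A self-contained sketch of that pointer arithmetic, with made-up struct names standing in for sk_buff and ath10k_skb_rxcb:

#include <stddef.h>
#include <stdio.h>

/* container_of() as in the kernel: recover the enclosing struct from a
 * pointer to one of its members.
 */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical miniature of sk_buff with its control-block scratch area. */
struct pkt {
        int len;
        char cb[48];
};

/* Stand-in for ath10k_skb_rxcb, stored inside cb[]. */
struct pkt_cb {
        unsigned int paddr;
};

int main(void)
{
        struct pkt p = { .len = 100 };
        struct pkt_cb *cb = (struct pkt_cb *)p.cb;

        cb->paddr = 0x1000;

        /* Same trick as ATH10K_RXCB_SKB(): cb[] sits at a fixed offset, so
         * subtracting offsetof(struct pkt, cb) leads back to the pkt.
         */
        struct pkt *recovered = container_of((void *)cb, struct pkt, cb);

        printf("len via recovered pointer: %d\n", recovered->len);
        return 0;
}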
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 8809b37e1912..d1f6eb287a10 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -21,6 +21,7 @@
 #include <linux/bug.h>
 #include <linux/interrupt.h>
 #include <linux/dmapool.h>
+#include <linux/hashtable.h>
 #include <net/mac80211.h>
 
 #include "htc.h"
@@ -286,7 +287,19 @@ enum htt_t2h_msg_type {
 	HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
 	HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
 	HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
+	HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
+	HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
+	HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
+	HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
+	/* 0x13 reserved */
+	HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
+
+	/* FIXME: Do not depend on this event id. Numbering of this event id is
+	 * broken across different firmware revisions and HTT version fails to
+	 * indicate this.
+	 */
 	HTT_T2H_MSG_TYPE_TEST,
+
 	/* keep this last */
 	HTT_T2H_NUM_MSGS
 };
@@ -655,6 +668,53 @@ struct htt_rx_fragment_indication {
 #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
 #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB  6
 
+struct htt_rx_pn_ind {
+	__le16 peer_id;
+	u8 tid;
+	u8 seqno_start;
+	u8 seqno_end;
+	u8 pn_ie_count;
+	u8 reserved;
+	u8 pn_ies[0];
+} __packed;
+
+struct htt_rx_offload_msdu {
+	__le16 msdu_len;
+	__le16 peer_id;
+	u8 vdev_id;
+	u8 tid;
+	u8 fw_desc;
+	u8 payload[0];
+} __packed;
+
+struct htt_rx_offload_ind {
+	u8 reserved;
+	__le16 msdu_count;
+} __packed;
+
+struct htt_rx_in_ord_msdu_desc {
+	__le32 msdu_paddr;
+	__le16 msdu_len;
+	u8 fw_desc;
+	u8 reserved;
+} __packed;
+
+struct htt_rx_in_ord_ind {
+	u8 info;
+	__le16 peer_id;
+	u8 vdev_id;
+	u8 reserved;
+	__le16 msdu_count;
+	struct htt_rx_in_ord_msdu_desc msdu_descs[0];
+} __packed;
+
+#define HTT_RX_IN_ORD_IND_INFO_TID_MASK     0x0000001f
+#define HTT_RX_IN_ORD_IND_INFO_TID_LSB      0
+#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020
+#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB  5
+#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK    0x00000040
+#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB     6
+
 /*
  * target -> host test message definition
  *
@@ -1150,6 +1210,9 @@ struct htt_resp {
 		struct htt_rx_test rx_test;
 		struct htt_pktlog_msg pktlog_msg;
 		struct htt_stats_conf stats_conf;
+		struct htt_rx_pn_ind rx_pn_ind;
+		struct htt_rx_offload_ind rx_offload_ind;
+		struct htt_rx_in_ord_ind rx_in_ord_ind;
 	};
 } __packed;
 
@@ -1197,6 +1260,20 @@ struct ath10k_htt {
 	 * filled.
 	 */
 	struct sk_buff **netbufs_ring;
+
+	/* This is used only with firmware supporting IN_ORD_IND.
+	 *
+	 * With Full Rx Reorder the HTT Rx Ring is more of a temporary
+	 * buffer ring from which buffer addresses are copied by the
+	 * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
+	 * pointing to specific (re-ordered) buffers.
+	 *
+	 * FIXME: With kernel generic hashing functions there's a lot
+	 * of hash collisions for sk_buffs.
+	 */
+	bool in_ord_rx;
+	DECLARE_HASHTABLE(skb_table, 4);
+
 	/*
 	 * Ring of buffer addresses -
 	 * This ring holds the "physical" device address of the
@@ -1270,6 +1347,7 @@ struct ath10k_htt {
 	struct tasklet_struct txrx_compl_task;
 	struct sk_buff_head tx_compl_q;
 	struct sk_buff_head rx_compl_q;
+	struct sk_buff_head rx_in_ord_compl_q;
 
 	/* rx_status template */
 	struct ieee80211_rx_status rx_status;
@@ -1333,6 +1411,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
 void ath10k_htt_tx_free(struct ath10k_htt *htt);
 
 int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
+int ath10k_htt_rx_ring_refill(struct ath10k *ar);
 void ath10k_htt_rx_free(struct ath10k_htt *htt);
 
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
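
To make the new wire format above concrete: info packs the TID plus the offload and frag flags into a single byte, and a variable-length array of per-MSDU descriptors follows the fixed header. The sketch below fabricates one such event and decodes it. It assumes a little-endian host, so the __le16/__le32 accessors reduce to plain loads, and it abbreviates the struct and macro names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Field layout copied from the htt.h hunk; names abbreviated. */
#define INFO_TID_MASK     0x1f
#define INFO_OFFLOAD_MASK 0x20
#define INFO_FRAG_MASK    0x40

#pragma pack(push, 1)
struct msdu_desc {               /* htt_rx_in_ord_msdu_desc */
        uint32_t msdu_paddr;     /* __le32 on the wire */
        uint16_t msdu_len;       /* __le16 on the wire */
        uint8_t fw_desc;
        uint8_t reserved;
};

struct in_ord_ind {              /* htt_rx_in_ord_ind */
        uint8_t info;
        uint16_t peer_id;
        uint8_t vdev_id;
        uint8_t reserved;
        uint16_t msdu_count;
        /* struct msdu_desc msdu_descs[] follows */
};
#pragma pack(pop)

int main(void)
{
        /* Fabricated event: tid 5, offload bit set, one MSDU. */
        uint8_t raw[sizeof(struct in_ord_ind) + sizeof(struct msdu_desc)];
        struct in_ord_ind ev = { .info = 0x25, .msdu_count = 1 };
        struct msdu_desc d = { .msdu_paddr = 0x1000, .msdu_len = 64 };

        memcpy(raw, &ev, sizeof(ev));
        memcpy(raw + sizeof(ev), &d, sizeof(d));

        const struct in_ord_ind *p = (const void *)raw;

        printf("tid=%d offload=%d frag=%d msdus=%d\n",
               p->info & INFO_TID_MASK,
               !!(p->info & INFO_OFFLOAD_MASK),
               !!(p->info & INFO_FRAG_MASK),
               p->msdu_count);
        return 0;
}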
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 2e55ed7241ae..661785fb9906 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -25,8 +25,8 @@
 
 #include <linux/log2.h>
 
-#define HTT_RX_RING_SIZE 1024
-#define HTT_RX_RING_FILL_LEVEL 1000
+#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
+#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
 
 /* when under memory pressure rx ring refill may fail and needs a retry */
 #define HTT_RX_RING_REFILL_RETRY_MS 50
@@ -34,31 +34,70 @@
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
 static void ath10k_htt_txrx_compl_task(unsigned long ptr);
 
+static struct sk_buff *
+ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
+{
+        struct ath10k_skb_rxcb *rxcb;
+
+        hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
+                if (rxcb->paddr == paddr)
+                        return ATH10K_RXCB_SKB(rxcb);
+
+        WARN_ON_ONCE(1);
+        return NULL;
+}
+
 static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
 {
         struct sk_buff *skb;
-        struct ath10k_skb_rxcb *cb;
+        struct ath10k_skb_rxcb *rxcb;
+        struct hlist_node *n;
         int i;
 
-        for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
-                skb = htt->rx_ring.netbufs_ring[i];
-                cb = ATH10K_SKB_RXCB(skb);
-                dma_unmap_single(htt->ar->dev, cb->paddr,
-                                 skb->len + skb_tailroom(skb),
-                                 DMA_FROM_DEVICE);
-                dev_kfree_skb_any(skb);
+        if (htt->rx_ring.in_ord_rx) {
+                hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
+                        skb = ATH10K_RXCB_SKB(rxcb);
+                        dma_unmap_single(htt->ar->dev, rxcb->paddr,
+                                         skb->len + skb_tailroom(skb),
+                                         DMA_FROM_DEVICE);
+                        hash_del(&rxcb->hlist);
+                        dev_kfree_skb_any(skb);
+                }
+        } else {
+                for (i = 0; i < htt->rx_ring.size; i++) {
+                        skb = htt->rx_ring.netbufs_ring[i];
+                        if (!skb)
+                                continue;
+
+                        rxcb = ATH10K_SKB_RXCB(skb);
+                        dma_unmap_single(htt->ar->dev, rxcb->paddr,
+                                         skb->len + skb_tailroom(skb),
+                                         DMA_FROM_DEVICE);
+                        dev_kfree_skb_any(skb);
+                }
         }
 
         htt->rx_ring.fill_cnt = 0;
+        hash_init(htt->rx_ring.skb_table);
+        memset(htt->rx_ring.netbufs_ring, 0,
+               htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
 }
 
 static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
 {
         struct htt_rx_desc *rx_desc;
+        struct ath10k_skb_rxcb *rxcb;
         struct sk_buff *skb;
         dma_addr_t paddr;
         int ret = 0, idx;
 
+        /* The Full Rx Reorder firmware has no way of telling the host
+         * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
+         * To keep things simple make sure ring is always half empty. This
+         * guarantees there'll be no replenishment overruns possible.
+         */
+        BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
+
         idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
         while (num > 0) {
                 skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
@@ -86,11 +125,18 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
 			goto fail;
 		}
 
-		ATH10K_SKB_RXCB(skb)->paddr = paddr;
+		rxcb = ATH10K_SKB_RXCB(skb);
+		rxcb->paddr = paddr;
 		htt->rx_ring.netbufs_ring[idx] = skb;
 		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
 		htt->rx_ring.fill_cnt++;
 
+		if (htt->rx_ring.in_ord_rx) {
+			hash_add(htt->rx_ring.skb_table,
+				 &ATH10K_SKB_RXCB(skb)->hlist,
+				 (u32)paddr);
+		}
+
 		num--;
 		idx++;
 		idx &= htt->rx_ring.size_mask;
@@ -158,22 +204,20 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
 	ath10k_htt_rx_msdu_buff_replenish(htt);
 }
 
-static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
+int ath10k_htt_rx_ring_refill(struct ath10k *ar)
 {
-	struct sk_buff *skb;
-	int i;
+	struct ath10k_htt *htt = &ar->htt;
+	int ret;
 
-	for (i = 0; i < htt->rx_ring.size; i++) {
-		skb = htt->rx_ring.netbufs_ring[i];
-		if (!skb)
-			continue;
+	spin_lock_bh(&htt->rx_ring.lock);
+	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
+					      htt->rx_ring.fill_cnt));
+	spin_unlock_bh(&htt->rx_ring.lock);
 
-		dma_unmap_single(htt->ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
-				 skb->len + skb_tailroom(skb),
-				 DMA_FROM_DEVICE);
-		dev_kfree_skb_any(skb);
-		htt->rx_ring.netbufs_ring[i] = NULL;
-	}
+	if (ret)
+		ath10k_htt_rx_ring_free(htt);
+
+	return ret;
 }
 
 void ath10k_htt_rx_free(struct ath10k_htt *htt)
@@ -184,8 +228,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
 
 	skb_queue_purge(&htt->tx_compl_q);
 	skb_queue_purge(&htt->rx_compl_q);
+	skb_queue_purge(&htt->rx_in_ord_compl_q);
 
-	ath10k_htt_rx_ring_clean_up(htt);
+	ath10k_htt_rx_ring_free(htt);
 
 	dma_free_coherent(htt->ar->dev,
 			  (htt->rx_ring.size *
@@ -217,6 +262,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
 	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
 	msdu = htt->rx_ring.netbufs_ring[idx];
 	htt->rx_ring.netbufs_ring[idx] = NULL;
+	htt->rx_ring.paddrs_ring[idx] = 0;
 
 	idx++;
 	idx &= htt->rx_ring.size_mask;
@@ -384,6 +430,82 @@ static void ath10k_htt_rx_replenish_task(unsigned long ptr)
 	ath10k_htt_rx_msdu_buff_replenish(htt);
 }
 
+static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
+					       u32 paddr)
+{
+	struct ath10k *ar = htt->ar;
+	struct ath10k_skb_rxcb *rxcb;
+	struct sk_buff *msdu;
+
+	lockdep_assert_held(&htt->rx_ring.lock);
+
+	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
+	if (!msdu)
+		return NULL;
+
+	rxcb = ATH10K_SKB_RXCB(msdu);
+	hash_del(&rxcb->hlist);
+	htt->rx_ring.fill_cnt--;
+
+	dma_unmap_single(htt->ar->dev, rxcb->paddr,
+			 msdu->len + skb_tailroom(msdu),
+			 DMA_FROM_DEVICE);
+	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
+			msdu->data, msdu->len + skb_tailroom(msdu));
+
+	return msdu;
+}
+
+static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
+					struct htt_rx_in_ord_ind *ev,
+					struct sk_buff_head *list)
+{
+	struct ath10k *ar = htt->ar;
+	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
+	struct htt_rx_desc *rxd;
+	struct sk_buff *msdu;
+	int msdu_count;
+	bool is_offload;
+	u32 paddr;
+
+	lockdep_assert_held(&htt->rx_ring.lock);
+
+	msdu_count = __le16_to_cpu(ev->msdu_count);
+	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+
+	while (msdu_count--) {
+		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
+
+		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
+		if (!msdu) {
+			__skb_queue_purge(list);
+			return -ENOENT;
+		}
+
+		__skb_queue_tail(list, msdu);
+
+		if (!is_offload) {
+			rxd = (void *)msdu->data;
+
+			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+
+			skb_put(msdu, sizeof(*rxd));
+			skb_pull(msdu, sizeof(*rxd));
+			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
+
+			if (!(__le32_to_cpu(rxd->attention.flags) &
+			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
+				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
+				return -EIO;
+			}
+		}
+
+		msdu_desc++;
+	}
+
+	return 0;
+}
+
 int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
@@ -429,7 +551,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 
 	htt->rx_ring.alloc_idx.vaddr = vaddr;
 	htt->rx_ring.alloc_idx.paddr = paddr;
-	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
+	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
 	*htt->rx_ring.alloc_idx.vaddr = 0;
 
 	/* Initialize the Rx refill retry timer */
@@ -438,14 +560,15 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 	spin_lock_init(&htt->rx_ring.lock);
 
 	htt->rx_ring.fill_cnt = 0;
-	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
-		goto err_fill_ring;
+	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
+	hash_init(htt->rx_ring.skb_table);
 
 	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
 		     (unsigned long)htt);
 
 	skb_queue_head_init(&htt->tx_compl_q);
 	skb_queue_head_init(&htt->rx_compl_q);
+	skb_queue_head_init(&htt->rx_in_ord_compl_q);
 
 	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
 		     (unsigned long)htt);
@@ -454,12 +577,6 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 		   htt->rx_ring.size, htt->rx_ring.fill_level);
 	return 0;
 
-err_fill_ring:
-	ath10k_htt_rx_ring_free(htt);
-	dma_free_coherent(htt->ar->dev,
-			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
-			  htt->rx_ring.alloc_idx.vaddr,
-			  htt->rx_ring.alloc_idx.paddr);
 err_dma_idx:
 	dma_free_coherent(htt->ar->dev,
 			  (htt->rx_ring.size *
@@ -1583,6 +1700,194 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
 	spin_unlock_bh(&ar->data_lock);
 }
 
+static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
+				       struct sk_buff_head *amsdu)
+{
+	struct sk_buff *msdu;
+	struct htt_rx_desc *rxd;
+
+	if (skb_queue_empty(list))
+		return -ENOBUFS;
+
+	if (WARN_ON(!skb_queue_empty(amsdu)))
+		return -EINVAL;
+
+	while ((msdu = __skb_dequeue(list))) {
+		__skb_queue_tail(amsdu, msdu);
+
+		rxd = (void *)msdu->data - sizeof(*rxd);
+		if (rxd->msdu_end.info0 &
+		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
+			break;
+	}
+
+	msdu = skb_peek_tail(amsdu);
+	rxd = (void *)msdu->data - sizeof(*rxd);
+	if (!(rxd->msdu_end.info0 &
+	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
+		skb_queue_splice_init(amsdu, list);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
+					    struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+	if (!ieee80211_has_protected(hdr->frame_control))
+		return;
+
+	/* Offloaded frames are already decrypted but firmware insists they are
+	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
+	 * will drop the frame.
+	 */
+
+	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+	status->flag |= RX_FLAG_DECRYPTED |
+			RX_FLAG_IV_STRIPPED |
+			RX_FLAG_MMIC_STRIPPED;
+}
+
+static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+				       struct sk_buff_head *list)
+{
+	struct ath10k_htt *htt = &ar->htt;
+	struct ieee80211_rx_status *status = &htt->rx_status;
+	struct htt_rx_offload_msdu *rx;
+	struct sk_buff *msdu;
+	size_t offset;
+
+	while ((msdu = __skb_dequeue(list))) {
+		/* Offloaded frames don't have Rx descriptor. Instead they have
+		 * a short meta information header.
+		 */
+
+		rx = (void *)msdu->data;
+
+		skb_put(msdu, sizeof(*rx));
+		skb_pull(msdu, sizeof(*rx));
+
+		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
+			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
+			dev_kfree_skb_any(msdu);
+			continue;
+		}
+
+		skb_put(msdu, __le16_to_cpu(rx->msdu_len));
+
+		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
+		 * actual payload is unaligned. Align the frame. Otherwise
+		 * mac80211 complains. This shouldn't reduce performance much
+		 * because these offloaded frames are rare.
+		 */
+		offset = 4 - ((unsigned long)msdu->data & 3);
+		skb_put(msdu, offset);
+		memmove(msdu->data + offset, msdu->data, msdu->len);
+		skb_pull(msdu, offset);
+
+		/* FIXME: The frame is NWifi. Re-construct QoS Control
+		 * if possible later.
+		 */
+
+		memset(status, 0, sizeof(*status));
+		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
+		ath10k_htt_rx_h_channel(ar, status);
+		ath10k_process_rx(ar, status, msdu);
+	}
+}
+
+static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct ath10k_htt *htt = &ar->htt;
+	struct htt_resp *resp = (void *)skb->data;
+	struct ieee80211_rx_status *status = &htt->rx_status;
+	struct sk_buff_head list;
+	struct sk_buff_head amsdu;
+	u16 peer_id;
+	u16 msdu_count;
+	u8 vdev_id;
+	u8 tid;
+	bool offload;
+	bool frag;
+	int ret;
+
+	lockdep_assert_held(&htt->rx_ring.lock);
+
+	if (htt->rx_confused)
+		return;
+
+	skb_pull(skb, sizeof(resp->hdr));
+	skb_pull(skb, sizeof(resp->rx_in_ord_ind));
+
+	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
+	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
+	vdev_id = resp->rx_in_ord_ind.vdev_id;
+	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
+	offload = !!(resp->rx_in_ord_ind.info &
+		     HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT,
+		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
+		   vdev_id, peer_id, tid, offload, frag, msdu_count);
+
+	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
+		ath10k_warn(ar, "dropping invalid in order rx indication\n");
+		return;
+	}
+
+	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
+	 * extracted and processed.
+	 */
+	__skb_queue_head_init(&list);
+	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
+	if (ret < 0) {
+		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
+		htt->rx_confused = true;
+		return;
+	}
+
+	/* Offloaded frames are very different and need to be handled
+	 * separately.
+	 */
+	if (offload)
+		ath10k_htt_rx_h_rx_offload(ar, &list);
+
+	while (!skb_queue_empty(&list)) {
+		__skb_queue_head_init(&amsdu);
+		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
+		switch (ret) {
+		case 0:
+			/* Note: The in-order indication may report interleaved
+			 * frames from different PPDUs meaning reported rx rate
+			 * to mac80211 isn't accurate/reliable. It's still
+			 * better to report something than nothing though. This
+			 * should still give an idea about rx rate to the user.
+			 */
+			ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
+			ath10k_htt_rx_h_filter(ar, &amsdu, status);
+			ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
+			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
+			break;
+		case -EAGAIN:
+			/* fall through */
+		default:
+			/* Should not happen. */
+			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
+			htt->rx_confused = true;
+			__skb_queue_purge(&list);
+			return;
+		}
+	}
+
+	tasklet_schedule(&htt->rx_replenish_task);
+}
+
 void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ath10k_htt *htt = &ar->htt;
@@ -1705,6 +2010,20 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 		 */
 		break;
 	}
+	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
+		spin_lock_bh(&htt->rx_ring.lock);
+		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
+		spin_unlock_bh(&htt->rx_ring.lock);
+		tasklet_schedule(&htt->txrx_compl_task);
+		return;
+	}
+	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
+		/* FIXME: This WMI-TLV event is overlapping with 10.2
+		 * CHAN_CHANGE - both being 0xF. Neither is being used in
+		 * practice so no immediate action is necessary. Nevertheless
+		 * HTT may need an abstraction layer like WMI has one day.
+		 */
+		break;
 	default:
 		ath10k_warn(ar, "htt event (%d) not handled\n",
 			    resp->hdr.msg_type);
@@ -1720,6 +2039,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 static void ath10k_htt_txrx_compl_task(unsigned long ptr)
 {
 	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+	struct ath10k *ar = htt->ar;
 	struct htt_resp *resp;
 	struct sk_buff *skb;
 
@@ -1736,5 +2056,10 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
 		ath10k_htt_rx_handler(htt, &resp->rx_ind);
 		dev_kfree_skb_any(skb);
 	}
+
+	while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
+		ath10k_htt_rx_in_ord_ind(ar, skb);
+		dev_kfree_skb_any(skb);
+	}
 	spin_unlock_bh(&htt->rx_ring.lock);
 }
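
The subtlest piece of the new rx path above is ath10k_htt_rx_extract_amsdu(): MSDUs arrive as one flat list and A-MSDU boundaries are only discoverable by checking each rx descriptor for the LAST_MSDU bit. Below is a simplified userspace rendering of that loop; unlike the driver, it does not splice a truncated A-MSDU back onto the source list before reporting the failure that corresponds to -EAGAIN.

#include <stdbool.h>
#include <stdio.h>

/* Toy MSDU with the one flag ath10k_htt_rx_extract_amsdu() cares about. */
struct msdu {
        int id;
        bool last_msdu;         /* RX_MSDU_END_INFO0_LAST_MSDU in the driver */
        struct msdu *next;
};

/* Move MSDUs from *list to *amsdu until the LAST_MSDU flag closes the
 * A-MSDU; mirrors the dequeue loop in the patch. Returns 0 on a complete
 * A-MSDU, -1 (the driver's -EAGAIN case) if the list runs out first.
 */
static int extract_amsdu(struct msdu **list, struct msdu **amsdu)
{
        struct msdu *m, *tail = NULL;

        while ((m = *list)) {
                *list = m->next;
                m->next = NULL;

                if (tail)
                        tail->next = m;
                else
                        *amsdu = m;
                tail = m;

                if (m->last_msdu)
                        return 0;
        }
        return -1;      /* truncated A-MSDU: the driver marks rx as confused */
}

int main(void)
{
        struct msdu c = { .id = 3, .last_msdu = true };
        struct msdu b = { .id = 2, .next = &c };
        struct msdu a = { .id = 1, .next = &b };
        struct msdu *list = &a, *amsdu = NULL;

        if (extract_amsdu(&list, &amsdu) == 0)
                for (struct msdu *m = amsdu; m; m = m->next)
                        printf("msdu %d\n", m->id);
        return 0;
}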
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index a6c634a585dd..35f519123752 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1052,8 +1052,15 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
 
 	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
 	cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
-	cfg->num_offload_peers = __cpu_to_le32(0);
-	cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
+
+	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
+		cfg->num_offload_peers = __cpu_to_le32(3);
+		cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
+	} else {
+		cfg->num_offload_peers = __cpu_to_le32(0);
+		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
+	}
+
 	cfg->num_peer_keys = __cpu_to_le32(2);
 	cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
 	cfg->ast_skid_limit = __cpu_to_le32(0x10);