Diffstat (limited to 'drivers/net/wireless/ath/ath10k')
 drivers/net/wireless/ath/ath10k/ce.c     |  16
 drivers/net/wireless/ath/ath10k/ce.h     |   9
 drivers/net/wireless/ath/ath10k/core.h   |  34
 drivers/net/wireless/ath/ath10k/hif.h    |  25
 drivers/net/wireless/ath/ath10k/htc.c    |  25
 drivers/net/wireless/ath/ath10k/htt.h    |  18
 drivers/net/wireless/ath/ath10k/htt_rx.c | 236
 drivers/net/wireless/ath/ath10k/htt_tx.c | 205
 drivers/net/wireless/ath/ath10k/mac.c    | 268
 drivers/net/wireless/ath/ath10k/pci.c    | 389
 drivers/net/wireless/ath/ath10k/pci.h    |  28
 drivers/net/wireless/ath/ath10k/txrx.c   |  24
 drivers/net/wireless/ath/ath10k/wmi.c    |  18
 13 files changed, 638 insertions, 657 deletions
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index d44d618b05f9..a79499c82350 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -266,12 +266,12 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
  * ath10k_ce_sendlist_send.
  * The caller takes responsibility for any needed locking.
  */
-static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
-				 void *per_transfer_context,
-				 u32 buffer,
-				 unsigned int nbytes,
-				 unsigned int transfer_id,
-				 unsigned int flags)
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+			  void *per_transfer_context,
+			  u32 buffer,
+			  unsigned int nbytes,
+			  unsigned int transfer_id,
+			  unsigned int flags)
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
@@ -1067,9 +1067,9 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
  *
  * For the lack of a better place do the check here.
  */
-	BUILD_BUG_ON(TARGET_NUM_MSDU_DESC >
+	BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
 		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-	BUILD_BUG_ON(TARGET_10X_NUM_MSDU_DESC >
+	BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
 		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
 
 	ret = ath10k_pci_wake(ar);
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 67dbde6a5c74..8eb7f99ed992 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -23,7 +23,7 @@
 
 /* Maximum number of Copy Engine's supported */
 #define CE_COUNT_MAX 8
-#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
 
 /* Descriptor rings must be aligned to this boundary */
 #define CE_DESC_RING_ALIGN	8
@@ -152,6 +152,13 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
 		   unsigned int transfer_id,
 		   unsigned int flags);
 
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+			  void *per_transfer_context,
+			  u32 buffer,
+			  unsigned int nbytes,
+			  unsigned int transfer_id,
+			  unsigned int flags);
+
 void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
 				void (*send_cb)(struct ath10k_ce_pipe *),
 				int disable_interrupts);
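Why the constant above doubles from 2048 to 4096: with the scatter-gather tx path each queued MSDU posts two CE source ring entries (one for the HTT descriptor block, one for the frame itself), which is also why the BUILD_BUG_ONs in ce.c now compare 2*TARGET_NUM_MSDU_DESC against the usable ring size. A minimal sketch of the same sizing rule follows; the MSDU descriptor value here is a hypothetical stand-in, not taken from this patch:

/* Sizing sketch: two CE source ring entries per MSDU, with one slot
 * kept free so a full ring can be told apart from an empty one.
 * TARGET_NUM_MSDU_DESC_EXAMPLE is a hypothetical stand-in. */
#define CE_HTT_H2T_MSG_SRC_NENTRIES	4096
#define TARGET_NUM_MSDU_DESC_EXAMPLE	1424

_Static_assert(2 * TARGET_NUM_MSDU_DESC_EXAMPLE <=
	       CE_HTT_H2T_MSG_SRC_NENTRIES - 1,
	       "CE HTT H2T ring too small for two entries per MSDU");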
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 1fc26fe057e8..0e71979d837c 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -62,16 +62,13 @@ struct ath10k;
 
 struct ath10k_skb_cb {
 	dma_addr_t paddr;
-	bool is_mapped;
-	bool is_aborted;
 	u8 vdev_id;
 
 	struct {
 		u8 tid;
 		bool is_offchan;
-
-		u8 frag_len;
-		u8 pad_len;
+		struct ath10k_htt_txbuf *txbuf;
+		u32 txbuf_paddr;
 	} __packed htt;
 
 	struct {
@@ -87,32 +84,6 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
 	return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
 }
 
-static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb)
-{
-	if (ATH10K_SKB_CB(skb)->is_mapped)
-		return -EINVAL;
-
-	ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len,
-						   DMA_TO_DEVICE);
-
-	if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr)))
-		return -EIO;
-
-	ATH10K_SKB_CB(skb)->is_mapped = true;
-	return 0;
-}
-
-static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb)
-{
-	if (!ATH10K_SKB_CB(skb)->is_mapped)
-		return -EINVAL;
-
-	dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len,
-			 DMA_TO_DEVICE);
-	ATH10K_SKB_CB(skb)->is_mapped = false;
-	return 0;
-}
-
 static inline u32 host_interest_item_address(u32 item_offset)
 {
 	return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
@@ -288,6 +259,7 @@ struct ath10k_vif {
 
 	u8 fixed_rate;
 	u8 fixed_nss;
+	u8 force_sgi;
 };
 
 struct ath10k_vif_iter {
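The removed ath10k_skb_map()/ath10k_skb_unmap() helpers are replaced at every call site by the open-coded streaming-DMA idiom shown below; the is_mapped bookkeeping disappears because each path now owns exactly one map/unmap pair. A condensed sketch of the pattern the later hunks repeat (helper name and error value are illustrative):

/* Sketch of the map/check/unmap pattern used by the converted call
 * sites. dma_mapping_error() must be consulted before the address
 * is trusted. */
static int example_map_tx_skb(struct device *dev, struct sk_buff *skb)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);

	skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
				       DMA_TO_DEVICE);
	if (dma_mapping_error(dev, skb_cb->paddr))
		return -EIO;

	/* ... hand skb_cb->paddr to the hardware ... */

	dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
	return 0;
}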
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index dcdea68bcc0a..2ac7beacddca 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -21,6 +21,14 @@
 #include <linux/kernel.h>
 #include "core.h"
 
+struct ath10k_hif_sg_item {
+	u16 transfer_id;
+	void *transfer_context; /* NULL = tx completion callback not called */
+	void *vaddr; /* for debugging mostly */
+	u32 paddr;
+	u16 len;
+};
+
 struct ath10k_hif_cb {
 	int (*tx_completion)(struct ath10k *ar,
 			     struct sk_buff *wbuf,
@@ -31,11 +39,9 @@ struct ath10k_hif_cb {
 };
 
 struct ath10k_hif_ops {
-	/* Send the head of a buffer to HIF for transmission to the target. */
-	int (*send_head)(struct ath10k *ar, u8 pipe_id,
-			 unsigned int transfer_id,
-			 unsigned int nbytes,
-			 struct sk_buff *buf);
+	/* send a scatter-gather list to the target */
+	int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
+		     struct ath10k_hif_sg_item *items, int n_items);
 
 	/*
 	 * API to handle HIF-specific BMI message exchanges, this API is
@@ -86,12 +92,11 @@ struct ath10k_hif_ops {
 };
 
 
-static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id,
-				       unsigned int transfer_id,
-				       unsigned int nbytes,
-				       struct sk_buff *buf)
+static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+				   struct ath10k_hif_sg_item *items,
+				   int n_items)
 {
-	return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf);
+	return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
 }
 
 static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
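For context, a sketch of how a caller might drive the new op with a single, already DMA-mapped buffer; this mirrors the one-item usage ath10k_htc_send() adopts in the next file. The helper name is made up:

/* Hypothetical helper: send one DMA-mapped buffer through the new
 * sg op. Mirrors the single-item usage in ath10k_htc_send(). */
static int example_send_one(struct ath10k *ar, u8 pipe_id, u16 eid,
			    struct sk_buff *skb, u32 paddr)
{
	struct ath10k_hif_sg_item item = {
		.transfer_id = eid,
		.transfer_context = skb, /* non-NULL: completion cb runs */
		.vaddr = skb->data,      /* for debugging mostly */
		.paddr = paddr,
		.len = skb->len,
	};

	return ath10k_hif_tx_sg(ar, pipe_id, &item, 1);
}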
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index edc57ab505c8..7f1bccd3597f 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -63,7 +63,9 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
 static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
 					     struct sk_buff *skb)
 {
-	ath10k_skb_unmap(htc->ar->dev, skb);
+	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+
+	dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
 	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
 }
 
@@ -122,6 +124,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
 		    struct sk_buff *skb)
 {
 	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+	struct ath10k_hif_sg_item sg_item;
+	struct device *dev = htc->ar->dev;
 	int credits = 0;
 	int ret;
 
@@ -157,19 +162,25 @@ int ath10k_htc_send(struct ath10k_htc *htc,
 
 	ath10k_htc_prepare_tx_skb(ep, skb);
 
-	ret = ath10k_skb_map(htc->ar->dev, skb);
+	skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+	ret = dma_mapping_error(dev, skb_cb->paddr);
 	if (ret)
 		goto err_credits;
 
-	ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
-				   skb->len, skb);
+	sg_item.transfer_id = ep->eid;
+	sg_item.transfer_context = skb;
+	sg_item.vaddr = skb->data;
+	sg_item.paddr = skb_cb->paddr;
+	sg_item.len = skb->len;
+
+	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
 	if (ret)
 		goto err_unmap;
 
 	return 0;
 
 err_unmap:
-	ath10k_skb_unmap(htc->ar->dev, skb);
+	dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
 err_credits:
 	if (ep->tx_credit_flow_enabled) {
 		spin_lock_bh(&htc->tx_lock);
@@ -191,10 +202,8 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
 	struct ath10k_htc *htc = &ar->htc;
 	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
 
-	if (!skb) {
-		ath10k_warn("invalid sk_buff completion - NULL pointer. firmware crashed?\n");
+	if (WARN_ON_ONCE(!skb))
 		return 0;
-	}
 
 	ath10k_htc_notify_tx_completion(ep, skb);
 	/* the skb now belongs to the completion handler */
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index b93ae355bc08..654867fc1ae7 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -20,6 +20,7 @@
 
 #include <linux/bug.h>
 #include <linux/interrupt.h>
+#include <linux/dmapool.h>
 
 #include "htc.h"
 #include "rx_desc.h"
@@ -1181,11 +1182,20 @@ struct htt_rx_info {
 		u32 info1;
 		u32 info2;
 	} rate;
+
+	u32 tsf;
 	bool fcs_err;
 	bool amsdu_more;
 	bool mic_err;
 };
 
+struct ath10k_htt_txbuf {
+	struct htt_data_tx_desc_frag frags[2];
+	struct ath10k_htc_hdr htc_hdr;
+	struct htt_cmd_hdr cmd_hdr;
+	struct htt_data_tx_desc cmd_tx;
+} __packed;
+
 struct ath10k_htt {
 	struct ath10k *ar;
 	enum ath10k_htc_ep_id eid;
@@ -1267,11 +1277,18 @@ struct ath10k_htt {
 	struct sk_buff **pending_tx;
 	unsigned long *used_msdu_ids; /* bitmap */
 	wait_queue_head_t empty_tx_wq;
+	struct dma_pool *tx_pool;
 
 	/* set if host-fw communication goes haywire
 	 * used to avoid further failures */
 	bool rx_confused;
 	struct tasklet_struct rx_replenish_task;
+
+	/* This is used to group tx/rx completions separately and process them
+	 * in batches to reduce cache stalls */
+	struct tasklet_struct txrx_compl_task;
+	struct sk_buff_head tx_compl_q;
+	struct sk_buff_head rx_compl_q;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64
@@ -1343,4 +1360,5 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
 int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
+
 #endif
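The ath10k_htt_txbuf added above packs the fragment list, HTC header and HTT command into one coherent allocation, so a single dma_pool buffer can back both halves of the sg transfer. A sketch of how the bus addresses relate, assuming the __packed layout above (helper names are hypothetical):

/* Sketch: frags sit at offset 0 (what cmd_tx.frags_paddr points to),
 * while the HTC header that actually goes over the pipe starts right
 * after them. */
#include <linux/stddef.h>

static u32 example_frags_paddr(u32 txbuf_paddr)
{
	return txbuf_paddr + offsetof(struct ath10k_htt_txbuf, frags);
}

static u32 example_htc_hdr_paddr(u32 txbuf_paddr)
{
	return txbuf_paddr + offsetof(struct ath10k_htt_txbuf, htc_hdr);
}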
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 4767c24bf819..cdcbe2de95f9 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -43,7 +43,7 @@
 
 
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
-
+static void ath10k_htt_txrx_compl_task(unsigned long ptr);
 
 static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
 {
@@ -225,18 +225,16 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
 	ath10k_htt_rx_msdu_buff_replenish(htt);
 }
 
-static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
-{
-	return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
-		htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
-}
-
 void ath10k_htt_rx_detach(struct ath10k_htt *htt)
 {
 	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
 
 	del_timer_sync(&htt->rx_ring.refill_retry_timer);
 	tasklet_kill(&htt->rx_replenish_task);
+	tasklet_kill(&htt->txrx_compl_task);
+
+	skb_queue_purge(&htt->tx_compl_q);
+	skb_queue_purge(&htt->rx_compl_q);
 
 	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
 		struct sk_buff *skb =
@@ -270,10 +268,12 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
 	int idx;
 	struct sk_buff *msdu;
 
-	spin_lock_bh(&htt->rx_ring.lock);
+	lockdep_assert_held(&htt->rx_ring.lock);
 
-	if (ath10k_htt_rx_ring_elems(htt) == 0)
-		ath10k_warn("htt rx ring is empty!\n");
+	if (htt->rx_ring.fill_cnt == 0) {
+		ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
+		return NULL;
+	}
 
 	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
 	msdu = htt->rx_ring.netbufs_ring[idx];
@@ -283,7 +283,6 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
 	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
 	htt->rx_ring.fill_cnt--;
 
-	spin_unlock_bh(&htt->rx_ring.lock);
 	return msdu;
 }
 
@@ -307,8 +306,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
 	struct sk_buff *msdu;
 	struct htt_rx_desc *rx_desc;
 
-	if (ath10k_htt_rx_ring_elems(htt) == 0)
-		ath10k_warn("htt rx ring is empty!\n");
+	lockdep_assert_held(&htt->rx_ring.lock);
 
 	if (htt->rx_confused) {
 		ath10k_warn("htt is confused. refusing rx\n");
@@ -400,6 +398,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
 		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
 			      RX_MSDU_START_INFO0_MSDU_LENGTH);
 		msdu_chained = rx_desc->frag_info.ring2_more_count;
+		msdu_chaining = msdu_chained;
 
 		if (msdu_len_invalid)
 			msdu_len = 0;
@@ -427,7 +426,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
 
 			msdu->next = next;
 			msdu = next;
-			msdu_chaining = 1;
 		}
 
 		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
@@ -529,6 +527,12 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
 	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
 		     (unsigned long)htt);
 
+	skb_queue_head_init(&htt->tx_compl_q);
+	skb_queue_head_init(&htt->rx_compl_q);
+
+	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
+		     (unsigned long)htt);
+
 	ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
 		   htt->rx_ring.size, htt->rx_ring.fill_level);
 	return 0;
@@ -632,6 +636,12 @@ struct amsdu_subframe_hdr {
 	__be16 len;
 } __packed;
 
+static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
+{
+	/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
+	return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+}
+
 static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
 				struct htt_rx_info *info)
 {
@@ -681,7 +691,7 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
 	case RX_MSDU_DECAP_NATIVE_WIFI:
 		/* pull decapped header and copy DA */
 		hdr = (struct ieee80211_hdr *)skb->data;
-		hdr_len = ieee80211_hdrlen(hdr->frame_control);
+		hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
 		memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
 		skb_pull(skb, hdr_len);
 
@@ -768,7 +778,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
 	case RX_MSDU_DECAP_NATIVE_WIFI:
 		/* Pull decapped header */
 		hdr = (struct ieee80211_hdr *)skb->data;
-		hdr_len = ieee80211_hdrlen(hdr->frame_control);
+		hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
 		skb_pull(skb, hdr_len);
 
 		/* Push original header */
@@ -846,6 +856,20 @@ static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
 	return false;
 }
 
+static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
+{
+	struct htt_rx_desc *rxd;
+	u32 flags;
+
+	rxd = (void *)skb->data - sizeof(*rxd);
+	flags = __le32_to_cpu(rxd->attention.flags);
+
+	if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
+		return true;
+
+	return false;
+}
+
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
 {
 	struct htt_rx_desc *rxd;
@@ -877,6 +901,57 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
 	return CHECKSUM_UNNECESSARY;
 }
 
+static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
+{
+	struct sk_buff *next = msdu_head->next;
+	struct sk_buff *to_free = next;
+	int space;
+	int total_len = 0;
+
+	/* TODO: Might could optimize this by using
+	 * skb_try_coalesce or similar method to
+	 * decrease copying, or maybe get mac80211 to
+	 * provide a way to just receive a list of
+	 * skb?
+	 */
+
+	msdu_head->next = NULL;
+
+	/* Allocate total length all at once. */
+	while (next) {
+		total_len += next->len;
+		next = next->next;
+	}
+
+	space = total_len - skb_tailroom(msdu_head);
+	if ((space > 0) &&
+	    (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
+		/* TODO: bump some rx-oom error stat */
+		/* put it back together so we can free the
+		 * whole list at once.
+		 */
+		msdu_head->next = to_free;
+		return -1;
+	}
+
+	/* Walk list again, copying contents into
+	 * msdu_head
+	 */
+	next = to_free;
+	while (next) {
+		skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
+					  next->len);
+		next = next->next;
+	}
+
+	/* If here, we have consolidated skb. Free the
+	 * fragments and pass the main skb on up the
+	 * stack.
+	 */
+	ath10k_htt_rx_free_msdu_chain(to_free);
+	return 0;
+}
+
 static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
 				  struct htt_rx_indication *rx)
 {
@@ -888,6 +963,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
 	u8 *fw_desc;
 	int i, j;
 
+	lockdep_assert_held(&htt->rx_ring.lock);
+
 	memset(&info, 0, sizeof(info));
 
 	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
@@ -940,7 +1017,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
 			status = info.status;
 
 			/* Skip mgmt frames while we handle this in WMI */
-			if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
+			if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
+			    ath10k_htt_rx_is_mgmt(msdu_head)) {
 				ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
 				ath10k_htt_rx_free_msdu_chain(msdu_head);
 				continue;
@@ -964,10 +1042,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
 				continue;
 			}
 
-			/* FIXME: we do not support chaining yet.
-			 * this needs investigation */
-			if (msdu_chaining) {
-				ath10k_warn("htt rx msdu_chaining is true\n");
+			if (msdu_chaining &&
+			    (ath10k_unchain_msdu(msdu_head) < 0)) {
 				ath10k_htt_rx_free_msdu_chain(msdu_head);
 				continue;
 			}
@@ -990,6 +1066,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
 			info.rate.info0 = rx->ppdu.info0;
 			info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
 			info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
+			info.tsf = __le32_to_cpu(rx->ppdu.tsf);
 
 			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
 
@@ -1023,8 +1100,11 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
 
 	msdu_head = NULL;
 	msdu_tail = NULL;
+
+	spin_lock_bh(&htt->rx_ring.lock);
 	msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
 						&msdu_head, &msdu_tail);
+	spin_unlock_bh(&htt->rx_ring.lock);
 
 	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
 
@@ -1116,6 +1196,45 @@ end:
 	}
 }
 
+static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
+				       struct sk_buff *skb)
+{
+	struct ath10k_htt *htt = &ar->htt;
+	struct htt_resp *resp = (struct htt_resp *)skb->data;
+	struct htt_tx_done tx_done = {};
+	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
+	__le16 msdu_id;
+	int i;
+
+	lockdep_assert_held(&htt->tx_lock);
+
+	switch (status) {
+	case HTT_DATA_TX_STATUS_NO_ACK:
+		tx_done.no_ack = true;
+		break;
+	case HTT_DATA_TX_STATUS_OK:
+		break;
+	case HTT_DATA_TX_STATUS_DISCARD:
+	case HTT_DATA_TX_STATUS_POSTPONE:
+	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
+		tx_done.discard = true;
+		break;
+	default:
+		ath10k_warn("unhandled tx completion status %d\n", status);
+		tx_done.discard = true;
+		break;
+	}
+
+	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
+		   resp->data_tx_completion.num_msdus);
+
+	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
+		msdu_id = resp->data_tx_completion.msdus[i];
+		tx_done.msdu_id = __le16_to_cpu(msdu_id);
+		ath10k_txrx_tx_unref(htt, &tx_done);
+	}
+}
+
 void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ath10k_htt *htt = &ar->htt;
@@ -1134,10 +1253,12 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 		complete(&htt->target_version_received);
 		break;
 	}
-	case HTT_T2H_MSG_TYPE_RX_IND: {
-		ath10k_htt_rx_handler(htt, &resp->rx_ind);
-		break;
-	}
+	case HTT_T2H_MSG_TYPE_RX_IND:
+		spin_lock_bh(&htt->rx_ring.lock);
+		__skb_queue_tail(&htt->rx_compl_q, skb);
+		spin_unlock_bh(&htt->rx_ring.lock);
+		tasklet_schedule(&htt->txrx_compl_task);
+		return;
 	case HTT_T2H_MSG_TYPE_PEER_MAP: {
 		struct htt_peer_map_event ev = {
 			.vdev_id = resp->peer_map.vdev_id,
@@ -1172,44 +1293,17 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 		break;
 	}
 
+		spin_lock_bh(&htt->tx_lock);
 		ath10k_txrx_tx_unref(htt, &tx_done);
+		spin_unlock_bh(&htt->tx_lock);
 		break;
 	}
-	case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
-		struct htt_tx_done tx_done = {};
-		int status = MS(resp->data_tx_completion.flags,
-				HTT_DATA_TX_STATUS);
-		__le16 msdu_id;
-		int i;
-
-		switch (status) {
-		case HTT_DATA_TX_STATUS_NO_ACK:
-			tx_done.no_ack = true;
-			break;
-		case HTT_DATA_TX_STATUS_OK:
-			break;
-		case HTT_DATA_TX_STATUS_DISCARD:
-		case HTT_DATA_TX_STATUS_POSTPONE:
-		case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
-			tx_done.discard = true;
-			break;
-		default:
-			ath10k_warn("unhandled tx completion status %d\n",
-				    status);
-			tx_done.discard = true;
-			break;
-		}
-
-		ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
-			   resp->data_tx_completion.num_msdus);
-
-		for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
-			msdu_id = resp->data_tx_completion.msdus[i];
-			tx_done.msdu_id = __le16_to_cpu(msdu_id);
-			ath10k_txrx_tx_unref(htt, &tx_done);
-		}
-		break;
-	}
+	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
+		spin_lock_bh(&htt->tx_lock);
+		__skb_queue_tail(&htt->tx_compl_q, skb);
+		spin_unlock_bh(&htt->tx_lock);
+		tasklet_schedule(&htt->txrx_compl_task);
+		return;
 	case HTT_T2H_MSG_TYPE_SEC_IND: {
 		struct ath10k *ar = htt->ar;
 		struct htt_security_indication *ev = &resp->security_indication;
@@ -1249,3 +1343,25 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 	/* Free the indication buffer */
 	dev_kfree_skb_any(skb);
 }
+
+static void ath10k_htt_txrx_compl_task(unsigned long ptr)
+{
+	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+	struct htt_resp *resp;
+	struct sk_buff *skb;
+
+	spin_lock_bh(&htt->tx_lock);
+	while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
+		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
+		dev_kfree_skb_any(skb);
+	}
+	spin_unlock_bh(&htt->tx_lock);
+
+	spin_lock_bh(&htt->rx_ring.lock);
+	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
+		resp = (struct htt_resp *)skb->data;
+		ath10k_htt_rx_handler(htt, &resp->rx_ind);
+		dev_kfree_skb_any(skb);
+	}
+	spin_unlock_bh(&htt->rx_ring.lock);
+}
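The last two hunks implement a queue-then-drain pattern: the hot interrupt-driven path only appends indications under the relevant lock and schedules a tasklet, which later processes all queued tx and rx completions back to back. Distilled into a generic sketch with hypothetical names:

#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical minimal version of the batching scheme. */
struct compl_batcher {
	spinlock_t lock;
	struct sk_buff_head q;
	struct tasklet_struct task;
};

static void compl_enqueue(struct compl_batcher *b, struct sk_buff *skb)
{
	spin_lock_bh(&b->lock);
	__skb_queue_tail(&b->q, skb);	/* no processing in the hot path */
	spin_unlock_bh(&b->lock);
	tasklet_schedule(&b->task);	/* repeated schedules coalesce */
}

static void compl_drain(unsigned long ptr)
{
	struct compl_batcher *b = (struct compl_batcher *)ptr;
	struct sk_buff *skb;

	spin_lock_bh(&b->lock);
	while ((skb = __skb_dequeue(&b->q))) {
		/* handle one completion; same-type events stay warm in cache */
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&b->lock);
}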
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index acaa046dc93b..7a3e2e40dd5c 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -109,6 +109,14 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
 		return -ENOMEM;
 	}
 
+	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
+				       sizeof(struct ath10k_htt_txbuf), 4, 0);
+	if (!htt->tx_pool) {
+		kfree(htt->used_msdu_ids);
+		kfree(htt->pending_tx);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
@@ -117,9 +125,7 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
 	struct htt_tx_done tx_done = {0};
 	int msdu_id;
 
-	/* No locks needed. Called after communication with the device has
-	 * been stopped. */
-
+	spin_lock_bh(&htt->tx_lock);
 	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
 		if (!test_bit(msdu_id, htt->used_msdu_ids))
 			continue;
@@ -132,6 +138,7 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
 
 		ath10k_txrx_tx_unref(htt, &tx_done);
 	}
+	spin_unlock_bh(&htt->tx_lock);
 }
 
 void ath10k_htt_tx_detach(struct ath10k_htt *htt)
@@ -139,6 +146,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
 	ath10k_htt_tx_cleanup_pending(htt);
 	kfree(htt->pending_tx);
 	kfree(htt->used_msdu_ids);
+	dma_pool_destroy(htt->tx_pool);
 	return;
 }
 
@@ -334,7 +342,9 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 		goto err_free_msdu_id;
 	}
 
-	res = ath10k_skb_map(dev, msdu);
+	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+				       DMA_TO_DEVICE);
+	res = dma_mapping_error(dev, skb_cb->paddr);
 	if (res)
 		goto err_free_txdesc;
 
@@ -348,8 +358,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 	memcpy(cmd->mgmt_tx.hdr, msdu->data,
 	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
 
-	skb_cb->htt.frag_len = 0;
-	skb_cb->htt.pad_len = 0;
+	skb_cb->htt.txbuf = NULL;
 
 	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
 	if (res)
@@ -358,7 +367,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 	return 0;
 
 err_unmap_msdu:
-	ath10k_skb_unmap(dev, msdu);
+	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 err_free_txdesc:
 	dev_kfree_skb_any(txdesc);
 err_free_msdu_id:
@@ -375,19 +384,19 @@ err:
 int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 {
 	struct device *dev = htt->ar->dev;
-	struct htt_cmd *cmd;
-	struct htt_data_tx_desc_frag *tx_frags;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
-	struct sk_buff *txdesc = NULL;
-	bool use_frags;
-	u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
-	u8 tid;
-	int prefetch_len, desc_len;
-	int msdu_id = -1;
+	struct ath10k_hif_sg_item sg_items[2];
+	struct htt_data_tx_desc_frag *frags;
+	u8 vdev_id = skb_cb->vdev_id;
+	u8 tid = skb_cb->htt.tid;
+	int prefetch_len;
 	int res;
-	u8 flags0;
-	u16 flags1;
+	u8 flags0 = 0;
+	u16 msdu_id, flags1 = 0;
+	dma_addr_t paddr;
+	u32 frags_paddr;
+	bool use_frags;
 
 	res = ath10k_htt_tx_inc_pending(htt);
 	if (res)
@@ -406,114 +415,120 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 	prefetch_len = min(htt->prefetch_len, msdu->len);
 	prefetch_len = roundup(prefetch_len, 4);
 
-	desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
-
-	txdesc = ath10k_htc_alloc_skb(desc_len);
-	if (!txdesc) {
-		res = -ENOMEM;
-		goto err_free_msdu_id;
-	}
-
 	/* Since HTT 3.0 there is no separate mgmt tx command. However in case
 	 * of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx
 	 * fragment list host driver specifies directly frame pointer. */
 	use_frags = htt->target_version_major < 3 ||
 		    !ieee80211_is_mgmt(hdr->frame_control);
 
-	if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
-		ath10k_warn("htt alignment check failed. dropping packet.\n");
-		res = -EIO;
-		goto err_free_txdesc;
-	}
+	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
+					   &paddr);
+	if (!skb_cb->htt.txbuf)
+		goto err_free_msdu_id;
+	skb_cb->htt.txbuf_paddr = paddr;
 
-	if (use_frags) {
-		skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
-		skb_cb->htt.pad_len = (unsigned long)msdu->data -
-				      round_down((unsigned long)msdu->data, 4);
+	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+				       DMA_TO_DEVICE);
+	res = dma_mapping_error(dev, skb_cb->paddr);
+	if (res)
+		goto err_free_txbuf;
 
-		skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
-	} else {
-		skb_cb->htt.frag_len = 0;
-		skb_cb->htt.pad_len = 0;
-	}
+	if (likely(use_frags)) {
+		frags = skb_cb->htt.txbuf->frags;
 
-	res = ath10k_skb_map(dev, msdu);
-	if (res)
-		goto err_pull_txfrag;
-
-	if (use_frags) {
-		dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
-					DMA_TO_DEVICE);
-
-		/* tx fragment list must be terminated with zero-entry */
-		tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
-		tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
-						  skb_cb->htt.frag_len +
-						  skb_cb->htt.pad_len);
-		tx_frags[0].len = __cpu_to_le32(msdu->len -
-						skb_cb->htt.frag_len -
-						skb_cb->htt.pad_len);
-		tx_frags[1].paddr = __cpu_to_le32(0);
-		tx_frags[1].len = __cpu_to_le32(0);
-
-		dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
-					   DMA_TO_DEVICE);
-	}
+		frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
+		frags[0].len = __cpu_to_le32(msdu->len);
+		frags[1].paddr = 0;
+		frags[1].len = 0;
 
-	ath10k_dbg(ATH10K_DBG_HTT, "tx-msdu 0x%llx\n",
-		   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
-	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "tx-msdu: ",
-			msdu->data, msdu->len);
+		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
+			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
 
-	skb_put(txdesc, desc_len);
-	cmd = (struct htt_cmd *)txdesc->data;
+		frags_paddr = skb_cb->htt.txbuf_paddr;
+	} else {
+		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
 
-	tid = ATH10K_SKB_CB(msdu)->htt.tid;
+		frags_paddr = skb_cb->paddr;
+	}
 
-	ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);
+	/* Normally all commands go through HTC which manages tx credits for
+	 * each endpoint and notifies when tx is completed.
+	 *
+	 * HTT endpoint is creditless so there's no need to care about HTC
+	 * flags. In that case it is trivial to fill the HTC header here.
+	 *
+	 * MSDU transmission is considered completed upon HTT event. This
+	 * implies no relevant resources can be freed until after the event is
+	 * received. That's why HTC tx completion handler itself is ignored by
+	 * setting NULL to transfer_context for all sg items.
+	 *
+	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
+	 * as it's a waste of resources. By bypassing HTC it is possible to
+	 * avoid extra memory allocations, compress data structures and thus
+	 * improve performance. */
+
+	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
+	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
+			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
+			sizeof(skb_cb->htt.txbuf->cmd_tx) +
+			prefetch_len);
+	skb_cb->htt.txbuf->htc_hdr.flags = 0;
 
-	flags0 = 0;
 	if (!ieee80211_has_protected(hdr->frame_control))
 		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
-	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
 
-	if (use_frags)
-		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
-			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
-	else
-		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
-			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
 
-	flags1 = 0;
 	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
 	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
 	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
 	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
 
-	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
-	cmd->data_tx.flags0 = flags0;
-	cmd->data_tx.flags1 = __cpu_to_le16(flags1);
-	cmd->data_tx.len = __cpu_to_le16(msdu->len -
-					 skb_cb->htt.frag_len -
-					 skb_cb->htt.pad_len);
-	cmd->data_tx.id = __cpu_to_le16(msdu_id);
-	cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
-	cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
+	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
+	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
 
-	memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
+	ath10k_dbg(ATH10K_DBG_HTT,
+		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
+		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
+		   (u32)skb_cb->paddr, vdev_id, tid);
+	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+			msdu->data, msdu->len);
 
-	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
+	sg_items[0].transfer_id = 0;
+	sg_items[0].transfer_context = NULL;
+	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
+	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
+			    sizeof(skb_cb->htt.txbuf->frags);
+	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
+			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
+			  sizeof(skb_cb->htt.txbuf->cmd_tx);
+
+	sg_items[1].transfer_id = 0;
+	sg_items[1].transfer_context = NULL;
+	sg_items[1].vaddr = msdu->data;
+	sg_items[1].paddr = skb_cb->paddr;
+	sg_items[1].len = prefetch_len;
+
+	res = ath10k_hif_tx_sg(htt->ar,
+			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+			       sg_items, ARRAY_SIZE(sg_items));
 	if (res)
 		goto err_unmap_msdu;
 
 	return 0;
 
 err_unmap_msdu:
-	ath10k_skb_unmap(dev, msdu);
-err_pull_txfrag:
-	skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
-err_free_txdesc:
-	dev_kfree_skb_any(txdesc);
+	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_txbuf:
+	dma_pool_free(htt->tx_pool,
+		      skb_cb->htt.txbuf,
+		      skb_cb->htt.txbuf_paddr);
 err_free_msdu_id:
 	spin_lock_bh(&htt->tx_lock);
 	htt->pending_tx[msdu_id] = NULL;
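For reference, a sketch of the dma_pool lifecycle the new tx path depends on, using the names from this patch: the pool is created once at attach time, and dma_pool_alloc() returns both the CPU pointer and the matching bus address, so no per-packet dma_map_single() is needed for the descriptor block itself.

/* Sketch of a txbuf round trip through the pool created in
 * ath10k_htt_tx_attach(). Error value illustrative. */
#include <linux/dmapool.h>

static int example_txbuf_roundtrip(struct ath10k_htt *htt)
{
	dma_addr_t paddr;
	struct ath10k_htt_txbuf *txbuf;

	txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC, &paddr);
	if (!txbuf)
		return -ENOMEM;

	/* ... fill htc_hdr/cmd_hdr/cmd_tx, hand paddr to the hardware ... */

	dma_pool_free(htt->tx_pool, txbuf, paddr);
	return 0;
}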
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e17f5d732b5a..511a2f81e7af 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -323,13 +323,15 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 
 	ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
 	if (ret) {
-		ath10k_warn("Failed to create wmi peer: %i\n", ret);
+		ath10k_warn("Failed to create wmi peer %pM on vdev %i: %i\n",
+			    addr, vdev_id, ret);
 		return ret;
 	}
 
 	ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
 	if (ret) {
-		ath10k_warn("Failed to wait for created wmi peer: %i\n", ret);
+		ath10k_warn("Failed to wait for created wmi peer %pM on vdev %i: %i\n",
+			    addr, vdev_id, ret);
 		return ret;
 	}
 	spin_lock_bh(&ar->data_lock);
@@ -349,7 +351,8 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
 	ret = ath10k_wmi_pdev_set_param(ar, param,
 					ATH10K_KICKOUT_THRESHOLD);
 	if (ret) {
-		ath10k_warn("Failed to set kickout threshold: %d\n", ret);
+		ath10k_warn("Failed to set kickout threshold on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
 		return ret;
 	}
 
@@ -357,8 +360,8 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
 					ATH10K_KEEPALIVE_MIN_IDLE);
 	if (ret) {
-		ath10k_warn("Failed to set keepalive minimum idle time : %d\n",
-			    ret);
+		ath10k_warn("Failed to set keepalive minimum idle time on vdev %i : %d\n",
+			    arvif->vdev_id, ret);
 		return ret;
 	}
 
@@ -366,8 +369,8 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
 					ATH10K_KEEPALIVE_MAX_IDLE);
 	if (ret) {
-		ath10k_warn("Failed to set keepalive maximum idle time: %d\n",
-			    ret);
+		ath10k_warn("Failed to set keepalive maximum idle time on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
 		return ret;
 	}
 
@@ -375,8 +378,8 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
 					ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
 	if (ret) {
-		ath10k_warn("Failed to set keepalive maximum unresponsive time: %d\n",
-			    ret);
+		ath10k_warn("Failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
 		return ret;
 	}
 
@@ -529,13 +532,15 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
 
 	ret = ath10k_wmi_vdev_start(ar, &arg);
 	if (ret) {
-		ath10k_warn("WMI vdev start failed: ret %d\n", ret);
+		ath10k_warn("WMI vdev %i start failed: ret %d\n",
+			    arg.vdev_id, ret);
 		return ret;
 	}
 
 	ret = ath10k_vdev_setup_sync(ar);
 	if (ret) {
-		ath10k_warn("vdev setup failed %d\n", ret);
+		ath10k_warn("vdev %i setup failed %d\n",
+			    arg.vdev_id, ret);
 		return ret;
 	}
 
@@ -553,13 +558,15 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
 
 	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
 	if (ret) {
-		ath10k_warn("WMI vdev stop failed: ret %d\n", ret);
+		ath10k_warn("WMI vdev %i stop failed: ret %d\n",
+			    arvif->vdev_id, ret);
 		return ret;
 	}
 
 	ret = ath10k_vdev_setup_sync(ar);
 	if (ret) {
-		ath10k_warn("vdev setup failed %d\n", ret);
+		ath10k_warn("vdev %i setup sync failed %d\n",
+			    arvif->vdev_id, ret);
 		return ret;
 	}
 
@@ -597,19 +604,22 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
 
 	ret = ath10k_wmi_vdev_start(ar, &arg);
 	if (ret) {
-		ath10k_warn("Monitor vdev start failed: ret %d\n", ret);
+		ath10k_warn("Monitor vdev %i start failed: ret %d\n",
+			    vdev_id, ret);
 		return ret;
 	}
 
 	ret = ath10k_vdev_setup_sync(ar);
 	if (ret) {
-		ath10k_warn("Monitor vdev setup failed %d\n", ret);
+		ath10k_warn("Monitor vdev %i setup failed %d\n",
+			    vdev_id, ret);
 		return ret;
 	}
 
 	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
 	if (ret) {
-		ath10k_warn("Monitor vdev up failed: %d\n", ret);
+		ath10k_warn("Monitor vdev %i up failed: %d\n",
+			    vdev_id, ret);
 		goto vdev_stop;
 	}
 
@@ -621,7 +631,8 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
 vdev_stop:
 	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
 	if (ret)
-		ath10k_warn("Monitor vdev stop failed: %d\n", ret);
+		ath10k_warn("Monitor vdev %i stop failed: %d\n",
+			    ar->monitor_vdev_id, ret);
 
 	return ret;
 }
@@ -644,15 +655,18 @@ static int ath10k_monitor_stop(struct ath10k *ar)
 
 	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
 	if (ret)
-		ath10k_warn("Monitor vdev down failed: %d\n", ret);
+		ath10k_warn("Monitor vdev %i down failed: %d\n",
+			    ar->monitor_vdev_id, ret);
 
 	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
 	if (ret)
-		ath10k_warn("Monitor vdev stop failed: %d\n", ret);
+		ath10k_warn("Monitor vdev %i stop failed: %d\n",
+			    ar->monitor_vdev_id, ret);
 
 	ret = ath10k_vdev_setup_sync(ar);
 	if (ret)
-		ath10k_warn("Monitor_down sync failed: %d\n", ret);
+		ath10k_warn("Monitor_down sync failed, vdev %i: %d\n",
+			    ar->monitor_vdev_id, ret);
 
 	ar->monitor_enabled = false;
 	return ret;
@@ -682,7 +696,8 @@ static int ath10k_monitor_create(struct ath10k *ar)
 				     WMI_VDEV_TYPE_MONITOR,
 				     0, ar->mac_addr);
 	if (ret) {
-		ath10k_warn("WMI vdev monitor create failed: ret %d\n", ret);
+		ath10k_warn("WMI vdev %i monitor create failed: ret %d\n",
+			    ar->monitor_vdev_id, ret);
 		goto vdev_fail;
 	}
 
@@ -711,7 +726,8 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
 
 	ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
 	if (ret) {
-		ath10k_warn("WMI vdev monitor delete failed: %d\n", ret);
+		ath10k_warn("WMI vdev %i monitor delete failed: %d\n",
+			    ar->monitor_vdev_id, ret);
 		return ret;
 	}
 
@@ -839,7 +855,9 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
 
 	spin_lock_bh(&arvif->ar->data_lock);
 	if (arvif->beacon) {
-		ath10k_skb_unmap(arvif->ar->dev, arvif->beacon);
+		dma_unmap_single(arvif->ar->dev,
+				 ATH10K_SKB_CB(arvif->beacon)->paddr,
+				 arvif->beacon->len, DMA_TO_DEVICE);
 		dev_kfree_skb_any(arvif->beacon);
 
 		arvif->beacon = NULL;
@@ -862,8 +880,8 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
 	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
 				 arvif->bssid);
 	if (ret) {
-		ath10k_warn("Failed to bring up VDEV: %d\n",
-			    arvif->vdev_id);
+		ath10k_warn("Failed to bring up vdev %d: %i\n",
+			    arvif->vdev_id, ret);
 		ath10k_vdev_stop(arvif);
 		return;
 	}
@@ -943,8 +961,8 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
 		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
 						  conf->dynamic_ps_timeout);
 		if (ret) {
-			ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
-				    arvif->vdev_id);
+			ath10k_warn("Failed to set inactivity time for vdev %d: %i\n",
+				    arvif->vdev_id, ret);
 			return ret;
 		}
 	} else {
@@ -1196,8 +1214,8 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
 						 WMI_AP_PS_PEER_PARAM_UAPSD,
 						 uapsd);
 		if (ret) {
-			ath10k_warn("failed to set ap ps peer param uapsd: %d\n",
-				    ret);
+			ath10k_warn("failed to set ap ps peer param uapsd for vdev %i: %d\n",
+				    arvif->vdev_id, ret);
 			return ret;
 		}
 
@@ -1206,8 +1224,8 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
 						 WMI_AP_PS_PEER_PARAM_MAX_SP,
 						 max_sp);
 		if (ret) {
-			ath10k_warn("failed to set ap ps peer param max sp: %d\n",
-				    ret);
+			ath10k_warn("failed to set ap ps peer param max sp for vdev %i: %d\n",
+				    arvif->vdev_id, ret);
 			return ret;
 		}
 
@@ -1218,8 +1236,8 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
 						 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 10);
 		if (ret) {
-			ath10k_warn("failed to set ap ps peer param ageout time: %d\n",
-				    ret);
+			ath10k_warn("failed to set ap ps peer param ageout time for vdev %i: %d\n",
+				    arvif->vdev_id, ret);
 			return ret;
 		}
 	}
@@ -1411,8 +1429,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 
 	ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
 	if (!ap_sta) {
-		ath10k_warn("Failed to find station entry for %pM\n",
-			    bss_conf->bssid);
+		ath10k_warn("Failed to find station entry for %pM, vdev %i\n",
+			    bss_conf->bssid, arvif->vdev_id);
 		rcu_read_unlock();
 		return;
 	}
@@ -1424,8 +1442,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 	ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
 					bss_conf, &peer_arg);
 	if (ret) {
-		ath10k_warn("Peer assoc prepare failed for %pM\n: %d",
-			    bss_conf->bssid, ret);
+		ath10k_warn("Peer assoc prepare failed for %pM vdev %i\n: %d",
+			    bss_conf->bssid, arvif->vdev_id, ret);
 		rcu_read_unlock();
 		return;
 	}
@@ -1434,14 +1452,15 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 
 	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
 	if (ret) {
-		ath10k_warn("Peer assoc failed for %pM\n: %d",
-			    bss_conf->bssid, ret);
+		ath10k_warn("Peer assoc failed for %pM vdev %i\n: %d",
+			    bss_conf->bssid, arvif->vdev_id, ret);
 		return;
 	}
 
 	ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
 	if (ret) {
-		ath10k_warn("failed to setup peer SMPS: %d\n", ret);
+		ath10k_warn("failed to setup peer SMPS for vdev %i: %d\n",
+			    arvif->vdev_id, ret);
 		return;
 	}
 
@@ -1514,34 +1533,35 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
 
 	ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
 	if (ret) {
-		ath10k_warn("WMI peer assoc prepare failed for %pM\n",
-			    sta->addr);
+		ath10k_warn("WMI peer assoc prepare failed for %pM vdev %i: %i\n",
+			    sta->addr, arvif->vdev_id, ret);
 		return ret;
 	}
 
 	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
 	if (ret) {
-		ath10k_warn("Peer assoc failed for STA %pM\n: %d",
-			    sta->addr, ret);
+		ath10k_warn("Peer assoc failed for STA %pM vdev %i: %d\n",
+			    sta->addr, arvif->vdev_id, ret);
 		return ret;
 	}
 
 	ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
 	if (ret) {
-		ath10k_warn("failed to setup peer SMPS: %d\n", ret);
+		ath10k_warn("failed to setup peer SMPS for vdev: %d\n", ret);
 		return ret;
 	}
 
 	ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
 	if (ret) {
-		ath10k_warn("could not install peer wep keys (%d)\n", ret);
+		ath10k_warn("could not install peer wep keys for vdev %i: %d\n",
+			    arvif->vdev_id, ret);
 		return ret;
 	}
 
 	ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
 	if (ret) {
-		ath10k_warn("could not set qos params for STA %pM, %d\n",
-			    sta->addr, ret);
+		ath10k_warn("could not set qos params for STA %pM for vdev %i: %d\n",
+			    sta->addr, arvif->vdev_id, ret);
 		return ret;
 	}
 
@@ -1557,7 +1577,8 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
1557 1577
1558 ret = ath10k_clear_peer_keys(arvif, sta->addr); 1578 ret = ath10k_clear_peer_keys(arvif, sta->addr);
1559 if (ret) { 1579 if (ret) {
1560 ath10k_warn("could not clear all peer wep keys (%d)\n", ret); 1580 ath10k_warn("could not clear all peer wep keys for vdev %i: %d\n",
1581 arvif->vdev_id, ret);
1561 return ret; 1582 return ret;
1562 } 1583 }
1563 1584
@@ -2524,7 +2545,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2524 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, 2545 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
2525 arvif->vdev_subtype, vif->addr); 2546 arvif->vdev_subtype, vif->addr);
2526 if (ret) { 2547 if (ret) {
2527 ath10k_warn("WMI vdev create failed: ret %d\n", ret); 2548 ath10k_warn("WMI vdev %i create failed: ret %d\n",
2549 arvif->vdev_id, ret);
2528 goto err; 2550 goto err;
2529 } 2551 }
2530 2552
@@ -2535,7 +2557,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2535 ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param, 2557 ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
2536 arvif->def_wep_key_idx); 2558 arvif->def_wep_key_idx);
2537 if (ret) { 2559 if (ret) {
2538 ath10k_warn("Failed to set default keyid: %d\n", ret); 2560 ath10k_warn("Failed to set vdev %i default keyid: %d\n",
2561 arvif->vdev_id, ret);
2539 goto err_vdev_delete; 2562 goto err_vdev_delete;
2540 } 2563 }
2541 2564
@@ -2544,21 +2567,23 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2544 ATH10K_HW_TXRX_NATIVE_WIFI); 2567 ATH10K_HW_TXRX_NATIVE_WIFI);
2545 /* 10.X firmware does not support this VDEV parameter. Do not warn */ 2568 /* 10.X firmware does not support this VDEV parameter. Do not warn */
2546 if (ret && ret != -EOPNOTSUPP) { 2569 if (ret && ret != -EOPNOTSUPP) {
2547 ath10k_warn("Failed to set TX encap: %d\n", ret); 2570 ath10k_warn("Failed to set vdev %i TX encap: %d\n",
2571 arvif->vdev_id, ret);
2548 goto err_vdev_delete; 2572 goto err_vdev_delete;
2549 } 2573 }
2550 2574
2551 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 2575 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
2552 ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr); 2576 ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
2553 if (ret) { 2577 if (ret) {
2554 ath10k_warn("Failed to create peer for AP: %d\n", ret); 2578 ath10k_warn("Failed to create vdev %i peer for AP: %d\n",
2579 arvif->vdev_id, ret);
2555 goto err_vdev_delete; 2580 goto err_vdev_delete;
2556 } 2581 }
2557 2582
2558 ret = ath10k_mac_set_kickout(arvif); 2583 ret = ath10k_mac_set_kickout(arvif);
2559 if (ret) { 2584 if (ret) {
2560 ath10k_warn("Failed to set kickout parameters: %d\n", 2585 ath10k_warn("Failed to set vdev %i kickout parameters: %d\n",
2561 ret); 2586 arvif->vdev_id, ret);
2562 goto err_peer_delete; 2587 goto err_peer_delete;
2563 } 2588 }
2564 } 2589 }
@@ -2569,7 +2594,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2569 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 2594 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2570 param, value); 2595 param, value);
2571 if (ret) { 2596 if (ret) {
2572 ath10k_warn("Failed to set RX wake policy: %d\n", ret); 2597 ath10k_warn("Failed to set vdev %i RX wake policy: %d\n",
2598 arvif->vdev_id, ret);
2573 goto err_peer_delete; 2599 goto err_peer_delete;
2574 } 2600 }
2575 2601
@@ -2578,7 +2604,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2578 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 2604 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2579 param, value); 2605 param, value);
2580 if (ret) { 2606 if (ret) {
2581 ath10k_warn("Failed to set TX wake thresh: %d\n", ret); 2607 ath10k_warn("Failed to set vdev %i TX wake thresh: %d\n",
2608 arvif->vdev_id, ret);
2582 goto err_peer_delete; 2609 goto err_peer_delete;
2583 } 2610 }
2584 2611
@@ -2587,7 +2614,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2587 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 2614 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
2588 param, value); 2615 param, value);
2589 if (ret) { 2616 if (ret) {
2590 ath10k_warn("Failed to set PSPOLL count: %d\n", ret); 2617 ath10k_warn("Failed to set vdev %i PSPOLL count: %d\n",
2618 arvif->vdev_id, ret);
2591 goto err_peer_delete; 2619 goto err_peer_delete;
2592 } 2620 }
2593 } 2621 }
@@ -2651,17 +2679,19 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
2651 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 2679 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
2652 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr); 2680 ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
2653 if (ret) 2681 if (ret)
2654 ath10k_warn("Failed to remove peer for AP: %d\n", ret); 2682 ath10k_warn("Failed to remove peer for AP vdev %i: %d\n",
2683 arvif->vdev_id, ret);
2655 2684
2656 kfree(arvif->u.ap.noa_data); 2685 kfree(arvif->u.ap.noa_data);
2657 } 2686 }
2658 2687
2659 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n", 2688 ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
2660 arvif->vdev_id); 2689 arvif->vdev_id);
2661 2690
2662 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 2691 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
2663 if (ret) 2692 if (ret)
2664 ath10k_warn("WMI vdev delete failed: %d\n", ret); 2693 ath10k_warn("WMI vdev %i delete failed: %d\n",
2694 arvif->vdev_id, ret);
2665 2695
2666 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) 2696 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
2667 ar->monitor_present = false; 2697 ar->monitor_present = false;
@@ -2750,8 +2780,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2750 arvif->vdev_id, arvif->beacon_interval); 2780 arvif->vdev_id, arvif->beacon_interval);
2751 2781
2752 if (ret) 2782 if (ret)
2753 ath10k_warn("Failed to set beacon interval for VDEV: %d\n", 2783 ath10k_warn("Failed to set beacon interval for vdev %d: %i\n",
2754 arvif->vdev_id); 2784 arvif->vdev_id, ret);
2755 } 2785 }
2756 2786
2757 if (changed & BSS_CHANGED_BEACON) { 2787 if (changed & BSS_CHANGED_BEACON) {
@@ -2763,8 +2793,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2763 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, 2793 ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
2764 WMI_BEACON_STAGGERED_MODE); 2794 WMI_BEACON_STAGGERED_MODE);
2765 if (ret) 2795 if (ret)
2766 ath10k_warn("Failed to set beacon mode for VDEV: %d\n", 2796 ath10k_warn("Failed to set beacon mode for vdev %d: %i\n",
2767 arvif->vdev_id); 2797 arvif->vdev_id, ret);
2768 } 2798 }
2769 2799
2770 if (changed & BSS_CHANGED_BEACON_INFO) { 2800 if (changed & BSS_CHANGED_BEACON_INFO) {
@@ -2778,8 +2808,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2778 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 2808 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2779 arvif->dtim_period); 2809 arvif->dtim_period);
2780 if (ret) 2810 if (ret)
2781 ath10k_warn("Failed to set dtim period for VDEV: %d\n", 2811 ath10k_warn("Failed to set dtim period for vdev %d: %i\n",
2782 arvif->vdev_id); 2812 arvif->vdev_id, ret);
2783 } 2813 }
2784 2814
2785 if (changed & BSS_CHANGED_SSID && 2815 if (changed & BSS_CHANGED_SSID &&
@@ -2799,7 +2829,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2799 ret = ath10k_peer_create(ar, arvif->vdev_id, 2829 ret = ath10k_peer_create(ar, arvif->vdev_id,
2800 info->bssid); 2830 info->bssid);
2801 if (ret) 2831 if (ret)
2802 ath10k_warn("Failed to add peer %pM for vdev %d when changin bssid: %i\n", 2832 ath10k_warn("Failed to add peer %pM for vdev %d when changing bssid: %i\n",
2803 info->bssid, arvif->vdev_id, ret); 2833 info->bssid, arvif->vdev_id, ret);
2804 2834
2805 if (vif->type == NL80211_IFTYPE_STATION) { 2835 if (vif->type == NL80211_IFTYPE_STATION) {
@@ -2815,8 +2845,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2815 2845
2816 ret = ath10k_vdev_start(arvif); 2846 ret = ath10k_vdev_start(arvif);
2817 if (ret) { 2847 if (ret) {
2818 ath10k_warn("failed to start vdev: %d\n", 2848 ath10k_warn("failed to start vdev %i: %d\n",
2819 ret); 2849 arvif->vdev_id, ret);
2820 goto exit; 2850 goto exit;
2821 } 2851 }
2822 2852
@@ -2851,8 +2881,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2851 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 2881 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2852 cts_prot); 2882 cts_prot);
2853 if (ret) 2883 if (ret)
2854 ath10k_warn("Failed to set CTS prot for VDEV: %d\n", 2884 ath10k_warn("Failed to set CTS prot for vdev %d: %d\n",
2855 arvif->vdev_id); 2885 arvif->vdev_id, ret);
2856 } 2886 }
2857 2887
2858 if (changed & BSS_CHANGED_ERP_SLOT) { 2888 if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -2870,8 +2900,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2870 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 2900 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2871 slottime); 2901 slottime);
2872 if (ret) 2902 if (ret)
2873 ath10k_warn("Failed to set erp slot for VDEV: %d\n", 2903 ath10k_warn("Failed to set erp slot for vdev %d: %i\n",
2874 arvif->vdev_id); 2904 arvif->vdev_id, ret);
2875 } 2905 }
2876 2906
2877 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 2907 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -2889,8 +2919,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2889 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 2919 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2890 preamble); 2920 preamble);
2891 if (ret) 2921 if (ret)
2892 ath10k_warn("Failed to set preamble for VDEV: %d\n", 2922 ath10k_warn("Failed to set preamble for vdev %d: %i\n",
2893 arvif->vdev_id); 2923 arvif->vdev_id, ret);
2894 } 2924 }
2895 2925
2896 if (changed & BSS_CHANGED_ASSOC) { 2926 if (changed & BSS_CHANGED_ASSOC) {
@@ -3021,8 +3051,8 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
3021 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 3051 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
3022 key->keyidx); 3052 key->keyidx);
3023 if (ret) 3053 if (ret)
3024 ath10k_warn("failed to set group key as default key: %d\n", 3054 ath10k_warn("failed to set vdev %i group key as default key: %d\n",
3025 ret); 3055 arvif->vdev_id, ret);
3026} 3056}
3027 3057
3028static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3058static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -3082,7 +3112,8 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3082 3112
3083 ret = ath10k_install_key(arvif, key, cmd, peer_addr); 3113 ret = ath10k_install_key(arvif, key, cmd, peer_addr);
3084 if (ret) { 3114 if (ret) {
3085 ath10k_warn("ath10k_install_key failed (%d)\n", ret); 3115 ath10k_warn("key installation failed for vdev %i peer %pM: %d\n",
3116 arvif->vdev_id, peer_addr, ret);
3086 goto exit; 3117 goto exit;
3087 } 3118 }
3088 3119
@@ -3179,6 +3210,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
3179 int max_num_peers; 3210 int max_num_peers;
3180 int ret = 0; 3211 int ret = 0;
3181 3212
3213 if (old_state == IEEE80211_STA_NOTEXIST &&
3214 new_state == IEEE80211_STA_NONE) {
3215 memset(arsta, 0, sizeof(*arsta));
3216 arsta->arvif = arvif;
3217 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
3218 }
3219
3182 /* cancel must be done outside the mutex to avoid deadlock */ 3220 /* cancel must be done outside the mutex to avoid deadlock */
3183 if ((old_state == IEEE80211_STA_NONE && 3221 if ((old_state == IEEE80211_STA_NONE &&
3184 new_state == IEEE80211_STA_NOTEXIST)) 3222 new_state == IEEE80211_STA_NOTEXIST))
@@ -3208,10 +3246,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
3208 "mac vdev %d peer create %pM (new sta) num_peers %d\n", 3246 "mac vdev %d peer create %pM (new sta) num_peers %d\n",
3209 arvif->vdev_id, sta->addr, ar->num_peers); 3247 arvif->vdev_id, sta->addr, ar->num_peers);
3210 3248
3211 memset(arsta, 0, sizeof(*arsta));
3212 arsta->arvif = arvif;
3213 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
3214
3215 ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); 3249 ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
3216 if (ret) 3250 if (ret)
3217 ath10k_warn("Failed to add peer %pM for vdev %d when adding a new sta: %i\n", 3251 ath10k_warn("Failed to add peer %pM for vdev %d when adding a new sta: %i\n",
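
Note: the two hunks above move the arsta initialization out of the peer-create path and into the very first state transition a station makes. A minimal sketch of the ordering this guarantees, assuming mac80211's usual one-step sta_state ladder (NOTEXIST -> NONE -> AUTH -> ASSOC):

	/* init on the first transition, before any other path can run */
	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		memset(arsta, 0, sizeof(*arsta));
		arsta->arvif = arvif;
		INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
	}

	/* teardown may then cancel unconditionally: update_wk is
	 * guaranteed to be initialized by the time a station can
	 * reach NONE -> NOTEXIST */
	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		cancel_work_sync(&arsta->update_wk);
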
@@ -3226,8 +3260,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
3226 arvif->vdev_id, sta->addr); 3260 arvif->vdev_id, sta->addr);
3227 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 3261 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
3228 if (ret) 3262 if (ret)
3229 ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n", 3263 ath10k_warn("Failed to delete peer %pM for vdev %d: %i\n",
3230 sta->addr, arvif->vdev_id); 3264 sta->addr, arvif->vdev_id, ret);
3231 3265
3232 if (vif->type == NL80211_IFTYPE_STATION) 3266 if (vif->type == NL80211_IFTYPE_STATION)
3233 ath10k_bss_disassoc(hw, vif); 3267 ath10k_bss_disassoc(hw, vif);
@@ -3243,8 +3277,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
3243 3277
3244 ret = ath10k_station_assoc(ar, arvif, sta); 3278 ret = ath10k_station_assoc(ar, arvif, sta);
3245 if (ret) 3279 if (ret)
3246 ath10k_warn("Failed to associate station: %pM\n", 3280 ath10k_warn("Failed to associate station %pM for vdev %i: %i\n",
3247 sta->addr); 3281 sta->addr, arvif->vdev_id, ret);
3248 } else if (old_state == IEEE80211_STA_ASSOC && 3282 } else if (old_state == IEEE80211_STA_ASSOC &&
3249 new_state == IEEE80211_STA_AUTH && 3283 new_state == IEEE80211_STA_AUTH &&
3250 (vif->type == NL80211_IFTYPE_AP || 3284 (vif->type == NL80211_IFTYPE_AP ||
@@ -3257,8 +3291,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
3257 3291
3258 ret = ath10k_station_disassoc(ar, arvif, sta); 3292 ret = ath10k_station_disassoc(ar, arvif, sta);
3259 if (ret) 3293 if (ret)
3260 ath10k_warn("Failed to disassociate station: %pM\n", 3294 ath10k_warn("Failed to disassociate station %pM for vdev %i: %i\n",
3261 sta->addr); 3295 sta->addr, arvif->vdev_id, ret);
3262 } 3296 }
3263exit: 3297exit:
3264 mutex_unlock(&ar->conf_mutex); 3298 mutex_unlock(&ar->conf_mutex);
@@ -3539,7 +3573,8 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
3539 }), ATH10K_FLUSH_TIMEOUT_HZ); 3573 }), ATH10K_FLUSH_TIMEOUT_HZ);
3540 3574
3541 if (ret <= 0 || skip) 3575 if (ret <= 0 || skip)
3542 ath10k_warn("tx not flushed\n"); 3576 ath10k_warn("tx not flushed (skip %i ar-state %i): %i\n",
3577 skip, ar->state, ret);
3543 3578
3544skip: 3579skip:
3545 mutex_unlock(&ar->conf_mutex); 3580 mutex_unlock(&ar->conf_mutex);
@@ -3905,7 +3940,8 @@ static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask,
3905 3940
3906static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif, 3941static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
3907 u8 fixed_rate, 3942 u8 fixed_rate,
3908 u8 fixed_nss) 3943 u8 fixed_nss,
3944 u8 force_sgi)
3909{ 3945{
3910 struct ath10k *ar = arvif->ar; 3946 struct ath10k *ar = arvif->ar;
3911 u32 vdev_param; 3947 u32 vdev_param;
@@ -3914,12 +3950,16 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
3914 mutex_lock(&ar->conf_mutex); 3950 mutex_lock(&ar->conf_mutex);
3915 3951
3916 if (arvif->fixed_rate == fixed_rate && 3952 if (arvif->fixed_rate == fixed_rate &&
3917 arvif->fixed_nss == fixed_nss) 3953 arvif->fixed_nss == fixed_nss &&
3954 arvif->force_sgi == force_sgi)
3918 goto exit; 3955 goto exit;
3919 3956
3920 if (fixed_rate == WMI_FIXED_RATE_NONE) 3957 if (fixed_rate == WMI_FIXED_RATE_NONE)
3921 ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n"); 3958 ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
3922 3959
3960 if (force_sgi)
3961 ath10k_dbg(ATH10K_DBG_MAC, "mac force sgi\n");
3962
3923 vdev_param = ar->wmi.vdev_param->fixed_rate; 3963 vdev_param = ar->wmi.vdev_param->fixed_rate;
3924 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 3964 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
3925 vdev_param, fixed_rate); 3965 vdev_param, fixed_rate);
@@ -3945,6 +3985,19 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
3945 3985
3946 arvif->fixed_nss = fixed_nss; 3986 arvif->fixed_nss = fixed_nss;
3947 3987
3988 vdev_param = ar->wmi.vdev_param->sgi;
3989 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
3990 force_sgi);
3991
3992 if (ret) {
3993 ath10k_warn("Could not set sgi param %d: %d\n",
3994 force_sgi, ret);
3995 ret = -EINVAL;
3996 goto exit;
3997 }
3998
3999 arvif->force_sgi = force_sgi;
4000
3948exit: 4001exit:
3949 mutex_unlock(&ar->conf_mutex); 4002 mutex_unlock(&ar->conf_mutex);
3950 return ret; 4003 return ret;
@@ -3959,6 +4012,11 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
3959 enum ieee80211_band band = ar->hw->conf.chandef.chan->band; 4012 enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
3960 u8 fixed_rate = WMI_FIXED_RATE_NONE; 4013 u8 fixed_rate = WMI_FIXED_RATE_NONE;
3961 u8 fixed_nss = ar->num_rf_chains; 4014 u8 fixed_nss = ar->num_rf_chains;
4015 u8 force_sgi;
4016
4017 force_sgi = mask->control[band].gi;
4018 if (force_sgi == NL80211_TXRATE_FORCE_LGI)
4019 return -EINVAL;
3962 4020
3963 if (!ath10k_default_bitrate_mask(ar, band, mask)) { 4021 if (!ath10k_default_bitrate_mask(ar, band, mask)) {
3964 if (!ath10k_get_fixed_rate_nss(mask, band, 4022 if (!ath10k_get_fixed_rate_nss(mask, band,
@@ -3967,7 +4025,13 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
3967 return -EINVAL; 4025 return -EINVAL;
3968 } 4026 }
3969 4027
3970 return ath10k_set_fixed_rate_param(arvif, fixed_rate, fixed_nss); 4028 if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
4029 ath10k_warn("Could not force SGI usage for default rate settings\n");
4030 return -EINVAL;
4031 }
4032
4033 return ath10k_set_fixed_rate_param(arvif, fixed_rate,
4034 fixed_nss, force_sgi);
3971} 4035}
3972 4036
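
Note: the force-SGI hunks above implement a simple guard-interval policy driven by the gi field of the userspace bitrate mask. A condensed sketch of the mapping, as implemented above:

	/* enum nl80211_txrate_gi: DEFAULT_GI leaves the guard interval to
	 * rate control, FORCE_SGI maps onto the sgi vdev param, and
	 * FORCE_LGI has no firmware equivalent, so it is rejected */
	force_sgi = mask->control[band].gi;
	if (force_sgi == NL80211_TXRATE_FORCE_LGI)
		return -EINVAL;
	/* forcing SGI only makes sense together with a fixed rate */
	if (force_sgi && fixed_rate == WMI_FIXED_RATE_NONE)
		return -EINVAL;
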
3973static void ath10k_channel_switch_beacon(struct ieee80211_hw *hw, 4037static void ath10k_channel_switch_beacon(struct ieee80211_hw *hw,
@@ -4060,6 +4124,16 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
4060 ieee80211_queue_work(hw, &arsta->update_wk); 4124 ieee80211_queue_work(hw, &arsta->update_wk);
4061} 4125}
4062 4126
4127static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4128{
4129 /*
4130 * FIXME: Return 0 for time being. Need to figure out whether FW
4131 * has the API to fetch 64-bit local TSF
4132 */
4133
4134 return 0;
4135}
4136
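
Note: mac80211 uses get_tsf mainly for IBSS/mesh beacon timing, so a constant 0 is a safe placeholder here. A hedged sketch of what an implementation could look like once firmware exposes the counter (the firmware query itself is hypothetical, not part of this patch):

	static u64 ath10k_get_tsf(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif)
	{
		u32 tsf_lo = 0, tsf_hi = 0;

		/* hypothetical: fetch both halves from firmware, re-reading
		 * tsf_lo if tsf_hi changed in between (wrap protection) */
		return ((u64)tsf_hi << 32) | tsf_lo;
	}
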
4063static const struct ieee80211_ops ath10k_ops = { 4137static const struct ieee80211_ops ath10k_ops = {
4064 .tx = ath10k_tx, 4138 .tx = ath10k_tx,
4065 .start = ath10k_start, 4139 .start = ath10k_start,
@@ -4085,6 +4159,7 @@ static const struct ieee80211_ops ath10k_ops = {
4085 .set_bitrate_mask = ath10k_set_bitrate_mask, 4159 .set_bitrate_mask = ath10k_set_bitrate_mask,
4086 .channel_switch_beacon = ath10k_channel_switch_beacon, 4160 .channel_switch_beacon = ath10k_channel_switch_beacon,
4087 .sta_rc_update = ath10k_sta_rc_update, 4161 .sta_rc_update = ath10k_sta_rc_update,
4162 .get_tsf = ath10k_get_tsf,
4088#ifdef CONFIG_PM 4163#ifdef CONFIG_PM
4089 .suspend = ath10k_suspend, 4164 .suspend = ath10k_suspend,
4090 .resume = ath10k_resume, 4165 .resume = ath10k_resume,
@@ -4361,7 +4436,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
4361 ath10k_get_arvif_iter, 4436 ath10k_get_arvif_iter,
4362 &arvif_iter); 4437 &arvif_iter);
4363 if (!arvif_iter.arvif) { 4438 if (!arvif_iter.arvif) {
4364 ath10k_warn("No VIF found for VDEV: %d\n", vdev_id); 4439 ath10k_warn("No VIF found for vdev %d\n", vdev_id);
4365 return NULL; 4440 return NULL;
4366 } 4441 }
4367 4442
@@ -4442,7 +4517,8 @@ int ath10k_mac_register(struct ath10k *ar)
4442 IEEE80211_HW_HAS_RATE_CONTROL | 4517 IEEE80211_HW_HAS_RATE_CONTROL |
4443 IEEE80211_HW_SUPPORTS_STATIC_SMPS | 4518 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
4444 IEEE80211_HW_WANT_MONITOR_VIF | 4519 IEEE80211_HW_WANT_MONITOR_VIF |
4445 IEEE80211_HW_AP_LINK_PS; 4520 IEEE80211_HW_AP_LINK_PS |
4521 IEEE80211_HW_SPECTRUM_MGMT;
4446 4522
4447 /* MSDU can have HTT TX fragment pushed in front. The additional 4 4523 /* MSDU can have HTT TX fragment pushed in front. The additional 4
4448 * bytes is used for padding/alignment if necessary. */ 4524 * bytes is used for padding/alignment if necessary. */
@@ -4500,7 +4576,7 @@ int ath10k_mac_register(struct ath10k *ar)
4500 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, 4576 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
4501 ath10k_reg_notifier); 4577 ath10k_reg_notifier);
4502 if (ret) { 4578 if (ret) {
4503 ath10k_err("Regulatory initialization failed\n"); 4579 ath10k_err("Regulatory initialization failed: %i\n", ret);
4504 goto err_free; 4580 goto err_free;
4505 } 4581 }
4506 4582
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 34f09106f423..9d242d801d9d 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -58,12 +58,10 @@ static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
58static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address, 58static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
59 u32 *data); 59 u32 *data);
60 60
61static void ath10k_pci_process_ce(struct ath10k *ar);
62static int ath10k_pci_post_rx(struct ath10k *ar); 61static int ath10k_pci_post_rx(struct ath10k *ar);
63static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info, 62static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
64 int num); 63 int num);
65static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info); 64static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
66static void ath10k_pci_stop_ce(struct ath10k *ar);
67static int ath10k_pci_cold_reset(struct ath10k *ar); 65static int ath10k_pci_cold_reset(struct ath10k *ar);
68static int ath10k_pci_warm_reset(struct ath10k *ar); 66static int ath10k_pci_warm_reset(struct ath10k *ar);
69static int ath10k_pci_wait_for_target_init(struct ath10k *ar); 67static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
@@ -74,7 +72,6 @@ static void ath10k_pci_free_irq(struct ath10k *ar);
74static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, 72static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
75 struct ath10k_ce_pipe *rx_pipe, 73 struct ath10k_ce_pipe *rx_pipe,
76 struct bmi_xfer *xfer); 74 struct bmi_xfer *xfer);
77static void ath10k_pci_cleanup_ce(struct ath10k *ar);
78 75
79static const struct ce_attr host_ce_config_wlan[] = { 76static const struct ce_attr host_ce_config_wlan[] = {
80 /* CE0: host->target HTC control and raw streams */ 77 /* CE0: host->target HTC control and raw streams */
@@ -679,34 +676,12 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
679 } 676 }
680} 677}
681 678
682/*
683 * FIXME: Handle OOM properly.
684 */
685static inline
686struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
687{
688 struct ath10k_pci_compl *compl = NULL;
689
690 spin_lock_bh(&pipe_info->pipe_lock);
691 if (list_empty(&pipe_info->compl_free)) {
692 ath10k_warn("Completion buffers are full\n");
693 goto exit;
694 }
695 compl = list_first_entry(&pipe_info->compl_free,
696 struct ath10k_pci_compl, list);
697 list_del(&compl->list);
698exit:
699 spin_unlock_bh(&pipe_info->pipe_lock);
700 return compl;
701}
702
703/* Called by lower (CE) layer when a send to Target completes. */ 679/* Called by lower (CE) layer when a send to Target completes. */
704static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state) 680static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
705{ 681{
706 struct ath10k *ar = ce_state->ar; 682 struct ath10k *ar = ce_state->ar;
707 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 683 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
708 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; 684 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
709 struct ath10k_pci_compl *compl;
710 void *transfer_context; 685 void *transfer_context;
711 u32 ce_data; 686 u32 ce_data;
712 unsigned int nbytes; 687 unsigned int nbytes;
@@ -715,27 +690,12 @@ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
715 while (ath10k_ce_completed_send_next(ce_state, &transfer_context, 690 while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
716 &ce_data, &nbytes, 691 &ce_data, &nbytes,
717 &transfer_id) == 0) { 692 &transfer_id) == 0) {
718 compl = get_free_compl(pipe_info); 693 /* no need to call tx completion for NULL pointers */
719 if (!compl) 694 if (transfer_context == NULL)
720 break; 695 continue;
721
722 compl->state = ATH10K_PCI_COMPL_SEND;
723 compl->ce_state = ce_state;
724 compl->pipe_info = pipe_info;
725 compl->skb = transfer_context;
726 compl->nbytes = nbytes;
727 compl->transfer_id = transfer_id;
728 compl->flags = 0;
729 696
730 /* 697 cb->tx_completion(ar, transfer_context, transfer_id);
731 * Add the completion to the processing queue.
732 */
733 spin_lock_bh(&ar_pci->compl_lock);
734 list_add_tail(&compl->list, &ar_pci->compl_process);
735 spin_unlock_bh(&ar_pci->compl_lock);
736 } 698 }
737
738 ath10k_pci_process_ce(ar);
739} 699}
740 700
741/* Called by lower (CE) layer when data is received from the Target. */ 701/* Called by lower (CE) layer when data is received from the Target. */
@@ -744,77 +704,100 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
744 struct ath10k *ar = ce_state->ar; 704 struct ath10k *ar = ce_state->ar;
745 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 705 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
746 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; 706 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
747 struct ath10k_pci_compl *compl; 707 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
748 struct sk_buff *skb; 708 struct sk_buff *skb;
749 void *transfer_context; 709 void *transfer_context;
750 u32 ce_data; 710 u32 ce_data;
751 unsigned int nbytes; 711 unsigned int nbytes, max_nbytes;
752 unsigned int transfer_id; 712 unsigned int transfer_id;
753 unsigned int flags; 713 unsigned int flags;
714 int err;
754 715
755 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, 716 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
756 &ce_data, &nbytes, &transfer_id, 717 &ce_data, &nbytes, &transfer_id,
757 &flags) == 0) { 718 &flags) == 0) {
758 compl = get_free_compl(pipe_info); 719 err = ath10k_pci_post_rx_pipe(pipe_info, 1);
759 if (!compl) 720 if (unlikely(err)) {
760 break; 721 /* FIXME: retry */
761 722 ath10k_warn("failed to replenish CE rx ring %d: %d\n",
762 compl->state = ATH10K_PCI_COMPL_RECV; 723 pipe_info->pipe_num, err);
763 compl->ce_state = ce_state; 724 }
764 compl->pipe_info = pipe_info;
765 compl->skb = transfer_context;
766 compl->nbytes = nbytes;
767 compl->transfer_id = transfer_id;
768 compl->flags = flags;
769 725
770 skb = transfer_context; 726 skb = transfer_context;
727 max_nbytes = skb->len + skb_tailroom(skb);
771 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, 728 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
772 skb->len + skb_tailroom(skb), 729 max_nbytes, DMA_FROM_DEVICE);
773 DMA_FROM_DEVICE);
774 /*
775 * Add the completion to the processing queue.
776 */
777 spin_lock_bh(&ar_pci->compl_lock);
778 list_add_tail(&compl->list, &ar_pci->compl_process);
779 spin_unlock_bh(&ar_pci->compl_lock);
780 }
781 730
782 ath10k_pci_process_ce(ar); 731 if (unlikely(max_nbytes < nbytes)) {
732 ath10k_warn("rxed more than expected (nbytes %d, max %d)\n",
733 nbytes, max_nbytes);
734 dev_kfree_skb_any(skb);
735 continue;
736 }
737
738 skb_put(skb, nbytes);
739 cb->rx_completion(ar, skb, pipe_info->pipe_num);
740 }
783} 741}
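
Note: two invariants are worth calling out in the reworked rx path above. The ring is replenished (ath10k_pci_post_rx_pipe) before the buffer is delivered upward, and the unmap length is the full skb capacity (the same length the buffer was mapped with when it was posted), not the nbytes the target actually wrote. A condensed mirror of the per-buffer sequence:

	max_nbytes = skb->len + skb_tailroom(skb);	/* size used at map time */
	dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
			 max_nbytes, DMA_FROM_DEVICE);
	if (max_nbytes < nbytes) {		/* target overran the buffer */
		dev_kfree_skb_any(skb);
		continue;
	}
	skb_put(skb, nbytes);			/* expose only received bytes */
	cb->rx_completion(ar, skb, pipe_info->pipe_num);
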
784 742
785/* Send the first nbytes bytes of the buffer */ 743static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
786static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id, 744 struct ath10k_hif_sg_item *items, int n_items)
787 unsigned int transfer_id,
788 unsigned int bytes, struct sk_buff *nbuf)
789{ 745{
790 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
791 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 746 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
792 struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]); 747 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
793 struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl; 748 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
794 unsigned int len; 749 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
795 u32 flags = 0; 750 unsigned int nentries_mask = src_ring->nentries_mask;
796 int ret; 751 unsigned int sw_index = src_ring->sw_index;
752 unsigned int write_index = src_ring->write_index;
753 int err, i;
797 754
798 len = min(bytes, nbuf->len); 755 spin_lock_bh(&ar_pci->ce_lock);
799 bytes -= len;
800 756
801 if (len & 3) 757 if (unlikely(CE_RING_DELTA(nentries_mask,
802 ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len); 758 write_index, sw_index - 1) < n_items)) {
759 err = -ENOBUFS;
760 goto unlock;
761 }
803 762
804 ath10k_dbg(ATH10K_DBG_PCI, 763 for (i = 0; i < n_items - 1; i++) {
805 "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n", 764 ath10k_dbg(ATH10K_DBG_PCI,
806 nbuf->data, (unsigned long long) skb_cb->paddr, 765 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
807 nbuf->len, len); 766 i, items[i].paddr, items[i].len, n_items);
808 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, 767 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
809 "ath10k tx: data: ", 768 items[i].vaddr, items[i].len);
810 nbuf->data, nbuf->len);
811
812 ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
813 flags);
814 if (ret)
815 ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
816 769
817 return ret; 770 err = ath10k_ce_send_nolock(ce_pipe,
771 items[i].transfer_context,
772 items[i].paddr,
773 items[i].len,
774 items[i].transfer_id,
775 CE_SEND_FLAG_GATHER);
776 if (err)
777 goto unlock;
778 }
779
780 /* after the loop, i equals n_items - 1; the last item is sent below without the gather flag */
781
782 ath10k_dbg(ATH10K_DBG_PCI,
783 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
784 i, items[i].paddr, items[i].len, n_items);
785 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
786 items[i].vaddr, items[i].len);
787
788 err = ath10k_ce_send_nolock(ce_pipe,
789 items[i].transfer_context,
790 items[i].paddr,
791 items[i].len,
792 items[i].transfer_id,
793 0);
794 if (err)
795 goto unlock;
796
797 err = 0;
798unlock:
799 spin_unlock_bh(&ar_pci->ce_lock);
800 return err;
818} 801}
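
Note: in the scatter-gather path above, every item except the last is posted with CE_SEND_FLAG_GATHER, so the hardware treats the list as one transfer that completes with the final, unflagged descriptor. A hypothetical caller, using only the struct fields the function actually dereferences (all other names are illustrative):

	/* sketch: send a separately-mapped header plus an skb as one
	 * logical transfer */
	struct ath10k_hif_sg_item items[2] = {
		{
			.transfer_id	  = eid,
			.transfer_context = NULL,	/* skipped by tx completion */
			.vaddr		  = hdr,
			.paddr		  = hdr_paddr,
			.len		  = hdr_len,
		},
		{
			.transfer_id	  = eid,
			.transfer_context = skb,	/* handed to cb->tx_completion */
			.vaddr		  = skb->data,
			.paddr		  = skb_paddr,
			.len		  = skb->len,
		},
	};

	err = ath10k_pci_hif_tx_sg(ar, pipe_id, items, 2);
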
819 802
820static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) 803static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
@@ -903,52 +886,6 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
903 sizeof(ar_pci->msg_callbacks_current)); 886 sizeof(ar_pci->msg_callbacks_current));
904} 887}
905 888
906static int ath10k_pci_alloc_compl(struct ath10k *ar)
907{
908 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
909 const struct ce_attr *attr;
910 struct ath10k_pci_pipe *pipe_info;
911 struct ath10k_pci_compl *compl;
912 int i, pipe_num, completions;
913
914 spin_lock_init(&ar_pci->compl_lock);
915 INIT_LIST_HEAD(&ar_pci->compl_process);
916
917 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
918 pipe_info = &ar_pci->pipe_info[pipe_num];
919
920 spin_lock_init(&pipe_info->pipe_lock);
921 INIT_LIST_HEAD(&pipe_info->compl_free);
922
923 /* Handle Diagnostic CE specially */
924 if (pipe_info->ce_hdl == ar_pci->ce_diag)
925 continue;
926
927 attr = &host_ce_config_wlan[pipe_num];
928 completions = 0;
929
930 if (attr->src_nentries)
931 completions += attr->src_nentries;
932
933 if (attr->dest_nentries)
934 completions += attr->dest_nentries;
935
936 for (i = 0; i < completions; i++) {
937 compl = kmalloc(sizeof(*compl), GFP_KERNEL);
938 if (!compl) {
939 ath10k_warn("No memory for completion state\n");
940 ath10k_pci_cleanup_ce(ar);
941 return -ENOMEM;
942 }
943
944 compl->state = ATH10K_PCI_COMPL_FREE;
945 list_add_tail(&compl->list, &pipe_info->compl_free);
946 }
947 }
948
949 return 0;
950}
951
952static int ath10k_pci_setup_ce_irq(struct ath10k *ar) 889static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
953{ 890{
954 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 891 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -993,147 +930,6 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar)
993 tasklet_kill(&ar_pci->pipe_info[i].intr); 930 tasklet_kill(&ar_pci->pipe_info[i].intr);
994} 931}
995 932
996static void ath10k_pci_stop_ce(struct ath10k *ar)
997{
998 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
999 struct ath10k_pci_compl *compl;
1000 struct sk_buff *skb;
1001
1002 /* Mark pending completions as aborted, so that upper layers free up
1003 * their associated resources */
1004 spin_lock_bh(&ar_pci->compl_lock);
1005 list_for_each_entry(compl, &ar_pci->compl_process, list) {
1006 skb = compl->skb;
1007 ATH10K_SKB_CB(skb)->is_aborted = true;
1008 }
1009 spin_unlock_bh(&ar_pci->compl_lock);
1010}
1011
1012static void ath10k_pci_cleanup_ce(struct ath10k *ar)
1013{
1014 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1015 struct ath10k_pci_compl *compl, *tmp;
1016 struct ath10k_pci_pipe *pipe_info;
1017 struct sk_buff *netbuf;
1018 int pipe_num;
1019
1020 /* Free pending completions. */
1021 spin_lock_bh(&ar_pci->compl_lock);
1022 if (!list_empty(&ar_pci->compl_process))
1023 ath10k_warn("pending completions still present! possible memory leaks.\n");
1024
1025 list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
1026 list_del(&compl->list);
1027 netbuf = compl->skb;
1028 dev_kfree_skb_any(netbuf);
1029 kfree(compl);
1030 }
1031 spin_unlock_bh(&ar_pci->compl_lock);
1032
1033 /* Free unused completions for each pipe. */
1034 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1035 pipe_info = &ar_pci->pipe_info[pipe_num];
1036
1037 spin_lock_bh(&pipe_info->pipe_lock);
1038 list_for_each_entry_safe(compl, tmp,
1039 &pipe_info->compl_free, list) {
1040 list_del(&compl->list);
1041 kfree(compl);
1042 }
1043 spin_unlock_bh(&pipe_info->pipe_lock);
1044 }
1045}
1046
1047static void ath10k_pci_process_ce(struct ath10k *ar)
1048{
1049 struct ath10k_pci *ar_pci = ar->hif.priv;
1050 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
1051 struct ath10k_pci_compl *compl;
1052 struct sk_buff *skb;
1053 unsigned int nbytes;
1054 int ret, send_done = 0;
1055
1056 /* Upper layers aren't ready to handle tx/rx completions in parallel so
1057 * we must serialize all completion processing. */
1058
1059 spin_lock_bh(&ar_pci->compl_lock);
1060 if (ar_pci->compl_processing) {
1061 spin_unlock_bh(&ar_pci->compl_lock);
1062 return;
1063 }
1064 ar_pci->compl_processing = true;
1065 spin_unlock_bh(&ar_pci->compl_lock);
1066
1067 for (;;) {
1068 spin_lock_bh(&ar_pci->compl_lock);
1069 if (list_empty(&ar_pci->compl_process)) {
1070 spin_unlock_bh(&ar_pci->compl_lock);
1071 break;
1072 }
1073 compl = list_first_entry(&ar_pci->compl_process,
1074 struct ath10k_pci_compl, list);
1075 list_del(&compl->list);
1076 spin_unlock_bh(&ar_pci->compl_lock);
1077
1078 switch (compl->state) {
1079 case ATH10K_PCI_COMPL_SEND:
1080 cb->tx_completion(ar,
1081 compl->skb,
1082 compl->transfer_id);
1083 send_done = 1;
1084 break;
1085 case ATH10K_PCI_COMPL_RECV:
1086 ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
1087 if (ret) {
1088 ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1089 compl->pipe_info->pipe_num, ret);
1090 break;
1091 }
1092
1093 skb = compl->skb;
1094 nbytes = compl->nbytes;
1095
1096 ath10k_dbg(ATH10K_DBG_PCI,
1097 "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
1098 skb, nbytes);
1099 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
1100 "ath10k rx: ", skb->data, nbytes);
1101
1102 if (skb->len + skb_tailroom(skb) >= nbytes) {
1103 skb_trim(skb, 0);
1104 skb_put(skb, nbytes);
1105 cb->rx_completion(ar, skb,
1106 compl->pipe_info->pipe_num);
1107 } else {
1108 ath10k_warn("rxed more than expected (nbytes %d, max %d)",
1109 nbytes,
1110 skb->len + skb_tailroom(skb));
1111 }
1112 break;
1113 case ATH10K_PCI_COMPL_FREE:
1114 ath10k_warn("free completion cannot be processed\n");
1115 break;
1116 default:
1117 ath10k_warn("invalid completion state (%d)\n",
1118 compl->state);
1119 break;
1120 }
1121
1122 compl->state = ATH10K_PCI_COMPL_FREE;
1123
1124 /*
1125 * Add completion back to the pipe's free list.
1126 */
1127 spin_lock_bh(&compl->pipe_info->pipe_lock);
1128 list_add_tail(&compl->list, &compl->pipe_info->compl_free);
1129 spin_unlock_bh(&compl->pipe_info->pipe_lock);
1130 }
1131
1132 spin_lock_bh(&ar_pci->compl_lock);
1133 ar_pci->compl_processing = false;
1134 spin_unlock_bh(&ar_pci->compl_lock);
1135}
1136
1137/* TODO - temporary mapping while we have too few CE's */ 933/* TODO - temporary mapping while we have too few CE's */
1138static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, 934static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
1139 u16 service_id, u8 *ul_pipe, 935 u16 service_id, u8 *ul_pipe,
@@ -1305,17 +1101,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
1305 ath10k_pci_free_early_irq(ar); 1101 ath10k_pci_free_early_irq(ar);
1306 ath10k_pci_kill_tasklet(ar); 1102 ath10k_pci_kill_tasklet(ar);
1307 1103
1308 ret = ath10k_pci_alloc_compl(ar);
1309 if (ret) {
1310 ath10k_warn("failed to allocate CE completions: %d\n", ret);
1311 goto err_early_irq;
1312 }
1313
1314 ret = ath10k_pci_request_irq(ar); 1104 ret = ath10k_pci_request_irq(ar);
1315 if (ret) { 1105 if (ret) {
1316 ath10k_warn("failed to request IRQs: %d\n", 1106 ath10k_warn("failed to request IRQs: %d\n",
1317 ret); 1107 ret);
1318 goto err_free_compl; 1108 goto err_early_irq;
1319 } 1109 }
1320 1110
1321 ret = ath10k_pci_setup_ce_irq(ar); 1111 ret = ath10k_pci_setup_ce_irq(ar);
@@ -1339,10 +1129,6 @@ err_stop:
1339 ath10k_ce_disable_interrupts(ar); 1129 ath10k_ce_disable_interrupts(ar);
1340 ath10k_pci_free_irq(ar); 1130 ath10k_pci_free_irq(ar);
1341 ath10k_pci_kill_tasklet(ar); 1131 ath10k_pci_kill_tasklet(ar);
1342 ath10k_pci_stop_ce(ar);
1343 ath10k_pci_process_ce(ar);
1344err_free_compl:
1345 ath10k_pci_cleanup_ce(ar);
1346err_early_irq: 1132err_early_irq:
1347 /* Though there should be no interrupts (device was reset) 1133 /* Though there should be no interrupts (device was reset)
1348 * power_down() expects the early IRQ to be installed as per the 1134 * power_down() expects the early IRQ to be installed as per the
@@ -1413,18 +1199,10 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1413 1199
1414 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf, 1200 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1415 &ce_data, &nbytes, &id) == 0) { 1201 &ce_data, &nbytes, &id) == 0) {
1416 /* 1202 /* no need to call tx completion for NULL pointers */
1417 * Indicate the completion to higer layer to free 1203 if (!netbuf)
1418 * the buffer
1419 */
1420
1421 if (!netbuf) {
1422 ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
1423 ce_hdl->id);
1424 continue; 1204 continue;
1425 }
1426 1205
1427 ATH10K_SKB_CB(netbuf)->is_aborted = true;
1428 ar_pci->msg_callbacks_current.tx_completion(ar, 1206 ar_pci->msg_callbacks_current.tx_completion(ar,
1429 netbuf, 1207 netbuf,
1430 id); 1208 id);
@@ -1482,7 +1260,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
1482 1260
1483 ath10k_pci_free_irq(ar); 1261 ath10k_pci_free_irq(ar);
1484 ath10k_pci_kill_tasklet(ar); 1262 ath10k_pci_kill_tasklet(ar);
1485 ath10k_pci_stop_ce(ar);
1486 1263
1487 ret = ath10k_pci_request_early_irq(ar); 1264 ret = ath10k_pci_request_early_irq(ar);
1488 if (ret) 1265 if (ret)
@@ -1492,8 +1269,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
1492 * not DMA nor interrupt. We process the leftovers and then free 1269 * not DMA nor interrupt. We process the leftovers and then free
1493 * everything else up. */ 1270 * everything else up. */
1494 1271
1495 ath10k_pci_process_ce(ar);
1496 ath10k_pci_cleanup_ce(ar);
1497 ath10k_pci_buffer_cleanup(ar); 1272 ath10k_pci_buffer_cleanup(ar);
1498 1273
1499 /* Make sure the device won't access any structures on the host by 1274 /* Make sure the device won't access any structures on the host by
@@ -2269,7 +2044,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
2269#endif 2044#endif
2270 2045
2271static const struct ath10k_hif_ops ath10k_pci_hif_ops = { 2046static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2272 .send_head = ath10k_pci_hif_send_head, 2047 .tx_sg = ath10k_pci_hif_tx_sg,
2273 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, 2048 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2274 .start = ath10k_pci_hif_start, 2049 .start = ath10k_pci_hif_start,
2275 .stop = ath10k_pci_hif_stop, 2050 .stop = ath10k_pci_hif_stop,
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index a4f32038c440..b43fdb4f7319 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -43,23 +43,6 @@ struct bmi_xfer {
43 u32 resp_len; 43 u32 resp_len;
44}; 44};
45 45
46enum ath10k_pci_compl_state {
47 ATH10K_PCI_COMPL_FREE = 0,
48 ATH10K_PCI_COMPL_SEND,
49 ATH10K_PCI_COMPL_RECV,
50};
51
52struct ath10k_pci_compl {
53 struct list_head list;
54 enum ath10k_pci_compl_state state;
55 struct ath10k_ce_pipe *ce_state;
56 struct ath10k_pci_pipe *pipe_info;
57 struct sk_buff *skb;
58 unsigned int nbytes;
59 unsigned int transfer_id;
60 unsigned int flags;
61};
62
63/* 46/*
64 * PCI-specific Target state 47 * PCI-specific Target state
65 * 48 *
@@ -175,9 +158,6 @@ struct ath10k_pci_pipe {
175 /* protects compl_free and num_send_allowed */ 158 /* protects compl_free and num_send_allowed */
176 spinlock_t pipe_lock; 159 spinlock_t pipe_lock;
177 160
178 /* List of free CE completion slots */
179 struct list_head compl_free;
180
181 struct ath10k_pci *ar_pci; 161 struct ath10k_pci *ar_pci;
182 struct tasklet_struct intr; 162 struct tasklet_struct intr;
183}; 163};
@@ -205,14 +185,6 @@ struct ath10k_pci {
205 atomic_t keep_awake_count; 185 atomic_t keep_awake_count;
206 bool verified_awake; 186 bool verified_awake;
207 187
208 /* List of CE completions to be processed */
209 struct list_head compl_process;
210
211 /* protects compl_processing and compl_process */
212 spinlock_t compl_lock;
213
214 bool compl_processing;
215
216 struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX]; 188 struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
217 189
218 struct ath10k_hif_cb msg_callbacks_current; 190 struct ath10k_hif_cb msg_callbacks_current;
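
Note: with the ath10k_pci_compl queueing removed from pci.h, completion delivery collapses to a direct call chain. Roughly, after this patch:

	/* conceptual flow (names from the hunks above):
	 *
	 *   CE tasklet
	 *     -> ath10k_pci_ce_send_done()
	 *          ath10k_ce_completed_send_next()
	 *          cb->tx_completion(ar, skb, transfer_id)
	 *     -> ath10k_pci_ce_recv_data()
	 *          ath10k_ce_completed_recv_next()
	 *          ath10k_pci_post_rx_pipe()       (replenish first)
	 *          cb->rx_completion(ar, skb, pipe_num)
	 */
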
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index ec6f82521b0e..0541dd939ce9 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -51,7 +51,8 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
51 struct ieee80211_tx_info *info; 51 struct ieee80211_tx_info *info;
52 struct ath10k_skb_cb *skb_cb; 52 struct ath10k_skb_cb *skb_cb;
53 struct sk_buff *msdu; 53 struct sk_buff *msdu;
54 int ret; 54
55 lockdep_assert_held(&htt->tx_lock);
55 56
56 ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n", 57 ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
57 tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack); 58 tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
@@ -65,12 +66,12 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
65 msdu = htt->pending_tx[tx_done->msdu_id]; 66 msdu = htt->pending_tx[tx_done->msdu_id];
66 skb_cb = ATH10K_SKB_CB(msdu); 67 skb_cb = ATH10K_SKB_CB(msdu);
67 68
68 ret = ath10k_skb_unmap(dev, msdu); 69 dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
69 if (ret)
70 ath10k_warn("data skb unmap failed (%d)\n", ret);
71 70
72 if (skb_cb->htt.frag_len) 71 if (skb_cb->htt.txbuf)
73 skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len); 72 dma_pool_free(htt->tx_pool,
73 skb_cb->htt.txbuf,
74 skb_cb->htt.txbuf_paddr);
74 75
75 ath10k_report_offchan_tx(htt->ar, msdu); 76 ath10k_report_offchan_tx(htt->ar, msdu);
76 77
@@ -92,13 +93,11 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
92 /* we do not own the msdu anymore */ 93 /* we do not own the msdu anymore */
93 94
94exit: 95exit:
95 spin_lock_bh(&htt->tx_lock);
96 htt->pending_tx[tx_done->msdu_id] = NULL; 96 htt->pending_tx[tx_done->msdu_id] = NULL;
97 ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); 97 ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
98 __ath10k_htt_tx_dec_pending(htt); 98 __ath10k_htt_tx_dec_pending(htt);
99 if (htt->num_pending_tx == 0) 99 if (htt->num_pending_tx == 0)
100 wake_up(&htt->empty_tx_wq); 100 wake_up(&htt->empty_tx_wq);
101 spin_unlock_bh(&htt->tx_lock);
102} 101}
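
Note: ath10k_txrx_tx_unref() no longer takes htt->tx_lock itself; the lockdep assertion added above documents that callers must hold it, which lets a caller batch many completions under a single lock acquisition. Expected calling pattern (sketch):

	spin_lock_bh(&htt->tx_lock);
	for (i = 0; i < n_msdus; i++)
		ath10k_txrx_tx_unref(htt, &tx_done[i]);
	spin_unlock_bh(&htt->tx_lock);
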
103 102
104static const u8 rx_legacy_rate_idx[] = { 103static const u8 rx_legacy_rate_idx[] = {
@@ -258,6 +257,12 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
258 status->band = ch->band; 257 status->band = ch->band;
259 status->freq = ch->center_freq; 258 status->freq = ch->center_freq;
260 259
260 if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
261 /* the firmware reports TSF only as a 32-bit value */
262 status->mactime = info->tsf & 0xffffffff;
263 status->flag |= RX_FLAG_MACTIME_END;
264 }
265
261 ath10k_dbg(ATH10K_DBG_DATA, 266 ath10k_dbg(ATH10K_DBG_DATA,
262 "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n", 267 "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
263 info->skb, 268 info->skb,
@@ -378,7 +383,8 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
378 spin_lock_bh(&ar->data_lock); 383 spin_lock_bh(&ar->data_lock);
379 peer = ath10k_peer_find_by_id(ar, ev->peer_id); 384 peer = ath10k_peer_find_by_id(ar, ev->peer_id);
380 if (!peer) { 385 if (!peer) {
381 ath10k_warn("unknown peer id %d\n", ev->peer_id); 386 ath10k_warn("peer-unmap-event: unknown peer id %d\n",
387 ev->peer_id);
382 goto exit; 388 goto exit;
383 } 389 }
384 390
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 91e501b5499e..cb1f7b5bcf4c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1360,7 +1360,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1360 struct wmi_bcn_info *bcn_info; 1360 struct wmi_bcn_info *bcn_info;
1361 struct ath10k_vif *arvif; 1361 struct ath10k_vif *arvif;
1362 struct sk_buff *bcn; 1362 struct sk_buff *bcn;
1363 int vdev_id = 0; 1363 int ret, vdev_id = 0;
1364 1364
1365 ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n"); 1365 ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
1366 1366
@@ -1435,16 +1435,27 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1435 ath10k_warn("SWBA overrun on vdev %d\n", 1435 ath10k_warn("SWBA overrun on vdev %d\n",
1436 arvif->vdev_id); 1436 arvif->vdev_id);
1437 1437
1438 ath10k_skb_unmap(ar->dev, arvif->beacon); 1438 dma_unmap_single(arvif->ar->dev,
1439 ATH10K_SKB_CB(arvif->beacon)->paddr,
1440 arvif->beacon->len, DMA_TO_DEVICE);
1439 dev_kfree_skb_any(arvif->beacon); 1441 dev_kfree_skb_any(arvif->beacon);
1440 } 1442 }
1441 1443
1442 ath10k_skb_map(ar->dev, bcn); 1444 ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
1445 bcn->data, bcn->len,
1446 DMA_TO_DEVICE);
1447 ret = dma_mapping_error(arvif->ar->dev,
1448 ATH10K_SKB_CB(bcn)->paddr);
1449 if (ret) {
1450 ath10k_warn("failed to map beacon: %d\n", ret);
1451 goto skip;
1452 }
1443 1453
1444 arvif->beacon = bcn; 1454 arvif->beacon = bcn;
1445 arvif->beacon_sent = false; 1455 arvif->beacon_sent = false;
1446 1456
1447 ath10k_wmi_tx_beacon_nowait(arvif); 1457 ath10k_wmi_tx_beacon_nowait(arvif);
1458skip:
1448 spin_unlock_bh(&ar->data_lock); 1459 spin_unlock_bh(&ar->data_lock);
1449 } 1460 }
1450} 1461}
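
Note: the open-coded beacon mapping above follows the standard DMA-API rule that an address returned by dma_map_single() is only usable once dma_mapping_error() has cleared it. The generic shape of the pattern (illustrative only):

	paddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, paddr))
		return -EIO;	/* buf was never mapped; don't unmap it */
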
@@ -3382,7 +3393,6 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
3382 ci->max_power = ch->max_power; 3393 ci->max_power = ch->max_power;
3383 ci->reg_power = ch->max_reg_power; 3394 ci->reg_power = ch->max_reg_power;
3384 ci->antenna_max = ch->max_antenna_gain; 3395 ci->antenna_max = ch->max_antenna_gain;
3385 ci->antenna_max = 0;
3386 3396
3387 /* mode & flags share storage */ 3397 /* mode & flags share storage */
3388 ci->mode = ch->mode; 3398 ci->mode = ch->mode;